You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Copy file name to clipboardExpand all lines: optimum/intel/openvino/quantization.py
+2-3Lines changed: 2 additions & 3 deletions
Original file line number
Diff line number
Diff line change
@@ -164,11 +164,10 @@ def quantize(
164
164
if save_directory is None:
165
165
# TODO : can be set to self.model.config.name_or_path for OVModels when not provided
166
166
raise ValueError("`save_directory` needs to be specified")
167
-
168
167
if weights_only:
169
168
logger.warning(
170
-
"Weight only quantization will be deprecated in the `OVQuantizer` in optimum-intel next release. "
171
-
"To apply quantization on your wrights, please set `load_in_8bit=True` when loading your model with `from_pretrained()` or set `--int8` use when exporting your model with the CLI."
169
+
"Applying weight only quantization using the `OVQuantizer` will be deprecated in the next release of optimum-intel. "
170
+
"To apply weight only quantization, please set `load_in_8bit=True` when loading your model with `from_pretrained()` or use `--int8` when exporting your model with the CLI."
0 commit comments