Skip to content

Commit

Permalink
Fix quantization test (#476)
Browse files Browse the repository at this point in the history
* fix test

* remove warning
  • Loading branch information
echarlaix authored Nov 7, 2023
1 parent e9230ff commit 7879566
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 5 deletions.
4 changes: 0 additions & 4 deletions optimum/intel/openvino/quantization.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,10 +165,6 @@ def quantize(
# TODO : can be set to self.model.config.name_or_path for OVModels when not provided
raise ValueError("`save_directory` needs to be specified")
if weights_only:
logger.warning(
"Applying weight only quantization using the `OVQuantizer` will be deprecated in the next release of optimum-intel. "
"To apply weight only quantization, please set `load_in_8bit=True` when loading your model with `from_pretrained()` or set `--int8` use when exporting your model with the CLI."
)
if calibration_dataset is not None:
logger.warning(
"`calibration_dataset` was provided but will not be used as `weights_only` is set to `True`."
Expand Down
5 changes: 4 additions & 1 deletion tests/openvino/test_exporters_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,4 +111,7 @@ def test_exporters_cli_int8(self, task: str, model_type: str):
expected_int8 = _ARCHITECTURES_TO_EXPECTED_INT8[model_type]
for i, model in enumerate(models):
_, num_int8 = get_num_quantized_nodes(model)
self.assertEqual(expected_int8[i], num_int8)
expected = expected_int8[i]
if task == "text-generation":
expected -= 1
self.assertEqual(expected, num_int8)

0 comments on commit 7879566

Please sign in to comment.