Skip to content

Commit

Permalink
Fix test compatibility with latest openvino nncf release (#689)
Browse files Browse the repository at this point in the history
  • Loading branch information
echarlaix authored Apr 25, 2024
1 parent 47281f9 commit b383ffb
Show file tree
Hide file tree
Showing 3 changed files with 14 additions and 14 deletions.
4 changes: 2 additions & 2 deletions tests/openvino/test_modeling.py
Original file line number Diff line number Diff line change
Expand Up @@ -548,12 +548,12 @@ class OVModelForCausalLMIntegrationTest(unittest.TestCase):
@parameterized.expand(SUPPORTED_ARCHITECTURES)
def test_compare_to_transformers(self, model_arch):
model_id = MODEL_NAMES[model_arch]
not_stateful = ["gpt_bigcode"]
not_stateful = []
if is_openvino_version("<", "2024.0"):
not_stateful.append("mixtral")

if is_openvino_version("<", "2024.1"):
not_stateful.extend(["llama", "gemma"])
not_stateful.extend(["llama", "gemma", "gpt_bigcode"])

if "gptq" in model_arch:
self.skipTest("GPTQ model loading unsupported with AutoModelForCausalLM")
Expand Down
2 changes: 1 addition & 1 deletion tests/openvino/test_quantization.py
Original file line number Diff line number Diff line change
Expand Up @@ -657,7 +657,7 @@ def preprocess_function(examples, tokenizer):


class OVTrainerTest(unittest.TestCase):
SUPPORTED_ARCHITECTURES_WITH_EXPECTED_QUANTIZED_MATMULS = (("distilbert-base-uncased", 50, 38),)
SUPPORTED_ARCHITECTURES_WITH_EXPECTED_QUANTIZED_MATMULS = (("distilbert-base-uncased", 49, 38),)

@parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_EXPECTED_QUANTIZED_MATMULS)
def test_aware_training_quantization(self, model_name, expected_fake_quantize, expected_int8):
Expand Down
22 changes: 11 additions & 11 deletions tests/openvino/test_training.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,30 +322,30 @@ def tearDown(self):
"default_quantization": OVTrainerTestDescriptor(
model_id="hf-internal-testing/tiny-random-bert",
nncf_compression_config=DEFAULT_QUANTIZATION_CONFIG,
expected_fake_quantize=44,
expected_fake_quantize=34,
expected_int8=32,
compression_metrics=["compression_loss"],
),
"distillation,default_quantization": OVTrainerTestDescriptor(
model_id="hf-internal-testing/tiny-random-bert",
teacher_model_id="hf-internal-testing/tiny-random-bert",
nncf_compression_config=DEFAULT_QUANTIZATION_CONFIG,
expected_fake_quantize=44,
expected_fake_quantize=34,
expected_int8=32,
compression_metrics=["compression_loss", "distillation_loss", "task_loss"],
),
"customized_quantization": OVTrainerTestDescriptor(
model_id="hf-internal-testing/tiny-random-bert",
nncf_compression_config=CUSTOMIZED_QUANTIZATION_CONFIG,
expected_fake_quantize=44,
expected_fake_quantize=34,
expected_int8=32,
compression_metrics=["compression_loss"],
),
"distillation,customized_quantization": OVTrainerTestDescriptor(
model_id="hf-internal-testing/tiny-random-bert",
teacher_model_id="hf-internal-testing/tiny-random-bert",
nncf_compression_config=CUSTOMIZED_QUANTIZATION_CONFIG,
expected_fake_quantize=44,
expected_fake_quantize=34,
expected_int8=32,
compression_metrics=["compression_loss", "distillation_loss", "task_loss"],
),
Expand Down Expand Up @@ -418,7 +418,7 @@ def tearDown(self):
"default_quantization,unstructured_movement_sparsity": OVTrainerTestDescriptor(
model_id="hf-internal-testing/tiny-random-bert",
nncf_compression_config=[DEFAULT_QUANTIZATION_CONFIG, UNSTRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_BERT],
expected_fake_quantize=44,
expected_fake_quantize=34,
expected_int8=32,
expected_binary_masks=60,
compression_metrics=["compression_loss"],
Expand All @@ -429,7 +429,7 @@ def tearDown(self):
CUSTOMIZED_QUANTIZATION_CONFIG,
UNSTRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_BERT,
],
expected_fake_quantize=44,
expected_fake_quantize=34,
expected_int8=32,
expected_binary_masks=60,
compression_metrics=["compression_loss"],
Expand All @@ -438,7 +438,7 @@ def tearDown(self):
model_id="hf-internal-testing/tiny-random-bert",
teacher_model_id="hf-internal-testing/tiny-random-bert",
nncf_compression_config=[DEFAULT_QUANTIZATION_CONFIG, UNSTRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_BERT],
expected_fake_quantize=44,
expected_fake_quantize=34,
expected_int8=32,
expected_binary_masks=60,
compression_metrics=["compression_loss", "distillation_loss", "task_loss"],
Expand All @@ -450,7 +450,7 @@ def tearDown(self):
CUSTOMIZED_QUANTIZATION_CONFIG,
UNSTRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_BERT,
],
expected_fake_quantize=44,
expected_fake_quantize=34,
expected_int8=32,
expected_binary_masks=60,
compression_metrics=["compression_loss", "distillation_loss", "task_loss"],
Expand Down Expand Up @@ -730,7 +730,7 @@ def check_ovmodel_reshaping(self, ovmodel: OVModel):
"quantization": OVTrainerTestDescriptor(
model_id="hf-internal-testing/tiny-random-Wav2Vec2Model",
nncf_compression_config=[QUANTIZATION_CONFIG_FOR_WAV2VEC2],
expected_fake_quantize=48,
expected_fake_quantize=40,
expected_int8=30,
compression_metrics=["compression_loss"],
),
Expand All @@ -757,7 +757,7 @@ def check_ovmodel_reshaping(self, ovmodel: OVModel):
"quantization,unstructured_movement_sparsity": OVTrainerTestDescriptor(
model_id="hf-internal-testing/tiny-random-Wav2Vec2Model",
nncf_compression_config=[QUANTIZATION_CONFIG_FOR_WAV2VEC2, UNSTRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_WAV2VEC2],
expected_fake_quantize=48,
expected_fake_quantize=40,
expected_int8=30,
expected_binary_masks=48,
compression_metrics=["compression_loss"],
Expand All @@ -775,7 +775,7 @@ def check_ovmodel_reshaping(self, ovmodel: OVModel):
model_id="hf-internal-testing/tiny-random-Wav2Vec2Model",
teacher_model_id="hf-internal-testing/tiny-random-Wav2Vec2Model",
nncf_compression_config=[QUANTIZATION_CONFIG_FOR_WAV2VEC2, UNSTRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_WAV2VEC2],
expected_fake_quantize=48,
expected_fake_quantize=40,
expected_int8=30,
expected_binary_masks=48,
compression_metrics=["compression_loss", "distillation_loss", "task_loss"],
Expand Down

0 comments on commit b383ffb

Please sign in to comment.