From bbdd34cbad58c048afaa60ffdafc2aac825291c3 Mon Sep 17 00:00:00 2001 From: jiqing-feng Date: Thu, 7 Mar 2024 08:35:06 -0500 Subject: [PATCH 1/4] change model output parameter to last_hidden_state --- optimum/intel/ipex/modeling_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py index 2b6b569343..a013112425 100644 --- a/optimum/intel/ipex/modeling_base.py +++ b/optimum/intel/ipex/modeling_base.py @@ -282,7 +282,7 @@ def forward( inputs["attention_mask"] = attention_mask outputs = self._call_model(**inputs) - return ModelOutput(**outputs) if isinstance(outputs, dict) else ModelOutput(logits=outputs[0]) + return ModelOutput(**outputs) if isinstance(outputs, dict) else ModelOutput(last_hidden_state=outputs[0]) class IPEXModelForQuestionAnswering(IPEXModel): From 1b8d76a3399fd7a87a554aaa6efb1d6f83e1db7d Mon Sep 17 00:00:00 2001 From: jiqing-feng Date: Thu, 7 Mar 2024 08:42:13 -0500 Subject: [PATCH 2/4] update ipex model testing --- tests/ipex/test_modeling.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/ipex/test_modeling.py b/tests/ipex/test_modeling.py index ffc2ca6a89..743557d61a 100644 --- a/tests/ipex/test_modeling.py +++ b/tests/ipex/test_modeling.py @@ -126,10 +126,9 @@ def test_compare_to_transformers(self, model_arch): with torch.no_grad(): transformers_outputs = transformers_model(**tokens) outputs = ipex_model(**tokens) - # Compare tensor outputs - for output_name in {"logits", "last_hidden_state"}: - if output_name in transformers_outputs: - self.assertTrue(torch.allclose(outputs[output_name], transformers_outputs[output_name], atol=1e-4)) + self.assertTrue( + torch.allclose(outputs["last_hidden_state"], transformers_outputs["last_hidden_state"], atol=1e-4) + ) @parameterized.expand(SUPPORTED_ARCHITECTURES) def test_pipeline(self, model_arch): From ac809f8b216ff0c9edf7958bf94a1c53a926d172 Mon Sep 17 00:00:00 2001 From: 
jiqing-feng Date: Thu, 7 Mar 2024 08:54:16 -0500 Subject: [PATCH 3/4] update testing --- optimum/intel/ipex/modeling_base.py | 4 ++-- tests/ipex/test_modeling.py | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py index a013112425..608afa0805 100644 --- a/optimum/intel/ipex/modeling_base.py +++ b/optimum/intel/ipex/modeling_base.py @@ -193,7 +193,7 @@ def forward( inputs["token_type_ids"] = token_type_ids outputs = self._call_model(**inputs) - return ModelOutput(**outputs) if isinstance(outputs, dict) else ModelOutput(logits=outputs[0]) + return ModelOutput(**outputs) if isinstance(outputs, dict) else ModelOutput(last_hidden_state=outputs[0]) def eval(self): self.model.eval() @@ -282,7 +282,7 @@ def forward( inputs["attention_mask"] = attention_mask outputs = self._call_model(**inputs) - return ModelOutput(**outputs) if isinstance(outputs, dict) else ModelOutput(last_hidden_state=outputs[0]) + return ModelOutput(**outputs) if isinstance(outputs, dict) else ModelOutput(logits=outputs[0]) class IPEXModelForQuestionAnswering(IPEXModel): diff --git a/tests/ipex/test_modeling.py b/tests/ipex/test_modeling.py index 743557d61a..ffc2ca6a89 100644 --- a/tests/ipex/test_modeling.py +++ b/tests/ipex/test_modeling.py @@ -126,9 +126,10 @@ def test_compare_to_transformers(self, model_arch): with torch.no_grad(): transformers_outputs = transformers_model(**tokens) outputs = ipex_model(**tokens) - self.assertTrue( - torch.allclose(outputs["last_hidden_state"], transformers_outputs["last_hidden_state"], atol=1e-4) - ) + # Compare tensor outputs + for output_name in {"logits", "last_hidden_state"}: + if output_name in transformers_outputs: + self.assertTrue(torch.allclose(outputs[output_name], transformers_outputs[output_name], atol=1e-4)) @parameterized.expand(SUPPORTED_ARCHITECTURES) def test_pipeline(self, model_arch): From b9c9736ae2d64fdbe9c5cf8d692a003a3fa8ec42 Mon Sep 17 00:00:00 
2001 From: jiqing-feng Date: Fri, 8 Mar 2024 05:29:33 -0500 Subject: [PATCH 4/4] add output name to ipex model --- optimum/intel/ipex/modeling_base.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py index 608afa0805..9928977ead 100644 --- a/optimum/intel/ipex/modeling_base.py +++ b/optimum/intel/ipex/modeling_base.py @@ -58,6 +58,7 @@ class IPEXModel(OptimizedModel): export_feature = "feature-extraction" base_model_prefix = "ipex_model" main_input_name = "input_ids" + output_name = "last_hidden_state" def __init__( self, @@ -193,7 +194,12 @@ def forward( inputs["token_type_ids"] = token_type_ids outputs = self._call_model(**inputs) - return ModelOutput(**outputs) if isinstance(outputs, dict) else ModelOutput(last_hidden_state=outputs[0]) + if isinstance(outputs, dict): + model_output = ModelOutput(**outputs) + else: + model_output = ModelOutput() + model_output[self.output_name] = outputs[0] + return model_output def eval(self): self.model.eval() @@ -235,16 +241,19 @@ def _init_warmup(self): class IPEXModelForSequenceClassification(IPEXModel): auto_model_class = AutoModelForSequenceClassification export_feature = "text-classification" + output_name = "logits" class IPEXModelForTokenClassification(IPEXModel): auto_model_class = AutoModelForTokenClassification export_feature = "token-classification" + output_name = "logits" class IPEXModelForMaskedLM(IPEXModel): auto_model_class = AutoModelForMaskedLM export_feature = "fill-mask" + output_name = "logits" class IPEXModelForImageClassification(IPEXModel):