
Commit

add test
echarlaix committed Jun 10, 2024
1 parent 286eb92 commit 4239b19
Showing 2 changed files with 31 additions and 10 deletions.
21 changes: 11 additions & 10 deletions optimum/intel/openvino/modeling.py
@@ -196,7 +196,7 @@ def forward(
         if not np_inputs:
             input_ids = np.array(input_ids)
             attention_mask = np.array(attention_mask)
-            token_type_ids = np.array(token_type_ids) if token_type_ids is not None else np.zeros_like(input_ids)
+            token_type_ids = np.array(token_type_ids) if token_type_ids is not None else token_type_ids
 
         inputs = {
             "input_ids": input_ids,
@@ -205,7 +205,7 @@ def forward(
 
         # Add the token_type_ids when needed
         if "token_type_ids" in self.input_names:
-            inputs["token_type_ids"] = token_type_ids
+            inputs["token_type_ids"] = token_type_ids if token_type_ids is not None else np.zeros_like(input_ids)
 
         outputs = self._inference(inputs)
         logits = torch.from_numpy(outputs["logits"]).to(self.device) if not np_inputs else outputs["logits"]
@@ -261,7 +261,7 @@ def forward(
         if not np_inputs:
             input_ids = np.array(input_ids)
             attention_mask = np.array(attention_mask)
-            token_type_ids = np.array(token_type_ids) if token_type_ids is not None else np.zeros_like(input_ids)
+            token_type_ids = np.array(token_type_ids) if token_type_ids is not None else token_type_ids
 
         inputs = {
             "input_ids": input_ids,
@@ -270,7 +270,7 @@ def forward(
 
         # Add the token_type_ids when needed
         if "token_type_ids" in self.input_names:
-            inputs["token_type_ids"] = token_type_ids
+            inputs["token_type_ids"] = token_type_ids if token_type_ids is not None else np.zeros_like(input_ids)
 
         outputs = self._inference(inputs)
         start_logits = (
@@ -330,7 +330,8 @@ def forward(
         if not np_inputs:
             input_ids = np.array(input_ids)
             attention_mask = np.array(attention_mask)
-            token_type_ids = np.array(token_type_ids) if token_type_ids is not None else np.zeros_like(input_ids)
+            token_type_ids = np.array(token_type_ids) if token_type_ids is not None else token_type_ids
+
 
         inputs = {
             "input_ids": input_ids,
@@ -339,7 +340,7 @@ def forward(
 
         # Add the token_type_ids when needed
         if "token_type_ids" in self.input_names:
-            inputs["token_type_ids"] = token_type_ids
+            inputs["token_type_ids"] = token_type_ids if token_type_ids is not None else np.zeros_like(input_ids)
 
         outputs = self._inference(inputs)
         logits = torch.from_numpy(outputs["logits"]).to(self.device) if not np_inputs else outputs["logits"]
@@ -394,7 +395,7 @@ def forward(
         if not np_inputs:
             input_ids = np.array(input_ids)
             attention_mask = np.array(attention_mask)
-            token_type_ids = np.array(token_type_ids) if token_type_ids is not None else np.zeros_like(input_ids)
+            token_type_ids = np.array(token_type_ids) if token_type_ids is not None else token_type_ids
 
         inputs = {
             "input_ids": input_ids,
@@ -403,7 +404,7 @@ def forward(
 
         # Add the token_type_ids when needed
         if "token_type_ids" in self.input_names:
-            inputs["token_type_ids"] = token_type_ids
+            inputs["token_type_ids"] = token_type_ids if token_type_ids is not None else np.zeros_like(input_ids)
 
         outputs = self._inference(inputs)
         last_hidden_state = (
@@ -527,7 +528,7 @@ def forward(
         if not np_inputs:
             input_ids = np.array(input_ids)
             attention_mask = np.array(attention_mask)
-            token_type_ids = np.array(token_type_ids) if token_type_ids is not None else np.zeros_like(input_ids)
+            token_type_ids = np.array(token_type_ids) if token_type_ids is not None else token_type_ids
 
         inputs = {
             "input_ids": input_ids,
@@ -536,7 +537,7 @@ def forward(
 
         # Add the token_type_ids when needed
         if "token_type_ids" in self.input_names:
-            inputs["token_type_ids"] = token_type_ids
+            inputs["token_type_ids"] = token_type_ids if token_type_ids is not None else np.zeros_like(input_ids)
 
         outputs = self._inference(inputs)
         logits = torch.from_numpy(outputs["logits"]).to(self.device) if not np_inputs else outputs["logits"]
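Across all five forward methods the change follows the same pattern: the np.zeros_like(input_ids) default is no longer created during the numpy conversion, but only where token_type_ids is added to the request, and only when the compiled model actually lists "token_type_ids" among its inputs. Below is a minimal sketch of the resulting input-preparation logic; the free-standing prepare_inputs helper is illustrative only (not part of the library), and input_names stands in for the model's self.input_names.

import numpy as np

def prepare_inputs(input_ids, attention_mask, token_type_ids, input_names):
    # Convert the provided arrays; a missing token_type_ids stays None
    # instead of being zero-filled unconditionally.
    input_ids = np.array(input_ids)
    attention_mask = np.array(attention_mask)
    token_type_ids = np.array(token_type_ids) if token_type_ids is not None else None

    inputs = {"input_ids": input_ids, "attention_mask": attention_mask}

    # Synthesize the all-zero default only when the compiled model
    # actually expects a token_type_ids input.
    if "token_type_ids" in input_names:
        inputs["token_type_ids"] = (
            token_type_ids if token_type_ids is not None else np.zeros_like(input_ids)
        )
    return inputs

# Example: a model without token_type_ids in its signature never receives the default.
prepare_inputs([[101, 2023, 102]], [[1, 1, 1]], None, input_names={"input_ids", "attention_mask"})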
20 changes: 20 additions & 0 deletions tests/openvino/test_modeling.py
@@ -463,6 +463,26 @@ def test_pipeline(self, model_arch):
         del pipe
         gc.collect()
 
+    def test_default_token_type_ids(self):
+        model_id = MODEL_NAMES["bert"]
+        model = OVModelForTokenClassification.from_pretrained(model_id, export=True)
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        tokens = tokenizer("this is a simple input", return_tensors="np")
+        self.assertTrue("token_type_ids" in model.input_names)
+        token_type_ids = tokens.pop("token_type_ids")
+        outs = model(token_type_ids=token_type_ids, **tokens)
+        outs_without_token_type_ids = model(**tokens)
+        self.assertTrue(np.allclose(outs.logits, outs_without_token_type_ids.logits))
+
+        tokens["attention_mask"] = None
+        with self.assertRaises(Exception) as context:
+            _ = model(**tokens)
+
+        self.assertIn("Got unexpected inputs: ", str(context.exception))
+
+        del model
+        gc.collect()
+
 
 class OVModelForFeatureExtractionIntegrationTest(unittest.TestCase):
     SUPPORTED_ARCHITECTURES = (
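A usage-level sketch of what the new test exercises, assuming an exported BERT-style checkpoint; "bert-base-uncased" is only an example id, the test itself uses the internal MODEL_NAMES["bert"] fixture.

import numpy as np
from transformers import AutoTokenizer
from optimum.intel import OVModelForTokenClassification

model_id = "bert-base-uncased"  # example checkpoint; any model expecting token_type_ids works
model = OVModelForTokenClassification.from_pretrained(model_id, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

tokens = tokenizer("this is a simple input", return_tensors="np")
token_type_ids = tokens.pop("token_type_ids")

# With the fix, omitting token_type_ids falls back to an all-zero tensor,
# so both calls produce the same logits.
with_ids = model(token_type_ids=token_type_ids, **tokens)
without_ids = model(**tokens)
assert np.allclose(with_ids.logits, without_ids.logits)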
