Commit 974cc51

Bump iree to 20231130.724
To solve the batchnorm2d issue nod-ai#110. Xfail llama_test because of missing ops from torch to linalg.
1 parent 0c658bd commit 974cc51

3 files changed: 4 additions & 23 deletions

requirements.txt

Lines changed: 2 additions & 2 deletions
@@ -7,5 +7,5 @@
 -r pytorch-cpu-requirements.txt
 -r torchvision-requirements.txt
 
-iree-compiler==20231121.715
-iree-runtime==20231121.715
+iree-compiler==20231130.724
+iree-runtime==20231130.724
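
The only functional change here is moving both IREE wheels to the 20231130.724 nightly, which carries the batchnorm2d fix. A quick way to confirm that an environment actually picked up the new pins (illustrative only, not part of this commit) is to read the installed package metadata:

# Illustrative check, not part of this commit: confirm that the installed
# IREE wheels match the versions pinned in requirements.txt.
from importlib.metadata import PackageNotFoundError, version

EXPECTED = {
    "iree-compiler": "20231130.724",
    "iree-runtime": "20231130.724",
}

for pkg, expected in EXPECTED.items():
    try:
        installed = version(pkg)
    except PackageNotFoundError:
        print(f"{pkg}: not installed")
        continue
    status = "ok" if installed == expected else f"expected {expected}"
    print(f"{pkg}: {installed} ({status})")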

tests/dynamo/llama_test.py

Lines changed: 2 additions & 0 deletions
@@ -12,6 +12,7 @@
 
 import math
 import unittest
+import pytest
 from dataclasses import dataclass
 from typing import Any, Optional, Tuple
 
@@ -314,6 +315,7 @@ def main():
     opt(example_tokens, start_pos)
 
 
+@pytest.mark.xfail(reason="https://github.com/nod-ai/SHARK-Turbine/issues/221")
 class ModelTests(unittest.TestCase):
     def testLLama(self):
         main()
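
The new decorator marks the whole ModelTests class as an expected failure rather than deleting or skipping the test: pytest still runs it, reports xfail while the torch-to-linalg ops are missing, and reports xpass once they land. A minimal sketch of the same class-level xfail pattern, using a hypothetical test class rather than the real llama_test:

# Minimal sketch of a class-level xfail, mirroring the change above; the
# test still runs, and pytest reports it as xfail while the linked issue
# is open (or xpass once the missing lowerings land).
import unittest

import pytest


@pytest.mark.xfail(reason="https://github.com/nod-ai/SHARK-Turbine/issues/221")
class ExampleModelTests(unittest.TestCase):  # hypothetical stand-in for ModelTests
    def testKnownGap(self):
        raise NotImplementedError("torch-to-linalg lowering not available yet")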

tests/importers/onnx_importer/import_smoke_test.py

Lines changed: 0 additions & 21 deletions
@@ -40,16 +40,6 @@
     "node_test_ai_onnx_ml_label_encoder_string_int_no_default_model",
     "node_test_ai_onnx_ml_label_encoder_tensor_mapping_model",
     "node_test_ai_onnx_ml_label_encoder_tensor_value_only_mapping_model",
-    "node_test_bitshift_left_uint16_model",
-    "node_test_bitshift_left_uint32_model",
-    "node_test_bitshift_left_uint64_model",
-    "node_test_bitshift_right_uint16_model",
-    "node_test_bitshift_right_uint32_model",
-    "node_test_bitshift_right_uint64_model",
-    "node_test_bitwise_and_ui64_bcast_3v1d_model",
-    "node_test_bitwise_not_3d_model",
-    "node_test_bitwise_or_ui64_bcast_3v1d_model",
-    "node_test_bitwise_xor_ui64_bcast_3v1d_model",
     "node_test_cast_FLOAT16_to_FLOAT8E4M3FNUZ_model",
     "node_test_cast_FLOAT16_to_FLOAT8E4M3FN_model",
     "node_test_cast_FLOAT16_to_FLOAT8E5M2FNUZ_model",
@@ -166,15 +156,6 @@
     "node_test_lstm_defaults_model",
     "node_test_lstm_with_initial_bias_model",
     "node_test_lstm_with_peepholes_model",
-    "node_test_max_uint16_model",
-    "node_test_max_uint32_model",
-    "node_test_max_uint64_model",
-    "node_test_min_uint16_model",
-    "node_test_min_uint32_model",
-    "node_test_min_uint64_model",
-    "node_test_mod_uint16_model",
-    "node_test_mod_uint32_model",
-    "node_test_mod_uint64_model",
     "node_test_optional_get_element_optional_sequence_model",
     "node_test_optional_get_element_optional_tensor_model",
     "node_test_optional_get_element_sequence_model",
@@ -183,8 +164,6 @@
     "node_test_optional_has_element_empty_optional_input_model",
     "node_test_optional_has_element_optional_input_model",
     "node_test_optional_has_element_tensor_input_model",
-    "node_test_pow_types_float32_uint32_model",
-    "node_test_pow_types_float32_uint64_model",
     "node_test_quantizelinear_e4m3fn_model",
     "node_test_quantizelinear_e5m2_model",
     "node_test_range_float_type_positive_delta_expanded_model",

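The deleted entries come out of the importer smoke test's expected-failure list, so those ONNX node tests are now required to import successfully. A hedged sketch of how a name-based list like this is commonly consumed (hypothetical harness code, not the actual import_smoke_test.py wiring):

# Hypothetical illustration of how a name-based expected-failure set is
# often consumed; the real import_smoke_test.py may wire this differently.
import unittest

# Names still on the list are allowed to fail; removing a name, as this
# commit does, makes a failing import fatal again.
EXPECTED_IMPORT_FAILURES = {
    "node_test_cast_FLOAT16_to_FLOAT8E4M3FN_model",  # example entry
}


def make_import_test(name, run_import):
    def test(self):
        run_import(name)

    if name in EXPECTED_IMPORT_FAILURES:
        test = unittest.expectedFailure(test)
    return test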