From e9a156789bf3836d9e85d5d39adfe3169e14b8e3 Mon Sep 17 00:00:00 2001 From: Nikolay Shchegolev Date: Wed, 28 Jan 2026 13:10:18 +0400 Subject: [PATCH] [TEST] SubgraphBaseTest execution time optimizations --- .../tests/skip_tests_config.cpp | 8 +- src/core/src/preprocess/pre_post_process.cpp | 76 +- .../onnx/tests/skip_tests_config.cpp | 10 +- .../paddle/tests/skip_tests_config.cpp | 20 +- .../tests/functional/skip_tests_config.cpp | 6 +- .../skip_tests_config.cpp | 44 +- .../tests/functional/skip_tests_config.cpp | 34 +- .../tests/functional/skip_tests_config.cpp | 28 +- .../skip_tests_config.cpp | 1302 ++++++++--------- .../skip_tests_config.cpp | 366 ++--- .../subgraph_tests/dynamic/kv_cache.cpp | 4 +- .../subgraph_tests/dynamic/kv_cache_sdpa.cpp | 2 +- .../ov_infer_request/compile_and_infer.hpp | 2 +- .../behavior/ov_plugin/life_time.hpp | 2 +- .../dma_buf_remote_run.hpp | 6 +- .../remote_tensor_tests/dx12_remote_run.hpp | 6 +- .../remote_tensor_tests/remote_run.hpp | 50 +- .../skip_tests_config.cpp | 12 +- .../tests/functional/skip_tests_config.cpp | 148 +- .../compiled_model/compiled_model_base.hpp | 2 +- .../ov_infer_request/properties_tests.hpp | 2 +- .../base/ov_behavior_test_utils.hpp | 2 +- .../base/utils/compare_results.hpp | 2 +- .../base/utils/generate_inputs.hpp | 2 +- .../base_func_tests/src/base/ov_subgraph.cpp | 6 +- .../src/base/utils/compare_results.cpp | 4 +- .../src/base/utils/generate_inputs.cpp | 5 +- .../base_func_tests/src/base/utils/ranges.cpp | 2 +- .../behavior/compiled_model/import_export.cpp | 2 +- .../ov_infer_request/inference_chaining.cpp | 10 +- .../behavior/ov_infer_request/io_tensor.cpp | 2 +- .../ov_infer_request/iteration_chaining.cpp | 4 +- .../ov_infer_request/memory_states.cpp | 2 +- .../src/behavior/ov_infer_request/wait.cpp | 2 +- .../src/behavior/ov_plugin/life_time.cpp | 2 +- .../tests/skip_tests_config.cpp | 6 +- .../src/skip_tests_config.cpp | 17 +- .../src/read_ir/read_ir.cpp | 2 +- .../tests/skip_tests_config.cpp | 5 
+- .../skip_tests_config.hpp | 2 +- .../src/skip_tests_config.cpp | 18 +- 41 files changed, 1130 insertions(+), 1097 deletions(-) diff --git a/src/common/transformations/tests/skip_tests_config.cpp b/src/common/transformations/tests/skip_tests_config.cpp index 0266844f7ffe34..c9e138460aba67 100644 --- a/src/common/transformations/tests/skip_tests_config.cpp +++ b/src/common/transformations/tests/skip_tests_config.cpp @@ -7,9 +7,11 @@ #include #include -std::vector disabledTestPatterns() { - return { +const std::vector& disabled_test_patterns() { + const static std::vector patterns{ // TODO: task 32568, enable after supporting constants outputs in plugins - ".*TransformationTests\\.ConstFoldingPriorBox.*", + std::regex(".*TransformationTests\\.ConstFoldingPriorBox.*"), }; + + return patterns; } diff --git a/src/core/src/preprocess/pre_post_process.cpp b/src/core/src/preprocess/pre_post_process.cpp index df24fab6796bdb..d29e25308853b6 100644 --- a/src/core/src/preprocess/pre_post_process.cpp +++ b/src/core/src/preprocess/pre_post_process.cpp @@ -91,39 +91,49 @@ void transformation_pipeline(std::shared_ptr& model) { RTInfoCache rt_info_cache; rt_info_cache.store(model); - Manager manager("pre_post_processing"); - manager.set_per_pass_validation(false); - - // prerequisite: the model structure optimization before applying of the markup - REGISTER_PASS(manager, SharedOpOptimization) - - // 1. 
Set "disable_const_folding" attribute - // we have to add a call into the PrePostProcessing, it runs before compile_model call - REGISTER_PASS(manager, MarkGatherSubgraph, element::TypeVector{element::f8e4m3}, element::TypeVector{element::u4}); - REGISTER_PASS(manager, - MarkDequantization, - TypeVector{i32, u32, i16, u16, i8, u8, u6, i4, u4, u3, u2, u1, nf4, f4e2m1, f8e4m3, f8e5m2, f8e8m0}); - REGISTER_PASS(manager, DisableShapeOfConstantFolding, false); - REGISTER_PASS(manager, DisableRandomUniformConstantFolding) - // Mark quantized and f16/bf16 compressed constants to prevent CF for them, - // so that not extra memory is used for intermediate decompressed constants. - REGISTER_PASS(manager, MarkCompressedFloatConstants); - REGISTER_PASS(manager, DisableDecompressionConvertConstantFolding); - - // 2. Fusion transformations: - REGISTER_PASS(manager, ConvertDivideWithConstant) - auto fusions = manager.register_pass(); - // Gelu fusion have to be executed before MulConv fusion because Mul(X, 0.5) might be fused to Conv weights - ADD_MATCHER(fusions, GeluFusion) - ADD_MATCHER(fusions, MultiplyConvolutionFusion) - ADD_MATCHER(fusions, MultiplyGroupConvolutionFusion) - ADD_MATCHER(fusions, MultiplyConvolutionBackpropDataFusion) - ADD_MATCHER(fusions, MultiplyGroupConvolutionBackpropDataFusion) - fusions->set_name("ov::pass::MultiplyFusions"); - REGISTER_PASS(manager, ReverseInputChannelsFusion) - - // 3. CF call due to detected perf degradations - REGISTER_PASS(manager, ConstantFolding) + auto get_manager = []() { + Manager manager("pre_post_processing"); + manager.set_per_pass_validation(false); + + // prerequisite: the model structure optimization before applying of the markup + REGISTER_PASS(manager, SharedOpOptimization) + + // 1. 
Set "disable_const_folding" attribute + // we have to add a call into the PrePostProcessing, it runs before compile_model call + REGISTER_PASS(manager, + MarkGatherSubgraph, + element::TypeVector{element::f8e4m3}, + element::TypeVector{element::u4}); + REGISTER_PASS( + manager, + MarkDequantization, + TypeVector{i32, u32, i16, u16, i8, u8, u6, i4, u4, u3, u2, u1, nf4, f4e2m1, f8e4m3, f8e5m2, f8e8m0}); + REGISTER_PASS(manager, DisableShapeOfConstantFolding, false); + REGISTER_PASS(manager, DisableRandomUniformConstantFolding) + // Mark quantized and f16/bf16 compressed constants to prevent CF for them, + // so that not extra memory is used for intermediate decompressed constants. + REGISTER_PASS(manager, MarkCompressedFloatConstants); + REGISTER_PASS(manager, DisableDecompressionConvertConstantFolding); + + // 2. Fusion transformations: + REGISTER_PASS(manager, ConvertDivideWithConstant) + auto fusions = manager.register_pass(); + // Gelu fusion have to be executed before MulConv fusion because Mul(X, 0.5) might be fused to Conv weights + ADD_MATCHER(fusions, GeluFusion) + ADD_MATCHER(fusions, MultiplyConvolutionFusion) + ADD_MATCHER(fusions, MultiplyGroupConvolutionFusion) + ADD_MATCHER(fusions, MultiplyConvolutionBackpropDataFusion) + ADD_MATCHER(fusions, MultiplyGroupConvolutionBackpropDataFusion) + fusions->set_name("ov::pass::MultiplyFusions"); + REGISTER_PASS(manager, ReverseInputChannelsFusion) + + // 3. CF call due to detected perf degradations + REGISTER_PASS(manager, ConstantFolding) + + return manager; + }; + static Manager manager = get_manager(); + manager.run_passes(model); // 4. 
Restore old RT info to not affect plugin compilation diff --git a/src/frontends/onnx/tests/skip_tests_config.cpp b/src/frontends/onnx/tests/skip_tests_config.cpp index 2212d5f27dc331..ed2e7c4af86fb2 100644 --- a/src/frontends/onnx/tests/skip_tests_config.cpp +++ b/src/frontends/onnx/tests/skip_tests_config.cpp @@ -7,13 +7,15 @@ #include #include -std::vector disabledTestPatterns() { - return { +const std::vector& disabled_test_patterns() { + const static std::vector patterns{ #ifdef OPENVINO_STATIC_LIBRARY // Disable tests for static libraries - ".*FrontendLibCloseTest.*", + std::regex(".*FrontendLibCloseTest.*"), #endif // CVS-123201 - ".*testUnloadLibBeforeDeletingDependentObject.*", + std::regex(".*testUnloadLibBeforeDeletingDependentObject.*"), }; + + return patterns; } diff --git a/src/frontends/paddle/tests/skip_tests_config.cpp b/src/frontends/paddle/tests/skip_tests_config.cpp index 2567db30bbaf87..e8c47b2f37a0a6 100644 --- a/src/frontends/paddle/tests/skip_tests_config.cpp +++ b/src/frontends/paddle/tests/skip_tests_config.cpp @@ -7,18 +7,20 @@ #include #include -std::vector disabledTestPatterns() { - return { +const std::vector& disabled_test_patterns() { + const static std::vector patterns{ #ifdef OPENVINO_STATIC_LIBRARY // Disable tests for static libraries - ".*FrontendLibCloseTest.*", + std::regex(".*FrontendLibCloseTest.*"), #endif - ".*testUnloadLibBeforeDeletingDependentObject.*", + std::regex(".*testUnloadLibBeforeDeletingDependentObject.*"), // CVS-130605, CVS-170348 - ".*paddle_yolo_box_uneven_wh_yolo_box_uneven_wh_pdmodel.*", - ".*paddle_loop_dyn_loop_dyn_pdmodel.*", - ".*paddle_scatter_test_1_scatter_test_1_pdmodel.*", - ".*paddle_top_k_.*", - ".*generate_proposals.*", + std::regex(".*paddle_yolo_box_uneven_wh_yolo_box_uneven_wh_pdmodel.*"), + std::regex(".*paddle_loop_dyn_loop_dyn_pdmodel.*"), + std::regex(".*paddle_scatter_test_1_scatter_test_1_pdmodel.*"), + std::regex(".*paddle_top_k_.*"), + std::regex(".*generate_proposals.*"), }; + + 
return patterns; } diff --git a/src/inference/tests/functional/skip_tests_config.cpp b/src/inference/tests/functional/skip_tests_config.cpp index 18ad06b148782b..0ae340ed421e28 100644 --- a/src/inference/tests/functional/skip_tests_config.cpp +++ b/src/inference/tests/functional/skip_tests_config.cpp @@ -7,6 +7,8 @@ #include #include -std::vector disabledTestPatterns() { - return {}; +const std::vector& disabled_test_patterns() { + const static std::vector patterns{}; + + return patterns; } diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp index c17c52258836e0..9cc883ead9ced8 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -9,38 +9,42 @@ #include "openvino/core/visibility.hpp" -std::vector disabledTestPatterns() { - std::vector retVector{ +const std::vector& disabled_test_patterns() { + const static std::vector patterns{ // Not implemented yet: - R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModel.*)", - R"(.*Behavior.*OVCompiledModelBaseTest.*canExportModel.*)", - R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModelWithIncorrectConfig.*)", + std::regex(R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModel.*)"), + std::regex(R"(.*Behavior.*OVCompiledModelBaseTest.*canExportModel.*)"), + std::regex(R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModelWithIncorrectConfig.*)"), // requires export_model be implemented - R"(.*Behavior.*OVCompiledModelBaseTest.*import_from_weightless_blob.*targetDevice=(MULTI|AUTO).*)", - R"(.*Behavior.*OVCompiledModelBaseTest.*compile_from.*_blob.*targetDevice=(MULTI|AUTO).*)", - R"(.*Behavior.*OVCompiledModelBaseTest.*use_blob_hint.*targetDevice=(MULTI|AUTO).*)", + 
std::regex(R"(.*Behavior.*OVCompiledModelBaseTest.*import_from_weightless_blob.*targetDevice=(MULTI|AUTO).*)"), + std::regex(R"(.*Behavior.*OVCompiledModelBaseTest.*compile_from.*_blob.*targetDevice=(MULTI|AUTO).*)"), + std::regex(R"(.*Behavior.*OVCompiledModelBaseTest.*use_blob_hint.*targetDevice=(MULTI|AUTO).*)"), // unsupported metrics - R"(.*smoke_AutoOVGetMetricPropsTest.*OVGetMetricPropsTest.*(AVAILABLE_DEVICES|OPTIMIZATION_CAPABILITIES|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", + std::regex( + R"(.*smoke_AutoOVGetMetricPropsTest.*OVGetMetricPropsTest.*(AVAILABLE_DEVICES|OPTIMIZATION_CAPABILITIES|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)"), // Issue: // New API tensor tests - R"(.*OVInferRequestCheckTensorPrecision.*type=i4.*)", - R"(.*OVInferRequestCheckTensorPrecision.*type=u1.*)", - R"(.*OVInferRequestCheckTensorPrecision.*type=u4.*)", + std::regex(R"(.*OVInferRequestCheckTensorPrecision.*type=i4.*)"), + std::regex(R"(.*OVInferRequestCheckTensorPrecision.*type=u1.*)"), + std::regex(R"(.*OVInferRequestCheckTensorPrecision.*type=u4.*)"), // AUTO does not support import / export - R"(.*smoke_Auto_BehaviorTests/OVCompiledGraphImportExportTest.*(mportExport|readFromV10IR).*/targetDevice=(AUTO).*)", - R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)", - R"(.*Behavior.*OVInferRequestDynamicTests.*InferUpperBoundNetworkAfterIOTensorsReshaping.*)", + std::regex( + R"(.*smoke_Auto_BehaviorTests/OVCompiledGraphImportExportTest.*(mportExport|readFromV10IR).*/targetDevice=(AUTO).*)"), + std::regex(R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)"), + std::regex(R"(.*Behavior.*OVInferRequestDynamicTests.*InferUpperBoundNetworkAfterIOTensorsReshaping.*)"), // template plugin doesn't support this case - R"(.*OVInferRequestPerfCountersTest.*CheckOperationInProfilingInfo.*)", + std::regex(R"(.*OVInferRequestPerfCountersTest.*CheckOperationInProfilingInfo.*)"), // model import is not supported - 
R"(.*OVCompiledModelBaseTest.import_from_.*)"}; + std::regex(R"(.*OVCompiledModelBaseTest.import_from_.*)"), #if !defined(OPENVINO_ARCH_X86_64) - // very time-consuming test - retVector.emplace_back(R"(.*OVInferConsistencyTest.*)"); + // very time-consuming test + std::regex(R"(.*OVInferConsistencyTest.*)"), #endif - return retVector; + }; + + return patterns; } diff --git a/src/plugins/auto_batch/tests/functional/skip_tests_config.cpp b/src/plugins/auto_batch/tests/functional/skip_tests_config.cpp index 36a325c710ab3e..e90c6c132bdfe1 100644 --- a/src/plugins/auto_batch/tests/functional/skip_tests_config.cpp +++ b/src/plugins/auto_batch/tests/functional/skip_tests_config.cpp @@ -7,31 +7,31 @@ #include #include -std::vector disabledTestPatterns() { - std::vector disabled_items = { +const std::vector& disabled_test_patterns() { + const static std::vector patterns{ // TODO: for CVS-68949 // Not implemented yet: - R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)", - R"(.*Behavior.*ExecutableNetworkBaseTest.*canExport.*)", - R"(.*OVExecutableNetworkBaseTest.*CanSetConfigToExecNet.*)", - R"(.*OVExecutableNetworkBaseTest.*CanSetConfigToExecNetAndCheckConfigAndCheck.*)", + std::regex(R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)"), + std::regex(R"(.*Behavior.*ExecutableNetworkBaseTest.*canExport.*)"), + std::regex(R"(.*OVExecutableNetworkBaseTest.*CanSetConfigToExecNet.*)"), + std::regex(R"(.*OVExecutableNetworkBaseTest.*CanSetConfigToExecNetAndCheckConfigAndCheck.*)"), // Not supported by TEMPLATE plugin - R"(.*OVExecutableNetworkBaseTest.*CheckExecGraphInfo.*)", + std::regex(R"(.*OVExecutableNetworkBaseTest.*CheckExecGraphInfo.*)"), // Issue: 90539 - R"(.*OVInferRequestIOTensorTest.InferStaticNetworkSetChangedInputTensorThrow.*)", - R"(.*OVInferRequestIOTensorTest.canInferAfterIOBlobReallocation.*)", - R"(.*VirtualPlugin.*BehaviorTests.*OVHoldersTest.*)", + 
std::regex(R"(.*OVInferRequestIOTensorTest.InferStaticNetworkSetChangedInputTensorThrow.*)"), + std::regex(R"(.*OVInferRequestIOTensorTest.canInferAfterIOBlobReallocation.*)"), + std::regex(R"(.*VirtualPlugin.*BehaviorTests.*OVHoldersTest.*)"), // BATCH plugin doesn't support this case - R"(.*LoadNetworkCreateDefaultExecGraphResult.*)", + std::regex(R"(.*LoadNetworkCreateDefaultExecGraphResult.*)"), // BATCH/TEMPLATE plugin doesn't support this case - R"(.*OVInferRequestPerfCountersTest.*CheckOperationInProfilingInfo.*)", + std::regex(R"(.*OVInferRequestPerfCountersTest.*CheckOperationInProfilingInfo.*)"), // requires export_model be implemented - R"(.*Behavior.*OVCompiledModelBaseTest.*import_from_weightless_blob.*targetDevice=(BATCH).*)", - R"(.*Behavior.*OVCompiledModelBaseTest.*compile_from.*_blob.*targetDevice=(BATCH).*)", - R"(.*Behavior.*OVCompiledModelBaseTest.*use_blob_hint.*targetDevice=(BATCH).*)", + std::regex(R"(.*Behavior.*OVCompiledModelBaseTest.*import_from_weightless_blob.*targetDevice=(BATCH).*)"), + std::regex(R"(.*Behavior.*OVCompiledModelBaseTest.*compile_from.*_blob.*targetDevice=(BATCH).*)"), + std::regex(R"(.*Behavior.*OVCompiledModelBaseTest.*use_blob_hint.*targetDevice=(BATCH).*)"), // model import is not supported - R"(.*OVCompiledModelBaseTest.import_from_.*)" + std::regex(R"(.*OVCompiledModelBaseTest.import_from_.*)") }; - return disabled_items; + return patterns; } diff --git a/src/plugins/hetero/tests/functional/skip_tests_config.cpp b/src/plugins/hetero/tests/functional/skip_tests_config.cpp index 324dfaa3032291..d03955943f42be 100644 --- a/src/plugins/hetero/tests/functional/skip_tests_config.cpp +++ b/src/plugins/hetero/tests/functional/skip_tests_config.cpp @@ -9,18 +9,22 @@ #include "openvino/core/core_visibility.hpp" -std::vector disabledTestPatterns() { - std::vector retVector{ - R"(.*smoke_(Multi|Auto|Hetero)_BehaviorTests.*OVPropertiesTests.*SetCorrectProperties.*)", - 
R"(.*smoke_(Multi|Auto|Hetero)_BehaviorTests.*OVPropertiesTests.*canSetPropertyAndCheckGetProperty.*)", - R"(.*OVInferRequestCheckTensorPrecision.*get(Input|Output|Inputs|Outputs)From.*FunctionWith(Single|Several).*type=(u4|u1|i4|boolean).*)", - R"(.*OVGetMetricPropsTest.*OVGetMetricPropsTest.*GetMetricAndPrintNoThrow_AVAILABLE_DEVICES.*)", +const std::vector& disabled_test_patterns() { + const static std::vector patterns{ + std::regex(R"(.*smoke_(Multi|Auto|Hetero)_BehaviorTests.*OVPropertiesTests.*SetCorrectProperties.*)"), + std::regex( + R"(.*smoke_(Multi|Auto|Hetero)_BehaviorTests.*OVPropertiesTests.*canSetPropertyAndCheckGetProperty.*)"), + std::regex( + R"(.*OVInferRequestCheckTensorPrecision.*get(Input|Output|Inputs|Outputs)From.*FunctionWith(Single|Several).*type=(u4|u1|i4|boolean).*)"), + std::regex(R"(.*OVGetMetricPropsTest.*OVGetMetricPropsTest.*GetMetricAndPrintNoThrow_AVAILABLE_DEVICES.*)"), // CACHE_MODE property is not supported on NPU - R"(.*OVCompiledModelBaseTest.*import_from_.*_blob.*targetDevice=(HETERO.NPU).*)", - R"(.*OVCompiledModelBaseTest.*compile_from_.*_blob.*targetDevice=(HETERO.NPU).*)", - R"(.*OVCompiledModelBaseTest.*compile_from_cached_weightless_blob.*targetDevice=(HETERO.NPU).*)", - R"(.*OVCompiledModelBaseTest.*use_blob_hint_.*targetDevice=CPU.*)", + std::regex(R"(.*OVCompiledModelBaseTest.*import_from_.*_blob.*targetDevice=(HETERO.NPU).*)"), + std::regex(R"(.*OVCompiledModelBaseTest.*compile_from_.*_blob.*targetDevice=(HETERO.NPU).*)"), + std::regex(R"(.*OVCompiledModelBaseTest.*compile_from_cached_weightless_blob.*targetDevice=(HETERO.NPU).*)"), + std::regex(R"(.*OVCompiledModelBaseTest.*use_blob_hint_.*targetDevice=CPU.*)"), // model import is not supported - R"(.*OVCompiledModelBaseTest.import_from_.*)"}; - return retVector; + std::regex(R"(.*OVCompiledModelBaseTest.import_from_.*)"), + }; + + return patterns; } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 87d7a1d854e885..bc11973ec07f40 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -15,689 +15,685 @@ #include "utils/cpu_test_utils.hpp" -std::vector disabledTestPatterns() { - std::vector retVector{ - // Skip platforms that do not support BF16 (i.e. sse, avx, avx2) - R"(.*(BF|bf)16.*(jit_avx(?!5)|jit_sse).*)", - // TODO: Incorrect blob sizes for node BinaryConvolution_X - R"(.*BinaryConvolutionLayerTest.*)", - // TODO: 53618. BF16 gemm ncsp convolution crash - R"(.*_GroupConv.*_inFmts=nc.*_primitive=jit_gemm.*ENFORCE_BF16=YES.*)", - // TODO: 157596 convolution bf16 leftover test case - R"(smoke_JIT_AVX512_DW_GroupConv/GroupConvolutionLayerCPUTest.*ndhwc.*jit_avx512_dw.*INFERENCE_PRECISION_HINT=bf16.*)", - R"(smoke_Conv_1D_1x1_BF16/ConvolutionLayerCPUTest\.CompareWithRefs/IS=\[\]_TS=\(\((1|2)\.6(4|7)\.7\)_\)_K\(1\)_S\(1\)_PB\(0\)_PE\(0\)_D=\(1\)_O=63_AP=explicit_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPU_inFmts=nhwc_outFmts=nhwc_primitive=jit_avx512_1x1_.*PluginConf_INFERENCE_PRECISION_HINT=bf16)", - R"(smoke_Conv_1D_1x1_BF16/ConvolutionLayerCPUTest\.CompareWithRefs/IS=\[1\.\.200\.64\.\?\]_TS=\(\(2\.64\.7\)_\(1\.64\.5\)_\)_K\(1\)_S\(1\)_PB\(0\)_PE\(0\)_D=\(1\)_O=63_AP=explicit_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPU_inFmts=nhwc_outFmts=nhwc_primitive=jit_avx512_1x1_.*PluginConf_INFERENCE_PRECISION_HINT=bf16)", - R"(smoke_Conv_1D_1x1_BF16/ConvolutionLayerCPUTest\.CompareWithRefs/IS=\[\?\.6(4|7)\.1\.\.200\]_TS=\(\(2\.6(4|7)\.7\)_\(1\.6(4|7)\.9\)_\)_K\(1\)_S\(1\)_PB\(0\)_PE\(0\)_D=\(1\)_O=63_AP=explicit_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPU_inFmts=nhwc_outFmts=nhwc_primitive=jit_avx512_1x1_.*PluginConf_INFERENCE_PRECISION_HINT=bf16)", - 
R"(smoke_GroupConv_brgemm_2D_BF16/GroupConvolutionLayerCPUTest\.CompareWithRefs/IS=\[\]_TS=\(\(1\.64\.7\.7\)_\)_K\(3\.3\)_S\(2\.2\)_PB\((0|1)\.(0|1)\)_PE\(0\.0\)_D=\(2\.2\)_O=64_G=2_AP=explicit_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPU_inFmts=nhwc_outFmts=nhwc_primitive=brgconv_avx512_amx_.*PluginConf_INFERENCE_PRECISION_HINT=bf16)", - R"(smoke_JIT_AVX512_DW_GroupConv/GroupConvolutionLayerCPUTest.*inFmts=nCdhw16c.*INFERENCE_PRECISION_HINT=bf16.*)", - R"(smoke_Conv_1D_BF16/ConvolutionLayerCPUTest.*IS=\[\].*K\(3\).*S\(2\).*PE\(0\).*D=\(1\).*O=6(3|4).*brgconv_avx512_amx.*)", - // TODO: 56827. Sporadic test failures - R"(.*smoke_Conv.+_FP32.ConvolutionLayerCPUTest\.CompareWithRefs.*TS=\(\(.\.67.+\).*inFmts=n.+c.*_primitive=jit_avx2.*)", - // incorrect jit_uni_planar_convolution with dilation = {1, 2, 1} and output channel 1 - R"(.*smoke_Convolution3D.*D=\(1.2.1\)_O=1.*)", +const std::vector& disabled_test_patterns() { + auto get_patterns = []() { + std::vector patterns{ + // Skip platforms that do not support BF16 (i.e. sse, avx, avx2) + std::regex(R"(.*(BF|bf)16.*(jit_avx(?!5)|jit_sse).*)"), + // TODO: Incorrect blob sizes for node BinaryConvolution_X + std::regex(R"(.*BinaryConvolutionLayerTest.*)"), + // TODO: 53618. 
BF16 gemm ncsp convolution crash + std::regex(R"(.*_GroupConv.*_inFmts=nc.*_primitive=jit_gemm.*ENFORCE_BF16=YES.*)"), + // TODO: 157596 convolution bf16 leftover test case + std::regex(R"(smoke_JIT_AVX512_DW_GroupConv/GroupConvolutionLayerCPUTest.*ndhwc.*jit_avx512_dw.*INFERENCE_PRECISION_HINT=bf16.*)"), + std::regex(R"(smoke_Conv_1D_1x1_BF16/ConvolutionLayerCPUTest\.CompareWithRefs/IS=\[\]_TS=\(\((1|2)\.6(4|7)\.7\)_\)_K\(1\)_S\(1\)_PB\(0\)_PE\(0\)_D=\(1\)_O=63_AP=explicit_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPU_inFmts=nhwc_outFmts=nhwc_primitive=jit_avx512_1x1_.*PluginConf_INFERENCE_PRECISION_HINT=bf16)"), + std::regex(R"(smoke_Conv_1D_1x1_BF16/ConvolutionLayerCPUTest\.CompareWithRefs/IS=\[1\.\.200\.64\.\?\]_TS=\(\(2\.64\.7\)_\(1\.64\.5\)_\)_K\(1\)_S\(1\)_PB\(0\)_PE\(0\)_D=\(1\)_O=63_AP=explicit_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPU_inFmts=nhwc_outFmts=nhwc_primitive=jit_avx512_1x1_.*PluginConf_INFERENCE_PRECISION_HINT=bf16)"), + std::regex(R"(smoke_Conv_1D_1x1_BF16/ConvolutionLayerCPUTest\.CompareWithRefs/IS=\[\?\.6(4|7)\.1\.\.200\]_TS=\(\(2\.6(4|7)\.7\)_\(1\.6(4|7)\.9\)_\)_K\(1\)_S\(1\)_PB\(0\)_PE\(0\)_D=\(1\)_O=63_AP=explicit_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPU_inFmts=nhwc_outFmts=nhwc_primitive=jit_avx512_1x1_.*PluginConf_INFERENCE_PRECISION_HINT=bf16)"), + std::regex(R"(smoke_GroupConv_brgemm_2D_BF16/GroupConvolutionLayerCPUTest\.CompareWithRefs/IS=\[\]_TS=\(\(1\.64\.7\.7\)_\)_K\(3\.3\)_S\(2\.2\)_PB\((0|1)\.(0|1)\)_PE\(0\.0\)_D=\(2\.2\)_O=64_G=2_AP=explicit_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPU_inFmts=nhwc_outFmts=nhwc_primitive=brgconv_avx512_amx_.*PluginConf_INFERENCE_PRECISION_HINT=bf16)"), + std::regex(R"(smoke_JIT_AVX512_DW_GroupConv/GroupConvolutionLayerCPUTest.*inFmts=nCdhw16c.*INFERENCE_PRECISION_HINT=bf16.*)"), + std::regex(R"(smoke_Conv_1D_BF16/ConvolutionLayerCPUTest.*IS=\[\].*K\(3\).*S\(2\).*PE\(0\).*D=\(1\).*O=6(3|4).*brgconv_avx512_amx.*)"), + // TODO: 56827. 
Sporadic test failures + std::regex(R"(.*smoke_Conv.+_FP32.ConvolutionLayerCPUTest\.CompareWithRefs.*TS=\(\(.\.67.+\).*inFmts=n.+c.*_primitive=jit_avx2.*)"), + // incorrect jit_uni_planar_convolution with dilation = {1, 2, 1} and output channel 1 + std::regex(R"(.*smoke_Convolution3D.*D=\(1.2.1\)_O=1.*)"), - // TODO: Issue: 35627. CPU Normalize supports from 2D to 4D blobs - R"(.*NormalizeL2_1D.*)", - R"(.*NormalizeL2_5D.*)", - // Issue: 59788. dnnl_normalize_nchw applies eps after sqrt for across_spatial - R"(.*NormalizeL2_.*axes=\(1.2.*_eps=100.*)", - R"(.*NormalizeL2_.*axes=\(2.1.*_eps=100.*)", - R"(.*NormalizeL2_.*axes=\(3.1.2.*_eps=100.*)", + // TODO: Issue: 35627. CPU Normalize supports from 2D to 4D blobs + std::regex(R"(.*NormalizeL2_1D.*)"), + std::regex(R"(.*NormalizeL2_5D.*)"), + // Issue: 59788. dnnl_normalize_nchw applies eps after sqrt for across_spatial + std::regex(R"(.*NormalizeL2_.*axes=\(1.2.*_eps=100.*)"), + std::regex(R"(.*NormalizeL2_.*axes=\(2.1.*_eps=100.*)"), + std::regex(R"(.*NormalizeL2_.*axes=\(3.1.2.*_eps=100.*)"), - // Not expected behavior - R"(.*Behavior.*CorrectConfigCheck.*(canSetConfigAndCheckGetConfig|canSetConfigTwiceAndCheckGetConfig).*CPU_BIND_THREAD=YES.*)", - // Issue: 72021 Unreasonable abs_threshold for comparing bf16 results - R"(.*smoke_Reduce.*type=(Prod|Min).*INFERENCE_PRECISION_HINT=(BF|bf)16.*)", + // Not expected behavior + std::regex(R"(.*Behavior.*CorrectConfigCheck.*(canSetConfigAndCheckGetConfig|canSetConfigTwiceAndCheckGetConfig).*CPU_BIND_THREAD=YES.*)"), + // Issue: 72021 Unreasonable abs_threshold for comparing bf16 results + std::regex(R"(.*smoke_Reduce.*type=(Prod|Min).*INFERENCE_PRECISION_HINT=(BF|bf)16.*)"), - // CPU does not support dynamic rank - // Issue: 66778 - R"(.*smoke_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", - R"(.*smoke_BehaviorTests.*DynamicOutputToDynamicInput.*)", - R"(.*smoke_BehaviorTests.*DynamicInputToDynamicOutput.*)", - // unsupported metrics - 
R"(.*OVGetMetricPropsTest.*OVGetMetricPropsTest.*(MAX_BATCH_SIZE).*)", - // supports only '' as device id - R"(.*OVClassQueryModelTest.*QueryModelWithDeviceID.*)", - // Issue 67214 - R"(smoke_PrePostProcess.*resize_and_convert_layout_i8.*)", - // Issue: 69086 - // need to add support convert BIN -> FP32 - // if we set output precision as BIN, when we create output blob precision looks like UNSPECIFIED - R"(.*smoke_FakeQuantizeLayerCPUTest.*bin.*)", - // Issue: 71756 - R"(.*GroupDeconv_2D_DW_BF16/GroupDeconvolutionLayerCPUTest.CompareWithRefs.*PRC=f32.*inFmts=nChw16c_outFmts=nChw16c_primitive=jit_avx512_dw_Fused=Multiply\(PerChannel\).Add\(PerChannel\)_PluginConf_INFERENCE_PRECISION_HINT=bf16*)", - R"(.*smoke_GroupDeconv_(2|3)D_Blocked_BF16.*S=(\(2\.2\)|\(2\.2\.2\))_PB=(\(0\.0\)|\(0\.0\.0\))_PE=(\(0\.0\)|\(0\.0\.0\))_D=(\(1\.1\)|\(1\.1\.1\))_.*_O=64_G=4.*)", - // Issue: - // New API tensor tests - R"(.*OVInferRequestCheckTensorPrecision.*type=u1.*)", - // Issue: 77390 - R"(.*LoopLayerCPUTest.*exec_cond=0.*)", - R"(.*LoopLayerCPUTest.*trip_count=0.*)", - R"(.*LoopForDiffShapesLayerCPUTest.*exec_cond=0.*)", - R"(.*LoopForDiffShapesLayerCPUTest.*trip_count=0.*)", - R"(.*LoopForConcatLayerCPUTest.*exec_cond=0.*)", - R"(.*LoopForConcatLayerCPUTest.*trip_count=0.*)", - // [ INFO ] Can't compile network without cache for .. with precision .. - R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*KSOFunction.*)", - R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*NonMaxSuppression.*)", - R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*Nms.*)", - // 94982. FP32->I32 conversion issue in the reference implementation. There can be some garbage in the rest of - // float values like 0.333333745. - // The kernel does not have such garbage. The diff 0.000000745 is taken into account in calculations and affects - // further type conversion. - // Reorder->GridSample->Reorder also does not work here. Potential fix is to use nearest conversion instead of - // truncation. 
- R"(.*GridSampleLayerTestCPU.*(BILINEAR|BICUBIC).*(i32|i8).*)", - R"(.*smoke_static/GridSampleLayerTestCPU.CompareWithRefs/.*_TS=.*(1.7.5.3|2.6.3.10).*_interpMode=NEAREST_padMode=REFLECTION_alignCorners=False_dataPrc=(f32|i32)_gridPrc=f32_.*)", - R"(.*smoke_static/GridSampleLayerTestCPU.CompareWithRefs/.*_TS=.*5.3.2.13.*_interpMode=BICUBIC_padMode=REFLECTION_alignCorners=True_dataPrc=f32_gridPrc=f32_.*)", - R"(.*smoke_static/GridSampleLayerTestCPU.CompareWithRefs/.*_TS=.*2.1.6.16.*_interpMode=NEAREST_padMode=(BORDER|REFLECTION)_alignCorners=(True|False)_dataPrc=(f32|i32)_gridPrc=f32_.*)", - R"(.*smoke_dynamic/GridSampleLayerTestCPU.CompareWithRefs/IS=\(\[2..15.\?.\?.\?\]_\[\?.3.7.2\]\)_.*_interpMode=NEAREST_padMode=REFLECTION_alignCorners=False_dataPrc=f32_gridPrc=f32_.*)", - R"(.*smoke_dynamic/GridSampleLayerTestCPU.CompareWithRefs/IS=\(\[\?.\?.\?.\?\]_\[\?.\?.\?.\?\]\).*interpMode=NEAREST_padMode=REFLECTION_alignCorners=False_dataPrc=f32_gridPrc=f32_.*)", - R"(.*smoke_dynamic/GridSampleLayerTestCPU.CompareWithRefs/IS=\(\[\?.3.\?.\?\]_\[\?.\?.\?.2\]\).*interpMode=BICUBIC_padMode=REFLECTION_alignCorners=True_dataPrc=f32_gridPrc=f32_.*)", - // AdaptiveAvgPool is converted into Reduce op for suitable parameters. 
CPU Reduce impl doesn't support non - // planar layout for 3D case - R"(.*StaticAdaPoolAvg3DLayoutTest.*OS=\(1\).*_inFmts=(nwc|nCw16c|nCw8c).*)", - // Issue: 111404 - R"(.*smoke_set1/GatherElementsCPUTest.*)", - // Issue: 111406 - R"(.*smoke_InterpolateLinearOnnx_Layout_Test/InterpolateLayerCPUTest.*)", - R"(.*smoke_InterpolateLinear_Layout_Test/InterpolateLayerCPUTest.*)", - R"(.*smoke_InterpolateCubic_Layout_Test/InterpolateLayerCPUTest.*)", - // Issue: 111412 - R"(.*smoke_Proposal_(Static|Dynamic)_Test_Case1/ProposalLayerCPUTest.*)", - // Issue: 111418 - R"(.*smoke_Snippets_ConvertStub/ConvertStub\.CompareWithRefImpl/IS.*_OT=\(bf16\)_#N=2_#S=2_targetDevice=CPU.*)", - R"(.*smoke_Snippets_Convert/Convert\.CompareWithRefImpl/IS.*_IT=\((f32|f16)\)_OT=\(u8\)_#N=1_#S=1_targetDevice=CPU.*)", - R"(.*smoke_Snippets_ConvertManyOnInputs/ConvertManyOnInputs\.CompareWithRefImpl/IS.*_IT=\(f32\.u8\)_OT=\(\)_#N=1_#S=1_targetDevice=CPU.*)", - // New plugin API doesn't support changes of pre-processing - R"(.*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)", - R"(.*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", - // Plugin version was changed to ov::Version - R"(.*VersionTest.pluginCurrentVersionIsCorrect.*)", - // Issue: 114765 - R"(.*smoke_PSROIPoolingAverageLayoutTest/PSROIPoolingLayerCPUTest.*bf16.*)", - R"(.*smoke_PSROIPoolingBilinearLayoutTest/PSROIPoolingLayerCPUTest.*bf16.*)", - // Issue: 120222 - R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=1_axis=3_.*_modelType=f16_trgDev=CPU.*)", - R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=7_axis=3_.*_modelType=f16_trgDev=CPU.*)", - R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=1_axis=1_.*_modelType=f16_trgDev=CPU.*)", - R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=7_axis=1_.*_modelType=f16_trgDev=CPU.*)", - R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=18_.*_modelType=f16_trgDev=CPU.*)", - R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=21_.*_sort=value_modelType=f16_trgDev=CPU.*)", - // Issue: 121812 - 
R"(.*ConvertCPULayerTest.*outFmts=(nhwc|nChw8c|nChw16c).*)", - // Issue: 123320 - // Input precision bf16 is converted to fp32 by logic in core_config.cpp during ngraph reference test. - R"(.*FakeConvertLayerTest.*dataPrecision=bf16.*)", - // Need to generate sequence exactly in the i64 data type. Enable in scope of i64 enabling. - R"(.*RandomUniformLayerTestCPU.*OutPrc=i64.*)", - // Issue: 123815 (Tests are sensintive to available thread count on testing machines) - R"(.*smoke_Snippets_MHA_.?D_SplitDimensionM_static.*)", - // Issue: 126095 - R"(^smoke_Multinomial(?:Static|Dynamic)+(?:Log)*.*seed_g=0_seed_o=0.*device=CPU.*)", - // Issue: 129931 - R"(smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[.*,3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ .*18.7 \}_output_high\{ 18.8 \}_precision=\}_fq_on_weights=\{_255_\[6,1,1,1\]_\{ .*1.52806e.*39, .*0.2, .*0.3, .*0.3, .*0.2, .*0.1 \}_\{ 1.52806e.*39, 0.2, 0.3, 0.3, 0.2, 0.1 \}\})", - // TODO: 141068 - R"(smoke_Snippets_FQDecomposition.*netPRC=f16_D=CPU.*)", - // Issue: 160734 - R"(.*smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[(1|4),3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ -18.7 \}_output_high\{ 18.8 \}_precision=\}_fq_on_weights=\{_255_\[1\]_\{ -18.7 \}_\{ 18.7 \}\}.*)", - // Issue: 160735 - R"(.*smoke_TestsDFT_4d/DFTLayerTest.Inference/.*TS=\{\((10.4.20.32.2|1.120.128.1.2)\)\}.*Precision=f32.*signal_size=\(\).*)", - // by calc abs_threshold with expected value - R"(.*smoke_.*_4D.*/GatherLayerTestCPU.CompareWithRefs.*INFERENCE_PRECISION_HINT=bf16.*)", - R"(.*smoke.*Mvn6LayerTest.Inference/.*TS.*1.10.5.7.8.*_ModelType=f32.*_Ax=\((2.3.4|-3.-2.-1)\).*)", - R"(.*smoke.*Mvn6LayerTest.Inference/.*TS.*2.55.*_ModelType=f32.*)", - R"(.*smoke_ConvWithZeroPointFuse/ConvWithZeroPointFuseSubgraphTest.CompareWithRefs.*)", - 
R"(.*smoke_FakeQuantize/FakeQuantizeLayerTest.Inference.*TS=.*3.49.7.5.6.*LEVELS=(255|256).*netPRC=f32.*)", - R"(.*smoke_FakeQuantize/FakeQuantizeLayerTest.Inference.*TS=.*(2.16.4.3.18|3.10.2.5.6|3.49.5.6|2.16.3.18|2.8.5.18|3.10.5.6|2.8.1.5.18).*LEVELS=255.*netPRC=f32.*)", - R"(.*smoke_FakeQuantize.*/FakeQuantizeLayerTest.Inference.*TS=.*3.4.2.5.*LEVELS=255.*)", - R"(.*smoke_FakeQuantizePerChannel.*/FakeQuantizeLayerTest.Inference.*TS=.*11.10.22.19.*LEVELS=(255|256).*netPRC=f32.*)", - R"(.*smoke_MVN_5D/Mvn6LayerTest.Inference.*TS=.*3.4.2.5.*LEVELS=255.*netPRC=f16.*)", - R"(.*smoke_static/ConvertFqRnnToQuantizedRnn.*2.1.5.*2.1.1.*2.1.1.*)", - R"(.*smoke_InterpolateBicubicPillow_Layout_Test/InterpolateLayerCPUTest.CompareWithRefs/ShapeCalcMode=sizes_IS=\[?.2..20.?.?\]_TS.*1.17.4.4.*2.3.10.12.*1.17.4.4.*Sizes.*4.4.*10.20.*10.4.*PARAMETER.*0.0.0.0.*0.0.1.1.*2.3.*)", - R"(.*smoke_LoopForCommon/LoopLayerCPUTest.CompareWithRefs/.*_netType=bf16.*)", - R"(.*smoke_FuseScaleShiftAndFakeQuantize/FuseScaleShiftAndFakeQuantizeTest.CompareWithRefs/.*Scale=\[ 30 \]_Shift=\[ 17 \]_Intervals=\[ -1 \],\[ 5 \],\[ -5 \],\[ 1 \].*)", - R"(.*smoke_QuantizedConvolutionBatchNorm.*/QuantizedConvolutionBatchNorm.CompareWithRefs/conv_type=convolution_quantize.*)", - R"(.*smoke_Param/RandomUniformLayerTestCPU.CompareWithRefs/IS=\{3\}_OS=\[4,3,210\]_Min=-50_Max=0_ShapePrc=.*_OutPrc=f32_GlobalSeed=8_OperationalSeed=(0|3).*)", - R"(.*smoke_Param/RandomUniformLayerTestCPU.CompareWithRefs/IS=\{3\}_OS=\[4,3,210\]_Min=-50_Max=50_ShapePrc=.*_OutPrc=f32_GlobalSeed=8_OperationalSeed=(5|3|0).*)", - R"(.*smoke_Param/RandomUniformLayerTestCPU.CompareWithRefs/IS=\{3\}_OS=\[4,3,210\]_Min=-50_Max=50_ShapePrc=.*_OutPrc=f32_GlobalSeed=0_OperationalSeed=5.*)", - R"(.*smoke_Param/RandomUniformLayerTestCPU.CompareWithRefs/IS=\{1\}_OS=\[500\]_Min=-50_Max=50_ShapePrc=.*_OutPrc=f32_GlobalSeed=0_OperationalSeed=5.*)", - R"(.*smoke.*/RNNCellCPUTest.CompareWithRefs.*activations=.*relu.*INFERENCE_PRECISION_HINT=bf16.*)", 
- R"(.*smoke_InterpolateBicubicPillow_Layout_Test/InterpolateLayerCPUTest.CompareWithRefs/ShapeCalcMode=sizes_IS=\[\?.2..20.\?.\?\]_TS=\(1.17.4.4\)_\(2.3.10.12\)_\(1.17.4.4\)_Sizes=\(4.4\)_\(10.20\)_\(10.4\)_PARAMETER.*P.*.1.1.*.*)", - R"(.*smoke_InterpolateBicubicPillow_Layout_Test/InterpolateLayerCPUTest.CompareWithRefs/ShapeCalcMode=scales_IS=\[\?.2..20.\?.\?\]_TS=\(1.11.4.4\)_\(2.7.6.5\)_\(1.11.4.4\)_Scales=\(1.25.0.75\)_CONSTANT_.*PB=\(0.0.0.0\)_PE=\(0.0.1.1\).*)", - R"(.*smoke_Conv_Sum_Broadcast_BF16/ConvSumInPlaceTest.CompareWithRefs.*INFERENCE_PRECISION_HINT=bf16.*)", - R"(.*smoke_Interpolate_Basic_Down_Sample_Tail/InterpolateLayerTest.Inference.*InterpolateMode=cubic_ShapeCalcMode=scales_CoordinateTransformMode=(pytorch_half_pixel|half_pixel).*netType=f32.*)", - R"(.*smoke_basic/PermConvPermConcat.CompareWithRefs/IS=\(1.1.8.16\)_KS=\(1.5\)_OC=.*_ET=f32_targetDevice=CPU.*)", - R"(.*smoke_basic/PermConvPermConcat.CompareWithRefs/IS=\(1.1.7.32\)_KS=\(1.3\)_OC=.*_ET=f32_targetDevice=CPU.*)", - R"(.*smoke_BasicNegative/RangeAddSubgraphTest.*Step=-0.1_ET=f16.*)", - R"(.*smoke_ConvertRangeSubgraphCPUTest/ConvertRangeSubgraphCPUTest.CompareWithRefs.*bf16.*)", - R"(.*smoke_FQLayerDQBias_4D.*FQLayerDQBias.smoke_CompareWithRefs.*_TS=\(\(1.3.64.64\)_\)_layer_type=MatMul.*)", - R"(.*smoke_Snippets_ConvMul/ConvEltwise.CompareWithRefImpl/IS\[0\]=\(1.10.16.16\)_IS\[1\]=\(1.10.16.16\)_Op=Multiply_#N=6_#S=1.*)", - R"(.*smoke_InterpolateBicubicPillow_LayoutAlign_Test/InterpolateLayerCPUTest.CompareWithRefs/.*Sizes=\(6.8\).*)", - R"(.*smoke_RDFT_CPU_1D/RDFTTestCPU.CompareWithRefs/prec=f32_.*TS0=\(\((106|246|245|510|1022)\)\).*)", - R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_.*_TS0=\(\((1022.64|24.39|126.32|510.64)\)\)_constAxes=true_axes=\(\(0.1\)\)_isInverse=false_primitive=jit_avx2.*)", - R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_.*_TS0=\(\((1022.64|126.32|510.64)\)\)_constAxes=true_axes=\(\(0\)\)_isInverse=false_primitive=jit_avx2.*)", - 
R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_.*_isInverse=false_primitive=jit_avx512.*)", - R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_.*_TS0=\(\((20.126|20.510|20.1022)\)\)_constAxes=true_axes=\(\(1\)\)_isInverse=false_primitive=jit_avx512.*)", - R"(.*smoke_TestsDFT_3d/DFTLayerTest.Inference/.*TS=.*1.120.128.1.2.*_Precision=f32.*signal_size=\(\).*)", - R"(.*smoke_TestsDFT_2d/DFTLayerTest.Inference.*TS=\{\(1.120.128.1.2\)\}_Precision=f32_Axes=\(2.1\)_signal_size=\(\)_Inverse=0.*)", - R"(.*smoke_FakeQuantizeLayerCPUTest_4D_(jit|ref)/FakeQuantizeLayerCPUTest.CompareWithRefs/IS=\[\?.\?.\?.\?\]_TS=\(\(4.16.6.7\)\).*inPrec=f32.*LEVELS=255.*)", - R"(.*smoke_FakeQuantizeLayerCPUTest_5D_(jit|ref)/FakeQuantizeLayerCPUTest.CompareWithRefs/IS=\[\?.\?.\?.\?.\?\]_TS=\(\((4|3).16.6.7.8\)\).*inPrec=f32.*LEVELS=255.*)", - R"(.*smoke_FakeQuantizeLayerCPUTest_Decompos/FakeQuantizeLayerCPUTest.CompareWithRefs/IS.*\(\((4.5.6.7|1.1.6.7|1.1.6.1|1.5.1.6)\)\)_inPrec=f32.*LEVELS=255.*)", - R"(.*smoke_CompareWithRefs/LRNLayerCPUTest.CompareWithRefs/f32_IS.*axes=\(1.2.3\).*)", - R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(9.16.32.126\)\)_constAxes=true_axes=\(\((0.1.2.3|3.1|_2._1)\)\).*isInverse=false.*)", - R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(1.192.36.64\)\)_constAxes=true_axes=\(\((0.1.2.3|3.2|_2._1|0.1|1)\)\).*isInverse=false.*)", - R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(1.192.36.64\)\)_constAxes=true_axes=\(\((0|_2._1|0.1.2.3)\)_.*isInverse=false.*)", - R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=.*_TS0=\(\(1.192.36.64\)_.*constAxes=false.*isInverse=false.*)", - R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(46.10.128.65\)\)_constAxes=true_axes=\(\((1.0|0.1.2.3|3.1|_2._1)\)\).*isInverse=false.*primitive=jit_avx512.*)", - 
R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(10.46.128.65\)\)_constAxes=true_axes=\(\((0.1|1.2)\)\).*isInverse=false.*primitive=jit_avx512.*)", - R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\?.192.36.64\]_.*_axes=\(\((0|_2._1|_1|1)\)_.*isInverse=false.*)", - R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\((1.120.64.64|1.120.96.96|\?.\?.\?.\?|1.192.\?.\?|1..2.\?.\?.1..100)\)\).*isInverse=false.*)", - R"(.*smoke_RDFT_2d/RDFTLayerTest.Inference/IS=\(100.16\)_modelType=f32_Axes=\((0.1|_2._1|1.0)\)_SignalSize=\(\).*)", - // Issue: 138520 - R"(.*smoke_MM_Static/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\]_\[\]_TS=\(\(55.12\)\)_\(\(12.55\)\)_.*\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", - R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\]_\[\]_TS=\(\(1.32.120\)\)_\(\(120.5\)\).*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", - R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\]_\[\]_TS=\(\(1.32.120\)\)_\(\(120.50\)\).*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", - R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\]_\[\]_TS=\(\(1.1.120\)\)_\(\(120.120\)\).*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", - R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\]_\[\]_TS=\(\(3.1.120\)\)_\(\(120.120\)\).*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", - R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[.*\]_\[.*\]_TS=\(\(1.5.32\)_\(1.5.32\)\).*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", - 
R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[..60...60...60\]_\[14.10\]_TS=\(\(1.3.14\)_\(1.7.14\)\)_\(\(14.10\)_\(14.10\)\)_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", - R"(.*nightly_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\]_\[\]_TS=\(\(1.32.120\)\)_\(\(120.5.*\)\)_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", - R"(.*nightly_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\?.\?.50\]_\[50.7\]_TS=\(\(1.2.50\)_\(1.10.50\)_\(1.2.50\)_\(2.2.50\)\)_\(\(50.7\)_\(50.7\)_\(50.7\)_\(50.7\)\)_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", - R"(.*smoke_MM_Dynamic_Fusing/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\?.\?\]_\[\?.33\]_TS=\(\(16.12\)_\(33.7\)_\(16.12\)\)_\(\(12.33\)_\(7.33\)_\(12.33\)\)_transpose_a=0_transpose_b=0_secondaryInputType=PARAMETER_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", - R"(.*(nightly|smoke)_MM_Brgemm_Static/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\]_\[\]_TS=\(\(55.12\)\)_\(\(12.55\)\)_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=brgemm_avx512.*)", - R"(.*smoke_MM_Brgemm_Dynamic_Fusing/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\?.\?\]_\[\?.33\]_TS=\(\(16.12\)_\(33.7\)_\(16.12\)\)_\(\(12.33\)_\(7.33\)_\(12.33\)\)_transpose_a=0_transpose_b=0_secondaryInputType=PARAMETER_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPUconfig=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=brgemm_avx512.*)", - // Issue: 140389 - R"(.*FQLayerDQBias.smoke_CompareWithRefs.*)", - R"(.*smoke_matmulBrgemmInt8/MatmulBrgemmInt8Test.CompareWithRefs.*MatMul.*InputType=i8_OutputType=i8.*)", - R"(.*smoke_Snippets_MHAWOTransposeOnInputs_4D/MHAWOTransposeOnInputs.CompareWithRefImpl.*)", - // Issue: 142448 - 
R"(smoke_Snippets_BroadcastSelect_Dynamic.*)", - // Issue: 141705 - R"(.*smoke_LoopForCommon/LoopLayerCPUTest.CompareWithRefs/.*trip_count=5_exec_cond=1_netType=i8.*)", - R"(.*smoke_LoopForCommon/LoopLayerCPUTest.CompareWithRefs/Input0_IS=\[\?.1.\?\]_TS=\(10.1.10\)_\(1.1.1\)_\(1.1.1\)_\(5.1.3\)_Input1_IS=\[\?.\?.\?\]_TS=.*_Input2_IS=\[\?.1.\?\]_.*_types=0_0_1_trip_count_type=.*_trip_count=(1|5)_exec_cond=1_netType=i8.*)", - R"(.*smoke_LoopForCommon/LoopLayerCPUTest.CompareWithRefs/Input0_IS=\[1..10.1.1..10\]_.*_Input1_IS=\[1..8.1.1..8\]_.*_Input2_IS=\[1..10.\?.1..10\]_TS=.*_types=0_0_1_trip_count_type=.*_trip_count=(1|5)_exec_cond=1_netType=i8.*)", - // Issue: 142997 - R"(.*smoke_TestsROIAlign.*)", - // Issue: 136881 - R"(.*smoke_CompareWithRefs_4D_BitwiseShift_overflow_i32_cast.*_eltwise_op_type=BitwiseLeft.*_model_type=.*(i16|u16).*)", - // Issue: 163083 - // Issue: 163116 - R"(.*RandomUniformLayerTestCPU.*OutPrc=bf16.*)", - // Issue: 163117 - R"(.*InterpolateCubic_Layout_Test.*)", - // Issue: 163171 - R"(.*CPUDetectionOutputDynamic3InLargeTensor.*)", - // Issue: 163168 - R"(.*UniqueLayerTestCPU.*)", - // Issue: 163175 - R"(.*GridSampleLayerTestCPU.*dataPrc=i8.*)", - R"(.*GridSampleLayerTestCPU.*dataPrc=bf16.*)", - // Issue: 163177 - R"(.*NmsRotatedOpTest.*ScoreThr=0\.4.*)", - // Issue: 163222 - R"(.*bf16.*LSTMSequenceCPUTest.*)", - // Issue: 163223 - R"(.*bf16.*AUGRUSequenceCPUTest.*)", - // Issue: 163224 - R"(.*bf16.*GRUSequenceCPUTest.*)", - // Issue: 163227 - R"(.*QuantizedModelsTests\.MaxPoolFQ.*)", - R"(.*QuantizedModelsTests\.MaxPoolQDQ.*)", - // Issue: 163268 - R"(.*QuantizedModelsTests\.ConvolutionQDQ.*)", - R"(.*QuantizedModelsTests\.ConvolutionFQ.*)", - // Issue: 163230 - R"(.*ProposalLayerTest.*)", - // Issue: 163232 - R"(.*FC_3D_BF16.*MatMulLayerCPUTest.*)", - // Issue: 163242 - R"(.*bf16.*RNNSequenceCPUTest.*)", - R"(.*WeightlessCacheAccuracy.TiWithLstmCell.*model_dtype=bf16.*)", - // Issue: 163250 - R"(.*OnnxModelWithExtensionFromDSO.*)", - // 
Issue: 163273 - // todo: define correct area - R"(.*Deconv_2D_Planar_FP16.*DeconvolutionLayerCPUTest.*)", - // Issue: 163275 - R"(.*NoReshapeAndReshapeDynamic.*CodegenGelu.*)", - // Issue: 163351 - R"(.*CoreThreadingTestsWithIter.*nightly_AsyncInfer_ShareInput.*)", - // Sporadic failings with ASAN enabled - R"(.*CoreThreadingTest.*)", - R"(.*smoke_BehaviorTest.*)", - // This transformation is disabled on CPU - R"(.*smoke_LPT.*MultiplyToGroupConvolutionTransformation.*)", - // Disabled due to sporadic failures in CI, Issue: 157267 - R"(.*smoke_CompareWithRefs_4D_Blocked_Blocked_Fusing\/EltwiseLayerCPUTest.CompareWithRefs\/IS=\(\[\]_\)_TS.*2.4.4.1.*eltwise_op_type=(Sum|Mod|SqDiff|Prod)_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f32_InType=dynamic_OutType=dynamic_trgDev=CPU_config_item=INFERENCE_PRECISION_HINT=f32_inFmts=nChw16c.nChw16c_outFmts=nChw16c_Fused=FakeQuantize\(PerChannel\).*)", - R"(.*smoke_CachingSupportCase_CPU\/CompileModelCacheTestBase.CompareWithRefImpl\/SplitConvConcatNestedInBranchNestedOut_i16_batch1_CPU.*)", - R"(.*smoke_CompareWithRefs_5D_MemOrder_Blocked_Blocked\/EltwiseLayerCPUTest.CompareWithRefs\/IS=\(\[\]_\[\]_\)_TS=.*2.17.6.5.1.*_.*1.17.1.1.4.*_\)_eltwise_op_type=Sub_secondary_input_type=CONSTANT_opType=VECTOR_model_type=bf16_InType=dynamic_OutType=dynamic_trgDev=CPU_config_item=INFERENCE_PRECISION_HINT=f32_inFmts=nCdhw16c.nCdhw16c_outFmts=nCdhw16c_enforceSnippets=0.*)", - R"(.*.*smoke_CompareWithRefs_4D_Fusing_Blocked_Blocked\/EltwiseLayerCPUTest.CompareWithRefs\/IS=\(\[\]_\)_TS=\(\(2.4.4.1\)_\)_eltwise_op_type=Mod_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f32_InType=dynamic_OutType=dynamic_trgDev=CPU_config_item=INFERENCE_PRECISION_HINT=f32_inFmts=nChw16c.nChw16c_outFmts=nChw16c_Fused=FakeQuantize\(PerChannel\).Sigmoid.FakeQuantize\(PerTensor\)_enforceSnippets=0.*)", - 
R"(.*smoke_Conv_1D_GEMM_FP32\/ConvolutionLayerCPUTest.CompareWithRefs\/IS=\[\]_TS=\(\(2.12.7\)_\)_K\(3\)_S\(2\)_PB\(1\)_PE\(0\)_D=\(1\)_O=6_AP=explicit_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPU_inFmts=ncw_outFmts=ncw_primitive=jit_gemm_Fused=Relu.*)", + // CPU does not support dynamic rank + // Issue: 66778 + std::regex(R"(.*smoke_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)"), + std::regex(R"(.*smoke_BehaviorTests.*DynamicOutputToDynamicInput.*)"), + std::regex(R"(.*smoke_BehaviorTests.*DynamicInputToDynamicOutput.*)"), + // unsupported metrics + std::regex(R"(.*OVGetMetricPropsTest.*OVGetMetricPropsTest.*(MAX_BATCH_SIZE).*)"), + // supports only '' as device id + std::regex(R"(.*OVClassQueryModelTest.*QueryModelWithDeviceID.*)"), + // Issue 67214 + std::regex(R"(smoke_PrePostProcess.*resize_and_convert_layout_i8.*)"), + // Issue: 69086 + // need to add support convert BIN -> FP32 + // if we set output precision as BIN, when we create output blob precision looks like UNSPECIFIED + std::regex(R"(.*smoke_FakeQuantizeLayerCPUTest.*bin.*)"), + // Issue: 71756 + std::regex(R"(.*GroupDeconv_2D_DW_BF16/GroupDeconvolutionLayerCPUTest.CompareWithRefs.*PRC=f32.*inFmts=nChw16c_outFmts=nChw16c_primitive=jit_avx512_dw_Fused=Multiply\(PerChannel\).Add\(PerChannel\)_PluginConf_INFERENCE_PRECISION_HINT=bf16*)"), + std::regex(R"(.*smoke_GroupDeconv_(2|3)D_Blocked_BF16.*S=(\(2\.2\)|\(2\.2\.2\))_PB=(\(0\.0\)|\(0\.0\.0\))_PE=(\(0\.0\)|\(0\.0\.0\))_D=(\(1\.1\)|\(1\.1\.1\))_.*_O=64_G=4.*)"), + // Issue: + // New API tensor tests + std::regex(R"(.*OVInferRequestCheckTensorPrecision.*type=u1.*)"), + // Issue: 77390 + std::regex(R"(.*LoopLayerCPUTest.*exec_cond=0.*)"), + std::regex(R"(.*LoopLayerCPUTest.*trip_count=0.*)"), + std::regex(R"(.*LoopForDiffShapesLayerCPUTest.*exec_cond=0.*)"), + std::regex(R"(.*LoopForDiffShapesLayerCPUTest.*trip_count=0.*)"), + std::regex(R"(.*LoopForConcatLayerCPUTest.*exec_cond=0.*)"), + 
std::regex(R"(.*LoopForConcatLayerCPUTest.*trip_count=0.*)"), + // [ INFO ] Can't compile network without cache for .. with precision .. + std::regex(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*KSOFunction.*)"), + std::regex(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*NonMaxSuppression.*)"), + std::regex(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*Nms.*)"), + // 94982. FP32->I32 conversion issue in the reference implementation. There can be some garbage in the rest of + // float values like 0.333333745. + // The kernel does not have such garbage. The diff 0.000000745 is taken into account in calculations and affects + // further type conversion. + // Reorder->GridSample->Reorder also does not work here. Potential fix is to use nearest conversion instead of + // truncation. + std::regex(R"(.*GridSampleLayerTestCPU.*(BILINEAR|BICUBIC).*(i32|i8).*)"), + std::regex(R"(.*smoke_static/GridSampleLayerTestCPU.CompareWithRefs/.*_TS=.*(1.7.5.3|2.6.3.10).*_interpMode=NEAREST_padMode=REFLECTION_alignCorners=False_dataPrc=(f32|i32)_gridPrc=f32_.*)"), + std::regex(R"(.*smoke_static/GridSampleLayerTestCPU.CompareWithRefs/.*_TS=.*5.3.2.13.*_interpMode=BICUBIC_padMode=REFLECTION_alignCorners=True_dataPrc=f32_gridPrc=f32_.*)"), + std::regex(R"(.*smoke_static/GridSampleLayerTestCPU.CompareWithRefs/.*_TS=.*2.1.6.16.*_interpMode=NEAREST_padMode=(BORDER|REFLECTION)_alignCorners=(True|False)_dataPrc=(f32|i32)_gridPrc=f32_.*)"), + std::regex(R"(.*smoke_dynamic/GridSampleLayerTestCPU.CompareWithRefs/IS=\(\[2..15.\?.\?.\?\]_\[\?.3.7.2\]\)_.*_interpMode=NEAREST_padMode=REFLECTION_alignCorners=False_dataPrc=f32_gridPrc=f32_.*)"), + std::regex(R"(.*smoke_dynamic/GridSampleLayerTestCPU.CompareWithRefs/IS=\(\[\?.\?.\?.\?\]_\[\?.\?.\?.\?\]\).*interpMode=NEAREST_padMode=REFLECTION_alignCorners=False_dataPrc=f32_gridPrc=f32_.*)"), + 
std::regex(R"(.*smoke_dynamic/GridSampleLayerTestCPU.CompareWithRefs/IS=\(\[\?.3.\?.\?\]_\[\?.\?.\?.2\]\).*interpMode=BICUBIC_padMode=REFLECTION_alignCorners=True_dataPrc=f32_gridPrc=f32_.*)"), + // AdaptiveAvgPool is converted into Reduce op for suitable parameters. CPU Reduce impl doesn't support non + // planar layout for 3D case + std::regex(R"(.*StaticAdaPoolAvg3DLayoutTest.*OS=\(1\).*_inFmts=(nwc|nCw16c|nCw8c).*)"), + // Issue: 111404 + std::regex(R"(.*smoke_set1/GatherElementsCPUTest.*)"), + // Issue: 111406 + std::regex(R"(.*smoke_InterpolateLinearOnnx_Layout_Test/InterpolateLayerCPUTest.*)"), + std::regex(R"(.*smoke_InterpolateLinear_Layout_Test/InterpolateLayerCPUTest.*)"), + std::regex(R"(.*smoke_InterpolateCubic_Layout_Test/InterpolateLayerCPUTest.*)"), + // Issue: 111412 + std::regex(R"(.*smoke_Proposal_(Static|Dynamic)_Test_Case1/ProposalLayerCPUTest.*)"), + // Issue: 111418 + std::regex(R"(.*smoke_Snippets_ConvertStub/ConvertStub\.CompareWithRefImpl/IS.*_OT=\(bf16\)_#N=2_#S=2_targetDevice=CPU.*)"), + std::regex(R"(.*smoke_Snippets_Convert/Convert\.CompareWithRefImpl/IS.*_IT=\((f32|f16)\)_OT=\(u8\)_#N=1_#S=1_targetDevice=CPU.*)"), + std::regex(R"(.*smoke_Snippets_ConvertManyOnInputs/ConvertManyOnInputs\.CompareWithRefImpl/IS.*_IT=\(f32\.u8\)_OT=\(\)_#N=1_#S=1_targetDevice=CPU.*)"), + // New plugin API doesn't support changes of pre-processing + std::regex(R"(.*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)"), + std::regex(R"(.*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)"), + // Plugin version was changed to ov::Version + std::regex(R"(.*VersionTest.pluginCurrentVersionIsCorrect.*)"), + // Issue: 114765 + std::regex(R"(.*smoke_PSROIPoolingAverageLayoutTest/PSROIPoolingLayerCPUTest.*bf16.*)"), + std::regex(R"(.*smoke_PSROIPoolingBilinearLayoutTest/PSROIPoolingLayerCPUTest.*bf16.*)"), + // Issue: 120222 + std::regex(R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=1_axis=3_.*_modelType=f16_trgDev=CPU.*)"), + 
std::regex(R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=7_axis=3_.*_modelType=f16_trgDev=CPU.*)"), + std::regex(R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=1_axis=1_.*_modelType=f16_trgDev=CPU.*)"), + std::regex(R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=7_axis=1_.*_modelType=f16_trgDev=CPU.*)"), + std::regex(R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=18_.*_modelType=f16_trgDev=CPU.*)"), + std::regex(R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=21_.*_sort=value_modelType=f16_trgDev=CPU.*)"), + // Issue: 121812 + std::regex(R"(.*ConvertCPULayerTest.*outFmts=(nhwc|nChw8c|nChw16c).*)"), + // Issue: 123320 + // Input precision bf16 is converted to fp32 by logic in core_config.cpp during ngraph reference test. + std::regex(R"(.*FakeConvertLayerTest.*dataPrecision=bf16.*)"), + // Need to generate sequence exactly in the i64 data type. Enable in scope of i64 enabling. + std::regex(R"(.*RandomUniformLayerTestCPU.*OutPrc=i64.*)"), + // Issue: 123815 (Tests are sensitive to available thread count on testing machines) + std::regex(R"(.*smoke_Snippets_MHA_.?D_SplitDimensionM_static.*)"), + // Issue: 126095 + std::regex(R"(^smoke_Multinomial(?:Static|Dynamic)+(?:Log)*.*seed_g=0_seed_o=0.*device=CPU.*)"), + // Issue: 129931 + std::regex(R"(smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[.*,3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ .*18.7 \}_output_high\{ 18.8 \}_precision=\}_fq_on_weights=\{_255_\[6,1,1,1\]_\{ .*1.52806e.*39, .*0.2, .*0.3, .*0.3, .*0.2, .*0.1 \}_\{ 1.52806e.*39, 0.2, 0.3, 0.3, 0.2, 0.1 \}\})"), + // TODO: 141068 + std::regex(R"(smoke_Snippets_FQDecomposition.*netPRC=f16_D=CPU.*)"), + // Issue: 160734 + std::regex(R"(.*smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[(1|4),3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ -18.7 \}_output_high\{ 18.8 \}_precision=\}_fq_on_weights=\{_255_\[1\]_\{ -18.7
\}_\{ 18.7 \}\}.*)"), + // Issue: 160735 + std::regex(R"(.*smoke_TestsDFT_4d/DFTLayerTest.Inference/.*TS=\{\((10.4.20.32.2|1.120.128.1.2)\)\}.*Precision=f32.*signal_size=\(\).*)"), + // by calc abs_threshold with expected value + std::regex(R"(.*smoke_.*_4D.*/GatherLayerTestCPU.CompareWithRefs.*INFERENCE_PRECISION_HINT=bf16.*)"), + std::regex(R"(.*smoke.*Mvn6LayerTest.Inference/.*TS.*1.10.5.7.8.*_ModelType=f32.*_Ax=\((2.3.4|-3.-2.-1)\).*)"), + std::regex(R"(.*smoke.*Mvn6LayerTest.Inference/.*TS.*2.55.*_ModelType=f32.*)"), + std::regex(R"(.*smoke_ConvWithZeroPointFuse/ConvWithZeroPointFuseSubgraphTest.CompareWithRefs.*)"), + std::regex(R"(.*smoke_FakeQuantize/FakeQuantizeLayerTest.Inference.*TS=.*3.49.7.5.6.*LEVELS=(255|256).*netPRC=f32.*)"), + std::regex(R"(.*smoke_FakeQuantize/FakeQuantizeLayerTest.Inference.*TS=.*(2.16.4.3.18|3.10.2.5.6|3.49.5.6|2.16.3.18|2.8.5.18|3.10.5.6|2.8.1.5.18).*LEVELS=255.*netPRC=f32.*)"), + std::regex(R"(.*smoke_FakeQuantize.*/FakeQuantizeLayerTest.Inference.*TS=.*3.4.2.5.*LEVELS=255.*)"), + std::regex(R"(.*smoke_FakeQuantizePerChannel.*/FakeQuantizeLayerTest.Inference.*TS=.*11.10.22.19.*LEVELS=(255|256).*netPRC=f32.*)"), + std::regex(R"(.*smoke_MVN_5D/Mvn6LayerTest.Inference.*TS=.*3.4.2.5.*LEVELS=255.*netPRC=f16.*)"), + std::regex(R"(.*smoke_static/ConvertFqRnnToQuantizedRnn.*2.1.5.*2.1.1.*2.1.1.*)"), + std::regex(R"(.*smoke_InterpolateBicubicPillow_Layout_Test/InterpolateLayerCPUTest.CompareWithRefs/ShapeCalcMode=sizes_IS=\[?.2..20.?.?\]_TS.*1.17.4.4.*2.3.10.12.*1.17.4.4.*Sizes.*4.4.*10.20.*10.4.*PARAMETER.*0.0.0.0.*0.0.1.1.*2.3.*)"), + std::regex(R"(.*smoke_LoopForCommon/LoopLayerCPUTest.CompareWithRefs/.*_netType=bf16.*)"), + std::regex(R"(.*smoke_FuseScaleShiftAndFakeQuantize/FuseScaleShiftAndFakeQuantizeTest.CompareWithRefs/.*Scale=\[ 30 \]_Shift=\[ 17 \]_Intervals=\[ -1 \],\[ 5 \],\[ -5 \],\[ 1 \].*)"), + 
std::regex(R"(.*smoke_QuantizedConvolutionBatchNorm.*/QuantizedConvolutionBatchNorm.CompareWithRefs/conv_type=convolution_quantize.*)"), + std::regex(R"(.*smoke_Param/RandomUniformLayerTestCPU.CompareWithRefs/IS=\{3\}_OS=\[4,3,210\]_Min=-50_Max=0_ShapePrc=.*_OutPrc=f32_GlobalSeed=8_OperationalSeed=(0|3).*)"), + std::regex(R"(.*smoke_Param/RandomUniformLayerTestCPU.CompareWithRefs/IS=\{3\}_OS=\[4,3,210\]_Min=-50_Max=50_ShapePrc=.*_OutPrc=f32_GlobalSeed=8_OperationalSeed=(5|3|0).*)"), + std::regex(R"(.*smoke_Param/RandomUniformLayerTestCPU.CompareWithRefs/IS=\{3\}_OS=\[4,3,210\]_Min=-50_Max=50_ShapePrc=.*_OutPrc=f32_GlobalSeed=0_OperationalSeed=5.*)"), + std::regex(R"(.*smoke_Param/RandomUniformLayerTestCPU.CompareWithRefs/IS=\{1\}_OS=\[500\]_Min=-50_Max=50_ShapePrc=.*_OutPrc=f32_GlobalSeed=0_OperationalSeed=5.*)"), + std::regex(R"(.*smoke.*/RNNCellCPUTest.CompareWithRefs.*activations=.*relu.*INFERENCE_PRECISION_HINT=bf16.*)"), + std::regex(R"(.*smoke_InterpolateBicubicPillow_Layout_Test/InterpolateLayerCPUTest.CompareWithRefs/ShapeCalcMode=sizes_IS=\[\?.2..20.\?.\?\]_TS=\(1.17.4.4\)_\(2.3.10.12\)_\(1.17.4.4\)_Sizes=\(4.4\)_\(10.20\)_\(10.4\)_PARAMETER.*P.*.1.1.*.*)"), + std::regex(R"(.*smoke_InterpolateBicubicPillow_Layout_Test/InterpolateLayerCPUTest.CompareWithRefs/ShapeCalcMode=scales_IS=\[\?.2..20.\?.\?\]_TS=\(1.11.4.4\)_\(2.7.6.5\)_\(1.11.4.4\)_Scales=\(1.25.0.75\)_CONSTANT_.*PB=\(0.0.0.0\)_PE=\(0.0.1.1\).*)"), + std::regex(R"(.*smoke_Conv_Sum_Broadcast_BF16/ConvSumInPlaceTest.CompareWithRefs.*INFERENCE_PRECISION_HINT=bf16.*)"), + std::regex(R"(.*smoke_Interpolate_Basic_Down_Sample_Tail/InterpolateLayerTest.Inference.*InterpolateMode=cubic_ShapeCalcMode=scales_CoordinateTransformMode=(pytorch_half_pixel|half_pixel).*netType=f32.*)"), + std::regex(R"(.*smoke_basic/PermConvPermConcat.CompareWithRefs/IS=\(1.1.8.16\)_KS=\(1.5\)_OC=.*_ET=f32_targetDevice=CPU.*)"), + 
std::regex(R"(.*smoke_basic/PermConvPermConcat.CompareWithRefs/IS=\(1.1.7.32\)_KS=\(1.3\)_OC=.*_ET=f32_targetDevice=CPU.*)"), + std::regex(R"(.*smoke_BasicNegative/RangeAddSubgraphTest.*Step=-0.1_ET=f16.*)"), + std::regex(R"(.*smoke_ConvertRangeSubgraphCPUTest/ConvertRangeSubgraphCPUTest.CompareWithRefs.*bf16.*)"), + std::regex(R"(.*smoke_FQLayerDQBias_4D.*FQLayerDQBias.smoke_CompareWithRefs.*_TS=\(\(1.3.64.64\)_\)_layer_type=MatMul.*)"), + std::regex(R"(.*smoke_Snippets_ConvMul/ConvEltwise.CompareWithRefImpl/IS\[0\]=\(1.10.16.16\)_IS\[1\]=\(1.10.16.16\)_Op=Multiply_#N=6_#S=1.*)"), + std::regex(R"(.*smoke_InterpolateBicubicPillow_LayoutAlign_Test/InterpolateLayerCPUTest.CompareWithRefs/.*Sizes=\(6.8\).*)"), + std::regex(R"(.*smoke_RDFT_CPU_1D/RDFTTestCPU.CompareWithRefs/prec=f32_.*TS0=\(\((106|246|245|510|1022)\)\).*)"), + std::regex(R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_.*_TS0=\(\((1022.64|24.39|126.32|510.64)\)\)_constAxes=true_axes=\(\(0.1\)\)_isInverse=false_primitive=jit_avx2.*)"), + std::regex(R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_.*_TS0=\(\((1022.64|126.32|510.64)\)\)_constAxes=true_axes=\(\(0\)\)_isInverse=false_primitive=jit_avx2.*)"), + std::regex(R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_.*_isInverse=false_primitive=jit_avx512.*)"), + std::regex(R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_.*_TS0=\(\((20.126|20.510|20.1022)\)\)_constAxes=true_axes=\(\(1\)\)_isInverse=false_primitive=jit_avx512.*)"), + std::regex(R"(.*smoke_TestsDFT_3d/DFTLayerTest.Inference/.*TS=.*1.120.128.1.2.*_Precision=f32.*signal_size=\(\).*)"), + std::regex(R"(.*smoke_TestsDFT_2d/DFTLayerTest.Inference.*TS=\{\(1.120.128.1.2\)\}_Precision=f32_Axes=\(2.1\)_signal_size=\(\)_Inverse=0.*)"), + std::regex(R"(.*smoke_FakeQuantizeLayerCPUTest_4D_(jit|ref)/FakeQuantizeLayerCPUTest.CompareWithRefs/IS=\[\?.\?.\?.\?\]_TS=\(\(4.16.6.7\)\).*inPrec=f32.*LEVELS=255.*)"), + 
std::regex(R"(.*smoke_FakeQuantizeLayerCPUTest_5D_(jit|ref)/FakeQuantizeLayerCPUTest.CompareWithRefs/IS=\[\?.\?.\?.\?.\?\]_TS=\(\((4|3).16.6.7.8\)\).*inPrec=f32.*LEVELS=255.*)"), + std::regex(R"(.*smoke_FakeQuantizeLayerCPUTest_Decompos/FakeQuantizeLayerCPUTest.CompareWithRefs/IS.*\(\((4.5.6.7|1.1.6.7|1.1.6.1|1.5.1.6)\)\)_inPrec=f32.*LEVELS=255.*)"), + std::regex(R"(.*smoke_CompareWithRefs/LRNLayerCPUTest.CompareWithRefs/f32_IS.*axes=\(1.2.3\).*)"), + std::regex(R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(9.16.32.126\)\)_constAxes=true_axes=\(\((0.1.2.3|3.1|_2._1)\)\).*isInverse=false.*)"), + std::regex(R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(1.192.36.64\)\)_constAxes=true_axes=\(\((0.1.2.3|3.2|_2._1|0.1|1)\)\).*isInverse=false.*)"), + std::regex(R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(1.192.36.64\)\)_constAxes=true_axes=\(\((0|_2._1|0.1.2.3)\)_.*isInverse=false.*)"), + std::regex(R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=.*_TS0=\(\(1.192.36.64\)_.*constAxes=false.*isInverse=false.*)"), + std::regex(R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(46.10.128.65\)\)_constAxes=true_axes=\(\((1.0|0.1.2.3|3.1|_2._1)\)\).*isInverse=false.*primitive=jit_avx512.*)"), + std::regex(R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(10.46.128.65\)\)_constAxes=true_axes=\(\((0.1|1.2)\)\).*isInverse=false.*primitive=jit_avx512.*)"), + std::regex(R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\?.192.36.64\]_.*_axes=\(\((0|_2._1|_1|1)\)_.*isInverse=false.*)"), + std::regex(R"(.*smoke_RDFT_CPU_4D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\((1.120.64.64|1.120.96.96|\?.\?.\?.\?|1.192.\?.\?|1..2.\?.\?.1..100)\)\).*isInverse=false.*)"), + std::regex(R"(.*smoke_RDFT_2d/RDFTLayerTest.Inference/IS=\(100.16\)_modelType=f32_Axes=\((0.1|_2._1|1.0)\)_SignalSize=\(\).*)"), + // Issue: 
138520 + std::regex(R"(.*smoke_MM_Static/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\]_\[\]_TS=\(\(55.12\)\)_\(\(12.55\)\)_.*\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)"), + std::regex(R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\]_\[\]_TS=\(\(1.32.120\)\)_\(\(120.5\)\).*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)"), + std::regex(R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\]_\[\]_TS=\(\(1.32.120\)\)_\(\(120.50\)\).*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)"), + std::regex(R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\]_\[\]_TS=\(\(1.1.120\)\)_\(\(120.120\)\).*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)"), + std::regex(R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\]_\[\]_TS=\(\(3.1.120\)\)_\(\(120.120\)\).*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)"), + std::regex(R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[.*\]_\[.*\]_TS=\(\(1.5.32\)_\(1.5.32\)\).*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)"), + std::regex(R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[..60...60...60\]_\[14.10\]_TS=\(\(1.3.14\)_\(1.7.14\)\)_\(\(14.10\)_\(14.10\)\)_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)"), + std::regex(R"(.*nightly_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\]_\[\]_TS=\(\(1.32.120\)\)_\(\(120.5.*\)\)_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)"), + 
std::regex(R"(.*nightly_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\?.\?.50\]_\[50.7\]_TS=\(\(1.2.50\)_\(1.10.50\)_\(1.2.50\)_\(2.2.50\)\)_\(\(50.7\)_\(50.7\)_\(50.7\)_\(50.7\)\)_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)"), + std::regex(R"(.*smoke_MM_Dynamic_Fusing/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\?.\?\]_\[\?.33\]_TS=\(\(16.12\)_\(33.7\)_\(16.12\)\)_\(\(12.33\)_\(7.33\)_\(12.33\)\)_transpose_a=0_transpose_b=0_secondaryInputType=PARAMETER_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)"), + std::regex(R"(.*(nightly|smoke)_MM_Brgemm_Static/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\]_\[\]_TS=\(\(55.12\)\)_\(\(12.55\)\)_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=brgemm_avx512.*)"), + std::regex(R"(.*smoke_MM_Brgemm_Dynamic_Fusing/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\?.\?\]_\[\?.33\]_TS=\(\(16.12\)_\(33.7\)_\(16.12\)\)_\(\(12.33\)_\(7.33\)_\(12.33\)\)_transpose_a=0_transpose_b=0_secondaryInputType=PARAMETER_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPUconfig=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=brgemm_avx512.*)"), + // Issue: 140389 + std::regex(R"(.*FQLayerDQBias.smoke_CompareWithRefs.*)"), + std::regex(R"(.*smoke_matmulBrgemmInt8/MatmulBrgemmInt8Test.CompareWithRefs.*MatMul.*InputType=i8_OutputType=i8.*)"), + std::regex(R"(.*smoke_Snippets_MHAWOTransposeOnInputs_4D/MHAWOTransposeOnInputs.CompareWithRefImpl.*)"), + // Issue: 142448 + std::regex(R"(smoke_Snippets_BroadcastSelect_Dynamic.*)"), + // Issue: 141705 + std::regex(R"(.*smoke_LoopForCommon/LoopLayerCPUTest.CompareWithRefs/.*trip_count=5_exec_cond=1_netType=i8.*)"), + 
std::regex(R"(.*smoke_LoopForCommon/LoopLayerCPUTest.CompareWithRefs/Input0_IS=\[\?.1.\?\]_TS=\(10.1.10\)_\(1.1.1\)_\(1.1.1\)_\(5.1.3\)_Input1_IS=\[\?.\?.\?\]_TS=.*_Input2_IS=\[\?.1.\?\]_.*_types=0_0_1_trip_count_type=.*_trip_count=(1|5)_exec_cond=1_netType=i8.*)"), + std::regex(R"(.*smoke_LoopForCommon/LoopLayerCPUTest.CompareWithRefs/Input0_IS=\[1..10.1.1..10\]_.*_Input1_IS=\[1..8.1.1..8\]_.*_Input2_IS=\[1..10.\?.1..10\]_TS=.*_types=0_0_1_trip_count_type=.*_trip_count=(1|5)_exec_cond=1_netType=i8.*)"), + // Issue: 142997 + std::regex(R"(.*smoke_TestsROIAlign.*)"), + // Issue: 136881 + std::regex(R"(.*smoke_CompareWithRefs_4D_BitwiseShift_overflow_i32_cast.*_eltwise_op_type=BitwiseLeft.*_model_type=.*(i16|u16).*)"), + // Issue: 163083 + // Issue: 163116 + std::regex(R"(.*RandomUniformLayerTestCPU.*OutPrc=bf16.*)"), + // Issue: 163117 + std::regex(R"(.*InterpolateCubic_Layout_Test.*)"), + // Issue: 163171 + std::regex(R"(.*CPUDetectionOutputDynamic3InLargeTensor.*)"), + // Issue: 163168 + std::regex(R"(.*UniqueLayerTestCPU.*)"), + // Issue: 163175 + std::regex(R"(.*GridSampleLayerTestCPU.*dataPrc=i8.*)"), + std::regex(R"(.*GridSampleLayerTestCPU.*dataPrc=bf16.*)"), + // Issue: 163177 + std::regex(R"(.*NmsRotatedOpTest.*ScoreThr=0\.4.*)"), + // Issue: 163222 + std::regex(R"(.*bf16.*LSTMSequenceCPUTest.*)"), + // Issue: 163223 + std::regex(R"(.*bf16.*AUGRUSequenceCPUTest.*)"), + // Issue: 163224 + std::regex(R"(.*bf16.*GRUSequenceCPUTest.*)"), + // Issue: 163227 + std::regex(R"(.*QuantizedModelsTests\.MaxPoolFQ.*)"), + std::regex(R"(.*QuantizedModelsTests\.MaxPoolQDQ.*)"), + // Issue: 163268 + std::regex(R"(.*QuantizedModelsTests\.ConvolutionQDQ.*)"), + std::regex(R"(.*QuantizedModelsTests\.ConvolutionFQ.*)"), + // Issue: 163230 + std::regex(R"(.*ProposalLayerTest.*)"), + // Issue: 163232 + std::regex(R"(.*FC_3D_BF16.*MatMulLayerCPUTest.*)"), + // Issue: 163242 + std::regex(R"(.*bf16.*RNNSequenceCPUTest.*)"), + 
std::regex(R"(.*WeightlessCacheAccuracy.TiWithLstmCell.*model_dtype=bf16.*)"), + // Issue: 163250 + std::regex(R"(.*OnnxModelWithExtensionFromDSO.*)"), + // Issue: 163273 + // todo: define correct area + std::regex(R"(.*Deconv_2D_Planar_FP16.*DeconvolutionLayerCPUTest.*)"), + // Issue: 163275 + std::regex(R"(.*NoReshapeAndReshapeDynamic.*CodegenGelu.*)"), + // Issue: 163351 + std::regex(R"(.*CoreThreadingTestsWithIter.*nightly_AsyncInfer_ShareInput.*)"), + // Sporadic failings with ASAN enabled + std::regex(R"(.*CoreThreadingTest.*)"), + std::regex(R"(.*smoke_BehaviorTest.*)"), + // This transformation is disabled on CPU + std::regex(R"(.*smoke_LPT.*MultiplyToGroupConvolutionTransformation.*)"), + // Disabled due to sporadic failures in CI, Issue: 157267 + std::regex(R"(.*smoke_CompareWithRefs_4D_Blocked_Blocked_Fusing\/EltwiseLayerCPUTest.CompareWithRefs\/IS=\(\[\]_\)_TS.*2.4.4.1.*eltwise_op_type=(Sum|Mod|SqDiff|Prod)_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f32_InType=dynamic_OutType=dynamic_trgDev=CPU_config_item=INFERENCE_PRECISION_HINT=f32_inFmts=nChw16c.nChw16c_outFmts=nChw16c_Fused=FakeQuantize\(PerChannel\).*)"), + std::regex(R"(.*smoke_CachingSupportCase_CPU\/CompileModelCacheTestBase.CompareWithRefImpl\/SplitConvConcatNestedInBranchNestedOut_i16_batch1_CPU.*)"), + std::regex(R"(.*smoke_CompareWithRefs_5D_MemOrder_Blocked_Blocked\/EltwiseLayerCPUTest.CompareWithRefs\/IS=\(\[\]_\[\]_\)_TS=.*2.17.6.5.1.*_.*1.17.1.1.4.*_\)_eltwise_op_type=Sub_secondary_input_type=CONSTANT_opType=VECTOR_model_type=bf16_InType=dynamic_OutType=dynamic_trgDev=CPU_config_item=INFERENCE_PRECISION_HINT=f32_inFmts=nCdhw16c.nCdhw16c_outFmts=nCdhw16c_enforceSnippets=0.*)"), + 
std::regex(R"(.*.*smoke_CompareWithRefs_4D_Fusing_Blocked_Blocked\/EltwiseLayerCPUTest.CompareWithRefs\/IS=\(\[\]_\)_TS=\(\(2.4.4.1\)_\)_eltwise_op_type=Mod_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f32_InType=dynamic_OutType=dynamic_trgDev=CPU_config_item=INFERENCE_PRECISION_HINT=f32_inFmts=nChw16c.nChw16c_outFmts=nChw16c_Fused=FakeQuantize\(PerChannel\).Sigmoid.FakeQuantize\(PerTensor\)_enforceSnippets=0.*)"), + std::regex(R"(.*smoke_Conv_1D_GEMM_FP32\/ConvolutionLayerCPUTest.CompareWithRefs\/IS=\[\]_TS=\(\(2.12.7\)_\)_K\(3\)_S\(2\)_PB\(1\)_PE\(0\)_D=\(1\)_O=6_AP=explicit_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPU_inFmts=ncw_outFmts=ncw_primitive=jit_gemm_Fused=Relu.*)"), + // fp32 floor for bf16 models: conversion issue + std::regex(R"(.*smoke.*ActivationLayerCPUTest.*CompareWithRefs/(Floor|Ceiling)_.*netPRC=bf16.*)"), - // Disabled due to dependency on tests execution order, issue: 178036 + // Disabled due to dependency on tests execution order, issue: 178036 #if !(defined(__APPLE__) && defined(__MACH__)) && (defined(OPENVINO_ARCH_ARM64) || defined(OPENVINO_ARCH_ARM)) - R"(.*smoke_ScaledAttn_CPU\/ScaledAttnLayerCPUTest.CompareWithRefs\/netPRC=f32_IS=\[\?.8.\?.64]_\[\?.8.\?.64]_\[\?.1.\?.\?\]_TS=\(1.8.100.64\)_\(1.8.1.64\)_\(2.8.10.64\)_\(2.8.10.64\)_\(1.8.100.64\)_\(1.8.1.64\)_\(2.8.10.64\)_\(2.8.10.64\)_\(1.1.1.100\)_\(1.1.1.1\)_\(2.1.1.10\)_\(2.1.10.10\)_is_causal=0_has_attn=0_has_scale=0_trgDev=CPU_primitive=ref_any.*)", + std::regex(R"(.*smoke_ScaledAttn_CPU\/ScaledAttnLayerCPUTest.CompareWithRefs\/netPRC=f32_IS=\[\?.8.\?.64]_\[\?.8.\?.64]_\[\?.1.\?.\?\]_TS=\(1.8.100.64\)_\(1.8.1.64\)_\(2.8.10.64\)_\(2.8.10.64\)_\(1.8.100.64\)_\(1.8.1.64\)_\(2.8.10.64\)_\(2.8.10.64\)_\(1.1.1.100\)_\(1.1.1.1\)_\(2.1.1.10\)_\(2.1.10.10\)_is_causal=0_has_attn=0_has_scale=0_trgDev=CPU_primitive=ref_any.*)"), #endif - }; - - // fp32 floor for bf16 models: conversion issue - 
retVector.emplace_back(R"(.*smoke.*ActivationLayerCPUTest.*CompareWithRefs/(Floor|Ceiling)_.*netPRC=bf16.*)"); - #if defined(OPENVINO_ARCH_X86) - retVector.emplace_back(R"(.*DetectionOutputLayerTest.*)"); - // WIP: plugin cannot be loaded for some reason - retVector.emplace_back(R"(.*IEClassBasicTestP.*)"); + std::regex(R"(.*DetectionOutputLayerTest.*)"), + // WIP: plugin cannot be loaded for some reason + std::regex(R"(.*IEClassBasicTestP.*)"), #elif defined(OPENVINO_ARCH_ARM64) || defined(OPENVINO_ARCH_ARM) - { - retVector.emplace_back( - R"(smoke_CompareWithRefs_static_check_collapsing/EltwiseLayerTest.Inference/IS.*_eltwise_op_type=Div_secondary_input_type=PARAMETER_opType=VECTOR_model_type=i32_InType=dynamic_OutType=dynamic_trgDev=CPU.*)"); - // Issue: 123321 - retVector.emplace_back( - R"(.*smoke_RNNSequenceCommonZeroClip/RNNSequenceTest.Inference.*hidden_size=1.*relu.*direction=reverse.*)"); - // Ticket: 134601 - retVector.emplace_back(R"(.*smoke_GroupNormalization.*)"); - // by calc abs_threshold with expected value - retVector.emplace_back( - R"(.*smoke_Interpolate_Basic_Down_Sample_Tail/InterpolateLayerTest.Inference.*InterpolateMode=(linear|linear_onnx)_ShapeCalcMode=scales_CoordinateTransformMode=half_pixel.*PE=\(0.0.0.0\).*netType=f32.*)"); - retVector.emplace_back(R"(.*smoke_ConversionLayerTest/ConversionLayerTest.Inference/conversionOpType=Convert_.*_inputPRC=f16_targetPRC=(u8|i8).*)"); - retVector.emplace_back(R"(.*smoke_Decomposition_4D/Mvn6LayerTest.Inference/.*TS=\{\((1.16.5.8|2.19.5.10)\)\}_ModelType=f32_.*_Ax=\(0.1.2.3\)_NormVariance=FALSE.*)"); - retVector.emplace_back(R"(.*smoke_Decomposition_4D/Mvn6LayerTest.Inference/.*TS=\{\(2.19.5.10\)\}_ModelType=f32_.*_Ax=\(1\).*)"); - retVector.emplace_back(R"(.*smoke_LogSoftmax4D/LogSoftmaxLayerTest.Inference/.*TS=\{\(2.3.4.5\)\}_modelType=f32_axis=(-4|-3|-2|0|1|2).*)"); - 
retVector.emplace_back(R"(.*smoke_Interpolate_Basic/InterpolateLayerTest.Inference/.*InterpolateMode=cubic_ShapeCalcMode=sizes_CoordinateTransformMode=tf_half_pixel.*PB=\(0.0.0.0\)_PE=\(0.0.1.1\)_.*netType=f32.*)"); - // Ticket: 144845 - retVector.emplace_back(R"(.*LSTMCellFusion/LSTMCellFusionWithSplitWeights.SubgraphFusedToLSTMCell/(1|8|15))"); - // Ticket: 131541 - retVector.emplace_back(R"(.*smoke_MulticlassNmsLayerTest_dynamic2.*_outType=i32_.*)"); - // Ticket: 162434 - retVector.emplace_back(R"(smoke_LPT/MatMulTransformation.*)"); - // Ticket: 162260 - retVector.emplace_back(R"(smoke_Snippets_FQDecomposition.*netPRC=f32_D=CPU.*)"); - // Ticket: 166771 - retVector.emplace_back(R"(.*smoke_BroadcastEltwise/BroadcastEltwise.smoke_CompareWithRefs.*)"); - // Ticket: 168863 - retVector.emplace_back(R"(.*smoke_AvgPoolV14_CPU_4D/AvgPoolingV14LayerCPUTest.CompareWithRefs.*)"); - // Ticket: 168931 - retVector.emplace_back(R"(.*smoke_Reduce_OneAxis_dynamic_CPU/ReduceCPULayerTest.CompareWithRefs.*)"); - } - // invalid test: checks u8 precision for runtime graph, while it should be f32 - retVector.emplace_back(R"(smoke_NegativeQuantizedMatMulMultiplyFusion.*)"); - // int8 specific - retVector.emplace_back(R"(smoke_Quantized.*)"); - // Issue: 124309 - retVector.emplace_back(R"(.*InferRequestPreprocessConversionTest.*oLT=NHWC.*)"); - retVector.emplace_back(R"(.*smoke_NoReshape/OVCompiledModelGraphUniqueNodeNamesTest.CheckUniqueNodeNames.*)"); - retVector.emplace_back(R"(.*smoke_BehaviorTests/InferRequestPerfCountersTest.CheckOperationInPerfMap.*)"); - retVector.emplace_back( - R"(smoke_ExecGraph/ExecGraphRuntimePrecision.CheckRuntimePrecision/Function=FakeQuantizeBinaryConvolution.*)"); - // Issue: 124395 - retVector.emplace_back(R"(smoke_VariableStateBasic/InferRequestVariableStateTest.*)"); - retVector.emplace_back(R"(smoke_VariableState/OVInferRequestVariableStateTest.*)"); - // Issue: 141705 - 
retVector.emplace_back(R"(.*smoke_arm_Deconv_2D_Planar_FP16/DeconvolutionLayerCPUTest.*INFERENCE_PRECISION_HINT=f16.*)"); - retVector.emplace_back(R"(.*ConcatMultiQuerySDPTest.*u8.*)"); - retVector.emplace_back(R"(.*smoke_ConcatSDPTransposeByChannelTest.*)"); - // Issue: 168490 - retVector.emplace_back(R"(.*CPU/CoreThreadingTest.smoke_QueryModel.*)"); - retVector.emplace_back(R"(.*WeightlessCacheAccuracy.*)"); + std::regex(R"(smoke_CompareWithRefs_static_check_collapsing/EltwiseLayerTest.Inference/IS.*_eltwise_op_type=Div_secondary_input_type=PARAMETER_opType=VECTOR_model_type=i32_InType=dynamic_OutType=dynamic_trgDev=CPU.*)"), + // Issue: 123321 + std::regex(R"(.*smoke_RNNSequenceCommonZeroClip/RNNSequenceTest.Inference.*hidden_size=1.*relu.*direction=reverse.*)"), + // Ticket: 134601 + std::regex(R"(.*smoke_GroupNormalization.*)"), + // by calc abs_threshold with expected value + std::regex(R"(.*smoke_Interpolate_Basic_Down_Sample_Tail/InterpolateLayerTest.Inference.*InterpolateMode=(linear|linear_onnx)_ShapeCalcMode=scales_CoordinateTransformMode=half_pixel.*PE=\(0.0.0.0\).*netType=f32.*)"), + std::regex(R"(.*smoke_ConversionLayerTest/ConversionLayerTest.Inference/conversionOpType=Convert_.*_inputPRC=f16_targetPRC=(u8|i8).*)"), + std::regex(R"(.*smoke_Decomposition_4D/Mvn6LayerTest.Inference/.*TS=\{\((1.16.5.8|2.19.5.10)\)\}_ModelType=f32_.*_Ax=\(0.1.2.3\)_NormVariance=FALSE.*)"), + std::regex(R"(.*smoke_Decomposition_4D/Mvn6LayerTest.Inference/.*TS=\{\(2.19.5.10\)\}_ModelType=f32_.*_Ax=\(1\).*)"), + std::regex(R"(.*smoke_LogSoftmax4D/LogSoftmaxLayerTest.Inference/.*TS=\{\(2.3.4.5\)\}_modelType=f32_axis=(-4|-3|-2|0|1|2).*)"), + std::regex(R"(.*smoke_Interpolate_Basic/InterpolateLayerTest.Inference/.*InterpolateMode=cubic_ShapeCalcMode=sizes_CoordinateTransformMode=tf_half_pixel.*PB=\(0.0.0.0\)_PE=\(0.0.1.1\)_.*netType=f32.*)"), + // Ticket: 144845 + std::regex(R"(.*LSTMCellFusion/LSTMCellFusionWithSplitWeights.SubgraphFusedToLSTMCell/(1|8|15))"), + // Ticket: 
131541 + std::regex(R"(.*smoke_MulticlassNmsLayerTest_dynamic2.*_outType=i32_.*)"), + // Ticket: 162434 + std::regex(R"(smoke_LPT/MatMulTransformation.*)"), + // Ticket: 162260 + std::regex(R"(smoke_Snippets_FQDecomposition.*netPRC=f32_D=CPU.*)"), + // Ticket: 166771 + std::regex(R"(.*smoke_BroadcastEltwise/BroadcastEltwise.smoke_CompareWithRefs.*)"), + // Ticket: 168863 + std::regex(R"(.*smoke_AvgPoolV14_CPU_4D/AvgPoolingV14LayerCPUTest.CompareWithRefs.*)"), + // Ticket: 168931 + std::regex(R"(.*smoke_Reduce_OneAxis_dynamic_CPU/ReduceCPULayerTest.CompareWithRefs.*)"), + // invalid test: checks u8 precision for runtime graph, while it should be f32 + std::regex(R"(smoke_NegativeQuantizedMatMulMultiplyFusion.*)"), + // int8 specific + std::regex(R"(smoke_Quantized.*)"), + // Issue: 124309 + std::regex(R"(.*InferRequestPreprocessConversionTest.*oLT=NHWC.*)"), + std::regex(R"(.*smoke_NoReshape/OVCompiledModelGraphUniqueNodeNamesTest.CheckUniqueNodeNames.*)"), + std::regex(R"(.*smoke_BehaviorTests/InferRequestPerfCountersTest.CheckOperationInPerfMap.*)"), + std::regex(R"(smoke_ExecGraph/ExecGraphRuntimePrecision.CheckRuntimePrecision/Function=FakeQuantizeBinaryConvolution.*)"), + // Issue: 124395 + std::regex(R"(smoke_VariableStateBasic/InferRequestVariableStateTest.*)"), + std::regex(R"(smoke_VariableState/OVInferRequestVariableStateTest.*)"), + // Issue: 141705 + std::regex(R"(.*smoke_arm_Deconv_2D_Planar_FP16/DeconvolutionLayerCPUTest.*INFERENCE_PRECISION_HINT=f16.*)"), + std::regex(R"(.*ConcatMultiQuerySDPTest.*u8.*)"), + std::regex(R"(.*smoke_ConcatSDPTransposeByChannelTest.*)"), + // Issue: 168490 + std::regex(R"(.*CPU/CoreThreadingTest.smoke_QueryModel.*)"), + std::regex(R"(.*WeightlessCacheAccuracy.*)"), #endif - #if defined(OPENVINO_ARCH_ARM) - // Issue: 144998 - retVector.emplace_back(R"(.*smoke_CachingSupportCase_CPU.*_(i8|u8).*)"); - retVector.emplace_back(R"(.*smoke_Hetero_CachingSupportCase.*_(i8|u8).*)"); - // TODO: rounding errors - 
retVector.emplace_back(R"(.*iv_secondaryInputType=PARAMETER_opType=VECTOR_NetType=i32.*)"); - // not supported - retVector.emplace_back(R"(.*fma.*EltwiseLayerCPUTest.*)"); - retVector.emplace_back(R"(.*int_jit.*EltwiseLayerCPUTest.*)"); - retVector.emplace_back(R"(.*dyn.*EltwiseChainTest.*)"); - retVector.emplace_back(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*InPRC0=i8.*Conversion=i8.*)"); - retVector.emplace_back(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*InPRC0=u8.*Conversion=i8.*)"); - retVector.emplace_back(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*InPRC0=i16.*Conversion=i8.*)"); - retVector.emplace_back(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*InPRC0=u16.*Conversion=i8.*)"); - retVector.emplace_back(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*InPRC0=i32.*Conversion=i8.*)"); - // by calc abs_threshold with expected value - retVector.emplace_back(R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.*_eltwise_op_type=Div_.*_model_type=i32_.*)"); - // int8 / code-generation specific - retVector.emplace_back(R"(smoke_LPT.*)"); - retVector.emplace_back(R"(.*smoke_RoPETest.*)"); + // Issue: 144998 + std::regex(R"(.*smoke_CachingSupportCase_CPU.*_(i8|u8).*)"), + std::regex(R"(.*smoke_Hetero_CachingSupportCase.*_(i8|u8).*)"), + // TODO: rounding errors + std::regex(R"(.*iv_secondaryInputType=PARAMETER_opType=VECTOR_NetType=i32.*)"), + // not supported + std::regex(R"(.*fma.*EltwiseLayerCPUTest.*)"), + std::regex(R"(.*int_jit.*EltwiseLayerCPUTest.*)"), + std::regex(R"(.*dyn.*EltwiseChainTest.*)"), + std::regex(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*InPRC0=i8.*Conversion=i8.*)"), + std::regex(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*InPRC0=u8.*Conversion=i8.*)"), + std::regex(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*InPRC0=i16.*Conversion=i8.*)"), + std::regex(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*InPRC0=u16.*Conversion=i8.*)"), + std::regex(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*InPRC0=i32.*Conversion=i8.*)"), + // by calc abs_threshold with 
expected value + std::regex(R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.*_eltwise_op_type=Div_.*_model_type=i32_.*)"), + // int8 / code-generation specific + std::regex(R"(smoke_LPT.*)"), + std::regex(R"(.*smoke_RoPETest.*)"), + std::regex(R"(.*ActivationLayerTest.*Inference.*)"), + std::regex(R"(.*AddConvertToReorderTest.*smoke_TestAddReorder_CPU.*)"), + std::regex(R"(.*AddOutputsTest.*smoke_CheckOutputExist.*)"), + std::regex(R"(.*CompileModelCacheRuntimePropertiesTestBase.*CanLoadFromFileWithoutException.*)"), + std::regex(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*2InputSubtract_f.*)"), + std::regex(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*ConvPoolRelu_f.*)"), + std::regex(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*MatMulBias_f.*)"), + std::regex(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*SimpleFunctionRelu_f.*)"), + std::regex(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl/MatMulBias_f32_batch1_CPU)"), + std::regex(R"(.*CompileModelLoadFromCacheTest.*CanGetCorrectLoadedFromCacheProperty.*)"), + std::regex(R"(.*CompileModelLoadFromFileTestBase.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)"), + std::regex(R"(.*CompileModelLoadFromFileTestBase.*CanLoadFromFileWithoutException.*)"), + std::regex(R"(.*CompileModelLoadFromMemoryTestBase.*CanLoadFromMemoryWithoutExecption.*)"), + std::regex(R"(.*CompileModelLoadFromMemoryTestBase.*CanLoadFromMemoryWithoutWeightsANdExecption.*)"), + std::regex(R"(.*CompileModelWithCacheEncryptionTest.*CanImportModelWithoutException.*)"), + std::regex(R"(.*ConcatMultiQuerySDPTest.*f16.*)"), + std::regex(R"(.*ConcatSDPTest.*f16.*)"), + std::regex(R"(.*FakeConvertLayerTest.*f16.*)"), + std::regex(R"(.*CoreThreadingTestsWithCacheEnabled.*smoke_compiled_model_cache_enabled.*)"), + std::regex(R"(.*CoreThreadingTestsWithIter.*smoke_CompileModel.*)"), + std::regex(R"(.*CustomOpConvertI64CPUTest.*CompareWithRefs.*)"), + 
std::regex(R"(.*EltwiseLayerCPUTest.*CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"), + std::regex(R"(.*EltwiseLayerTest.*Inference.*)"), + std::regex(R"(.*ExecGraphDuplicateInputsOutputsNames.*CheckOutputsMatch.*)"), + std::regex(R"(.*ExecGraphKeepAssignNode.*KeepAssignNode.*)"), + std::regex(R"(.*ExecGraphRemoveParameterNode.*RemoveParameterNode.*)"), + std::regex(R"(.*IndexAddTest.*CompareWithRefs.*)"), + std::regex(R"(.*InterpolateLayerCPUTest.*CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"), + std::regex(R"(.*MatMulLayerCPUTest.*CompareWithRefs.*)"), + std::regex(R"(.*MatmulWeightsDecompression.*CompareWithRefs.*)"), + std::regex(R"(.*MvnLayerCPUTest.*CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"), + std::regex(R"(.*NonInputInPlaceTest.*CompareWithRefs.*)"), + std::regex(R"(.*OVClassCompiledModelGetPropertyTest_EXEC_DEVICES.*CanGetExecutionDeviceInfo.*)"), + std::regex(R"(.*OVClassConfigTestCPU.*smoke_.*)"), + std::regex(R"(.*OVClassConfigTestCPU.*smoke_CpuExecNetwork.*)"), + std::regex(R"(.*OVInferenceChaining.*StaticOutputToDynamicInput.*)"), + std::regex(R"(.*OVInferenceChaining.*StaticOutputToStaticInput.*)"), + std::regex(R"(.*OVInferenceChainingStatic.*StaticOutputToStaticInput.*)"), + std::regex(R"(.*ReduceCPULayerTest.*CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"), + // Issue: 164799 + std::regex(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*)"), + // Issue 167685 + std::regex(R"(.*importExportModelWithTypeRelaxedExt.*)"), #endif - #if defined(OPENVINO_ARCH_ARM64) - // Issue: 149216. For low precision model from original framework, Snippets PropagatePrecision should insert ConvertTruncation instead - // of ConvertSaturation when converting larger integer to smaller integer to align with c++ standard and ngraph reference. - retVector.emplace_back(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*Op0=Prod.*Conversion=i8.*)"); + // Issue: 149216. 
For low precision model from original framework, Snippets PropagatePrecision should insert ConvertTruncation instead + // of ConvertSaturation when converting larger integer to smaller integer to align with c++ standard and ngraph reference. + std::regex(R"(.*smoke_EltwiseChain_MergeConvert_int8/.*Op0=Prod.*Conversion=i8.*)"), #endif - #if defined(OPENVINO_ARCH_RISCV64) - // object is not initialized - retVector.emplace_back(R"(.*StaticLoopDynamicSubgraphCPUTest.smoke_StaticLoopWithDynSubgraph.*)"); - // crash with 'double free or corruption (!prev)' - retVector.emplace_back(R"(.*smoke_InterpolateBilinearPillow_Layout_Test.*)"); - retVector.emplace_back(R"(.*smoke_InterpolateBicubicPillow_Layout_Test.*)"); - // unsupported node type 'CausalMaskPreprocess' - retVector.emplace_back(R"(CausalMaskPreprocessCausalMaskPreprocess.smoke_CompareWithRefs)"); - // fused op FakeQuantize has not been found - retVector.emplace_back(R"(ConvAndFQWithSharedConstants.smoke_ConvAndFQWithSharedConstants_CPU)"); - // subgraphs code-generator is not supported on non-x64 platforms - retVector.emplace_back(R"(SubgraphSnippetSerializationTest.smoke_SerializeSubgraph(WithScalarConst)?)"); - retVector.emplace_back(R"(.*SubgraphWithBlockedFormat.*)"); - // unsupported layout - retVector.emplace_back(R"(GatherAddAvgpool.smoke_CompareWithRefs)"); - retVector.emplace_back(R"(smoke_StaticAdaPoolAvg(4|5)DLayoutTest/AdaPoolLayerCPUTest.*_outFmts=nd?hwc_1)"); - retVector.emplace_back(R"(.*smoke_CompareWithRefs_Mvn(4|5)D(_Static)?/MvnLayerCPUTest.CompareWithRefs.*inFmts=nd?hwc.*)"); - retVector.emplace_back(R"(.*smoke_TopK(_int32|_bubble_BLK_on_channel_horiz)?(_dynamic)?/TopKLayerCPUTest.CompareWithRefs.*inFmts=(nhwc|nChw8c|nChw16c).x.*)"); - retVector.emplace_back(R"(.*smoke_(Group)?Convolution(2|3)D/ConvConcatSubgraphTest.CompareWithRefs.*)"); - retVector.emplace_back(R"(.*smoke_FakeQuantizeCache_(4|5)D/FakeQuantizeCacheTest.CompareWithRefs.*inFmts=(nhwc|nChw8c|ndhwc|nCdhw8c).*)"); - 
retVector.emplace_back(R"(.*ReduceCPULayerTest.CompareWithRefs.*inFmts=nhwc.*)"); - // only infer_precision=f32 is supported on riscv64 platforms - retVector.emplace_back(R"(.*smoke_CompareWithRefs_(4|5)D.*EltwiseLayerCPUTest.CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"); - retVector.emplace_back(R"(.*smoke_CompareWithRefs_Mvn[12345]D.*/MvnLayerCPUTest.CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"); - retVector.emplace_back(R"(.*ReduceCPULayerTest.CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"); - retVector.emplace_back(R"(.*RemoveUselessFP16ConvertCPUTest.*INFERENCE_PRECISION_HINT=f16.*)"); - // fused op Add has not been found - retVector.emplace_back(R"(.*smoke_CompareWithRefs_fma_(4|5)D/EltwiseLayerCPUTest.CompareWithRefs.*)"); - // primType is unexpected - retVector.emplace_back(R"(.*smoke_Param(Const)?/RandomUniformLayerTestCPU.CompareWithRefs.*)"); - retVector.emplace_back(R"(.*smoke_Reduce_Int32_CPU/ReduceCPULayerTest.CompareWithRefs.*)"); - retVector.emplace_back(R"(.*smoke_(static|dynamic)Shapes4D(C(16|32))?(_Transpose|_PermutePerChannels)/TransposeLayerCPUTest.CompareWithRefs.*netPRC=f32.*INFERENCE_PRECISION_HINT=f16.*)"); - retVector.emplace_back(R"(.*smoke_(static|dynamic)_1D/GatherLayerTestCPU.CompareWithRefs.*)"); - retVector.emplace_back(R"(.*smoke_RDFT_CPU_(1|2|4)D/RDFTTestCPU.CompareWithRefs.*)"); - retVector.emplace_back(R"(.*smoke_CompareWithRefs(Numpy|None)_dynamic/SelectLayerCPUTest.CompareWithRefs.*)"); - retVector.emplace_back(R"(.*smoke_Check/ConvPoolActivTest.CompareWithRefs.*)"); - retVector.emplace_back(R"(.*smoke_Conv_Sum_(1x1_)?Broadcast(_FP32|_Strided|_INT8|_Several_Consumers|_StaticShape)?/Conv(1x1)?Sum(InPlace(Test(Int8|SeveralConsumers)?|Strided)?|(Unsupported)?BroadcastTest).CompareWithRefs.*)"); - retVector.emplace_back(R"(.*smoke_ReshapeFc/ReshapeFcCPUTest.CompareWithRefs.*)"); - // dimensions of shapes are mismatched - 
retVector.emplace_back(R"(.*CPUDetectionOutputDynamic3InLargeTensor/DetectionOutputLayerCPUTest.CompareWithRefs.*varEnc=0.*)"); - // cannot get dims for non static shape - retVector.emplace_back(R"(.*nightly_/NmsRotatedOpTest.CompareWithRefs/IS=\(\[\]_\[\]\)_TS=\{\(3.11.5\)_\(3.15.11\)\}__BoxPrc=f16_MaxPrc=i64_ThrPrc=f16_OutPrc=i64_MaxBox=10.*ConstIn=\{True,True,True,True,True\}_Device=CPU.*)"); - retVector.emplace_back(R"(nightly_/NmsRotatedOpTest.CompareWithRefs/IS=\(\[\]_\[\]\)_TS=\{\(15.29.5\)_\(15.31.29\)\}__BoxPrc=f16_MaxPrc=i64_ThrPrc=f16_OutPrc=i64_MaxBox=10_IouThr=0.5_ScoreThr=0.4_SortDesc=False_Clockwise=True_ConstIn=\{True,True,True,True,True\}_Device=CPU)"); - retVector.emplace_back(R"(nightly_/NmsRotatedOpTest.CompareWithRefs/IS=\(\[\]_\[\]\)_TS=\{\(21.64.5\)_\(21.32.64\)\}__BoxPrc=f16_MaxPrc=i64_ThrPrc=f16_OutPrc=i64_MaxBox=10_IouThr=0.5_ScoreThr=0.4_SortDesc=False_Clockwise=True_ConstIn=\{True,True,True,True,True\}_Device=CPU)"); - retVector.emplace_back(R"(nightly_/NmsRotatedOpTest.CompareWithRefs/IS=\(\[\?.\?.5\]_\[\?.\?.\?\]\)_TS=\{\(7.35.5\)_\(7.30.35\)\}_\{\(7.35.5\)_\(7.100.35\)\}_\{\(7.35.5\)_\(7.133.35\)\}__BoxPrc=f16_MaxPrc=i64_ThrPrc=f16_OutPrc=i64_MaxBox=10_IouThr=0.5_ScoreThr=0.4_SortDesc=False_Clockwise=True_ConstIn=\{True,True,True,True,True\}_Device=CPU)"); - // Accuracy problem - retVector.emplace_back(R"(.*InterpolateCubic_Layout_Test.*)"); - retVector.emplace_back(R"(.*nightly_(static|dynamic)/UniqueLayerTestCPU.*dataPrc=i8.*)"); - retVector.emplace_back(R"(.*smoke_Interpolate_Basic/InterpolateLayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.4.6.6\)\}_TS=\(1.4.8.8\)_InterpolateMode=cubic_ShapeCalcMode=sizes_CoordinateTransformMode=tf_half_pixel_for_nn_NearestMode=round_prefer_floor_cube_coef=-0.75_Antialias=0_PB=\(0.0.0.0\)_PE=\(0.0.1.1\)_Axes=\(0.1.2.3\)_Scales=\(1.1.1.33333.1.33333\)_netType=f32_trgDev=CPU.*)"); - retVector.emplace_back(R"(.*smoke_MaxPool_ExplicitPad_CeilRounding.*K\(3.3\)_S\(\d.2\).*PE\(0.2\).*)"); - // Incorrect number 
of input or output memory formats - retVector.emplace_back(R"(.*smoke_(static|dynamic)/RNNCellCPUTest.CompareWithRefs.*)"); - // crash with code: 11 - retVector.emplace_back(R"(.*smoke_static/ConvertFqRnnToQuantizedRnn.CompareWithRefs.*)"); - // Fused op Elu has not been found - retVector.emplace_back(R"(.*smoke_Check/AlignMatMulInputRanksTest.CompareWithRefs/IS_A=\[\d.+\]_IS_B=\[\d.+\]__Fused=Elu.*)"); - // Unsupported node type 'ScaledDotProductAttention' - retVector.emplace_back(R"(.*smoke_Concat(MultiQuery)?SDP(Transpose)?Test(SetState)?/.*)"); - // Unexpected transpose count - retVector.emplace_back(R"(.*smoke_Basic/FuseTransposeAndReorderTest3.CompareWithRefs.*)"); - // Unsupported node type 'Interaction' - retVector.emplace_back(R"(.*smoke_Interaction/IntertactionCPUTest.CompareWithRefs.*)"); - // Unexpected count of the `Reorder` nodes - retVector.emplace_back(R"(.*smoke_MergeTransposeReorder_(static|dynamic)/MergeTransposeReorderCPUTest.CompareWithRefs.*)"); - // Unexpected value of 'num_ops' - retVector.emplace_back(R"(.*smoke_RoPETest.*)"); - // Unsupported node type 'ScaledDotProductAttention' - retVector.emplace_back(R"(.*smoke_SDPAGroupBeamSearchTest/SDPAGroupBeamSearchTest.CompareWithRefs.*)"); - // Crash with code: 6 - retVector.emplace_back(R"(^smoke_VirtualPlugin_BehaviorTests/OVHoldersTest.Orders/target_device=HETERO.CPU$)"); - // Crash with code: 14, but can be launched with `--gtest_filter` - retVector.emplace_back(R"(.*CPU/CoreThreadingTest.smoke_QueryModel.*)"); - // Target Static Shape is empty - retVector.emplace_back(R"(.*proposal_params/.*)"); - // Quantized models unsupported - retVector.emplace_back(R"(.*Quantized.*)"); - - if (!ov::intel_cpu::riscv64::mayiuse(ov::intel_cpu::riscv64::gv)) { - // Integer division is supported only by JIT Executor which is available on platforms with GV instruction sets. - // In other cases there might be accuracy problems. 
- retVector.emplace_back(R"(.*smoke_EltwiseChain/EltwiseChainTest.CompareWithRefs.*InPRC3=i32_Op0=Div_Op1.*)"); - retVector.emplace_back(R"(.*smoke_CompareWithRefs_static.*eltwise_op_type=Div.*model_type=i32.*)"); - } + // object is not initialized + std::regex(R"(.*StaticLoopDynamicSubgraphCPUTest.smoke_StaticLoopWithDynSubgraph.*)"), + // crash with 'double free or corruption (!prev)' + std::regex(R"(.*smoke_InterpolateBilinearPillow_Layout_Test.*)"), + std::regex(R"(.*smoke_InterpolateBicubicPillow_Layout_Test.*)"), + // unsupported node type 'CausalMaskPreprocess' + std::regex(R"(CausalMaskPreprocessCausalMaskPreprocess.smoke_CompareWithRefs)"), + // fused op FakeQuantize has not been found + std::regex(R"(ConvAndFQWithSharedConstants.smoke_ConvAndFQWithSharedConstants_CPU)"), + // subgraphs code-generator is not supported on non-x64 platforms + std::regex(R"(SubgraphSnippetSerializationTest.smoke_SerializeSubgraph(WithScalarConst)?)"), + std::regex(R"(.*SubgraphWithBlockedFormat.*)"), + // unsupported layout + std::regex(R"(GatherAddAvgpool.smoke_CompareWithRefs)"), + std::regex(R"(smoke_StaticAdaPoolAvg(4|5)DLayoutTest/AdaPoolLayerCPUTest.*_outFmts=nd?hwc_1)"), + std::regex(R"(.*smoke_CompareWithRefs_Mvn(4|5)D(_Static)?/MvnLayerCPUTest.CompareWithRefs.*inFmts=nd?hwc.*)"), + std::regex(R"(.*smoke_TopK(_int32|_bubble_BLK_on_channel_horiz)?(_dynamic)?/TopKLayerCPUTest.CompareWithRefs.*inFmts=(nhwc|nChw8c|nChw16c).x.*)"), + std::regex(R"(.*smoke_(Group)?Convolution(2|3)D/ConvConcatSubgraphTest.CompareWithRefs.*)"), + std::regex(R"(.*smoke_FakeQuantizeCache_(4|5)D/FakeQuantizeCacheTest.CompareWithRefs.*inFmts=(nhwc|nChw8c|ndhwc|nCdhw8c).*)"), + std::regex(R"(.*ReduceCPULayerTest.CompareWithRefs.*inFmts=nhwc.*)"), + // only infer_precision=f32 is supported on riscv64 platforms + std::regex(R"(.*smoke_CompareWithRefs_(4|5)D.*EltwiseLayerCPUTest.CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"), + 
std::regex(R"(.*smoke_CompareWithRefs_Mvn[12345]D.*/MvnLayerCPUTest.CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"), + std::regex(R"(.*ReduceCPULayerTest.CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"), + std::regex(R"(.*RemoveUselessFP16ConvertCPUTest.*INFERENCE_PRECISION_HINT=f16.*)"), + // fused op Add has not been found + std::regex(R"(.*smoke_CompareWithRefs_fma_(4|5)D/EltwiseLayerCPUTest.CompareWithRefs.*)"), + // primType is unexpected + std::regex(R"(.*smoke_Param(Const)?/RandomUniformLayerTestCPU.CompareWithRefs.*)"), + std::regex(R"(.*smoke_Reduce_Int32_CPU/ReduceCPULayerTest.CompareWithRefs.*)"), + std::regex(R"(.*smoke_(static|dynamic)Shapes4D(C(16|32))?(_Transpose|_PermutePerChannels)/TransposeLayerCPUTest.CompareWithRefs.*netPRC=f32.*INFERENCE_PRECISION_HINT=f16.*)"), + std::regex(R"(.*smoke_(static|dynamic)_1D/GatherLayerTestCPU.CompareWithRefs.*)"), + std::regex(R"(.*smoke_RDFT_CPU_(1|2|4)D/RDFTTestCPU.CompareWithRefs.*)"), + std::regex(R"(.*smoke_CompareWithRefs(Numpy|None)_dynamic/SelectLayerCPUTest.CompareWithRefs.*)"), + std::regex(R"(.*smoke_Check/ConvPoolActivTest.CompareWithRefs.*)"), + std::regex(R"(.*smoke_Conv_Sum_(1x1_)?Broadcast(_FP32|_Strided|_INT8|_Several_Consumers|_StaticShape)?/Conv(1x1)?Sum(InPlace(Test(Int8|SeveralConsumers)?|Strided)?|(Unsupported)?BroadcastTest).CompareWithRefs.*)"), + std::regex(R"(.*smoke_ReshapeFc/ReshapeFcCPUTest.CompareWithRefs.*)"), + // dimensions of shapes are mismatched + std::regex(R"(.*CPUDetectionOutputDynamic3InLargeTensor/DetectionOutputLayerCPUTest.CompareWithRefs.*varEnc=0.*)"), + // cannot get dims for non static shape + std::regex(R"(.*nightly_/NmsRotatedOpTest.CompareWithRefs/IS=\(\[\]_\[\]\)_TS=\{\(3.11.5\)_\(3.15.11\)\}__BoxPrc=f16_MaxPrc=i64_ThrPrc=f16_OutPrc=i64_MaxBox=10.*ConstIn=\{True,True,True,True,True\}_Device=CPU.*)"), + 
std::regex(R"(nightly_/NmsRotatedOpTest.CompareWithRefs/IS=\(\[\]_\[\]\)_TS=\{\(15.29.5\)_\(15.31.29\)\}__BoxPrc=f16_MaxPrc=i64_ThrPrc=f16_OutPrc=i64_MaxBox=10_IouThr=0.5_ScoreThr=0.4_SortDesc=False_Clockwise=True_ConstIn=\{True,True,True,True,True\}_Device=CPU)"), + std::regex(R"(nightly_/NmsRotatedOpTest.CompareWithRefs/IS=\(\[\]_\[\]\)_TS=\{\(21.64.5\)_\(21.32.64\)\}__BoxPrc=f16_MaxPrc=i64_ThrPrc=f16_OutPrc=i64_MaxBox=10_IouThr=0.5_ScoreThr=0.4_SortDesc=False_Clockwise=True_ConstIn=\{True,True,True,True,True\}_Device=CPU)"), + std::regex(R"(nightly_/NmsRotatedOpTest.CompareWithRefs/IS=\(\[\?.\?.5\]_\[\?.\?.\?\]\)_TS=\{\(7.35.5\)_\(7.30.35\)\}_\{\(7.35.5\)_\(7.100.35\)\}_\{\(7.35.5\)_\(7.133.35\)\}__BoxPrc=f16_MaxPrc=i64_ThrPrc=f16_OutPrc=i64_MaxBox=10_IouThr=0.5_ScoreThr=0.4_SortDesc=False_Clockwise=True_ConstIn=\{True,True,True,True,True\}_Device=CPU)"), + // Accuracy problem + std::regex(R"(.*InterpolateCubic_Layout_Test.*)"), + std::regex(R"(.*nightly_(static|dynamic)/UniqueLayerTestCPU.*dataPrc=i8.*)"), + std::regex(R"(.*smoke_Interpolate_Basic/InterpolateLayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.4.6.6\)\}_TS=\(1.4.8.8\)_InterpolateMode=cubic_ShapeCalcMode=sizes_CoordinateTransformMode=tf_half_pixel_for_nn_NearestMode=round_prefer_floor_cube_coef=-0.75_Antialias=0_PB=\(0.0.0.0\)_PE=\(0.0.1.1\)_Axes=\(0.1.2.3\)_Scales=\(1.1.1.33333.1.33333\)_netType=f32_trgDev=CPU.*)"), + std::regex(R"(.*smoke_MaxPool_ExplicitPad_CeilRounding.*K\(3.3\)_S\(\d.2\).*PE\(0.2\).*)"), + // Incorrect number of input or output memory formats + std::regex(R"(.*smoke_(static|dynamic)/RNNCellCPUTest.CompareWithRefs.*)"), + // crash with code: 11 + std::regex(R"(.*smoke_static/ConvertFqRnnToQuantizedRnn.CompareWithRefs.*)"), + // Fused op Elu has not been found + std::regex(R"(.*smoke_Check/AlignMatMulInputRanksTest.CompareWithRefs/IS_A=\[\d.+\]_IS_B=\[\d.+\]__Fused=Elu.*)"), + // Unsupported node type 'ScaledDotProductAttention' + 
std::regex(R"(.*smoke_Concat(MultiQuery)?SDP(Transpose)?Test(SetState)?/.*)"), + // Unexpected transpose count + std::regex(R"(.*smoke_Basic/FuseTransposeAndReorderTest3.CompareWithRefs.*)"), + // Unsupported node type 'Interaction' + std::regex(R"(.*smoke_Interaction/IntertactionCPUTest.CompareWithRefs.*)"), + // Unexpected count of the `Reorder` nodes + std::regex(R"(.*smoke_MergeTransposeReorder_(static|dynamic)/MergeTransposeReorderCPUTest.CompareWithRefs.*)"), + // Unexpected value of 'num_ops' + std::regex(R"(.*smoke_RoPETest.*)"), + // Unsupported node type 'ScaledDotProductAttention' + std::regex(R"(.*smoke_SDPAGroupBeamSearchTest/SDPAGroupBeamSearchTest.CompareWithRefs.*)"), + // Crash with code: 6 + std::regex(R"(^smoke_VirtualPlugin_BehaviorTests/OVHoldersTest.Orders/target_device=HETERO.CPU$)"), + // Crash with code: 14, but can be launched with `--gtest_filter` + std::regex(R"(.*CPU/CoreThreadingTest.smoke_QueryModel.*)"), + // Target Static Shape is empty + std::regex(R"(.*proposal_params/.*)"), + // Quantized models unsupported + std::regex(R"(.*Quantized.*)"), + std::regex(R"(smoke_Snippets.*\[.*\?.*\].*)"), + std::regex(R"(smoke_Snippets(?!_(Eltwise|ThreeInputsEltwise)(/|_)).*)"), + std::regex(R"(.*_enforceSnippets=1.*)"), + std::regex(R"(smoke_Snippets_Eltwise.*/Add.*)"), + std::regex(R"(smoke_Snippets_Eltwise/TwoInputsAndOutputs.*)"), + std::regex(R"(smoke_Snippets_Eltwise_TwoResults.*)"), + std::regex(R"(smoke_Snippets_ThreeInputsEltwise.*)"), #endif - #if !defined(OPENVINO_ARCH_X86_64) - // very time-consuming test - retVector.emplace_back(R"(.*OVInferConsistencyTest.*)"); - // TODO: generate new 'expected' runtime graph for non-x64 CPU - retVector.emplace_back(R"(smoke_serialization/ExecGraphSerializationTest.ExecutionGraph.*)"); - retVector.emplace_back( - R"(smoke_ExecGraph/ExecGraphRuntimePrecision.CheckRuntimePrecision/Function=(EltwiseWithTwoDynamicInputs|FakeQuantizeRelu).*)"); - // Issue 108803: bug in CPU scalar implementation - 
retVector.emplace_back(R"(smoke_TestsDFT_(1|2|3|4)d/DFTLayerTest.CompareWithRefs.*)"); - retVector.emplace_back(R"(smoke_TestsDFT_(1|2|3|4)d/DFTLayerTest.Inference.*)"); - // Issue 88764, 91647, 108802: accuracy issue - retVector.emplace_back(R"(MultipleLSTMCellTest/MultipleLSTMCellTest.CompareWithRefs.*)"); - // Compressed weights are not supported - retVector.emplace_back(R"(smoke_MatMulSharedCompressedWeights.*)"); - retVector.emplace_back(R"(smoke_Model_Distribution_MatMulSharedCompressedWeights.*)"); - retVector.emplace_back(R"(smoke_MatmulAndGatherSharedWeightsDecompression.*)"); - // Issue: 170863 - retVector.emplace_back(R"(smoke_Model_Distribution_MatMul_NoTranspose.*)"); + // very time-consuming test + std::regex(R"(.*OVInferConsistencyTest.*)"), + // TODO: generate new 'expected' runtime graph for non-x64 CPU + std::regex(R"(smoke_serialization/ExecGraphSerializationTest.ExecutionGraph.*)"), + + std::regex(R"(smoke_ExecGraph/ExecGraphRuntimePrecision.CheckRuntimePrecision/Function=(EltwiseWithTwoDynamicInputs|FakeQuantizeRelu).*)"), + // Issue 108803: bug in CPU scalar implementation + std::regex(R"(smoke_TestsDFT_(1|2|3|4)d/DFTLayerTest.CompareWithRefs.*)"), + std::regex(R"(smoke_TestsDFT_(1|2|3|4)d/DFTLayerTest.Inference.*)"), + // Issue 88764, 91647, 108802: accuracy issue + std::regex(R"(MultipleLSTMCellTest/MultipleLSTMCellTest.CompareWithRefs.*)"), + // Compressed weights are not supported + std::regex(R"(smoke_MatMulSharedCompressedWeights.*)"), + std::regex(R"(smoke_Model_Distribution_MatMulSharedCompressedWeights.*)"), + std::regex(R"(smoke_MatmulAndGatherSharedWeightsDecompression.*)"), + // Issue: 170863 + std::regex(R"(smoke_Model_Distribution_MatMul_NoTranspose.*)"), #endif #if !defined(OPENVINO_ARCH_ARM64) && !defined(OPENVINO_ARCH_X86_64) && !defined(OPENVINO_ARCH_RISCV64) - // smoke_Snippets test cases are on platforms except x64, ARM64 and RISCV64 - retVector.emplace_back(R"(smoke_Snippets.*)"); + // smoke_Snippets test cases are on 
platforms except x64, ARM64 and RISCV64 + std::regex(R"(smoke_Snippets.*)"), #endif #if defined(OPENVINO_ARCH_ARM64) - // Tests to be enabled on ARM64 - retVector.emplace_back(R"(smoke_Snippets_ConvAdd/ConvEltwise.CompareWithRefImpl.*)"); - retVector.emplace_back(R"(smoke_Snippets_GroupNormalization.*)"); - retVector.emplace_back(R"(smoke_Snippets_PrecisionPropagation_Convertion.*)"); -#endif -#if defined(OPENVINO_ARCH_RISCV64) - retVector.emplace_back(R"(smoke_Snippets.*\[.*\?.*\].*)"); - retVector.emplace_back(R"(smoke_Snippets(?!_(Eltwise|ThreeInputsEltwise)(/|_)).*)"); - retVector.emplace_back(R"(.*_enforceSnippets=1.*)"); - retVector.emplace_back(R"(smoke_Snippets_Eltwise.*/Add.*)"); - retVector.emplace_back(R"(smoke_Snippets_Eltwise/TwoInputsAndOutputs.*)"); - retVector.emplace_back(R"(smoke_Snippets_Eltwise_TwoResults.*)"); - retVector.emplace_back(R"(smoke_Snippets_ThreeInputsEltwise.*)"); + // Tests to be enabled on ARM64 + std::regex(R"(smoke_Snippets_ConvAdd/ConvEltwise.CompareWithRefImpl.*)"), + std::regex(R"(smoke_Snippets_GroupNormalization.*)"), + std::regex(R"(smoke_Snippets_PrecisionPropagation_Convertion.*)"), #endif + #if defined(_WIN32) - retVector.emplace_back(R"(.*smoke_QuantizedConvolutionBatchNormTransposeOnWeights/QuantizedConvolutionBatchNorm.CompareWithRefs/conv_type=convolution_quantize_type=fake_quantize_intervals_type=per_(tensor|channel)_transpose_on_weights=true_device=CPU.*)"); - retVector.emplace_back(R"(.*smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[(1|4),3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1,1,1,1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ -12.7 \}_output_high\{ 12.8 \}_precision=\}_fq_on_weights=\{_255_\[1,1,1,1\]_\{ -12.7 \}_\{ 12.7 \}\}.*)"); - retVector.emplace_back(R"(.*smoke_LPT/FuseDequantizeToFakeQuantizeTransformation.CompareWithRefImpl/CPU_f32_0_dynamic_\[\]_f32__\{\}_\{\}__\{ 0.01, 0.1, 1 \}_f32_\[1,3\]_1_1_.*)"); - 
retVector.emplace_back(R"(.*smoke_QuantizedConvolutionBatchNorm/QuantizedConvolutionBatchNorm.CompareWithRefs/conv_type=convolution_quantize_.*)"); - retVector.emplace_back(R"(.*smoke_QuantizedConvolutionBatchNorm/QuantizedConvolutionBatchNorm.CompareWithRefs/conv_type=convolution_backprop_quantize_type=(quantize_dequantize_intervals|compressed_weights_intervals).*)"); - retVector.emplace_back(R"(.*smoke_LPT/MatMulTransformation.CompareWithRefImpl/f32_CPU_\[(1|8|1,1,1),4,12,2\]_level=256_shape=\[\]_input_low=\{ (0|-12.8) \}_input_high=\{ (25.5|12.7) \}_output_low=\{ (0|-12.8) \}_output_high\{ (25.5|12.7) \}_.*)"); - retVector.emplace_back(R"(.*smoke_LPT/MatMulTransformation.CompareWithRefImpl/f32_CPU_\[(1|8|1,1,1),4,12,2\]_level=256_shape=\[\]_input_low=\{ (0|-12.8) \}_input_high=\{ (25.5|12.7) \}_output_low=\{ (0|-12.8) \}_output_high\{ (25.5|12.7) \}_.*)"); - retVector.emplace_back( - R"(.*smoke_MatMulCompressedWeights_corner_cases_basic/MatmulWeightsDecompression.CompareWithRefs/data_shape=\[\?.\?.\?\]_\(\[1,1,4096\]\)_weights_shape=\[4096,4096\]_group_size=128_weights_precision=nf4_decompression_precision=f16_scale_precision=dynamic_transpose_weights=0_decompression_subtract=full_reshape_on_decompression=1_config=\(\).*)"); - retVector.emplace_back(R"(.*smoke_RDFT_CPU_1D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(126\)\)_constAxes=true_axes=\(\(0\)\)_isInverse=false.*)"); - retVector.emplace_back(R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(16.38\)\)_constAxes=true_axes=\(\(0.1\)\)_isInverse=false.*)"); -#endif - if (!ov::intel_cpu::hasHardwareSupport(ov::element::bf16)) { - // on platforms which do not support bfloat16, we are disabling bf16 tests since there are no bf16 primitives, - // tests are useless on such platforms - retVector.emplace_back(R"(.*(BF|bf)16.*)"); - retVector.emplace_back(R"(.*bfloat16.*)"); - } -#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) - if (!ov::with_cpu_x86_avx2()) { - // 
MatMul in Snippets uses BRGEMM that is supported only on AVX2 (and newer) platforms - // Disabled Snippets MHA tests as well because MHA pattern contains MatMul - retVector.emplace_back(R"(.*Snippets.*MHA.*)"); - retVector.emplace_back(R"(.*Snippets.*(MatMul|Matmul).*)"); - } - if (!ov::intel_cpu::hasHardwareSupport(ov::element::f16)) { - // Skip fp16 tests for paltforms that don't support fp16 precision - retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); - retVector.emplace_back(R"(.*ConcatMultiQuerySDPTest.*f16.*)"); - retVector.emplace_back(R"(.*ConcatSDPTest.*f16.*)"); - retVector.emplace_back(R"(.*ConvertCPULayerTest.*f16.*)"); - } -#elif defined(OPENVINO_ARCH_ARM64) || defined(OPENVINO_ARCH_ARM) - if (!ov::intel_cpu::hasIntDotProductSupport()) { - retVector.emplace_back(R"(.*smoke_MatMulCompressedWeights_Kleidiai.*)"); - } - if (!ov::intel_cpu::hasHardwareSupport(ov::element::f16)) { - // Skip fp16 tests for paltforms that don't support fp16 precision - retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); - retVector.emplace_back(R"(.*Prc=f16.*)"); - retVector.emplace_back(R"(.*ConcatMultiQuerySDPTest.*f16.*HasShapeOf=1.*)"); - retVector.emplace_back(R"(.*ConvertCPULayerTest.*f16.*)"); - } + std::regex(R"(.*smoke_QuantizedConvolutionBatchNormTransposeOnWeights/QuantizedConvolutionBatchNorm.CompareWithRefs/conv_type=convolution_quantize_type=fake_quantize_intervals_type=per_(tensor|channel)_transpose_on_weights=true_device=CPU.*)"), + std::regex(R"(.*smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[(1|4),3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1,1,1,1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ -12.7 \}_output_high\{ 12.8 \}_precision=\}_fq_on_weights=\{_255_\[1,1,1,1\]_\{ -12.7 \}_\{ 12.7 \}\}.*)"), + std::regex(R"(.*smoke_LPT/FuseDequantizeToFakeQuantizeTransformation.CompareWithRefImpl/CPU_f32_0_dynamic_\[\]_f32__\{\}_\{\}__\{ 0.01, 0.1, 1 \}_f32_\[1,3\]_1_1_.*)"), + 
std::regex(R"(.*smoke_QuantizedConvolutionBatchNorm/QuantizedConvolutionBatchNorm.CompareWithRefs/conv_type=convolution_quantize_.*)"), + std::regex(R"(.*smoke_QuantizedConvolutionBatchNorm/QuantizedConvolutionBatchNorm.CompareWithRefs/conv_type=convolution_backprop_quantize_type=(quantize_dequantize_intervals|compressed_weights_intervals).*)"), + std::regex(R"(.*smoke_LPT/MatMulTransformation.CompareWithRefImpl/f32_CPU_\[(1|8|1,1,1),4,12,2\]_level=256_shape=\[\]_input_low=\{ (0|-12.8) \}_input_high=\{ (25.5|12.7) \}_output_low=\{ (0|-12.8) \}_output_high\{ (25.5|12.7) \}_.*)"), + std::regex(R"(.*smoke_LPT/MatMulTransformation.CompareWithRefImpl/f32_CPU_\[(1|8|1,1,1),4,12,2\]_level=256_shape=\[\]_input_low=\{ (0|-12.8) \}_input_high=\{ (25.5|12.7) \}_output_low=\{ (0|-12.8) \}_output_high\{ (25.5|12.7) \}_.*)"), + std::regex(R"(.*smoke_MatMulCompressedWeights_corner_cases_basic/MatmulWeightsDecompression.CompareWithRefs/data_shape=\[\?.\?.\?\]_\(\[1,1,4096\]\)_weights_shape=\[4096,4096\]_group_size=128_weights_precision=nf4_decompression_precision=f16_scale_precision=dynamic_transpose_weights=0_decompression_subtract=full_reshape_on_decompression=1_config=\(\).*)"), + std::regex(R"(.*smoke_RDFT_CPU_1D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(126\)\)_constAxes=true_axes=\(\(0\)\)_isInverse=false.*)"), + std::regex(R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(16.38\)\)_constAxes=true_axes=\(\(0.1\)\)_isInverse=false.*)"), #endif -#if defined(OPENVINO_ARCH_ARM) - retVector.emplace_back(R"(.*ActivationLayerTest.*Inference.*)"); - retVector.emplace_back(R"(.*AddConvertToReorderTest.*smoke_TestAddReorder_CPU.*)"); - retVector.emplace_back(R"(.*AddOutputsTest.*smoke_CheckOutputExist.*)"); - retVector.emplace_back(R"(.*CompileModelCacheRuntimePropertiesTestBase.*CanLoadFromFileWithoutException.*)"); - retVector.emplace_back(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*2InputSubtract_f.*)"); - 
retVector.emplace_back(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*ConvPoolRelu_f.*)"); - retVector.emplace_back(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*MatMulBias_f.*)"); - retVector.emplace_back(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*SimpleFunctionRelu_f.*)"); - retVector.emplace_back(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl/MatMulBias_f32_batch1_CPU)"); - retVector.emplace_back(R"(.*CompileModelLoadFromCacheTest.*CanGetCorrectLoadedFromCacheProperty.*)"); - retVector.emplace_back(R"(.*CompileModelLoadFromFileTestBase.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)"); - retVector.emplace_back(R"(.*CompileModelLoadFromFileTestBase.*CanLoadFromFileWithoutException.*)"); - retVector.emplace_back(R"(.*CompileModelLoadFromMemoryTestBase.*CanLoadFromMemoryWithoutExecption.*)"); - retVector.emplace_back(R"(.*CompileModelLoadFromMemoryTestBase.*CanLoadFromMemoryWithoutWeightsANdExecption.*)"); - retVector.emplace_back(R"(.*CompileModelWithCacheEncryptionTest.*CanImportModelWithoutException.*)"); - retVector.emplace_back(R"(.*ConcatMultiQuerySDPTest.*f16.*)"); - retVector.emplace_back(R"(.*ConcatSDPTest.*f16.*)"); - retVector.emplace_back(R"(.*FakeConvertLayerTest.*f16.*)"); - retVector.emplace_back(R"(.*CoreThreadingTestsWithCacheEnabled.*smoke_compiled_model_cache_enabled.*)"); - retVector.emplace_back(R"(.*CoreThreadingTestsWithIter.*smoke_CompileModel.*)"); - retVector.emplace_back(R"(.*CustomOpConvertI64CPUTest.*CompareWithRefs.*)"); - retVector.emplace_back(R"(.*EltwiseLayerCPUTest.*CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"); - retVector.emplace_back(R"(.*EltwiseLayerTest.*Inference.*)"); - retVector.emplace_back(R"(.*ExecGraphDuplicateInputsOutputsNames.*CheckOutputsMatch.*)"); - retVector.emplace_back(R"(.*ExecGraphKeepAssignNode.*KeepAssignNode.*)"); - retVector.emplace_back(R"(.*ExecGraphRemoveParameterNode.*RemoveParameterNode.*)"); - retVector.emplace_back(R"(.*IndexAddTest.*CompareWithRefs.*)"); - 
retVector.emplace_back(R"(.*InterpolateLayerCPUTest.*CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"); - retVector.emplace_back(R"(.*MatMulLayerCPUTest.*CompareWithRefs.*)"); - retVector.emplace_back(R"(.*MatmulWeightsDecompression.*CompareWithRefs.*)"); - retVector.emplace_back(R"(.*MvnLayerCPUTest.*CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"); - retVector.emplace_back(R"(.*NonInputInPlaceTest.*CompareWithRefs.*)"); - retVector.emplace_back(R"(.*OVClassCompiledModelGetPropertyTest_EXEC_DEVICES.*CanGetExecutionDeviceInfo.*)"); - retVector.emplace_back(R"(.*OVClassConfigTestCPU.*smoke_.*)"); - retVector.emplace_back(R"(.*OVClassConfigTestCPU.*smoke_CpuExecNetwork.*)"); - retVector.emplace_back(R"(.*OVInferenceChaining.*StaticOutputToDynamicInput.*)"); - retVector.emplace_back(R"(.*OVInferenceChaining.*StaticOutputToStaticInput.*)"); - retVector.emplace_back(R"(.*OVInferenceChainingStatic.*StaticOutputToStaticInput.*)"); - retVector.emplace_back(R"(.*ReduceCPULayerTest.*CompareWithRefs.*INFERENCE_PRECISION_HINT=f16.*)"); - // Issue: 164799 - retVector.emplace_back(R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*)"); - // Issue 167685 - retVector.emplace_back(R"(.*importExportModelWithTypeRelaxedExt.*)"); -#endif - if (!ov::test::snippets::is_i8_supported_by_brgemm()) { - retVector.emplace_back(R"(.*Snippets.*MatMulFQ.*)"); - retVector.emplace_back(R"(.*Snippets.*MatMul.*Quantized.*)"); - retVector.emplace_back(R"(.*Snippets.*MHAFQ.*)"); - retVector.emplace_back(R"(.*Snippets.*MHAINT8.*)"); - retVector.emplace_back(R"(.*Snippets.*MHAQuant.*)"); - retVector.emplace_back(R"(.*Snippets.*MLP.*Quantized.*)"); - } - // MHA BF16 precision is only supported on BF16 supported platform - if (!ov::test::snippets::is_bf16_supported_by_brgemm()) { - // ignored for not supported bf16 platforms - retVector.emplace_back(R"(.*smoke_Snippets_EnforcePrecision_bf16.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_MHAWOTransposeEnforceBF16.*)"); - 
retVector.emplace_back(R"(.*smoke_Snippets_FullyConnected_EnforceBF16.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_MHA.*EnforceBF16.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_.*MLP.*bf16.*)"); - retVector.emplace_back(R"(.*ConcatSDPTest.*bf16.*)"); - } - if (!ov::test::snippets::is_fp16_supported_by_brgemm()) { - retVector.emplace_back(R"(.*smoke_Snippets_MHA.*FP16.*)"); - } else { - // Skip failing FP16 MHA tests on ARM64 due to low accuracy (FP16 accumulator is used in Gemm) - retVector.emplace_back(R"(.*smoke_Snippets_MHAWOTransposeEnforceFP16/MHAWOTranspose\.CompareWithRefImpl.*IS\[0\]=\[\?\.\?\.\?\.\?\].*IS\[1\]=\[\?\.\?\.\?\.\?\].*IS\[2\]=\[\?\.\?\.\?\.\?\].*)"); - } - if (!ov::with_cpu_x86_avx512_core_amx_int8()) - // TODO: Issue 92895 - // on platforms which do not support AMX, we are disabling I8 input tests - retVector.emplace_back(R"(smoke_LPT/FakeQuantizeWithNotOptimalTransformation.CompareWithRefImpl.*CPU.*i8.*)"); - // RNN/LSTM/GRU/AUGRU BF16 tests on avx512 core ISA - if (ov::with_cpu_x86_avx512_core() && !ov::with_cpu_x86_avx512_core_amx_bf16()) { - retVector.emplace_back(R"(smoke.*(AUGRUCellCPUTest|GRUCellCPUTest|RNNCellCPUTest|LSTMCellLayerCPUTest).CompareWithRefs.*INFERENCE_PRECISION_HINT=bf16.*)"); - retVector.emplace_back(R"(nightly.*bf16.*(AUGRUSequenceCPUTest|GRUSequenceCPUTest|LSTMSequenceCPUTest).CompareWithRefs.*INFERENCE_PRECISION_HINT=bf16.*)"); - } #ifdef SNIPPETS_LIBXSMM_TPP - // GN in TPP requires exposing tmp Buffer results outside the loop (ticket: 151234) - retVector.emplace_back(R"(.*smoke_Snippets_GroupNormalization.*)"); - // TPP performs precision conversion implicitly, it makes all Convert tests irrelevant - retVector.emplace_back(R"(.*smoke_Snippets_Convert.*)"); - // ABS and ROUND operations are needed for TPP support. Disable, since low precisions are not supported by TPP yet. 
- retVector.emplace_back(R"(.*smoke_Snippets_FQ.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_TransposeMatMulFQ.*)"); - // TPP doesn't support op with 2 outs, when one of them is Result (ticket: 130642) - retVector.emplace_back(R"(.*smoke_Snippets_MaxNumParamsEltwise.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_Eltwise_TwoResults.*)"); - // Accuracy problem with Exp + Reciprocal combination on TPP side (ticket: 130699) - retVector.emplace_back(R"(.*smoke_Snippets_ExpReciprocal.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_AddSoftmax.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_TransposeSoftmaxEltwise.*)"); - // Low-precision Matmuls are not supported by TPP yet - retVector.emplace_back(R"(.*smoke_Snippets.*=(BF16|bf16|i8|u8).*)"); - retVector.emplace_back(R"(.*smoke_Snippets.*MatMulFQ.*)"); - retVector.emplace_back(R"(.*smoke_Snippets.*MatMulBiasQuantized.*)"); - retVector.emplace_back(R"(.*smoke_Snippets.*MatMulsQuantized.*)"); - retVector.emplace_back(R"(.*smoke_Snippets.*MatMulsQuantizedSoftmax.*)"); - retVector.emplace_back(R"(.*smoke_Snippets.*MHAINT8MatMul.*)"); - retVector.emplace_back(R"(.*smoke_Snippets.*MHAQuantMatMul0.*)"); - retVector.emplace_back(R"(.*smoke_Snippets.*MHAFQ.*)"); - retVector.emplace_back(R"(.*smoke_Snippets.*PrecisionPropagation_Convertion.*)"); - retVector.emplace_back(R"(.*smoke_MHAQuant.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_MLP.*)"); - if (!ov::with_cpu_x86_avx512_core_amx()) { - // Issue: 165178 - retVector.emplace_back(R"(.*smoke_Snippets_Softmax/Softmax\.CompareWithRefImpl/IS=\[\]_TS=\(\(.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_MHA.*IS\[0\]=\[\]_\(.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_TransposeSoftmax/TransposeSoftmax\.CompareWithRefImpl/IS\[0\]=\[\]_TS\[0\]=\(\(.*)"); - } + // GN in TPP requires exposing tmp Buffer results outside the loop (ticket: 151234) + std::regex(R"(.*smoke_Snippets_GroupNormalization.*)"), + // TPP performs precision conversion implicitly, it 
makes all Convert tests irrelevant + std::regex(R"(.*smoke_Snippets_Convert.*)"), + // ABS and ROUND operations are needed for TPP support. Disable, since low precisions are not supported by TPP yet. + std::regex(R"(.*smoke_Snippets_FQ.*)"), + std::regex(R"(.*smoke_Snippets_TransposeMatMulFQ.*)"), + // TPP doesn't support op with 2 outs, when one of them is Result (ticket: 130642) + std::regex(R"(.*smoke_Snippets_MaxNumParamsEltwise.*)"), + std::regex(R"(.*smoke_Snippets_Eltwise_TwoResults.*)"), + // Accuracy problem with Exp + Reciprocal combination on TPP side (ticket: 130699) + std::regex(R"(.*smoke_Snippets_ExpReciprocal.*)"), + std::regex(R"(.*smoke_Snippets_AddSoftmax.*)"), + std::regex(R"(.*smoke_Snippets_TransposeSoftmaxEltwise.*)"), + // Low-precision Matmuls are not supported by TPP yet + std::regex(R"(.*smoke_Snippets.*=(BF16|bf16|i8|u8).*)"), + std::regex(R"(.*smoke_Snippets.*MatMulFQ.*)"), + std::regex(R"(.*smoke_Snippets.*MatMulBiasQuantized.*)"), + std::regex(R"(.*smoke_Snippets.*MatMulsQuantized.*)"), + std::regex(R"(.*smoke_Snippets.*MatMulsQuantizedSoftmax.*)"), + std::regex(R"(.*smoke_Snippets.*MHAINT8MatMul.*)"), + std::regex(R"(.*smoke_Snippets.*MHAQuantMatMul0.*)"), + std::regex(R"(.*smoke_Snippets.*MHAFQ.*)"), + std::regex(R"(.*smoke_Snippets.*PrecisionPropagation_Convertion.*)"), + std::regex(R"(.*smoke_MHAQuant.*)"), + std::regex(R"(.*smoke_Snippets_MLP.*)"), # if defined(OPENVINO_ARCH_ARM64) - retVector.emplace_back(R"(.*smoke_Snippets_GatedMLP_f32.*InputShape=\[\]_\(\[1\.32\.1024\]\).*)"); - retVector.emplace_back(R"(.*smoke_Snippets_MatMulTransposeB.*IS\[0\]=\[\]_.*T\[0\]=f32.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_TransposeMatMulBias.*)"); + std::regex(R"(.*smoke_Snippets_GatedMLP_f32.*InputShape=\[\]_\(\[1\.32\.1024\]\).*)"), + std::regex(R"(.*smoke_Snippets_MatMulTransposeB.*IS\[0\]=\[\]_.*T\[0\]=f32.*)"), + std::regex(R"(.*smoke_Snippets_TransposeMatMulBias.*)"), # endif #endif + }; + + + if 
(!ov::intel_cpu::hasHardwareSupport(ov::element::bf16)) { + // on platforms which do not support bfloat16, we are disabling bf16 tests since there are no bf16 primitives, + // tests are useless on such platforms + patterns.emplace_back(std::regex(R"(.*(BF|bf)16.*)")); + patterns.emplace_back(std::regex(R"(.*bfloat16.*)")); + } + if (!ov::test::snippets::is_i8_supported_by_brgemm()) { + patterns.emplace_back(std::regex(R"(.*Snippets.*MatMulFQ.*)")); + patterns.emplace_back(std::regex(R"(.*Snippets.*MatMul.*Quantized.*)")); + patterns.emplace_back(std::regex(R"(.*Snippets.*MHAFQ.*)")); + patterns.emplace_back(std::regex(R"(.*Snippets.*MHAINT8.*)")); + patterns.emplace_back(std::regex(R"(.*Snippets.*MHAQuant.*)")); + patterns.emplace_back(std::regex(R"(.*Snippets.*MLP.*Quantized.*)")); + } + // MHA BF16 precision is only supported on BF16 supported platform + if (!ov::test::snippets::is_bf16_supported_by_brgemm()) { + // ignored for not supported bf16 platforms + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_EnforcePrecision_bf16.*)")); + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_MHAWOTransposeEnforceBF16.*)")); + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_FullyConnected_EnforceBF16.*)")); + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_MHA.*EnforceBF16.*)")); + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_.*MLP.*bf16.*)")); + patterns.emplace_back(std::regex(R"(.*ConcatSDPTest.*bf16.*)")); + } + if (!ov::test::snippets::is_fp16_supported_by_brgemm()) { + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_MHA.*FP16.*)")); + } else { + // Skip failing FP16 MHA tests on ARM64 due to low accuracy (FP16 accumulator is used in Gemm) + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_MHAWOTransposeEnforceFP16/MHAWOTranspose\.CompareWithRefImpl.*IS\[0\]=\[\?\.\?\.\?\.\?\].*IS\[1\]=\[\?\.\?\.\?\.\?\].*IS\[2\]=\[\?\.\?\.\?\.\?\].*)")); + } + if (!ov::with_cpu_x86_avx512_core_amx_int8()) { + // TODO: Issue 92895 + // on platforms 
which do not support AMX, we are disabling I8 input tests + patterns.emplace_back(std::regex(R"(smoke_LPT/FakeQuantizeWithNotOptimalTransformation.CompareWithRefImpl.*CPU.*i8.*)")); + } + // RNN/LSTM/GRU/AUGRU BF16 tests on avx512 core ISA + if (ov::with_cpu_x86_avx512_core() && !ov::with_cpu_x86_avx512_core_amx_bf16()) { + patterns.emplace_back(std::regex(R"(smoke.*(AUGRUCellCPUTest|GRUCellCPUTest|RNNCellCPUTest|LSTMCellLayerCPUTest).CompareWithRefs.*INFERENCE_PRECISION_HINT=bf16.*)")); + patterns.emplace_back(std::regex(R"(nightly.*bf16.*(AUGRUSequenceCPUTest|GRUSequenceCPUTest|LSTMSequenceCPUTest).CompareWithRefs.*INFERENCE_PRECISION_HINT=bf16.*)")); + } + if (ov::with_cpu_x86_avx512_core_amx()) { + // Issue: 131475 + patterns.emplace_back(std::regex(R"(smoke_ExportImportTest/ExportOptimalNumStreams.OptimalNumStreams/.*)")); + // by calc abs_threshold with expected value + patterns.emplace_back(std::regex(R"(.*smoke_GatherCompressedWeights_basic/GatherWeightsDecompression.CompareWithRefs.*INFERENCE_PRECISION_HINT.*bf16.*)")); + patterns.emplace_back(std::regex(R"(.*smoke_Interaction/IntertactionCPUTest.CompareWithRefs.*Prc=i32.*)")); + patterns.emplace_back(std::regex(R"(.*smoke_MatMulCompressedWeights_(amx|sym_amx|corner_cases_amx)/MatmulWeightsDecompression.CompareWithRefs.*INFERENCE_PRECISION_HINT.*bf16.*)")); + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_EnforcePrecision_bf16/EnforcePrecisionTest.*)")); + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_MHABF16_4D/MHA.CompareWithRefImpl/.*\[1.58.16.34\]_IS\[1\]=\[1.58.16.34\]_IS\[2\]=\[1.1.1.58\]_IS\[3\]=\[1.58.16.34\].*)")); + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_MHAWOTransposeBF16/MHAWOTranspose.CompareWithRefImpl/.*IS\[0\]=\[2.\?.64\].*IS\[1\]=\[2.64.\?\].*IS\[2\]=\[2.\?.64\].*)")); + // Issue: 141705 + patterns.emplace_back(std::regex(R"(.*smoke_Deconv_(2|3)D_NSPC_INT8_AMX/DeconvolutionLayerCPUTest.*)")); + 
patterns.emplace_back(std::regex(R"(.*smoke_Deconv_(2|3)D_NSPC_INT8_AMX/DeconvolutionLayerCPUTest.*)")); + } - if (ov::with_cpu_x86_avx512_core_amx()) { - // Issue: 131475 - retVector.emplace_back(R"(smoke_ExportImportTest/ExportOptimalNumStreams.OptimalNumStreams/.*)"); - // by calc abs_threshold with expected value - retVector.emplace_back(R"(.*smoke_GatherCompressedWeights_basic/GatherWeightsDecompression.CompareWithRefs.*INFERENCE_PRECISION_HINT.*bf16.*)"); - retVector.emplace_back(R"(.*smoke_Interaction/IntertactionCPUTest.CompareWithRefs.*Prc=i32.*)"); - retVector.emplace_back(R"(.*smoke_MatMulCompressedWeights_(amx|sym_amx|corner_cases_amx)/MatmulWeightsDecompression.CompareWithRefs.*INFERENCE_PRECISION_HINT.*bf16.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_EnforcePrecision_bf16/EnforcePrecisionTest.*)"); - retVector.emplace_back(R"(.*smoke_Snippets_MHABF16_4D/MHA.CompareWithRefImpl/.*\[1.58.16.34\]_IS\[1\]=\[1.58.16.34\]_IS\[2\]=\[1.1.1.58\]_IS\[3\]=\[1.58.16.34\].*)"); - retVector.emplace_back(R"(.*smoke_Snippets_MHAWOTransposeBF16/MHAWOTranspose.CompareWithRefImpl/.*IS\[0\]=\[2.\?.64\].*IS\[1\]=\[2.64.\?\].*IS\[2\]=\[2.\?.64\].*)"); - // Issue: 141705 - retVector.emplace_back(R"(.*smoke_Deconv_(2|3)D_NSPC_INT8_AMX/DeconvolutionLayerCPUTest.*)"); - retVector.emplace_back(R"(.*smoke_Deconv_(2|3)D_NSPC_INT8_AMX/DeconvolutionLayerCPUTest.*)"); - } + // Xattention only verified on AMX platform + if (!ov::with_cpu_x86_avx512_core_amx()) { + patterns.emplace_back(std::regex(R"(.*EnableXattn=1.*)")); + } - // Xattention only verified on AMX platform - if (!ov::with_cpu_x86_avx512_core_amx()) { - retVector.emplace_back(R"(.*EnableXattn=1.*)"); - } + if (ov::with_cpu_x86_avx512_core_fp16() || CPUTestUtils::with_cpu_x86_avx2_vnni_2()) { + // Issue: 143852 + patterns.emplace_back(std::regex(R"(smoke_ConvertRangeSubgraphCPUTest/ConvertRangeSubgraphCPUTest\.CompareWithRefs.*Prc=f16.*)")); + 
patterns.emplace_back(std::regex(R"((smoke|nightly)_FC_3D_FP16/.*_Fused=Multiply\(PerChannel\).*)")); + patterns.emplace_back(std::regex(R"((smoke|nightly)_MM_Brgemm_Static_FP16.*TS=\(\(55\.12\)\).*_Fused=Multiply\(PerChannel\).*)")); + patterns.emplace_back(std::regex(R"(smoke_MM_Dynamic_Fusing_FP16/.*TS=\(\(16\.12\)_\(33\.7\)_\(16\.12\)\).*_Fused=Multiply\(PerChannel\).*)")); + patterns.emplace_back(std::regex(R"(smoke_MM_Brgemm_Dynamic_Fusing_FP16/.*TS=\(\(16\.12\)_\(33\.7\)_\(16\.12\)\).*_Fused=Multiply\(PerChannel\).*)")); + patterns.emplace_back(std::regex(R"(smoke_Conv_.*_FP16/.*_Fused=PRelu1D\.Multiply\(PerChannel\)\.Add\(PerChannel\).*)")); + patterns.emplace_back(std::regex(R"(smoke_Conv_Sum_Broadcast_FP16/ConvSumInPlaceTest.*Relu\.Multiply\(PerChannel\)\.Add\(PerChannel\).*)")); + } + + if (CPUTestUtils::with_cpu_x86_avx2_vnni_2()) { + // jit_gemm_BF16 kernels are not supported for conv,inner_product,matmul on avx2_vnni_2 platforms + patterns.emplace_back(std::regex(R"(smoke_Conv_.*D_GEMM_BF16.*)")); + patterns.emplace_back( + std::regex(R"(smoke_GroupConv_.*D_Gemm_BF16/GroupConvolutionLayerCPUTest.CompareWithRefs.*primitive=jit_gemm.*)")); + patterns.emplace_back(std::regex(R"(smoke_.*MatMulLayerCPUTest.*INFERENCE_PRECISION_HINT=bf16.*_primitive=jit_gemm.*)")); + // by calc abs_threshold with expected value + patterns.emplace_back(std::regex(R"(smoke_Snippets_MHAWOTransposeBF16/MHAWOTranspose.CompareWithRefImpl/.*IS\[0\]=\[\]_\(\[12.128.100\]\).*)")); + patterns.emplace_back(std::regex(R"(smoke_Snippets_MHAWOTransposeBF16/MHAWOTranspose.CompareWithRefImpl/.*IS\[0\]=\[2.\?.64\].*)")); + patterns.emplace_back(std::regex(R"(smoke_Snippets_MHAWOTransposeBF16/MHAWOTranspose.CompareWithRefImpl/.*IS\[0\]=\[\?.\?.\?.\?\].*)")); + } + +#if defined(OPENVINO_ARCH_RISCV64) + if (!ov::intel_cpu::riscv64::mayiuse(ov::intel_cpu::riscv64::gv)) { + // Integer division is supported only by JIT Executor which is available on platforms with GV instruction sets. 
+ // In other cases there might be accuracy problems. + patterns.emplace_back(std::regex(R"(.*smoke_EltwiseChain/EltwiseChainTest.CompareWithRefs.*InPRC3=i32_Op0=Div_Op1.*)")); + patterns.emplace_back(std::regex(R"(.*smoke_CompareWithRefs_static.*eltwise_op_type=Div.*model_type=i32.*)")); + } +#endif +#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) + if (!ov::with_cpu_x86_avx2()) { + // MatMul in Snippets uses BRGEMM that is supported only on AVX2 (and newer) platforms + // Disabled Snippets MHA tests as well because MHA pattern contains MatMul + patterns.emplace_back(std::regex(R"(.*Snippets.*MHA.*)")); + patterns.emplace_back(std::regex(R"(.*Snippets.*(MatMul|Matmul).*)")); + } + if (!ov::intel_cpu::hasHardwareSupport(ov::element::f16)) { + // Skip fp16 tests for platforms that don't support fp16 precision + patterns.emplace_back(std::regex(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)")); + patterns.emplace_back(std::regex(R"(.*ConcatMultiQuerySDPTest.*f16.*)")); + patterns.emplace_back(std::regex(R"(.*ConcatSDPTest.*f16.*)")); + patterns.emplace_back(std::regex(R"(.*ConvertCPULayerTest.*f16.*)")); + } +#elif defined(OPENVINO_ARCH_ARM64) || defined(OPENVINO_ARCH_ARM) + if (!ov::intel_cpu::hasIntDotProductSupport()) { + patterns.emplace_back(std::regex(R"(.*smoke_MatMulCompressedWeights_Kleidiai.*)")); + } + if (!ov::intel_cpu::hasHardwareSupport(ov::element::f16)) { + // Skip fp16 tests for platforms that don't support fp16 precision + patterns.emplace_back(std::regex(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)")); + patterns.emplace_back(std::regex(R"(.*Prc=f16.*)")); + patterns.emplace_back(std::regex(R"(.*ConcatMultiQuerySDPTest.*f16.*HasShapeOf=1.*)")); + patterns.emplace_back(std::regex(R"(.*ConvertCPULayerTest.*f16.*)")); + } +#endif +#ifdef SNIPPETS_LIBXSMM_TPP + if (!ov::with_cpu_x86_avx512_core_amx()) { + // Issue: 165178 + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_Softmax/Softmax\.CompareWithRefImpl/IS=\[\]_TS=\(\(.*)")); + 
patterns.emplace_back(std::regex(R"(.*smoke_Snippets_MHA.*IS\[0\]=\[\]_\(.*)")); + patterns.emplace_back(std::regex(R"(.*smoke_Snippets_TransposeSoftmax/TransposeSoftmax\.CompareWithRefImpl/IS\[0\]=\[\]_TS\[0\]=\(\(.*)")); + } +#endif - if (ov::with_cpu_x86_avx512_core_fp16() || CPUTestUtils::with_cpu_x86_avx2_vnni_2()) { - // Issue: 143852 - retVector.emplace_back(R"(smoke_ConvertRangeSubgraphCPUTest/ConvertRangeSubgraphCPUTest\.CompareWithRefs.*Prc=f16.*)"); - retVector.emplace_back(R"((smoke|nightly)_FC_3D_FP16/.*_Fused=Multiply\(PerChannel\).*)"); - retVector.emplace_back(R"((smoke|nightly)_MM_Brgemm_Static_FP16.*TS=\(\(55\.12\)\).*_Fused=Multiply\(PerChannel\).*)"); - retVector.emplace_back(R"(smoke_MM_Dynamic_Fusing_FP16/.*TS=\(\(16\.12\)_\(33\.7\)_\(16\.12\)\).*_Fused=Multiply\(PerChannel\).*)"); - retVector.emplace_back(R"(smoke_MM_Brgemm_Dynamic_Fusing_FP16/.*TS=\(\(16\.12\)_\(33\.7\)_\(16\.12\)\).*_Fused=Multiply\(PerChannel\).*)"); - retVector.emplace_back(R"(smoke_Conv_.*_FP16/.*_Fused=PRelu1D\.Multiply\(PerChannel\)\.Add\(PerChannel\).*)"); - retVector.emplace_back(R"(smoke_Conv_Sum_Broadcast_FP16/ConvSumInPlaceTest.*Relu\.Multiply\(PerChannel\)\.Add\(PerChannel\).*)"); - } + return patterns; + }; - if (CPUTestUtils::with_cpu_x86_avx2_vnni_2()) { - // jit_gemm_BF16 kernels are not supported for conv,inner_product,matmul on avx2_vnni_2 platforms - retVector.emplace_back(R"(smoke_Conv_.*D_GEMM_BF16.*)"); - retVector.emplace_back( - R"(smoke_GroupConv_.*D_Gemm_BF16/GroupConvolutionLayerCPUTest.CompareWithRefs.*primitive=jit_gemm.*)"); - retVector.emplace_back(R"(smoke_.*MatMulLayerCPUTest.*INFERENCE_PRECISION_HINT=bf16.*_primitive=jit_gemm.*)"); - // by calc abs_threshold with expected value - retVector.emplace_back(R"(smoke_Snippets_MHAWOTransposeBF16/MHAWOTranspose.CompareWithRefImpl/.*IS\[0\]=\[\]_\(\[12.128.100\]\).*)"); - retVector.emplace_back(R"(smoke_Snippets_MHAWOTransposeBF16/MHAWOTranspose.CompareWithRefImpl/.*IS\[0\]=\[2.\?.64\].*)"); - 
retVector.emplace_back(R"(smoke_Snippets_MHAWOTransposeBF16/MHAWOTranspose.CompareWithRefImpl/.*IS\[0\]=\[\?.\?.\?.\?\].*)"); - } + const static std::vector patterns = get_patterns(); - return retVector; + return patterns; } diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 4c9f4d5074b99f..23fe4726fcea3a 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -30,220 +30,226 @@ bool immadSupported() { } } // namespace -std::vector disabledTestPatterns() { - std::vector returnVal = { +const std::vector& disabled_test_patterns() { + auto get_patterns = []() { + std::vector patterns{ // These tests might fail due to accuracy loss a bit bigger than threshold - R"(.*(GRUCellTest).*)", - R"(.*(RNNSequenceTest).*)", + std::regex(R"(.*(GRUCellTest).*)"), + std::regex(R"(.*(RNNSequenceTest).*)"), // These test cases might fail due to FP16 overflow - R"(.*(LSTM).*activations=\(relu.*modelType=f16.*)", + std::regex(R"(.*(LSTM).*activations=\(relu.*modelType=f16.*)"), // Need to update activation primitive to support any broadcastable constant to enable these cases. 
- R"(.*ActivationParamLayerTest.*)", + std::regex(R"(.*ActivationParamLayerTest.*)"), // Unknown issues - R"(.*(LSTMSequence).*mode=.*_RAND_SEQ_LEN_CONST.*)", - R"(.*(smoke_DetectionOutput5In).*)", + std::regex(R"(.*(LSTMSequence).*mode=.*_RAND_SEQ_LEN_CONST.*)"), + std::regex(R"(.*(smoke_DetectionOutput5In).*)"), // TODO: Issue: 47773 - R"(.*(ProposalLayerTest).*)", + std::regex(R"(.*(ProposalLayerTest).*)"), // TODO: Issue: 54194 - R"(.*ActivationLayerTest.*SoftPlus.*)", + std::regex(R"(.*ActivationLayerTest.*SoftPlus.*)"), // TODO: Issue: 59586, NormalizeL2 output mismatch for empty axes case - R"(.*NormalizeL2LayerTest.*axes=\(\).*)", + std::regex(R"(.*NormalizeL2LayerTest.*axes=\(\).*)"), // Not allowed dynamic loop tests on GPU - R"(.*smoke_StaticShapeLoop_dynamic_exit.*)", + std::regex(R"(.*smoke_StaticShapeLoop_dynamic_exit.*)"), // TODO Issue 100145 - R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)", + std::regex(R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)"), // Expected behavior. GPU plugin doesn't support i64 for eltwise power operation. 
- R"(.*EltwiseLayerTest.*eltwise_op_type=Pow.*model_type=i64.*)", + std::regex(R"(.*EltwiseLayerTest.*eltwise_op_type=Pow.*model_type=i64.*)"), // need dynamic rank - R"(.*smoke.*BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", - R"(.*smoke.*BehaviorTests.*DynamicOutputToDynamicInput.*)", - R"(.*smoke.*BehaviorTests.*DynamicInputToDynamicOutput.*)", + std::regex(R"(.*smoke.*BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)"), + std::regex(R"(.*smoke.*BehaviorTests.*DynamicOutputToDynamicInput.*)"), + std::regex(R"(.*smoke.*BehaviorTests.*DynamicInputToDynamicOutput.*)"), // TODO: Issue: 180519 - R"(.*CoreThreadingTestsWithIter.*)", + std::regex(R"(.*CoreThreadingTestsWithIter.*)"), // TODO: Issue: 145926 - R"(.*CoreThreadingTest.smoke_QueryModel.*)", + std::regex(R"(.*CoreThreadingTest.smoke_QueryModel.*)"), // Assign-3/ReadValue-3 does not have evaluate() methods; ref implementation does not save the value across the inferences. - R"(smoke_MemoryTestV3.*)", + std::regex(R"(smoke_MemoryTestV3.*)"), // Issue: 90539 - R"(.*CachingSupportCase.*LoadNetworkCacheTestBase.*CompareWithRefImpl.*)", - R"(.*CachingSupportCase.*GPU.*CompileModelCacheTestBase.*CompareWithRefImpl.*)", + std::regex(R"(.*CachingSupportCase.*LoadNetworkCacheTestBase.*CompareWithRefImpl.*)"), + std::regex(R"(.*CachingSupportCase.*GPU.*CompileModelCacheTestBase.*CompareWithRefImpl.*)"), // Issue: 111437 - R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.Inference.*)", - R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.Inference.*)", + std::regex(R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.Inference.*)"), + std::regex(R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.Inference.*)"), // Issue: 111440 - R"(.*smoke_set1/GatherElementsGPUTest.Inference.*)", + std::regex(R"(.*smoke_set1/GatherElementsGPUTest.Inference.*)"), // Issue: 168015. 
Low precision PRelu is not supported on GPU - R"(.*smoke_LPT.*PReluTransformation.*)", + std::regex(R"(.*smoke_LPT.*PReluTransformation.*)"), // Issue: 168016. Low precision LSTMSequence/GPUSequence are not supported on GPU - R"(.*smoke_LPT.*RecurrentCellTransformation.*)", + std::regex(R"(.*smoke_LPT.*RecurrentCellTransformation.*)"), // Issue: expected precision mismatch - R"(.*smoke_LPT.*PullReshapeThroughDequantizationTransformation.*)", + std::regex(R"(.*smoke_LPT.*PullReshapeThroughDequantizationTransformation.*)"), // Issue: accuracy mismatch - R"(.*smoke_LPT.*FuseDequantizeToFakeQuantizeTransformation.*f32_0_dynamic_\[\]_f32__\{\}_\{\}__\{.0.01.\}_dynamic_\[\]_0_1_dynamic_f32_level=256_shape=\[\]_input_low=\{.0.\}_input_high=\{.2.55.\}_output_low=\{.0.\}_output_high=\{.2.55.\}_output_precision=_constant_precision=)", - R"(.*smoke_LPT.*MatMulWithConstantTransformation.*\[1,1,3,4\].*level=256_shape=\[1,3,1\]_input_low=\{.0,.0,.0.\}_input_high=\{.25,.24,.25.\}_output_low=\{.0,.0,.0.\}_output_high=\{.25,.24,.25.\}_output_precision=_constant_precision=.*)", + std::regex(R"(.*smoke_LPT.*FuseDequantizeToFakeQuantizeTransformation.*f32_0_dynamic_\[\]_f32__\{\}_\{\}__\{.0.01.\}_dynamic_\[\]_0_1_dynamic_f32_level=256_shape=\[\]_input_low=\{.0.\}_input_high=\{.2.55.\}_output_low=\{.0.\}_output_high=\{.2.55.\}_output_precision=_constant_precision=)"), + std::regex(R"(.*smoke_LPT.*MatMulWithConstantTransformation.*\[1,1,3,4\].*level=256_shape=\[1,3,1\]_input_low=\{.0,.0,.0.\}_input_high=\{.25,.24,.25.\}_output_low=\{.0,.0,.0.\}_output_high=\{.25,.24,.25.\}_output_precision=_constant_precision=.*)"), // Issue: 123493 - R"(.*GroupNormalizationTest.*CompareWithRefs.*NetType=f16.*)", + std::regex(R"(.*GroupNormalizationTest.*CompareWithRefs.*NetType=f16.*)"), // Doesn't match reference results as v6 ref impl behavior is misaligned with expected - R"(smoke_MemoryTestV3.*)", + std::regex(R"(smoke_MemoryTestV3.*)"), // by calc abs_threshold with expected value - 
R"(.*smoke_CTCLoss_Set2/CTCLossLayerTest.Inference/IS=\(\[\]\)_TS=\{\(3.6.8\)\}_LL=\(6.5.6\)_A=\(4.1.2.3.4.5\)\(5.4.3.0.1.0\)\(2.1.3.1.3.0\)_AL=\(3.3.5\)_BI=7_PCR=1_CMR=1_U=0_PF=f32_PI=i64.*)", - R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=.*1.16.10.10.*_OS=\(\)_K\(1.1\)_S\(1.3\).*)", - R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=.*1.32.10.10.*_OS=\(\)_K\(1.1\)_S\(1.3\).*)", - R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=.*1.3.30.30.*_OS=\(\)_K\(1.1\)_S\(1.3\).*O=16.*)", - R"(.*smoke_ConvolutionBackpropData2D_AutoPadValid/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=\{\((1\.32\.10\.10|1\.16\.10\.10)\)\}_OS=\(\)_K\(1.1\)_S\(1.3\)_PB\(0.0\)_PE\(0.0\)_D=\(1.1\)_OP=\(\)_O=(1|5|16)_AP=valid_netPRC=f16.*)", - R"(.*smoke_ConvolutionBackpropData2D_AutoPadValid/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=.*1.3.30.30.*_OS=\(\)_K\(1.1\)_S\(1.3\)_PB\(0.0\)_PE\(0.0\)_D=\(1.1\)_OP=\(\)_O=16_AP=valid_netPRC=f16.*)", - R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=\{\((1.32.10.10|1.16.10.10|1.3.30.30)\)\}_OS=\(\)_K\(1.1\)_S\(3.3\)_PB\(0.0\)_PE\(0.0\)_D=\(1.1\)_OP=\((1.1|2.2)\)_O=(1|5|16)_AP=valid_netPRC=f16.*)", - R"(.*smoke_ConvolutionBackpropData2D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=\{\((1.32.10.10|1.16.10.10|1.3.30.30)\)\}_OS=\(\)_K\(1.1\)_S\(3.3\)_PB\(0.0\)_PE\((0.0|1.1)\)_D=\(1.1\)_OP=\((1.1|2.2)\)_O=(1|5|16).*)", - R"(.*smoke_GridSample/GridSampleLayerTest.Inference/DS=\((5.2.3.5|5.3.4.6)\)_GS=\((5.7.3.2|5.2.8.2)\)_align_corners=(0|1)_Mode=(bilinear|bicubic)_padding_mode=zeros_model_type=f16_grid_type=f32.*)", - 
R"(.*smoke_MatMul_BothTranspose/MatMulLayerTest.Inference/IS=\(\[\]_\[\]\)_TS=\{\(5\)_\(5\)\}_transpose_a=1_transpose_b=1_secondary_input_type=(CONSTANT|PARAMETER)_modelType=(f16|f32).*)", - R"(.*smoke_dynamic_conv_reshape_fullyconnected/ConvReshapeFullyConnectedDynamicGPUTestDynamic.Inference/IS=\[\?\.64\.1\.\?\.\?\]_\[1\.64\.1\.1\.1\]_model_type=f16.*)", - R"(.*smoke_empty_tensor/EmptyTensorDynamicGPUTest.Inference/IS=\[\?\]_\[30\]_\[40\]_\[50\]_\[10\]_\[7\]_\[\?.\?\]_\[1.0\]_\[1.8\]_\[1.0\]_\[1.3\]_\[1.20\]_NetType=i32.*)", - R"(.*smoke_Convolution2D_ExplicitPadding/ActivatiConvolutionLayerTestonLayerTest.Inference.*netPRC=f16.*)", - R"(.*smoke_Convolution2D_AutoPadValid/ConvolutionLayerTest.Inference.*netPRC=f16.*)", - R"(.*smoke_Convolution3D_Basic1/ConvolutionLayerTest.*)", - R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding/ConvolutionBackpropDataLayerTest.*)", - R"(.*smoke_ConvolutionBackpropData2D_AutoPadValid/ConvolutionBackpropDataLayerTest.*K\((3.5|3.3)\).*netPRC=f16.*)", - R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.*K\((3.5|3.3)\).*netPRC=f16.*)", - R"(.*smoke_ConvolutionBackpropData2D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*K\((3.5|3.3)\).*PE\(1.1\).*netPRC=f16.*)", - R"(.*smoke_ConvolutionBackpropData2D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*TS=\{\(1.32.10.10\).*K\((3.5|3.3)\).*PE\(0.0\).*netPRC=f16.*)", - R"(.*smoke_ConvolutionBackpropData2D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*TS=\{\((1.3.30.30|1.16.10.10)\).*K\(3.5\).*PE\(0.0\).*netPRC=f16.*)", - R"(.*smoke_ConvolutionBackpropData2D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*TS=\{\((1.3.30.30|1.16.10.10)\).*K\(3.3\).*PE\(0.0\).*O=(1|5|16)_AP=explicit_netPRC=f16.*)", - 
R"(.*smoke_ConvolutionBackpropData3D_ExplicitPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*TS=\{\((1.16.5.5.5|1.32.5.5.5)\)\}.*O=(1|5)_AP=valid_netPRC=f16.*)", - R"(.*smoke_ConvolutionBackpropData3D_ExplicitPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference.*O=16_AP=valid_netPRC=f16.*)", - R"(.*moke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*TS=\{\((1.16.5.5.5|1.32.5.5.5)\)\}.*O=(1|5)_AP=valid_netPRC=f16.*)", - R"(.*moke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference.*O=16_AP=valid_netPRC=f16.*)", - R"(.*smoke_DeformableConvolution2D_ExplicitPadding/DeformableConvolutionLayerTest.Inference.*O=(1|5)_AP=explicit_BI_PAD=0_MODULATION=1_netPRC=f16.*)", - R"(.*smoke_DeformableConvolution2D_AutoPadValid/DeformableConvolutionLayerTest.Inference.*O=(1|5)_AP=valid_BI_PAD=0_MODULATION=1_netPRC=f16.*)", - R"(.*smoke_DeformableConvolution2D_DeformableGroups_ExplicitPadding/DeformableConvolutionLayerTest.Inference.*O=(1|5)_AP=explicit_BI_PAD=(0|1)_MODULATION=(0|1)_netPRC=f16.*)", - R"(.*smoke_DeformableConvolution2D_SingleTestCase/DeformableConvolutionLayerTest.Inference.*O=(1|5)_AP=explicit_BI_PAD=(0|1)_MODULATION=(0|1)_netPRC=f16.*)", - R"(.*smoke_DeformableConvolution2D_MultipleGroup.*/DeformableConvolutionLayerTest.Inference.*O=(1|5)_AP=explicit_BI_PAD=(0|1)_MODULATION=(0|1)_netPRC=f16.*)", - R"(.*smoke_DFT_5d/DFTLayerTest.Inference/IS=\(\[\]\)_TS=\{\(10.4.8.2.2\)\}_Precision=f32_Axes=\(0.1.2.3\)_signal_size=\(\)_Inverse=0.*)", - R"(.*smoke_DFT_6d/DFTLayerTest.Inference/IS=\(\[\]\)_TS=\{\(10.4.8.2.5.2\)\}_Precision=f32_Axes=\(0.1.2.3.4\)_signal_.*_Inverse=0.*)", - R"(.*smoke_ConvolutionLayerGPUTest_ExplicitPad1D/ConvolutionLayerGPUTestDynamic.*netPRC=f16.*)", - R"(.*smoke_MVN_5D/Mvn6LayerTest.Inference/.*ModelType=f16.*_Ax=\(2.3.4\).*)", - 
R"(.*smoke_MVN_5D/Mvn6LayerTest.Inference/.*ModelType=f32.*_Ax=\(2.3.4\).*NormVariance=FALSE.*)", - R"(.*smoke_MVN_4D/Mvn6LayerTest.Inference/.*TS=\{\(1.10.5.17\)\}.*_ModelType=f16.*Ax=\(2.3\).*)", - R"(.*smoke_MVN_4D/Mvn6LayerTest.Inference/.*TS=\{\(1.3.8.9\)\}.*_ModelType=f16.*Ax=\((2.3|1.2.3)\).*)", - R"(.*smoke_MVN_3D/Mvn6LayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.32.17\)\}_ModelType=f16_AxType=(i64|i32)_Ax=\((1.2|2)\).*)", - R"(.*smoke_MVN_2D/Mvn6LayerTest.Inference.*TS=\{\(2.55\)\}_ModelType=f32_.*)", - R"(.*smoke_Decomposition_6D/Mvn6LayerTest.Inference.*ModelType=(f16|f32).*Ax=\(0.1.2\).*)", - R"(.*smoke_Decomposition_6D/Mvn6LayerTest.Inference.*ModelType=(f16|f32).*Ax=\(0.1.5\).*)", - R"(.*smoke_Decomposition_4D/Mvn6LayerTest.Inference.*ModelType=f16.*Ax=\(1\).*)", - R"(.*smoke_CTCLoss_Set2/CTCLossLayerTest.Inference/.*_LL=\(6.5.6\)_A=\(2.1.5.3.2.6\)\(3.3.3.3.3.3\)\(6.5.6.5.6.5\)_.*_BI=7_.*_CMR=1_U=1_PF=f16.*)", - R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference/.*precision=f32.*)", - R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference/IS=\(\[\]_\)_TS=\(\(1.2.6\)\)_input_precision=f16.*)", - R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference/IS=\(\[\]_\)_TS=\(\(1.2.18\)\)_input_precision=f16.*)", - R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference_cached/IS=\(\[\?.\?.96\]_\)_TS=\(\(1.4.96\)\)_input_precision=f32.*)", - R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference_cached/IS=\(\[\?.\?.\?\]_\)_TS=\(\(1.2.16\)\)_input_precision=f32.*)", - R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference_cached/IS=\(\[\]_\)_TS=\(\(1.2.6\)\)_input_precision=(f16|f32).*)", - R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference_cached/IS=\(\[\]_\)_TS=\(\(1.2.18\)\)_input_precision=(f16|f32).*)", - R"(.*smoke_MM_Static_OneDNN/MatMulLayerGPUTest.Inference.*input_type=PARAMETER_netPRC=f16.*)", - 
R"(.*smoke_Decomposition_3D/Mvn6LayerTest.Inference/.*TS=\{\(1.32.17\)\}_ModelType=f16_AxType=.*_Ax=\(0.1.2\).*)", - R"(.*moke_Decomposition_3D/Mvn6LayerTest.Inference.*TS=\{\(1.37.9\)\}_ModelType=f16_AxType=.*_Ax=\(1\).*)", - R"(.*smoke_Decomposition_4D/Mvn6LayerTest.Inference/.*TS=\{\(2.19.5.10\)\}_ModelType=f32_AxType=(i32|i64)_Ax=\((0.3|3)\)_NormVariance=FALSE.*)", - R"(.*smoke_Decomposition_4D/Mvn6LayerTest.Inference/.*TS=\{\(2.19.5.10\)\}_ModelType=f16_AxType=(i32|i64)_Ax=\(0.3\)_NormVariance=TRUE.*)", - R"(.*smoke_Convolution2D_ExplicitPadding/ConvolutionLayerTest.*netPRC=f16.*)", - R"(.*smoke_SwiGLUFusion_basic/SwiGLUFusion.Inference.*/IS=\(\[\?.\?.96\]_\)_.*_input_precision=f16.*)", - R"(.*smoke_dynamic_reduce_deconv_concat/ReduceDeconvConcatDynamicGPUTest.Inference/IS=\[1.32.64.\?.\?\]_\[1.32.64.64.64\]_\[1.8.128.\?.\?.4\]_\[1.8.128.128.128.4\]_model_type=f16.*)", - R"(.*smoke_GPU_Dynamic/KVCacheTest.Inference.*_precision=f16.*)", - R"(.*smoke_dynamic_shapeof_activation_sqrt/shapeofActivationDynamicGPUTest.Inference/IS=\[\?.\?.1.64\]_\[1.3136.1.64\]_\[1.49.1.64\]_\[2.49.1.64\]_NetType=f16_targetDevice=GPU_activatioinType=23_inShape=\(\)_constantValue=\(\).*)", - R"(.*smoke_GroupConvolutionLayerGPUTest_dynamic2D.*/GroupConvolutionLayerGPUTestDynamic.Inference/.*_netPRC=f16.*)", - R"(.*smoke_(DFT|IDFT|IRDFT)_GPU_4D/DFTLayerGPUTest.CompareWithRefs.*)", - R"(.*smoke_RDFT_GPU_4D/DFTLayerGPUTest.CompareWithRefs/prec=(f32|f16)_IS0=\[\?.\?.\?.\?\]_TS0=\(\(1.192.36.64\)\)_IS1=\[\?\]_TS1=\(\(1\)\)_IS2=\[\?\]_TS2=\(\(1\)\).*)", - R"(.*smoke_ConvolutionLayerGPUTest_dynamic.*ConvolutionLayerGPUTestDynamic.*netPRC=f16.*)", - R"(.*smoke_NoReshape/SplitConvConcat.CompareWithRefImpl/IS=\(1.6.40.40\)_ET=f16_.*)", - R"(.*smoke_basic/PermConvPermConcat.CompareWithRefs/IS=\(1.1.7.32\)_KS=\(1.3\)_OC=(32|64)_ET=f32.*)", - R"(.*smoke_basic/PermConvPermConcat.CompareWithRefs/IS=\(1.1.8.16\)_KS=\(1.5\)_OC=(32|64)_ET=f32.*)", - 
R"(.*smoke_MAX_and_AVGPool_ValidPad/PoolingLayerTest.Inference.*_AvgPool_ExcludePad=0_K\(3.5\).*modelType=f16.*)", - R"(.*smoke_MatMul_NoTranspose/MatMulLayerTest.Inference/.*_TS=\{\(1.4.5.6\)_\(1.4.6.4\)\}_.*_input_type=CONSTANT_modelType=f16_.*)", - R"(.*smoke_MatMul_NoTranspose/MatMulLayerTest.Inference/.*_TS=\{\(4.5.6\)_\(6.3\)\}_.*_input_type=PARAMETER_modelType=f16_.*)", - R"(.*smoke_MatMul_NoTranspose/MatMulLayerTest.Inference/.*_TS=\{\(9.9.9\)_\(9.9\)\}_.*_input_type=PARAMETER_modelType=f16_.*)", - R"(.*smoke_MatMul_FirstTranspose/MatMulLayerTest.Inference/.*_TS=\{\(100.65\)_\(100.73\)\}_.*_modelType=f16_.*)", - R"(.*smoke_MatMul_SecondTranspose/MatMulLayerTest.Inference/.*_TS=\{\(1.16.128\)_\(1.64.128\)\}_.*_modelType=f16_.*)", - R"(.*smoke_MatMul_SecondTranspose/MatMulLayerTest.Inference/.*_TS=\{\(1.64.80\)_\(1.77.80\)\}_.*_modelType=f16_.*)", - R"(.*smoke_MatMul_SecondTranspose/MatMulLayerTest.Inference/.*_TS=\{\(65.100\)_\(73.100\)\}_.*_modelType=f16_.*)", - R"(.*smoke_MatMul_BothTranspose/MatMulLayerTest.Inference/.*_TS=\{\(100.65\)_\(73.100\)\}_.*_modelType=f16_.*)", - R"(.*smoke_Convolution2D_ExplicitPadding/ConvolutionLayerTest.Inference/.*_TS=\{\(1.3.30.30\)\}_K\(3.5\)_.*_O=5_AP=explicit_netPRC=f16.*)", - R"(.*smoke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*_TS=\{\(1.3.10.10.10\)\}_.*_PE\((0.0.0|1.1.1)\)_D=\(1.1.1\)_OP=\((1.1.1|2.2.2)\)_O=16_AP=explicit_netPRC=f16_.*)", - R"(.*smoke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*_TS=\{\(1.32.5.5.5\)\}_.*_netPRC=f16_.*)", - R"(.*smoke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*_TS=\{\(1.16.5.5.5\)\}_.*_netPRC=f16_.*)", - R"(.*smoke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*_TS=\{\(1.16.5.5.5\)\}_.*_netPRC=f16_.*)", - 
R"(.*smoke_PSROIPooling_average/PSROIPoolingLayerTest.Inference/IS=\(3.8.16.16\)_coord_shape=\(10.5\)_out_dim=2_group_size=2_scale=(0.625|1)_bins_x=1_bins_y=1_mode=average_modelType=f16.*)", - R"(.*smoke_RDFT_5d_last_axis/RDFTLayerTest.Inference/IS=\(10.4.8.2.5\)_modelType=f32_Axes=\(0.1.2.3.4\)_SignalSize=\(\).*)", + std::regex(R"(.*smoke_CTCLoss_Set2/CTCLossLayerTest.Inference/IS=\(\[\]\)_TS=\{\(3.6.8\)\}_LL=\(6.5.6\)_A=\(4.1.2.3.4.5\)\(5.4.3.0.1.0\)\(2.1.3.1.3.0\)_AL=\(3.3.5\)_BI=7_PCR=1_CMR=1_U=0_PF=f32_PI=i64.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=.*1.16.10.10.*_OS=\(\)_K\(1.1\)_S\(1.3\).*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=.*1.32.10.10.*_OS=\(\)_K\(1.1\)_S\(1.3\).*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=.*1.3.30.30.*_OS=\(\)_K\(1.1\)_S\(1.3\).*O=16.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_AutoPadValid/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=\{\((1\.32\.10\.10|1\.16\.10\.10)\)\}_OS=\(\)_K\(1.1\)_S\(1.3\)_PB\(0.0\)_PE\(0.0\)_D=\(1.1\)_OP=\(\)_O=(1|5|16)_AP=valid_netPRC=f16.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_AutoPadValid/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=.*1.3.30.30.*_OS=\(\)_K\(1.1\)_S\(1.3\)_PB\(0.0\)_PE\(0.0\)_D=\(1.1\)_OP=\(\)_O=16_AP=valid_netPRC=f16.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=\{\((1.32.10.10|1.16.10.10|1.3.30.30)\)\}_OS=\(\)_K\(1.1\)_S\(3.3\)_PB\(0.0\)_PE\(0.0\)_D=\(1.1\)_OP=\((1.1|2.2)\)_O=(1|5|16)_AP=valid_netPRC=f16.*)"), + 
std::regex(R"(.*smoke_ConvolutionBackpropData2D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=\{\((1.32.10.10|1.16.10.10|1.3.30.30)\)\}_OS=\(\)_K\(1.1\)_S\(3.3\)_PB\(0.0\)_PE\((0.0|1.1)\)_D=\(1.1\)_OP=\((1.1|2.2)\)_O=(1|5|16).*)"), + std::regex(R"(.*smoke_GridSample/GridSampleLayerTest.Inference/DS=\((5.2.3.5|5.3.4.6)\)_GS=\((5.7.3.2|5.2.8.2)\)_align_corners=(0|1)_Mode=(bilinear|bicubic)_padding_mode=zeros_model_type=f16_grid_type=f32.*)"), + std::regex(R"(.*smoke_MatMul_BothTranspose/MatMulLayerTest.Inference/IS=\(\[\]_\[\]\)_TS=\{\(5\)_\(5\)\}_transpose_a=1_transpose_b=1_secondary_input_type=(CONSTANT|PARAMETER)_modelType=(f16|f32).*)"), + std::regex(R"(.*smoke_dynamic_conv_reshape_fullyconnected/ConvReshapeFullyConnectedDynamicGPUTestDynamic.Inference/IS=\[\?\.64\.1\.\?\.\?\]_\[1\.64\.1\.1\.1\]_model_type=f16.*)"), + std::regex(R"(.*smoke_empty_tensor/EmptyTensorDynamicGPUTest.Inference/IS=\[\?\]_\[30\]_\[40\]_\[50\]_\[10\]_\[7\]_\[\?.\?\]_\[1.0\]_\[1.8\]_\[1.0\]_\[1.3\]_\[1.20\]_NetType=i32.*)"), + std::regex(R"(.*smoke_Convolution2D_ExplicitPadding/ActivatiConvolutionLayerTestonLayerTest.Inference.*netPRC=f16.*)"), + std::regex(R"(.*smoke_Convolution2D_AutoPadValid/ConvolutionLayerTest.Inference.*netPRC=f16.*)"), + std::regex(R"(.*smoke_Convolution3D_Basic1/ConvolutionLayerTest.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding/ConvolutionBackpropDataLayerTest.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_AutoPadValid/ConvolutionBackpropDataLayerTest.*K\((3.5|3.3)\).*netPRC=f16.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_ExplicitPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.*K\((3.5|3.3)\).*netPRC=f16.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*K\((3.5|3.3)\).*PE\(1.1\).*netPRC=f16.*)"), + 
std::regex(R"(.*smoke_ConvolutionBackpropData2D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*TS=\{\(1.32.10.10\).*K\((3.5|3.3)\).*PE\(0.0\).*netPRC=f16.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*TS=\{\((1.3.30.30|1.16.10.10)\).*K\(3.5\).*PE\(0.0\).*netPRC=f16.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData2D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*TS=\{\((1.3.30.30|1.16.10.10)\).*K\(3.3\).*PE\(0.0\).*O=(1|5|16)_AP=explicit_netPRC=f16.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData3D_ExplicitPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*TS=\{\((1.16.5.5.5|1.32.5.5.5)\)\}.*O=(1|5)_AP=valid_netPRC=f16.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData3D_ExplicitPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference.*O=16_AP=valid_netPRC=f16.*)"), + std::regex(R"(.*moke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*TS=\{\((1.16.5.5.5|1.32.5.5.5)\)\}.*O=(1|5)_AP=valid_netPRC=f16.*)"), + std::regex(R"(.*moke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference.*O=16_AP=valid_netPRC=f16.*)"), + std::regex(R"(.*smoke_DeformableConvolution2D_ExplicitPadding/DeformableConvolutionLayerTest.Inference.*O=(1|5)_AP=explicit_BI_PAD=0_MODULATION=1_netPRC=f16.*)"), + std::regex(R"(.*smoke_DeformableConvolution2D_AutoPadValid/DeformableConvolutionLayerTest.Inference.*O=(1|5)_AP=valid_BI_PAD=0_MODULATION=1_netPRC=f16.*)"), + std::regex(R"(.*smoke_DeformableConvolution2D_DeformableGroups_ExplicitPadding/DeformableConvolutionLayerTest.Inference.*O=(1|5)_AP=explicit_BI_PAD=(0|1)_MODULATION=(0|1)_netPRC=f16.*)"), + 
std::regex(R"(.*smoke_DeformableConvolution2D_SingleTestCase/DeformableConvolutionLayerTest.Inference.*O=(1|5)_AP=explicit_BI_PAD=(0|1)_MODULATION=(0|1)_netPRC=f16.*)"), + std::regex(R"(.*smoke_DeformableConvolution2D_MultipleGroup.*/DeformableConvolutionLayerTest.Inference.*O=(1|5)_AP=explicit_BI_PAD=(0|1)_MODULATION=(0|1)_netPRC=f16.*)"), + std::regex(R"(.*smoke_DFT_5d/DFTLayerTest.Inference/IS=\(\[\]\)_TS=\{\(10.4.8.2.2\)\}_Precision=f32_Axes=\(0.1.2.3\)_signal_size=\(\)_Inverse=0.*)"), + std::regex(R"(.*smoke_DFT_6d/DFTLayerTest.Inference/IS=\(\[\]\)_TS=\{\(10.4.8.2.5.2\)\}_Precision=f32_Axes=\(0.1.2.3.4\)_signal_.*_Inverse=0.*)"), + std::regex(R"(.*smoke_ConvolutionLayerGPUTest_ExplicitPad1D/ConvolutionLayerGPUTestDynamic.*netPRC=f16.*)"), + std::regex(R"(.*smoke_MVN_5D/Mvn6LayerTest.Inference/.*ModelType=f16.*_Ax=\(2.3.4\).*)"), + std::regex(R"(.*smoke_MVN_5D/Mvn6LayerTest.Inference/.*ModelType=f32.*_Ax=\(2.3.4\).*NormVariance=FALSE.*)"), + std::regex(R"(.*smoke_MVN_4D/Mvn6LayerTest.Inference/.*TS=\{\(1.10.5.17\)\}.*_ModelType=f16.*Ax=\(2.3\).*)"), + std::regex(R"(.*smoke_MVN_4D/Mvn6LayerTest.Inference/.*TS=\{\(1.3.8.9\)\}.*_ModelType=f16.*Ax=\((2.3|1.2.3)\).*)"), + std::regex(R"(.*smoke_MVN_3D/Mvn6LayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.32.17\)\}_ModelType=f16_AxType=(i64|i32)_Ax=\((1.2|2)\).*)"), + std::regex(R"(.*smoke_MVN_2D/Mvn6LayerTest.Inference.*TS=\{\(2.55\)\}_ModelType=f32_.*)"), + std::regex(R"(.*smoke_Decomposition_6D/Mvn6LayerTest.Inference.*ModelType=(f16|f32).*Ax=\(0.1.2\).*)"), + std::regex(R"(.*smoke_Decomposition_6D/Mvn6LayerTest.Inference.*ModelType=(f16|f32).*Ax=\(0.1.5\).*)"), + std::regex(R"(.*smoke_Decomposition_4D/Mvn6LayerTest.Inference.*ModelType=f16.*Ax=\(1\).*)"), + std::regex(R"(.*smoke_CTCLoss_Set2/CTCLossLayerTest.Inference/.*_LL=\(6.5.6\)_A=\(2.1.5.3.2.6\)\(3.3.3.3.3.3\)\(6.5.6.5.6.5\)_.*_BI=7_.*_CMR=1_U=1_PF=f16.*)"), + std::regex(R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference/.*precision=f32.*)"), + 
std::regex(R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference/IS=\(\[\]_\)_TS=\(\(1.2.6\)\)_input_precision=f16.*)"), + std::regex(R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference/IS=\(\[\]_\)_TS=\(\(1.2.18\)\)_input_precision=f16.*)"), + std::regex(R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference_cached/IS=\(\[\?.\?.96\]_\)_TS=\(\(1.4.96\)\)_input_precision=f32.*)"), + std::regex(R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference_cached/IS=\(\[\?.\?.\?\]_\)_TS=\(\(1.2.16\)\)_input_precision=f32.*)"), + std::regex(R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference_cached/IS=\(\[\]_\)_TS=\(\(1.2.6\)\)_input_precision=(f16|f32).*)"), + std::regex(R"(.*smoke_RMSNormDecomposition_basic/RMSNormDecomposition.Inference_cached/IS=\(\[\]_\)_TS=\(\(1.2.18\)\)_input_precision=(f16|f32).*)"), + std::regex(R"(.*smoke_MM_Static_OneDNN/MatMulLayerGPUTest.Inference.*input_type=PARAMETER_netPRC=f16.*)"), + std::regex(R"(.*smoke_Decomposition_3D/Mvn6LayerTest.Inference/.*TS=\{\(1.32.17\)\}_ModelType=f16_AxType=.*_Ax=\(0.1.2\).*)"), + std::regex(R"(.*moke_Decomposition_3D/Mvn6LayerTest.Inference.*TS=\{\(1.37.9\)\}_ModelType=f16_AxType=.*_Ax=\(1\).*)"), + std::regex(R"(.*smoke_Decomposition_4D/Mvn6LayerTest.Inference/.*TS=\{\(2.19.5.10\)\}_ModelType=f32_AxType=(i32|i64)_Ax=\((0.3|3)\)_NormVariance=FALSE.*)"), + std::regex(R"(.*smoke_Decomposition_4D/Mvn6LayerTest.Inference/.*TS=\{\(2.19.5.10\)\}_ModelType=f16_AxType=(i32|i64)_Ax=\(0.3\)_NormVariance=TRUE.*)"), + std::regex(R"(.*smoke_Convolution2D_ExplicitPadding/ConvolutionLayerTest.*netPRC=f16.*)"), + std::regex(R"(.*smoke_SwiGLUFusion_basic/SwiGLUFusion.Inference.*/IS=\(\[\?.\?.96\]_\)_.*_input_precision=f16.*)"), + std::regex(R"(.*smoke_dynamic_reduce_deconv_concat/ReduceDeconvConcatDynamicGPUTest.Inference/IS=\[1.32.64.\?.\?\]_\[1.32.64.64.64\]_\[1.8.128.\?.\?.4\]_\[1.8.128.128.128.4\]_model_type=f16.*)"), + 
std::regex(R"(.*smoke_GPU_Dynamic/KVCacheTest.Inference.*_precision=f16.*)"), + std::regex(R"(.*smoke_dynamic_shapeof_activation_sqrt/shapeofActivationDynamicGPUTest.Inference/IS=\[\?.\?.1.64\]_\[1.3136.1.64\]_\[1.49.1.64\]_\[2.49.1.64\]_NetType=f16_targetDevice=GPU_activatioinType=23_inShape=\(\)_constantValue=\(\).*)"), + std::regex(R"(.*smoke_GroupConvolutionLayerGPUTest_dynamic2D.*/GroupConvolutionLayerGPUTestDynamic.Inference/.*_netPRC=f16.*)"), + std::regex(R"(.*smoke_(DFT|IDFT|IRDFT)_GPU_4D/DFTLayerGPUTest.CompareWithRefs.*)"), + std::regex(R"(.*smoke_RDFT_GPU_4D/DFTLayerGPUTest.CompareWithRefs/prec=(f32|f16)_IS0=\[\?.\?.\?.\?\]_TS0=\(\(1.192.36.64\)\)_IS1=\[\?\]_TS1=\(\(1\)\)_IS2=\[\?\]_TS2=\(\(1\)\).*)"), + std::regex(R"(.*smoke_ConvolutionLayerGPUTest_dynamic.*ConvolutionLayerGPUTestDynamic.*netPRC=f16.*)"), + std::regex(R"(.*smoke_NoReshape/SplitConvConcat.CompareWithRefImpl/IS=\(1.6.40.40\)_ET=f16_.*)"), + std::regex(R"(.*smoke_basic/PermConvPermConcat.CompareWithRefs/IS=\(1.1.7.32\)_KS=\(1.3\)_OC=(32|64)_ET=f32.*)"), + std::regex(R"(.*smoke_basic/PermConvPermConcat.CompareWithRefs/IS=\(1.1.8.16\)_KS=\(1.5\)_OC=(32|64)_ET=f32.*)"), + std::regex(R"(.*smoke_MAX_and_AVGPool_ValidPad/PoolingLayerTest.Inference.*_AvgPool_ExcludePad=0_K\(3.5\).*modelType=f16.*)"), + std::regex(R"(.*smoke_MatMul_NoTranspose/MatMulLayerTest.Inference/.*_TS=\{\(1.4.5.6\)_\(1.4.6.4\)\}_.*_input_type=CONSTANT_modelType=f16_.*)"), + std::regex(R"(.*smoke_MatMul_NoTranspose/MatMulLayerTest.Inference/.*_TS=\{\(4.5.6\)_\(6.3\)\}_.*_input_type=PARAMETER_modelType=f16_.*)"), + std::regex(R"(.*smoke_MatMul_NoTranspose/MatMulLayerTest.Inference/.*_TS=\{\(9.9.9\)_\(9.9\)\}_.*_input_type=PARAMETER_modelType=f16_.*)"), + std::regex(R"(.*smoke_MatMul_FirstTranspose/MatMulLayerTest.Inference/.*_TS=\{\(100.65\)_\(100.73\)\}_.*_modelType=f16_.*)"), + std::regex(R"(.*smoke_MatMul_SecondTranspose/MatMulLayerTest.Inference/.*_TS=\{\(1.16.128\)_\(1.64.128\)\}_.*_modelType=f16_.*)"), + 
std::regex(R"(.*smoke_MatMul_SecondTranspose/MatMulLayerTest.Inference/.*_TS=\{\(1.64.80\)_\(1.77.80\)\}_.*_modelType=f16_.*)"), + std::regex(R"(.*smoke_MatMul_SecondTranspose/MatMulLayerTest.Inference/.*_TS=\{\(65.100\)_\(73.100\)\}_.*_modelType=f16_.*)"), + std::regex(R"(.*smoke_MatMul_BothTranspose/MatMulLayerTest.Inference/.*_TS=\{\(100.65\)_\(73.100\)\}_.*_modelType=f16_.*)"), + std::regex(R"(.*smoke_Convolution2D_ExplicitPadding/ConvolutionLayerTest.Inference/.*_TS=\{\(1.3.30.30\)\}_K\(3.5\)_.*_O=5_AP=explicit_netPRC=f16.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*_TS=\{\(1.3.10.10.10\)\}_.*_PE\((0.0.0|1.1.1)\)_D=\(1.1.1\)_OP=\((1.1.1|2.2.2)\)_O=16_AP=explicit_netPRC=f16_.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*_TS=\{\(1.32.5.5.5\)\}_.*_netPRC=f16_.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*_TS=\{\(1.16.5.5.5\)\}_.*_netPRC=f16_.*)"), + std::regex(R"(.*smoke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*_TS=\{\(1.16.5.5.5\)\}_.*_netPRC=f16_.*)"), + std::regex(R"(.*smoke_PSROIPooling_average/PSROIPoolingLayerTest.Inference/IS=\(3.8.16.16\)_coord_shape=\(10.5\)_out_dim=2_group_size=2_scale=(0.625|1)_bins_x=1_bins_y=1_mode=average_modelType=f16.*)"), + std::regex(R"(.*smoke_RDFT_5d_last_axis/RDFTLayerTest.Inference/IS=\(10.4.8.2.5\)_modelType=f32_Axes=\(0.1.2.3.4\)_SignalSize=\(\).*)"), // Issue: 136862 - R"(.*smoke_ConditionGPUTest_static/StaticConditionLayerGPUTest.CompareWithRefs/IS=\(3.6\)_netPRC=i8_ifCond=PARAM_targetDevice=GPU_.*)", + std::regex(R"(.*smoke_ConditionGPUTest_static/StaticConditionLayerGPUTest.CompareWithRefs/IS=\(3.6\)_netPRC=i8_ifCond=PARAM_targetDevice=GPU_.*)"), // Use weight from model not from path hint - 
R"(.*compile_from_weightless_blob.*)", + std::regex(R"(.*compile_from_weightless_blob.*)"), #if defined(_WIN32) // by calc abs_threshold with expected value - R"(.*smoke_RemoteTensor/OVRemoteTensorBatched_Test.NV12toBGR_buffer/(num_batch_4|num_batch_2).*)", - R"(.*smoke_Check/ConstantResultSubgraphTest.Inference/SubgraphType=SINGLE_COMPONENT_IS=\[1,3,10,10\]_IT=i16_Device=GPU.*)", + std::regex(R"(.*smoke_RemoteTensor/OVRemoteTensorBatched_Test.NV12toBGR_buffer/(num_batch_4|num_batch_2).*)"), + std::regex(R"(.*smoke_Check/ConstantResultSubgraphTest.Inference/SubgraphType=SINGLE_COMPONENT_IS=\[1,3,10,10\]_IT=i16_Device=GPU.*)"), // Issue: 126388 - R"(.*smoke_outputTensorShapesForDynamicInput.*)", + std::regex(R"(.*smoke_outputTensorShapesForDynamicInput.*)"), #endif + }; + if (!isGPU1Present()) { + patterns.push_back(std::regex(R"(.*nightly_OVClassSpecificDevice0Test/OVSpecificDeviceSetConfigTest.GetConfigSpecificDeviceNoThrow/GPU.1.*)")); + patterns.push_back(std::regex(R"(.*nightly_OVClassSpecificDevice0Test/OVSpecificDeviceGetConfigTest.GetConfigSpecificDeviceNoThrow/GPU.1.*)")); + patterns.push_back(std::regex(R"(.*nightly_OVClassSpecificDevice0Test/OVSpecificDeviceTestSetConfig.SetConfigSpecificDeviceNoThrow/GPU.1.*)")); + patterns.push_back(std::regex(R"(.*nightly_OVClassSetDefaultDeviceIDPropTest/OVClassSetDefaultDeviceIDPropTest.SetDefaultDeviceIDNoThrow/0.*)")); + patterns.push_back(std::regex(R"(.*nightly_OVClassSeveralDevicesTest/OVClassSeveralDevicesTestCompileModel.CompileModelActualSeveralDevicesNoThrow/0.*)")); + } + if (immadSupported()) { + // Failure list + // Case (first 20 chars) Fail Count, Pass Count + // ---------------------------------------------------------------------- ------------------------ + // LSTMCellCommon/LSTMCellTest.Inference/decomposition0_batch=5_hidden_si [146, 622] + // LSTMCellCommon/LSTMCellTest.Inference/decomposition1_batch=5_hidden_si [132, 636] + // smoke_MatMulCompressedWeights_corner_cases_basic/MatmulWeightsDecompre 
[96, 288] + // smoke_MaxPool8_ExplicitPad_FloorRounding/MaxPoolingV8LayerTest.Inferen [64, 128] + // smoke_MaxPool8_ExplicitPad_CeilRounding/MaxPoolingV8LayerTest.Inferenc [32, 64] + // MatMulCompressedWeights_corner_cases_big/MatmulWeightsDecompression.In [22, 362] + // smoke_MatMulCompressedWeights_basic/MatmulWeightsDecompression.Inferen [16, 44] + // smoke_MatmulAndGatherSharedWeightsDecompression/SharedMatmulAndGatherW [14, 10] + // smoke_LoRA_HorizontalFusion/FullyConnectedHorizontalFusion.Inference/d [12, 0] + // smoke_LSTMCellCommon/LSTMCellTest.Inference/decomposition0_batch=5_hid [8, 56] + // smoke_LSTMCellCommon/LSTMCellTest.Inference/decomposition1_batch=5_hid [8, 56] + // smoke_Decomposition_3D/Mvn6LayerTest.Inference/IS=([])_TS={(1.37.9)}_M [2, 46] + // smoke_MatmulWeightsDecompressionQuantizeConvolution_basic/MatmulWeight [2, 10] + // smoke_MatMulCompressedWeights_dyn_quan/MatmulWeightsDecompression.Infe [2, 4] + // smoke_MatMul_NoTranspose/MatMulLayerTest.Inference/IS=([]_[])_TS={(1.2 [2, 2] + // smoke_static_conv_n_dynamic_concat/ConvStaticConcatDynamicGPUTestDynam [2, 0] + // LSTMSequenceCM/LSTMSequenceGPUTest.Inference/mode=PURE_SEQ_seq_lengths [2, 0] + // smoke_GroupConvolutionLayerGPUTest_dynamic1DSymPad_Disabled/GroupConvo [2, 0] + // LSTMSequenceCommonZeroClip/LSTMSequenceGPUTest.Inference/mode=CONVERT_ [1, 323] + // LSTMSequenceCommonZeroClip/LSTMSequenceGPUTest.Inference/mode=PURE_SEQ [1, 323] + // smoke_ScaledAttnStatic_GPU/ScaledAttnLayerGPUTest.CompareWithRefs/netP [1, 63] + // smoke_FC_3D/MatMulLayerGPUTest.Inference/IS=[]_[]_TS=((1.429))_((1.429 [1, 1] + // Inference_without_convert/BF16WeightsDecompression.Inference_without_c [1, 1] + // smoke_ConvolutionLayerGPUTest_3D_tensor_basic/ConvolutionLayerGPUTest. 
[1, 0] + patterns.push_back(std::regex(R"(.*smoke_MatMulCompressedWeights_corner_cases_basic/MatmulWeightsDecompre.*)")); + patterns.push_back(std::regex(R"(.*smoke_MaxPool8_ExplicitPad_FloorRounding/MaxPoolingV8LayerTest.Inferen.*)")); + patterns.push_back(std::regex(R"(.*smoke_MaxPool8_ExplicitPad_CeilRounding/MaxPoolingV8LayerTest.Inferenc.*)")); + patterns.push_back(std::regex(R"(.*smoke_MatMulCompressedWeights_basic/MatmulWeightsDecompression.Inferen.*)")); + patterns.push_back(std::regex(R"(.*smoke_MatmulAndGatherSharedWeightsDecompression/SharedMatmulAndGatherW.*)")); + patterns.push_back(std::regex(R"(.*smoke_LoRA_HorizontalFusion/FullyConnectedHorizontalFusion.Inference/d.*)")); + patterns.push_back(std::regex(R"(.*smoke_LSTMCellCommon/LSTMCellTest.Inference/decomposition0_batch=5_hid.*)")); + patterns.push_back(std::regex(R"(.*smoke_LSTMCellCommon/LSTMCellTest.Inference/decomposition1_batch=5_hid.*)")); + patterns.push_back(std::regex(R"(.*smoke_Decomposition_3D/Mvn6LayerTest.Inference/IS=.*)")); + patterns.push_back(std::regex(R"(.*smoke_MatmulWeightsDecompressionQuantizeConvolution_basic/MatmulWeight.*)")); + patterns.push_back(std::regex(R"(.*smoke_MatMulCompressedWeights_dyn_quan/MatmulWeightsDecompression.Infe.*)")); + patterns.push_back(std::regex(R"(.*smoke_MatMul_NoTranspose/MatMulLayerTest.Inference/IS=.*)")); + patterns.push_back(std::regex(R"(.*smoke_GroupConvolutionLayerGPUTest_dynamic1DSymPad_Disabled/GroupConvo.*)")); + patterns.push_back(std::regex(R"(.*smoke_static_conv_n_dynamic_concat/ConvStaticConcatDynamicGPUTestDynam.*)")); + patterns.push_back(std::regex(R"(.*smoke_ScaledAttnStatic_GPU/ScaledAttnLayerGPUTest.CompareWithRefs/netP.*)")); + patterns.push_back(std::regex(R"(.*smoke_FC_3D/MatMulLayerGPUTest.Inference/.*)")); + patterns.push_back(std::regex(R"(.*smoke_ConvolutionLayerGPUTest_3D_tensor_basic/ConvolutionLayerGPUTest..*)")); + patterns.push_back(std::regex(R"(.*smoke_MatmulWeightsDecompressionQuantizeConvolution_basic.*)")); 
+ patterns.push_back(std::regex(R"(.*smoke_Nms9LayerTest/Nms9LayerTest.Inference/num_batches=2_num_boxes=50.*)")); + } else { + // CVS-172342 + patterns.push_back(std::regex(R"(.*smoke_MatMulCompressedWeights_3D_weight.*)")); + } + + return patterns; }; - if (!isGPU1Present()) { - returnVal.push_back(R"(.*nightly_OVClassSpecificDevice0Test/OVSpecificDeviceSetConfigTest.GetConfigSpecificDeviceNoThrow/GPU.1.*)"); - returnVal.push_back(R"(.*nightly_OVClassSpecificDevice0Test/OVSpecificDeviceGetConfigTest.GetConfigSpecificDeviceNoThrow/GPU.1.*)"); - returnVal.push_back(R"(.*nightly_OVClassSpecificDevice0Test/OVSpecificDeviceTestSetConfig.SetConfigSpecificDeviceNoThrow/GPU.1.*)"); - returnVal.push_back(R"(.*nightly_OVClassSetDefaultDeviceIDPropTest/OVClassSetDefaultDeviceIDPropTest.SetDefaultDeviceIDNoThrow/0.*)"); - returnVal.push_back(R"(.*nightly_OVClassSeveralDevicesTest/OVClassSeveralDevicesTestCompileModel.CompileModelActualSeveralDevicesNoThrow/0.*)"); - } - if (immadSupported()) { - // Failure list - // Case (first 20 chars) Fail Count, Pass Count - // ---------------------------------------------------------------------- ------------------------ - // LSTMCellCommon/LSTMCellTest.Inference/decomposition0_batch=5_hidden_si [146, 622] - // LSTMCellCommon/LSTMCellTest.Inference/decomposition1_batch=5_hidden_si [132, 636] - // smoke_MatMulCompressedWeights_corner_cases_basic/MatmulWeightsDecompre [96, 288] - // smoke_MaxPool8_ExplicitPad_FloorRounding/MaxPoolingV8LayerTest.Inferen [64, 128] - // smoke_MaxPool8_ExplicitPad_CeilRounding/MaxPoolingV8LayerTest.Inferenc [32, 64] - // MatMulCompressedWeights_corner_cases_big/MatmulWeightsDecompression.In [22, 362] - // smoke_MatMulCompressedWeights_basic/MatmulWeightsDecompression.Inferen [16, 44] - // smoke_MatmulAndGatherSharedWeightsDecompression/SharedMatmulAndGatherW [14, 10] - // smoke_LoRA_HorizontalFusion/FullyConnectedHorizontalFusion.Inference/d [12, 0] - // 
smoke_LSTMCellCommon/LSTMCellTest.Inference/decomposition0_batch=5_hid [8, 56] - // smoke_LSTMCellCommon/LSTMCellTest.Inference/decomposition1_batch=5_hid [8, 56] - // smoke_Decomposition_3D/Mvn6LayerTest.Inference/IS=([])_TS={(1.37.9)}_M [2, 46] - // smoke_MatmulWeightsDecompressionQuantizeConvolution_basic/MatmulWeight [2, 10] - // smoke_MatMulCompressedWeights_dyn_quan/MatmulWeightsDecompression.Infe [2, 4] - // smoke_MatMul_NoTranspose/MatMulLayerTest.Inference/IS=([]_[])_TS={(1.2 [2, 2] - // smoke_static_conv_n_dynamic_concat/ConvStaticConcatDynamicGPUTestDynam [2, 0] - // LSTMSequenceCM/LSTMSequenceGPUTest.Inference/mode=PURE_SEQ_seq_lengths [2, 0] - // smoke_GroupConvolutionLayerGPUTest_dynamic1DSymPad_Disabled/GroupConvo [2, 0] - // LSTMSequenceCommonZeroClip/LSTMSequenceGPUTest.Inference/mode=CONVERT_ [1, 323] - // LSTMSequenceCommonZeroClip/LSTMSequenceGPUTest.Inference/mode=PURE_SEQ [1, 323] - // smoke_ScaledAttnStatic_GPU/ScaledAttnLayerGPUTest.CompareWithRefs/netP [1, 63] - // smoke_FC_3D/MatMulLayerGPUTest.Inference/IS=[]_[]_TS=((1.429))_((1.429 [1, 1] - // Inference_without_convert/BF16WeightsDecompression.Inference_without_c [1, 1] - // smoke_ConvolutionLayerGPUTest_3D_tensor_basic/ConvolutionLayerGPUTest. 
[1, 0] - returnVal.push_back(R"(.*smoke_MatMulCompressedWeights_corner_cases_basic/MatmulWeightsDecompre.*)"); - returnVal.push_back(R"(.*smoke_MaxPool8_ExplicitPad_FloorRounding/MaxPoolingV8LayerTest.Inferen.*)"); - returnVal.push_back(R"(.*smoke_MaxPool8_ExplicitPad_CeilRounding/MaxPoolingV8LayerTest.Inferenc.*)"); - returnVal.push_back(R"(.*smoke_MatMulCompressedWeights_basic/MatmulWeightsDecompression.Inferen.*)"); - returnVal.push_back(R"(.*smoke_MatmulAndGatherSharedWeightsDecompression/SharedMatmulAndGatherW.*)"); - returnVal.push_back(R"(.*smoke_LoRA_HorizontalFusion/FullyConnectedHorizontalFusion.Inference/d.*)"); - returnVal.push_back(R"(.*smoke_LSTMCellCommon/LSTMCellTest.Inference/decomposition0_batch=5_hid.*)"); - returnVal.push_back(R"(.*smoke_LSTMCellCommon/LSTMCellTest.Inference/decomposition1_batch=5_hid.*)"); - returnVal.push_back(R"(.*smoke_Decomposition_3D/Mvn6LayerTest.Inference/IS=.*)"); - returnVal.push_back(R"(.*smoke_MatmulWeightsDecompressionQuantizeConvolution_basic/MatmulWeight.*)"); - returnVal.push_back(R"(.*smoke_MatMulCompressedWeights_dyn_quan/MatmulWeightsDecompression.Infe.*)"); - returnVal.push_back(R"(.*smoke_MatMul_NoTranspose/MatMulLayerTest.Inference/IS=.*)"); - returnVal.push_back(R"(.*smoke_GroupConvolutionLayerGPUTest_dynamic1DSymPad_Disabled/GroupConvo.*)"); - returnVal.push_back(R"(.*smoke_static_conv_n_dynamic_concat/ConvStaticConcatDynamicGPUTestDynam.*)"); - returnVal.push_back(R"(.*smoke_ScaledAttnStatic_GPU/ScaledAttnLayerGPUTest.CompareWithRefs/netP.*)"); - returnVal.push_back(R"(.*smoke_FC_3D/MatMulLayerGPUTest.Inference/.*)"); - returnVal.push_back(R"(.*smoke_ConvolutionLayerGPUTest_3D_tensor_basic/ConvolutionLayerGPUTest..*)"); - returnVal.push_back(R"(.*smoke_MatmulWeightsDecompressionQuantizeConvolution_basic.*)"); - returnVal.push_back(R"(.*smoke_Nms9LayerTest/Nms9LayerTest.Inference/num_batches=2_num_boxes=50.*)"); - } else { - // CVS-172342 - 
returnVal.push_back(R"(.*smoke_MatMulCompressedWeights_3D_weight.*)"); - } - return returnVal; + + const static std::vector patterns = get_patterns(); + return patterns; } diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp index 58ec3c5c16a25c..d413277b959455 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp @@ -173,7 +173,7 @@ class KVCacheTests: public ::testing::Test { auto compare_tensors = [&model, &inference_precision](const std::vector expected, const std::vector& actual) { ASSERT_EQ(expected.size(), actual.size()); ASSERT_EQ(expected.size(), model->get_results().size()); - auto compareMap = ov::test::utils::getCompareMap(); + const auto& compareMap = ov::test::utils::getCompareMap(); const auto& results = model->get_results(); for (size_t j = 0; j < results.size(); j++) { const auto result = results[j]; @@ -484,7 +484,7 @@ class KVCacheTests: public ::testing::Test { auto compare_tensors = [&model, &inference_precision](const std::vector expected, const std::vector& actual) { ASSERT_EQ(expected.size(), actual.size()); ASSERT_EQ(expected.size(), model->get_results().size()); - auto compareMap = ov::test::utils::getCompareMap(); + const auto& compareMap = ov::test::utils::getCompareMap(); const auto& results = model->get_results(); for (size_t j = 0; j < results.size(); j++) { const auto result = results[j]; diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp index a6d5f93198ec0c..102accd4418296 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp @@ -160,7 +160,7 @@ class 
SDPAWithKVCacheTest : public ::testing::Test, public ::testing::WithParamI auto compare_tensors = [&model, &inference_precision](const std::vector expected, const std::vector& actual) { ASSERT_EQ(expected.size(), actual.size()); - auto compareMap = ov::test::utils::getCompareMap(); + const auto& compareMap = ov::test::utils::getCompareMap(); for (size_t i = 0; i < expected.size(); i++) { auto it = compareMap.find(ov::op::v13::ScaledDotProductAttention::get_type_info_static()); ASSERT_NE(it, compareMap.end()); diff --git a/src/plugins/intel_npu/tests/functional/behavior/ov_infer_request/compile_and_infer.hpp b/src/plugins/intel_npu/tests/functional/behavior/ov_infer_request/compile_and_infer.hpp index 31d464e472b9be..57208e2fd30c25 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/ov_infer_request/compile_and_infer.hpp +++ b/src/plugins/intel_npu/tests/functional/behavior/ov_infer_request/compile_and_infer.hpp @@ -102,7 +102,7 @@ class OVCompileAndInferRequest : public testing::WithParamInterfaceGetParam(); - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() APIBaseTest::SetUp(); } diff --git a/src/plugins/intel_npu/tests/functional/behavior/ov_plugin/life_time.hpp b/src/plugins/intel_npu/tests/functional/behavior/ov_plugin/life_time.hpp index 9424ee75993a29..16b8cdc4b861c1 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/ov_plugin/life_time.hpp +++ b/src/plugins/intel_npu/tests/functional/behavior/ov_plugin/life_time.hpp @@ -143,7 +143,7 @@ TEST_P(OVHoldersTestNPU, LoadedAny) { } TEST_P(OVHoldersTestNPU, LoadedRemoteContext) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::RemoteContext ctx; { diff --git 
a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dma_buf_remote_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dma_buf_remote_run.hpp index 18e92602e1084f..99474ba53f3bac 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dma_buf_remote_run.hpp +++ b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dma_buf_remote_run.hpp @@ -112,7 +112,7 @@ class DmaBufRemoteRunTests : public ov::test::behavior::OVPluginTestBase, }; TEST_P(DmaBufRemoteRunTests, CheckRemoteTensorSharedBuf) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; ov::InferRequest inference_request; @@ -145,7 +145,7 @@ TEST_P(DmaBufRemoteRunTests, CheckRemoteTensorSharedBuf) { } TEST_P(DmaBufRemoteRunTests, CheckRemoteTensorSharedBuChangingTensors) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; ov::InferRequest inference_request; @@ -199,7 +199,7 @@ TEST_P(DmaBufRemoteRunTests, CheckRemoteTensorSharedBuChangingTensors) { } TEST_P(DmaBufRemoteRunTests, CheckOutputDataFromMultipleRuns) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dx12_remote_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dx12_remote_run.hpp index 01a7ce6afa8c43..a85f94b377eb41 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dx12_remote_run.hpp +++ 
b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dx12_remote_run.hpp @@ -257,7 +257,7 @@ class DX12RemoteRunTests : public ov::test::behavior::OVPluginTestBase, }; TEST_P(DX12RemoteRunTests, CheckRemoteTensorSharedBuf) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; ov::InferRequest inference_request; @@ -283,7 +283,7 @@ TEST_P(DX12RemoteRunTests, CheckRemoteTensorSharedBuf) { } TEST_P(DX12RemoteRunTests, CheckRemoteTensorSharedBuChangingTensors) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; ov::InferRequest inference_request; @@ -330,7 +330,7 @@ TEST_P(DX12RemoteRunTests, CheckRemoteTensorSharedBuChangingTensors) { } TEST_P(DX12RemoteRunTests, CheckOutputDataFromMultipleRuns) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp index e00fdf583850a6..8caeab502de558 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp +++ b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp @@ -116,7 +116,7 @@ class RemoteRunTests : public ov::test::behavior::OVPluginTestBase, }; TEST_P(RemoteRunTests, CheckIsContinuousHostTensorScalar) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) 
SKIP_IF_CURRENT_TEST_IS_DISABLED() auto zero_context = core->get_default_context(target_device); @@ -132,7 +132,7 @@ TEST_P(RemoteRunTests, CheckIsContinuousHostTensorScalar) { } TEST_P(RemoteRunTests, CheckIsContinuousHostTensor1Dimension) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() auto zero_context = core->get_default_context(target_device); @@ -151,7 +151,7 @@ TEST_P(RemoteRunTests, CheckIsContinuousHostTensor1Dimension) { } TEST_P(RemoteRunTests, CheckIsContinuousHostTensor2Dimensions) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() auto zero_context = core->get_default_context(target_device); @@ -176,7 +176,7 @@ TEST_P(RemoteRunTests, CheckIsContinuousHostTensor2Dimensions) { } TEST_P(RemoteRunTests, CheckIsContinuousHostTensor3Dimensions) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() auto zero_context = core->get_default_context(target_device); @@ -204,7 +204,7 @@ TEST_P(RemoteRunTests, CheckIsContinuousHostTensor3Dimensions) { } TEST_P(RemoteRunTests, CheckIsContinuousHostTensor4Dimensions) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() auto zero_context = core->get_default_context(target_device); @@ -244,7 +244,7 @@ TEST_P(RemoteRunTests, CheckIsContinuousHostTensor4Dimensions) { } TEST_P(RemoteRunTests, CheckRemoteTensorInternalBuf) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) 
SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::InferRequest inference_request; ov::CompiledModel compiled_model; @@ -627,7 +627,7 @@ TEST_P(RemoteRunTests, ImportCpuVAUsingStandardRemoteTensorAPI) { } TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContext) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::InferRequest inference_request; @@ -652,7 +652,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContext) { } TEST_P(RemoteRunTests, CheckRemoteTensorSetOnlyTensorType) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; ov::InferRequest inference_request; @@ -668,7 +668,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorSetOnlyTensorType) { } TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContextandChangedInTensor) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; ov::InferRequest inference_request; @@ -694,7 +694,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContextandChange } TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContextandChangedInTensorExpectToFail) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; ov::InferRequest inference_request; @@ -713,7 +713,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContextandChange } TEST_P(RemoteRunTests, CheckImportModelPath) { - // Skip test according to plugin 
specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; ov::InferRequest inference_request; @@ -742,7 +742,7 @@ TEST_P(RemoteRunTests, CheckImportModelPath) { } TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufChangingTensors) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; ov::InferRequest inference_request; @@ -784,7 +784,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufChangingTensors) { } TEST_P(RemoteRunTests, CheckOutputDataFromTwoRuns) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; @@ -828,7 +828,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRuns) { } TEST_P(RemoteRunTests, CheckOutputDataFromRemoteTensorFromDifferentContext) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; @@ -883,7 +883,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromRemoteTensorFromDifferentContext) { } TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensors1) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; @@ -935,7 +935,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensors1) { } TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensors2) { - // Skip test according to plugin specific disabledTestPatterns() (if 
any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; @@ -984,7 +984,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensors2) { } TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensors3) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; @@ -1024,7 +1024,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensors3) { } TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensorsHostTensor1) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; @@ -1053,7 +1053,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensorsHostTensor1) } TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensorsHostTensor2) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; @@ -1094,7 +1094,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensorsHostTensor2) } TEST_P(RemoteRunTests, checkResultsAfterChangingStateTensors) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; @@ -1192,7 +1192,7 @@ TEST_P(RemoteRunTests, checkResultsAfterChangingStateTensors) { } TEST_P(RemoteRunTests, checkResultsAfterChangingStateTensorsWithRemoteTensors) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip 
test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::CompiledModel compiled_model; @@ -1290,7 +1290,7 @@ TEST_P(RemoteRunTests, checkResultsAfterChangingStateTensorsWithRemoteTensors) { } TEST_P(RemoteRunTests, checkResultsAfterChangingStateDataWithRemoteAndRandomTensors0) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() testing::internal::Random random(1); @@ -1379,7 +1379,7 @@ TEST_P(RemoteRunTests, checkResultsAfterChangingStateDataWithRemoteAndRandomTens } TEST_P(RemoteRunTests, checkResultsAfterChangingStateDataWithRemoteAndRandomTensors1) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() testing::internal::Random random(1); @@ -1469,7 +1469,7 @@ TEST_P(RemoteRunTests, checkResultsAfterChangingStateDataWithRemoteAndRandomTens } TEST_P(RemoteRunTests, CheckContextFromDifferentOvCores) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::Core core0; ov::Core core1; @@ -1483,7 +1483,7 @@ TEST_P(RemoteRunTests, CheckContextFromDifferentOvCores) { } TEST_P(RemoteRunTests, CheckContextFromDifferentDestroyedOvCores) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::RemoteContext context1, context2; diff --git a/src/plugins/intel_npu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_npu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 8c44c493e73da6..4d8112b3d17e69 100644 --- 
a/src/plugins/intel_npu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_npu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -229,9 +229,9 @@ bool categoryRuleEnabler(const std::string& category, return false; } -std::vector disabledTestPatterns(); +const std::vector& disabled_test_patterns(); -std::vector disabledTestPatterns() { +const std::vector& disabled_test_patterns() { // Initialize skip registry static const auto skipRegistry = []() { SkipRegistry _skipRegistry; @@ -293,9 +293,11 @@ std::vector disabledTestPatterns() { }(); // clang-format on - std::vector matchingPatterns; + static std::vector patterns; + std::vector matching_patterns; const auto currentTestName = getCurrentTestName(); - matchingPatterns.emplace_back(skipRegistry.getMatchingPattern(currentTestName)); + matching_patterns.emplace_back(skipRegistry.getMatchingPattern(currentTestName)); + patterns = matching_patterns; - return matchingPatterns; + return patterns; } diff --git a/src/plugins/template/tests/functional/skip_tests_config.cpp b/src/plugins/template/tests/functional/skip_tests_config.cpp index 62533e85f1f6f3..6998128b5c6802 100644 --- a/src/plugins/template/tests/functional/skip_tests_config.cpp +++ b/src/plugins/template/tests/functional/skip_tests_config.cpp @@ -9,115 +9,119 @@ #include "openvino/core/core_visibility.hpp" -std::vector disabledTestPatterns() { - std::vector retVector{ +const std::vector& disabled_test_patterns() { + const static std::vector patterns{ // unsupported metrics - R"(.*smoke_OVGetMetricPropsTest.*OVGetMetricPropsTest.*(RANGE_FOR_STREAMS|MAX_BATCH_SIZE).*)", + std::regex(R"(.*smoke_OVGetMetricPropsTest.*OVGetMetricPropsTest.*(RANGE_FOR_STREAMS|MAX_BATCH_SIZE).*)"), // CVS-64094 - R"(.*ReferenceLogSoftmaxLayerTest.*4.*iType=f16.*axis=.*1.*)", + std::regex(R"(.*ReferenceLogSoftmaxLayerTest.*4.*iType=f16.*axis=.*1.*)"), // CVS-64012 - 
R"(.*ReferenceDeformableConvolutionLayerTest.*f16.*real_offset_padding_stride_dialation.*)", - R"(.*ReferenceDeformableConvolutionLayerTest.*bf16.*)", - R"(.*ReferenceDeformableConvolutionV8LayerTest.*f16.*real_offset_padding_stride_dialation.*)", - R"(.*ReferenceDeformableConvolutionV8LayerTest.*bf16.*)", - R"(.*ReferenceDeformableConvolutionV8LayerTest.*f64.*mask.*)", + std::regex(R"(.*ReferenceDeformableConvolutionLayerTest.*f16.*real_offset_padding_stride_dialation.*)"), + std::regex(R"(.*ReferenceDeformableConvolutionLayerTest.*bf16.*)"), + std::regex(R"(.*ReferenceDeformableConvolutionV8LayerTest.*f16.*real_offset_padding_stride_dialation.*)"), + std::regex(R"(.*ReferenceDeformableConvolutionV8LayerTest.*bf16.*)"), + std::regex(R"(.*ReferenceDeformableConvolutionV8LayerTest.*f64.*mask.*)"), // CVS-63973 - R"(.*ReferencePSROIPoolingLayerTest.*bf16.*)", + std::regex(R"(.*ReferencePSROIPoolingLayerTest.*bf16.*)"), // CVS-63977 - R"(.*ReferenceProposalV1LayerTest.*f16.*)", + std::regex(R"(.*ReferenceProposalV1LayerTest.*f16.*)"), // CVS-64082 - R"(.*ReferenceProposalV4LayerTest.*f16.*)", + std::regex(R"(.*ReferenceProposalV4LayerTest.*f16.*)"), // CVS-64101 - R"(.*ReferenceExperimentalGPLayerTest.*bf16.*)", + std::regex(R"(.*ReferenceExperimentalGPLayerTest.*bf16.*)"), // CVS-64105 - R"(.*ReferenceGatherElementsTestNegative.*)", + std::regex(R"(.*ReferenceGatherElementsTestNegative.*)"), // CVS-64052 - R"(.*ReferenceStridedSliceLayerTest.*strided_slice_stride_optional_dynamic)", + std::regex(R"(.*ReferenceStridedSliceLayerTest.*strided_slice_stride_optional_dynamic)"), // CVS-64017 - R"(.*ReferenceGatherTest.*dType=i16.*)", - R"(.*ReferenceGatherTest.*dType=u16.*)", - R"(.*ReferenceGatherTest.*dType=bf16.*)", - R"(.*ReferenceGatherTest.*dType=f64.*)", + std::regex(R"(.*ReferenceGatherTest.*dType=i16.*)"), + std::regex(R"(.*ReferenceGatherTest.*dType=u16.*)"), + std::regex(R"(.*ReferenceGatherTest.*dType=bf16.*)"), + 
std::regex(R"(.*ReferenceGatherTest.*dType=f64.*)"), // CVS-64110 - R"(.*ReferenceGatherTestV7.*dType=i16.*)", - R"(.*ReferenceGatherTestV7.*dType=u16.*)", - R"(.*ReferenceGatherTestV7.*dType=bf16.*)", - R"(.*ReferenceGatherTestV7.*dType=f64.*)", + std::regex(R"(.*ReferenceGatherTestV7.*dType=i16.*)"), + std::regex(R"(.*ReferenceGatherTestV7.*dType=u16.*)"), + std::regex(R"(.*ReferenceGatherTestV7.*dType=bf16.*)"), + std::regex(R"(.*ReferenceGatherTestV7.*dType=f64.*)"), // CVS-64037 - R"(.*ReferencePadTest.*pad_exterior_2d_0x0)", - R"(.*ReferencePadTest.*pad_exterior_2d_0x3)", - R"(.*ReferencePadTest.*pad_exterior_2d_3x0)", + std::regex(R"(.*ReferencePadTest.*pad_exterior_2d_0x0)"), + std::regex(R"(.*ReferencePadTest.*pad_exterior_2d_0x3)"), + std::regex(R"(.*ReferencePadTest.*pad_exterior_2d_3x0)"), // CVS-70975 - R"(.*ReferencePadTestParamsTooLarge.*)", + std::regex(R"(.*ReferencePadTestParamsTooLarge.*)"), // CVS-64113 - R"(.*ReferenceRollLayerTest.*dType=i4.*)", - R"(.*ReferenceRollLayerTest.*dType=u4.*)", + std::regex(R"(.*ReferenceRollLayerTest.*dType=i4.*)"), + std::regex(R"(.*ReferenceRollLayerTest.*dType=u4.*)"), // CVS-64066 - R"(.*ReferenceGRUCellTestHardsigmoidActivationFunction.*gru_cell_hardsigmoid_activation_function)", + std::regex(R"(.*ReferenceGRUCellTestHardsigmoidActivationFunction.*gru_cell_hardsigmoid_activation_function)"), // CVS-71381 - R"(.*ReferenceExpLayerTest.*u32.*)", - R"(.*ReferenceExpLayerTest.*u64.*)", + std::regex(R"(.*ReferenceExpLayerTest.*u32.*)"), + std::regex(R"(.*ReferenceExpLayerTest.*u64.*)"), // CVS-64054 - R"(.*ReferenceTopKTest.*aType=i8.*)", - R"(.*ReferenceTopKTest.*aType=i16.*)", - R"(.*ReferenceTopKTest.*aType=u8.*)", - R"(.*ReferenceTopKTest.*aType=u16.*)", - R"(.*ReferenceTopKTest.*aType=bf16.*)", - R"(.*ReferenceTopKTest.*aType=f64.*)", + std::regex(R"(.*ReferenceTopKTest.*aType=i8.*)"), + std::regex(R"(.*ReferenceTopKTest.*aType=i16.*)"), + std::regex(R"(.*ReferenceTopKTest.*aType=u8.*)"), + 
std::regex(R"(.*ReferenceTopKTest.*aType=u16.*)"), + std::regex(R"(.*ReferenceTopKTest.*aType=bf16.*)"), + std::regex(R"(.*ReferenceTopKTest.*aType=f64.*)"), // CVS-63947 - R"(.*ReferenceConcatTest.*concat_zero_.*)", + std::regex(R"(.*ReferenceConcatTest.*concat_zero_.*)"), // CVS-64102 - R"(.*ReferenceExperimentalPGGLayerTest.*iType=bf16.*stride_x=(32|64).*)", + std::regex(R"(.*ReferenceExperimentalPGGLayerTest.*iType=bf16.*stride_x=(32|64).*)"), // New plugin API doesn't support legacy NV12 I420 preprocessing - R"(.*ConvertNV12WithLegacyTest.*)", - R"(.*ConvertI420WithLegacyTest.*)", + std::regex(R"(.*ConvertNV12WithLegacyTest.*)"), + std::regex(R"(.*ConvertI420WithLegacyTest.*)"), // Plugin version was changed to ov::Version - R"(.*VersionTest.*pluginCurrentVersionIsCorrect.*)", + std::regex(R"(.*VersionTest.*pluginCurrentVersionIsCorrect.*)"), // New plugin API doesn't support changes of pre-processing - R"(.*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)", - R"(.*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", + std::regex(R"(.*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)"), + std::regex(R"(.*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)"), // New plugin work with tensors, so it means that blob in old API can have different pointers - R"(.*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)", - R"(.*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)", - R"(.*InferRequestIOBBlobTest.*secondCallGetInputAfterInferSync.*)", - R"(.*InferRequestIOBBlobTest.*secondCallGetOutputAfterInferSync.*)", + std::regex(R"(.*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)"), + std::regex(R"(.*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)"), + std::regex(R"(.*InferRequestIOBBlobTest.*secondCallGetInputAfterInferSync.*)"), + std::regex(R"(.*InferRequestIOBBlobTest.*secondCallGetOutputAfterInferSync.*)"), // Old API cannot deallocate tensor - 
R"(.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)", + std::regex(R"(.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)"), // Why query state should throw an exception - R"(.*InferRequestQueryStateExceptionTest.*inferreq_smoke_QueryState_ExceptionTest.*)", - R"(.*OVInferRequestCheckTensorPrecision.*get(Input|Output|Inputs|Outputs)From.*FunctionWith(Single|Several).*type=(u4|u1|i4|boolean).*)", - R"(.*LSTMSequence_With_Hardcoded_Refs.*ReferenceLSTMSequenceTest.*iType=f16.*)", + std::regex(R"(.*InferRequestQueryStateExceptionTest.*inferreq_smoke_QueryState_ExceptionTest.*)"), + std::regex( + R"(.*OVInferRequestCheckTensorPrecision.*get(Input|Output|Inputs|Outputs)From.*FunctionWith(Single|Several).*type=(u4|u1|i4|boolean).*)"), + std::regex(R"(.*LSTMSequence_With_Hardcoded_Refs.*ReferenceLSTMSequenceTest.*iType=f16.*)"), // Interpreter backend doesn't implement evaluate method for OP Multiply (by GroupNormalizationDecomposition) - R"(.*ReferenceGroupNormalization.*_f64*)", + std::regex(R"(.*ReferenceGroupNormalization.*_f64*)"), // Precision not high enough to get exact result for the complex test cases // (both tiny values and very high values necessary) - R"(.*ReferenceInverse.*bf16.*[4,4].*)", + std::regex(R"(.*ReferenceInverse.*bf16.*[4,4].*)"), // model import is not supported - R"(.*OVCompiledModelBaseTest.import_from_.*)"}; + std::regex(R"(.*OVCompiledModelBaseTest.import_from_.*)"), #ifdef _WIN32 - // CVS-63989 - retVector.emplace_back(R"(.*ReferenceSigmoidLayerTest.*u64.*)"); - // CVS-120988 - retVector.emplace_back(R"(.*ReferenceTopKTest.*topk_max_sort_none)"); - retVector.emplace_back(R"(.*ReferenceTopKTest.*topk_min_sort_none)"); + // CVS-63989 + std::regex(R"(.*ReferenceSigmoidLayerTest.*u64.*)"), + // CVS-120988 + std::regex(R"(.*ReferenceTopKTest.*topk_max_sort_none)"), + std::regex(R"(.*ReferenceTopKTest.*topk_min_sort_none)"), #endif #if defined(__APPLE__) && defined(OPENVINO_ARCH_X86_64) - 
// CVS-120988 - retVector.emplace_back(R"(.*ReferenceTopKTest.*aType=(u32|u64).*topk_(max|min)_sort_none)"); - retVector.emplace_back(R"(.*ReferenceTopKTest.*aType=(i32|i64|f16|f32).*topk_min_sort_none)"); + // CVS-120988 + std::regex(R"(.*ReferenceTopKTest.*aType=(u32|u64).*topk_(max|min)_sort_none)"), + std::regex(R"(.*ReferenceTopKTest.*aType=(i32|i64|f16|f32).*topk_min_sort_none)"), #endif #if defined(OPENVINO_ARCH_ARM64) || defined(OPENVINO_ARCH_ARM) - retVector.emplace_back(R"(.*smoke_TopK_With_Hardcoded_Refs/ReferenceTopKTestMaxMinSort.CompareWithRefs.*)"); - retVector.emplace_back(R"(.*smoke_TopK_With_Hardcoded_Refs/ReferenceTopKTestBackend.CompareWithRefs.*)"); - retVector.emplace_back(R"(.*smoke_TopK_With_Hardcoded_Refs/ReferenceTopKTestMaxMinSortV3.CompareWithRefs.*)"); - retVector.emplace_back(R"(.*smoke_TopK_With_Hardcoded_Refs/ReferenceTopKTestBackendV3.CompareWithRefs.*)"); - // fails only on Linux arm64 - retVector.emplace_back( - R"(.*ReferenceConversionLayerTest.CompareWithHardcodedRefs/conversionType=(Convert|ConvertLike)_shape=.*_iType=(f16|f32|bf16)_oType=u4.*)"); + std::regex(R"(.*smoke_TopK_With_Hardcoded_Refs/ReferenceTopKTestMaxMinSort.CompareWithRefs.*)"), + std::regex(R"(.*smoke_TopK_With_Hardcoded_Refs/ReferenceTopKTestBackend.CompareWithRefs.*)"), + std::regex(R"(.*smoke_TopK_With_Hardcoded_Refs/ReferenceTopKTestMaxMinSortV3.CompareWithRefs.*)"), + std::regex(R"(.*smoke_TopK_With_Hardcoded_Refs/ReferenceTopKTestBackendV3.CompareWithRefs.*)"), + // fails only on Linux arm64 + + std::regex( + R"(.*ReferenceConversionLayerTest.CompareWithHardcodedRefs/conversionType=(Convert|ConvertLike)_shape=.*_iType=(f16|f32|bf16)_oType=u4.*)"), #endif - return retVector; + }; + + return patterns; } diff --git a/src/tests/functional/base_func_tests/include/behavior/compiled_model/compiled_model_base.hpp b/src/tests/functional/base_func_tests/include/behavior/compiled_model/compiled_model_base.hpp index ffba72498830d8..8a3a63bc279567 100644 --- 
a/src/tests/functional/base_func_tests/include/behavior/compiled_model/compiled_model_base.hpp +++ b/src/tests/functional/base_func_tests/include/behavior/compiled_model/compiled_model_base.hpp @@ -94,7 +94,7 @@ class OVCompiledModelBaseTest : public testing::WithParamInterfaceGetParam(); - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED(); APIBaseTest::SetUp(); function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(); diff --git a/src/tests/functional/base_func_tests/include/behavior/ov_infer_request/properties_tests.hpp b/src/tests/functional/base_func_tests/include/behavior/ov_infer_request/properties_tests.hpp index 91651cfc7bdf42..e2f04f354b4e92 100644 --- a/src/tests/functional/base_func_tests/include/behavior/ov_infer_request/properties_tests.hpp +++ b/src/tests/functional/base_func_tests/include/behavior/ov_infer_request/properties_tests.hpp @@ -22,7 +22,7 @@ class InferRequestPropertiesTest : public testing::WithParamInterfaceGetParam(); - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() APIBaseTest::SetUp(); // Create model diff --git a/src/tests/functional/base_func_tests/include/shared_test_classes/base/ov_behavior_test_utils.hpp b/src/tests/functional/base_func_tests/include/shared_test_classes/base/ov_behavior_test_utils.hpp index 7b7b5a976a6e10..7ed58e24967294 100644 --- a/src/tests/functional/base_func_tests/include/shared_test_classes/base/ov_behavior_test_utils.hpp +++ b/src/tests/functional/base_func_tests/include/shared_test_classes/base/ov_behavior_test_utils.hpp @@ -149,7 +149,7 @@ class OVInferRequestTests : public testing::WithParamInterfaceGetParam(); - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to 
plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() APIBaseTest::SetUp(); function = ov::test::behavior::getDefaultNGraphFunctionForTheDevice(); diff --git a/src/tests/functional/base_func_tests/include/shared_test_classes/base/utils/compare_results.hpp b/src/tests/functional/base_func_tests/include/shared_test_classes/base/utils/compare_results.hpp index d9d785fd49acde..36c1f586b0074c 100644 --- a/src/tests/functional/base_func_tests/include/shared_test_classes/base/utils/compare_results.hpp +++ b/src/tests/functional/base_func_tests/include/shared_test_classes/base/utils/compare_results.hpp @@ -20,7 +20,7 @@ using CompareMap = std::map>; -CompareMap getCompareMap(); +const CompareMap& getCompareMap(); } // namespace utils } // namespace test diff --git a/src/tests/functional/base_func_tests/include/shared_test_classes/base/utils/generate_inputs.hpp b/src/tests/functional/base_func_tests/include/shared_test_classes/base/utils/generate_inputs.hpp index 6405bbbdd73bf8..0eb50af2a6be69 100644 --- a/src/tests/functional/base_func_tests/include/shared_test_classes/base/utils/generate_inputs.hpp +++ b/src/tests/functional/base_func_tests/include/shared_test_classes/base/utils/generate_inputs.hpp @@ -22,7 +22,7 @@ using InputsMap = std::map inGenRangeData)>>; -InputsMap getInputMap(); +const InputsMap& getInputMap(); } // namespace utils } // namespace test diff --git a/src/tests/functional/base_func_tests/src/base/ov_subgraph.cpp b/src/tests/functional/base_func_tests/src/base/ov_subgraph.cpp index 2c846dadfea2fe..a98cd57090140a 100644 --- a/src/tests/functional/base_func_tests/src/base/ov_subgraph.cpp +++ b/src/tests/functional/base_func_tests/src/base/ov_subgraph.cpp @@ -250,14 +250,14 @@ void SubgraphBaseTest::compare(const std::vector& expected, ASSERT_EQ(expected.size(), actual.size()); ASSERT_EQ(expected.size(), function->get_results().size()); init_thresholds(); - auto compareMap = utils::getCompareMap(); + const auto& compare_map 
= utils::getCompareMap(); const auto& results = function->get_results(); for (size_t j = 0; j < results.size(); j++) { const auto result = results[j]; for (size_t i = 0; i < result->get_input_size(); ++i) { std::shared_ptr inputNode = result->get_input_node_shared_ptr(i); - auto it = compareMap.find(inputNode->get_type_info()); - ASSERT_NE(it, compareMap.end()); + auto it = compare_map.find(inputNode->get_type_info()); + ASSERT_NE(it, compare_map.end()); it->second(inputNode, i, inference_precision, expected[j], actual[j], abs_threshold, rel_threshold, topk_threshold, mvn_threshold); diff --git a/src/tests/functional/base_func_tests/src/base/utils/compare_results.cpp b/src/tests/functional/base_func_tests/src/base/utils/compare_results.cpp index a8644736c89808..6e7025d68d3877 100644 --- a/src/tests/functional/base_func_tests/src/base/utils/compare_results.cpp +++ b/src/tests/functional/base_func_tests/src/base/utils/compare_results.cpp @@ -187,9 +187,9 @@ void compareResults(const std::shared_ptr &node, } // namespace -CompareMap getCompareMap() { +const CompareMap& getCompareMap() { OPENVINO_SUPPRESS_DEPRECATED_START - CompareMap compareMap{ + const static CompareMap compareMap{ #define _OPENVINO_OP_REG(NAME, NAMESPACE) {NAMESPACE::NAME::get_type_info_static(), compareResults}, #include "openvino/opsets/opset1_tbl.hpp" #include "openvino/opsets/opset2_tbl.hpp" diff --git a/src/tests/functional/base_func_tests/src/base/utils/generate_inputs.cpp b/src/tests/functional/base_func_tests/src/base/utils/generate_inputs.cpp index 1eedcc07f8be85..0f4f38953a8f56 100644 --- a/src/tests/functional/base_func_tests/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/base_func_tests/src/base/utils/generate_inputs.cpp @@ -1038,8 +1038,8 @@ ov::Tensor generateInput(const std::shared_ptr& node, } } // namespace -InputsMap getInputMap() { - static InputsMap inputsMap{ +const InputsMap& getInputMap() { + const static InputsMap inputsMap{ #define _OPENVINO_OP_REG(NAME, 
NAMESPACE) {NAMESPACE::NAME::get_type_info_static(), generateInput}, #include "openvino/opsets/opset1_tbl.hpp" @@ -1062,6 +1062,7 @@ InputsMap getInputMap() { #include "ov_ops/opset_private_tbl.hpp" #undef _OPENVINO_OP_REG }; + return inputsMap; } diff --git a/src/tests/functional/base_func_tests/src/base/utils/ranges.cpp b/src/tests/functional/base_func_tests/src/base/utils/ranges.cpp index ea5274204b7568..2d88784c41e1d8 100644 --- a/src/tests/functional/base_func_tests/src/base/utils/ranges.cpp +++ b/src/tests/functional/base_func_tests/src/base/utils/ranges.cpp @@ -31,7 +31,7 @@ std::string ModelRange::get_range_id(const std::shared_ptr& node) { } ov::Tensor ModelRange::generate_input(std::shared_ptr node, size_t port, const ov::Shape& targetShape) { - auto inputMap = ov::test::utils::getInputMap(); + const auto& inputMap = ov::test::utils::getInputMap(); auto it = inputMap.find(node->get_type_info()); if (it == inputMap.end()) { throw std::runtime_error("Couln't find Operation in inputMap: " + std::string(node->get_type_name())); diff --git a/src/tests/functional/base_func_tests/src/behavior/compiled_model/import_export.cpp b/src/tests/functional/base_func_tests/src/behavior/compiled_model/import_export.cpp index 360d502bd64654..d78ad17defdd00 100644 --- a/src/tests/functional/base_func_tests/src/behavior/compiled_model/import_export.cpp +++ b/src/tests/functional/base_func_tests/src/behavior/compiled_model/import_export.cpp @@ -36,7 +36,7 @@ std::string OVCompiledGraphImportExportTest::getTestCaseName(testing::TestParamI } void OVCompiledGraphImportExportTest::SetUp() { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED(); std::tie(elementType, target_device, configuration) = this->GetParam(); APIBaseTest::SetUp(); diff --git a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/inference_chaining.cpp 
b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/inference_chaining.cpp index c5d3a2be4de186..75906d1a082fe8 100644 --- a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/inference_chaining.cpp +++ b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/inference_chaining.cpp @@ -145,7 +145,7 @@ void OVInferenceChaining::Run() { // DEPRECATED VERSION TEST_P(OVInferenceChaining, StaticOutputToStaticInput) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() function0 = getFirstStaticFunction(); @@ -156,7 +156,7 @@ TEST_P(OVInferenceChaining, StaticOutputToStaticInput) { } TEST_P(OVInferenceChainingStatic, StaticOutputToStaticInput) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() function0 = getFirstStaticFunction(); @@ -167,7 +167,7 @@ TEST_P(OVInferenceChainingStatic, StaticOutputToStaticInput) { } TEST_P(OVInferenceChaining, StaticOutputToDynamicInput) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() const auto dynamic = ov::PartialShape::dynamic(ov::Rank(1)); @@ -179,7 +179,7 @@ TEST_P(OVInferenceChaining, StaticOutputToDynamicInput) { } TEST_P(OVInferenceChaining, DynamicOutputToDynamicInput) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() const auto dynamic = ov::PartialShape::dynamic(); @@ -191,7 +191,7 @@ TEST_P(OVInferenceChaining, DynamicOutputToDynamicInput) { } TEST_P(OVInferenceChaining, DynamicInputToDynamicOutput) { - // Skip test according to 
plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() this->outputToInput = false; diff --git a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/io_tensor.cpp b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/io_tensor.cpp index c6c32ed53ea34c..b9d5556f639f09 100644 --- a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/io_tensor.cpp +++ b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/io_tensor.cpp @@ -19,7 +19,7 @@ namespace test { namespace behavior { void OVInferRequestIOTensorTest::SetUp() { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() OVInferRequestTests::SetUp(); try { diff --git a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/iteration_chaining.cpp b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/iteration_chaining.cpp index 8a9ae8f3b1702f..699dba41de516e 100644 --- a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/iteration_chaining.cpp +++ b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/iteration_chaining.cpp @@ -55,7 +55,7 @@ std::shared_ptr OVIterationChaining::getIterativeFunction() { void OVIterationChaining::SetUp() { std::tie(target_device, configuration) = this->GetParam(); - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() APIBaseTest::SetUp(); function = getIterativeFunction(); @@ -114,7 +114,7 @@ void OVIterationChaining::Run() { } TEST_P(OVIterationChaining, Simple) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() 
(if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() Run(); diff --git a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/memory_states.cpp b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/memory_states.cpp index 86881ea1b1200c..001f3405279ef6 100644 --- a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/memory_states.cpp +++ b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/memory_states.cpp @@ -30,7 +30,7 @@ std::string OVInferRequestVariableStateTest::getTestCaseName(const testing::Test } void OVInferRequestVariableStateTest::SetUp() { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() std::tie(net, statesToQuery, deviceName, configuration) = GetParam(); OVInferRequestTestBase::SetUp(); diff --git a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/wait.cpp b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/wait.cpp index 67a7f615f8306e..9d6c7e8f531634 100644 --- a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/wait.cpp +++ b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/wait.cpp @@ -10,7 +10,7 @@ namespace test { namespace behavior { void OVInferRequestWaitTests::SetUp() { OVInferRequestTests::SetUp(); - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() req = execNet.create_infer_request(); input = execNet.input(); diff --git a/src/tests/functional/base_func_tests/src/behavior/ov_plugin/life_time.cpp b/src/tests/functional/base_func_tests/src/behavior/ov_plugin/life_time.cpp index 19da91cce5b330..69614090e2c675 100644 --- a/src/tests/functional/base_func_tests/src/behavior/ov_plugin/life_time.cpp +++ 
b/src/tests/functional/base_func_tests/src/behavior/ov_plugin/life_time.cpp @@ -99,7 +99,7 @@ TEST_P(OVHoldersTest, LoadedAny) { } TEST_P(OVHoldersTest, LoadedRemoteContext) { - // Skip test according to plugin specific disabledTestPatterns() (if any) + // Skip test according to plugin specific disabled_test_patterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() ov::RemoteContext ctx; { diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/skip_tests_config.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/skip_tests_config.cpp index da0fa428432aaa..2a485b8fcd26a7 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/skip_tests_config.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/skip_tests_config.cpp @@ -4,7 +4,7 @@ #include "functional_test_utils/skip_tests_config.hpp" -std::vector disabledTestPatterns() { - std::vector retVector {}; - return retVector; +const std::vector& disabled_test_patterns() { + const static std::vector patterns{}; + return patterns; } diff --git a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/skip_tests_config.cpp b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/skip_tests_config.cpp index 9f7fc83111d590..94d196e87fe89a 100644 --- a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/skip_tests_config.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/skip_tests_config.cpp @@ -18,12 +18,6 @@ const char *targetPluginName = ""; const char *refCachePath = ""; std::vector IRFolderPaths = {}; -std::vector disabledTests = { - R"(.*OVCompiledModelBaseTest.*import_from_.*_blob.*targetDevice=(MULTI|AUTO|CPU).*)", - R"(.*OVCompiledModelBaseTest.*compile_from_.*_blob.*targetDevice=(MULTI|AUTO|CPU).*)", - R"(.*OVCompiledModelBaseTest.*compile_from_cached_weightless_blob.*targetDevice=(MULTI|AUTO|CPU).*)", - 
R"(.*OVCompiledModelBaseTest.*use_blob_hint_.*targetDevice=CPU.*)", -}; ShapeMode shapeMode = ov::test::conformance::ShapeMode::BOTH; @@ -31,6 +25,13 @@ ShapeMode shapeMode = ov::test::conformance::ShapeMode::BOTH; } // namespace test } // namespace ov -std::vector disabledTestPatterns() { - return ov::test::conformance::disabledTests; +const std::vector& disabled_test_patterns() { + const static std::vector patterns{ + std::regex(R"(.*OVCompiledModelBaseTest.*import_from_.*_blob.*targetDevice=(MULTI|AUTO|CPU).*)"), + std::regex(R"(.*OVCompiledModelBaseTest.*compile_from_.*_blob.*targetDevice=(MULTI|AUTO|CPU).*)"), + std::regex(R"(.*OVCompiledModelBaseTest.*compile_from_cached_weightless_blob.*targetDevice=(MULTI|AUTO|CPU).*)"), + std::regex(R"(.*OVCompiledModelBaseTest.*use_blob_hint_.*targetDevice=CPU.*)"), + }; + + return patterns; } diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp index 8c75a45434895b..5caa138302b058 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp @@ -134,7 +134,7 @@ void ReadIRTest::SetUp() { auto input_info = meta_info.get_input_info(); rel_influence_coef = meta_info.get_graph_priority(); - auto inputMap = utils::getInputMap(); + const auto& inputMap = utils::getInputMap(); std::vector> parameter_to_remove; for (const auto& param : function->get_parameters()) { auto in_info = input_info.find(param->get_friendly_name())->second; diff --git a/src/tests/test_utils/common_test_utils/tests/skip_tests_config.cpp b/src/tests/test_utils/common_test_utils/tests/skip_tests_config.cpp index dbb6d86001b0d4..7c033ea3f485ee 100644 --- a/src/tests/test_utils/common_test_utils/tests/skip_tests_config.cpp +++ 
b/src/tests/test_utils/common_test_utils/tests/skip_tests_config.cpp @@ -7,6 +7,7 @@ #include #include -std::vector disabledTestPatterns() { - return std::vector{}; +const std::vector& disabled_test_patterns() { + const static std::vector patterns{}; + return patterns; } diff --git a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/skip_tests_config.hpp b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/skip_tests_config.hpp index dd926d60add99e..c8a43376c95e2c 100644 --- a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/skip_tests_config.hpp +++ b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/skip_tests_config.hpp @@ -10,7 +10,7 @@ #include #include -std::vector disabledTestPatterns(); +const std::vector& disabled_test_patterns(); namespace ov { namespace test { diff --git a/src/tests/test_utils/functional_test_utils/src/skip_tests_config.cpp b/src/tests/test_utils/functional_test_utils/src/skip_tests_config.cpp index 7b979a40d92a5c..8a8cd75c10d1a8 100644 --- a/src/tests/test_utils/functional_test_utils/src/skip_tests_config.cpp +++ b/src/tests/test_utils/functional_test_utils/src/skip_tests_config.cpp @@ -9,9 +9,7 @@ #include "common_test_utils/file_utils.hpp" -namespace ov { -namespace test { -namespace utils { +namespace ov::test::utils { bool disable_tests_skipping = false; @@ -19,18 +17,16 @@ bool current_test_is_disabled() { if (disable_tests_skipping) return false; - const auto fullName = ::testing::UnitTest::GetInstance()->current_test_info()->test_case_name() + std::string(".") + - ::testing::UnitTest::GetInstance()->current_test_info()->name(); + const auto full_name = ::testing::UnitTest::GetInstance()->current_test_info()->test_case_name() + + std::string(".") + ::testing::UnitTest::GetInstance()->current_test_info()->name(); - for (const auto& pattern : disabledTestPatterns()) { - std::regex re(pattern); - if (std::regex_match(fullName, re)) + for 
(const auto& re : disabled_test_patterns()) { + if (std::regex_match(full_name, re)) { return true; + } } return false; } -} // namespace utils -} // namespace test -} // namespace ov +} // namespace ov::test::utils