From 16fec6a4528ca7468bc2cc0bab67a46be2d7089c Mon Sep 17 00:00:00 2001
From: GitHub Actions
Date: Wed, 22 Oct 2025 18:01:20 +0000
Subject: [PATCH 1/3] Add int8/uint8/int16/uint16 support for Max and Min
 operators

Fixes #26382

This commit adds support for the int8, uint8, int16, and uint16 data
types to the Max and Min operators for opset 12 and later, bringing the
implementation into compliance with the ONNX specification.

Changes:
- Updated type registration for the Max and Min operators (opset 12+)
- Updated the type dispatchers in Min_8::Compute and Max_8::Compute
- Added unit tests for all new data types

The existing Eigen-based implementation already handles all numeric
types generically, so only type registration and dispatching needed
updates. This change is fully backward compatible.
---
 .../providers/cpu/math/element_wise_ops.cc    |  12 +-
 .../cpu/math/element_wise_ops_test.cc         | 136 ++++++++++++++++++
 2 files changed, 144 insertions(+), 4 deletions(-)

diff --git a/onnxruntime/core/providers/cpu/math/element_wise_ops.cc b/onnxruntime/core/providers/cpu/math/element_wise_ops.cc
index 4d8a2bc1106ad..9b9330d6cc383 100644
--- a/onnxruntime/core/providers/cpu/math/element_wise_ops.cc
+++ b/onnxruntime/core/providers/cpu/math/element_wise_ops.cc
@@ -21,14 +21,16 @@ namespace op_kernel_type_control {
 ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Max, 8, Input, 0,
                                         float, double);
 ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Max, 12, Input, 0,
-                                        float, double, MLFloat16, int32_t, uint32_t, int64_t, uint64_t);
+                                        float, double, MLFloat16, int8_t, int16_t, int32_t, uint32_t,
+                                        int64_t, uint8_t, uint16_t, uint64_t);
 ORT_SPECIFY_OP_KERNEL_ARG_REQUIRED_TYPES(kCpuExecutionProvider, kOnnxDomain, Max, 12, Input, 0,
                                          int32_t, int64_t);
 
 // Min
 ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Min, 8, Input, 0,
                                         float, double);
 ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Min, 12, Input, 0,
-                                        float, double, MLFloat16, int32_t, uint32_t, int64_t, uint64_t);
+                                        float, double, MLFloat16, int8_t, int16_t, int32_t, uint32_t,
+                                        int64_t, uint8_t, uint16_t, uint64_t);
 ORT_SPECIFY_OP_KERNEL_ARG_REQUIRED_TYPES(kCpuExecutionProvider, kOnnxDomain, Min, 12, Input, 0,
                                          int32_t, int64_t);
@@ -922,7 +924,8 @@ Status Min_8::Compute(OpKernelContext* context) const {
       return MinMaxMLFloat16(*this, context);
       break;
     default:
-      utils::MLTypeCallDispatcher<double, int32_t, uint32_t, int64_t, uint64_t>
+      utils::MLTypeCallDispatcher<double, int8_t, int16_t, int32_t, uint32_t,
+                                  int64_t, uint8_t, uint16_t, uint64_t>
           t_disp(dt_type);
       return t_disp.InvokeRet(*this, context);
   }
@@ -988,7 +991,8 @@ Status Max_8::Compute(OpKernelContext* context) const {
       return MinMaxMLFloat16(*this, context);
       break;
     default:
-      utils::MLTypeCallDispatcher<double, int32_t, uint32_t, int64_t, uint64_t>
+      utils::MLTypeCallDispatcher<double, int8_t, int16_t, int32_t, uint32_t,
+                                  int64_t, uint8_t, uint16_t, uint64_t>
          t_disp(dt_type);
       return t_disp.InvokeRet(*this, context);
   }
diff --git a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
index 6806b47766da5..f3cc1cdcd05c3 100644
--- a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
+++ b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
@@ -3699,6 +3699,142 @@ TEST(MathOpTest, Equal_multidirectional_broadcastAB_bool) {
   test.Run();
 }
 
+TEST(MathOpTest, Max_12_Int8) {
+  OpTester test("Max", 12);
+  test.AddInput<int8_t>("data_0", {1, 3},
+                        {1, 2, 3});
+  test.AddInput<int8_t>("data_2", {3, 3},
+                        {10, 20, 30,
+                         40, 50, 60,
+                         70, 80, 90});
+  test.AddInput<int8_t>("data_1", {3, 1},
+                        {-1, -2, 127});
+  test.AddOutput<int8_t>("max", {3, 3},
+                         {10, 20, 30,
+                          40, 50, 60,
+                          127, 127, 127});
+  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
+}
+
+TEST(MathOpTest, Max_12_UInt8) {
+  OpTester test("Max", 12);
+  test.AddInput<uint8_t>("data_0", {1, 3},
+                         {1, 20, 30});
+  test.AddInput<uint8_t>("data_2", {3, 3},
+                         {10, 20, 30,
+                          40, 50, 60,
+                          70, 80, 90});
+  test.AddInput<uint8_t>("data_1", {3, 1},
+                         {100, 20, 30});
+  test.AddOutput<uint8_t>("max", {3, 3},
+                          {100, 100, 100,
+                           40, 50, 60,
+                           70, 80, 90});
+  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
+}
+
+TEST(MathOpTest, Min_12_Int8) {
+  OpTester test("Min", 12);
+  test.AddInput<int8_t>("data_0", {1, 3},
+                        {1, 2, 3});
+  test.AddInput<int8_t>("data_2", {3, 3},
+                        {10, 20, 30,
+                         40, 50, 60,
+                         -70, -80, -90});
+  test.AddInput<int8_t>("data_1", {3, 1},
+                        {-1, 20, 127});
+  test.AddOutput<int8_t>("min", {3, 3},
+                         {-1, -1, -1,
+                          1, 2, 3,
+                          -70, -80, -90});
+  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
+}
+
+TEST(MathOpTest, Min_12_UInt8) {
+  OpTester test("Min", 12);
+  test.AddInput<uint8_t>("data_0", {1, 3},
+                         {1, 20, 30});
+  test.AddInput<uint8_t>("data_2", {3, 3},
+                         {10, 20, 30,
+                          40, 50, 60,
+                          70, 80, 90});
+  test.AddInput<uint8_t>("data_1", {3, 1},
+                         {1, 20, 30});
+  test.AddOutput<uint8_t>("min", {3, 3},
+                          {1, 1, 1,
+                           1, 20, 20,
+                           1, 20, 30});
+  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
+}
+
+TEST(MathOpTest, Max_12_Int16) {
+  OpTester test("Max", 12);
+  test.AddInput<int16_t>("data_0", {1, 3},
+                         {1, 2, 3});
+  test.AddInput<int16_t>("data_2", {3, 3},
+                         {10, 20, 30,
+                          40, 50, 60,
+                          70, 80, 90});
+  test.AddInput<int16_t>("data_1", {3, 1},
+                         {-1, -2, 300});
+  test.AddOutput<int16_t>("max", {3, 3},
+                          {10, 20, 30,
+                           40, 50, 60,
+                           300, 300, 300});
+  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
+}
+
+TEST(MathOpTest, Max_12_UInt16) {
+  OpTester test("Max", 12);
+  test.AddInput<uint16_t>("data_0", {1, 3},
+                          {1, 20, 30});
+  test.AddInput<uint16_t>("data_2", {3, 3},
+                          {10, 20, 30,
+                           40, 50, 60,
+                           70, 80, 90});
+  test.AddInput<uint16_t>("data_1", {3, 1},
+                          {100, 20, 30});
+  test.AddOutput<uint16_t>("max", {3, 3},
+                           {100, 100, 100,
+                            40, 50, 60,
+                            70, 80, 90});
+  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
+}
+
+TEST(MathOpTest, Min_12_Int16) {
+  OpTester test("Min", 12);
+  test.AddInput<int16_t>("data_0", {1, 3},
+                         {1, 2, 3});
+  test.AddInput<int16_t>("data_2", {3, 3},
+                         {10, 20, 30,
+                          40, 50, 60,
+                          -70, -80, -90});
+  test.AddInput<int16_t>("data_1", {3, 1},
+                         {-1, 20, 300});
+  test.AddOutput<int16_t>("min", {3, 3},
+                          {-1, -1, -1,
+                           1, 2, 3,
+                           -70, -80, -90});
+  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
+}
+
+TEST(MathOpTest, Min_12_UInt16) {
+  OpTester test("Min", 12);
+  test.AddInput<uint16_t>("data_0", {1, 3},
+                          {1, 20, 30});
+  test.AddInput<uint16_t>("data_2", {3, 3},
+                          {10, 20, 30,
+                           40, 50, 60,
+                           70, 80, 90});
+  test.AddInput<uint16_t>("data_1", {3, 1},
+                          {1, 20, 30});
+  test.AddOutput<uint16_t>("min", {3, 3},
+                           {1, 1, 1,
+                            1, 20, 20,
+                            1, 20, 30});
+  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
+}
+
 TEST(MathOpTest, Mean_6) {
   OpTester test("Mean", 6);
   std::vector<int64_t> dims{3, 3};
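The commit message above claims the Eigen-based kernel is type-generic, so only registration and dispatch needed to change. A minimal standalone sketch of why that holds (illustrative only, not ORT's actual kernel; it assumes Eigen is available, as ORT vendors it):

```cpp
// Minimal sketch (not ONNX Runtime's kernel) of an Eigen-based element-wise
// max: the same expression compiles for int8_t, uint8_t, float, etc., so
// enabling a new dtype is purely a registration/dispatch change.
#include <Eigen/Core>
#include <cstdint>
#include <iostream>

template <typename T>
void ElementwiseMax(const T* a, const T* b, T* out, Eigen::Index n) {
  Eigen::Map<const Eigen::Array<T, Eigen::Dynamic, 1>> ma(a, n);
  Eigen::Map<const Eigen::Array<T, Eigen::Dynamic, 1>> mb(b, n);
  Eigen::Map<Eigen::Array<T, Eigen::Dynamic, 1>> mo(out, n);
  mo = ma.max(mb);  // coefficient-wise max, templated on the scalar type
}

int main() {
  int8_t a[3] = {1, -2, 127};
  int8_t b[3] = {10, -80, 3};
  int8_t out[3];
  ElementwiseMax<int8_t>(a, b, out, 3);
  for (int v : out) std::cout << v << ' ';  // prints: 10 -2 127
}
```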
From e657cb1dbd8597d7b312400f52d184376d64372a Mon Sep 17 00:00:00 2001
From: Changming Sun
Date: Wed, 22 Oct 2025 14:11:36 -0700
Subject: [PATCH 2/3] Update doc

---
 docs/OperatorKernels.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/OperatorKernels.md b/docs/OperatorKernels.md
index 0d66e69c8e925..87a32c9f2e5f9 100644
--- a/docs/OperatorKernels.md
+++ b/docs/OperatorKernels.md
@@ -242,8 +242,8 @@ Do not modify directly.*
 |||[9, 12]|**T** = tensor(double), tensor(float), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
 |||[1, 8]|**T** = tensor(double), tensor(float)|
 |MatMulInteger|*in* A:**T1**<br> *in* B:**T2**<br> *in* a_zero_point:**T1**<br> *in* b_zero_point:**T2**<br> *out* Y:**T3**|10+|**T1** = tensor(int8), tensor(uint8)<br> **T2** = tensor(int8), tensor(uint8)<br> **T3** = tensor(int32)|
-|Max|*in* data_0:**T**<br> *out* max:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
-|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
+|Max|*in* data_0:**T**<br> *out* max:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
+|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
 |||[8, 11]|**T** = tensor(double), tensor(float)|
 |||[6, 7]|**T** = tensor(float)|
 |MaxPool|*in* X:**T**<br> *out* Y:**T**<br><br>or<br><br>*in* X:**T**<br> *out* Y:**T**<br> *out* Indices:**I**|22+|**I** = tensor(int64)<br> **T** = tensor(double), tensor(float), tensor(int8), tensor(uint8)|
@@ -263,8 +263,8 @@ Do not modify directly.*
 |MelWeightMatrix|*in* num_mel_bins:**T1**<br> *in* dft_length:**T1**<br> *in* sample_rate:**T1**<br> *in* lower_edge_hertz:**T2**<br> *in* upper_edge_hertz:**T2**<br> *out* output:**T3**|17+|**T1** = tensor(int32), tensor(int64)<br> **T2** = tensor(float)<br> **T3** = tensor(double), tensor(float), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
 |MemcpyFromHost|*in* X:**T**<br> *out* Y:**T**|1+|**T** = seq(tensor(bfloat16)), seq(tensor(bool)), seq(tensor(double)), seq(tensor(float)), seq(tensor(float16)), seq(tensor(float8e4m3fn)), seq(tensor(float8e4m3fnuz)), seq(tensor(float8e5m2)), seq(tensor(float8e5m2fnuz)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(int8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(uint8)), tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
 |MemcpyToHost|*in* X:**T**<br> *out* Y:**T**|1+|**T** = seq(tensor(bfloat16)), seq(tensor(bool)), seq(tensor(double)), seq(tensor(float)), seq(tensor(float16)), seq(tensor(float8e4m3fn)), seq(tensor(float8e4m3fnuz)), seq(tensor(float8e5m2)), seq(tensor(float8e5m2fnuz)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(int8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(uint8)), tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
-|Min|*in* data_0:**T**<br> *out* min:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
-|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
+|Min|*in* data_0:**T**<br> *out* min:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
+|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
 |||[8, 11]|**T** = tensor(double), tensor(float)|
 |||[6, 7]|**T** = tensor(float)|
 |Mod|*in* A:**T**<br> *in* B:**T**<br> *out* C:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
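For reference, the expected values in the new tests follow ONNX's variadic Max/Min with numpy-style multidirectional broadcasting: the {1, 3}, {3, 3}, and {3, 1} inputs all broadcast to a {3, 3} result. A plain-C++ sketch (a hypothetical check, not ORT code) that reproduces the Max_12_Int8 expectations:

```cpp
// Reproduces the Max_12_Int8 test data by hand: variadic max over three
// inputs, with the {1,3} input repeating its row and the {3,1} input
// repeating its column (numpy-style broadcasting).
#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
  const int8_t data_0[1][3] = {{1, 2, 3}};
  const int8_t data_2[3][3] = {{10, 20, 30}, {40, 50, 60}, {70, 80, 90}};
  const int8_t data_1[3][1] = {{-1}, {-2}, {127}};

  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 3; ++j) {
      int8_t m = std::max(data_0[0][j], data_2[i][j]);
      m = std::max(m, data_1[i][0]);
      std::cout << int(m) << (j == 2 ? '\n' : ' ');
    }
  }
  // Prints the AddOutput values from Max_12_Int8:
  // 10 20 30
  // 40 50 60
  // 127 127 127
}
```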
From 74ec33b4b4a25366ab8b5371e54c7da165839d39 Mon Sep 17 00:00:00 2001
From: Gemini
Date: Mon, 27 Oct 2025 13:54:29 -0700
Subject: [PATCH 3/3] Remove int16/uint16 support from Max/Min operators

Address review feedback from skottmckay: remove the int16/uint16 types,
as they are not commonly used in real models. Keep only the int8/uint8
support, which was the original requirement from the WebNN EP testing.

Changes:
- Removed int16_t and uint16_t from the type registration for Max/Min
  opset 12
- Removed int16_t and uint16_t from the MLTypeCallDispatcher in the
  Compute methods
- Removed all int16/uint16 test cases (4 tests removed)
- Updated the documentation to reflect the supported types

The change reduces code maintenance burden while still providing the
necessary int8/uint8 support for quantized models.
---
 docs/OperatorKernels.md                       |  8 +--
 .../providers/cpu/math/element_wise_ops.cc    | 16 ++---
 .../cpu/math/element_wise_ops_test.cc         | 68 -------------------
 3 files changed, 12 insertions(+), 80 deletions(-)

diff --git a/docs/OperatorKernels.md b/docs/OperatorKernels.md
index 87a32c9f2e5f9..73b38afff34f3 100644
--- a/docs/OperatorKernels.md
+++ b/docs/OperatorKernels.md
@@ -242,8 +242,8 @@ Do not modify directly.*
 |||[9, 12]|**T** = tensor(double), tensor(float), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
 |||[1, 8]|**T** = tensor(double), tensor(float)|
 |MatMulInteger|*in* A:**T1**<br> *in* B:**T2**<br> *in* a_zero_point:**T1**<br> *in* b_zero_point:**T2**<br> *out* Y:**T3**|10+|**T1** = tensor(int8), tensor(uint8)<br> **T2** = tensor(int8), tensor(uint8)<br> **T3** = tensor(int32)|
-|Max|*in* data_0:**T**<br> *out* max:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
-|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
+|Max|*in* data_0:**T**<br> *out* max:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(int8), tensor(uint32), tensor(uint64), tensor(uint8)|
+|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(int8), tensor(uint32), tensor(uint64), tensor(uint8)|
 |||[8, 11]|**T** = tensor(double), tensor(float)|
 |||[6, 7]|**T** = tensor(float)|
 |MaxPool|*in* X:**T**<br> *out* Y:**T**<br><br>or<br><br>*in* X:**T**<br> *out* Y:**T**<br> *out* Indices:**I**|22+|**I** = tensor(int64)<br> **T** = tensor(double), tensor(float), tensor(int8), tensor(uint8)|
@@ -263,8 +263,8 @@ Do not modify directly.*
 |MelWeightMatrix|*in* num_mel_bins:**T1**<br> *in* dft_length:**T1**<br> *in* sample_rate:**T1**<br> *in* lower_edge_hertz:**T2**<br> *in* upper_edge_hertz:**T2**<br> *out* output:**T3**|17+|**T1** = tensor(int32), tensor(int64)<br> **T2** = tensor(float)<br> **T3** = tensor(double), tensor(float), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
 |MemcpyFromHost|*in* X:**T**<br> *out* Y:**T**|1+|**T** = seq(tensor(bfloat16)), seq(tensor(bool)), seq(tensor(double)), seq(tensor(float)), seq(tensor(float16)), seq(tensor(float8e4m3fn)), seq(tensor(float8e4m3fnuz)), seq(tensor(float8e5m2)), seq(tensor(float8e5m2fnuz)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(int8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(uint8)), tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
 |MemcpyToHost|*in* X:**T**<br> *out* Y:**T**|1+|**T** = seq(tensor(bfloat16)), seq(tensor(bool)), seq(tensor(double)), seq(tensor(float)), seq(tensor(float16)), seq(tensor(float8e4m3fn)), seq(tensor(float8e4m3fnuz)), seq(tensor(float8e5m2)), seq(tensor(float8e5m2fnuz)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(int8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(uint8)), tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
-|Min|*in* data_0:**T**<br> *out* min:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
-|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
+|Min|*in* data_0:**T**<br> *out* min:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(int8), tensor(uint32), tensor(uint64), tensor(uint8)|
+|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(int8), tensor(uint32), tensor(uint64), tensor(uint8)|
 |||[8, 11]|**T** = tensor(double), tensor(float)|
 |||[6, 7]|**T** = tensor(float)|
 |Mod|*in* A:**T**<br> *in* B:**T**<br> *out* C:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
diff --git a/onnxruntime/core/providers/cpu/math/element_wise_ops.cc b/onnxruntime/core/providers/cpu/math/element_wise_ops.cc
index e9aed56310953..b940d71e1165e 100644
--- a/onnxruntime/core/providers/cpu/math/element_wise_ops.cc
+++ b/onnxruntime/core/providers/cpu/math/element_wise_ops.cc
@@ -20,16 +20,16 @@ namespace op_kernel_type_control {
 ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Max, 8, Input, 0,
                                         float, double);
 ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Max, 12, Input, 0,
-                                        float, double, MLFloat16, int8_t, int16_t, int32_t, uint32_t,
-                                        int64_t, uint8_t, uint16_t, uint64_t);
+                                        float, double, MLFloat16, int8_t, int32_t, uint32_t,
+                                        int64_t, uint8_t, uint64_t);
 ORT_SPECIFY_OP_KERNEL_ARG_REQUIRED_TYPES(kCpuExecutionProvider, kOnnxDomain, Max, 12, Input, 0,
                                          int32_t, int64_t);
 
 // Min
 ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Min, 8, Input, 0,
                                         float, double);
 ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Min, 12, Input, 0,
-                                        float, double, MLFloat16, int8_t, int16_t, int32_t, uint32_t,
-                                        int64_t, uint8_t, uint16_t, uint64_t);
+                                        float, double, MLFloat16, int8_t, int32_t, uint32_t,
+                                        int64_t, uint8_t, uint64_t);
 ORT_SPECIFY_OP_KERNEL_ARG_REQUIRED_TYPES(kCpuExecutionProvider, kOnnxDomain, Min, 12, Input, 0,
                                          int32_t, int64_t);
@@ -991,8 +991,8 @@ Status Min_8::Compute(OpKernelContext* context) const {
       return MinMaxMLFloat16(*this, context);
       break;
     default:
-      utils::MLTypeCallDispatcher<double, int8_t, int16_t, int32_t, uint32_t,
-                                  int64_t, uint8_t, uint16_t, uint64_t>
+      utils::MLTypeCallDispatcher<double, int8_t, int32_t, uint32_t,
+                                  int64_t, uint8_t, uint64_t>
           t_disp(dt_type);
       return t_disp.InvokeRet(*this, context);
   }
@@ -1058,8 +1058,8 @@ Status Max_8::Compute(OpKernelContext* context) const {
       return MinMaxMLFloat16(*this, context);
       break;
     default:
-      utils::MLTypeCallDispatcher<double, int8_t, int16_t, int32_t, uint32_t,
-                                  int64_t, uint8_t, uint16_t, uint64_t>
+      utils::MLTypeCallDispatcher<double, int8_t, int32_t, uint32_t,
+                                  int64_t, uint8_t, uint64_t>
           t_disp(dt_type);
       return t_disp.InvokeRet(*this, context);
   }
diff --git a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
index 0f82572bf43a2..cbb8ca43e8f06 100644
--- a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
+++ b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
@@ -3768,74 +3768,6 @@ TEST(MathOpTest, Min_12_UInt8) {
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
 }
 
-TEST(MathOpTest, Max_12_Int16) {
-  OpTester test("Max", 12);
-  test.AddInput<int16_t>("data_0", {1, 3},
-                         {1, 2, 3});
-  test.AddInput<int16_t>("data_2", {3, 3},
-                         {10, 20, 30,
-                          40, 50, 60,
-                          70, 80, 90});
-  test.AddInput<int16_t>("data_1", {3, 1},
-                         {-1, -2, 300});
-  test.AddOutput<int16_t>("max", {3, 3},
-                          {10, 20, 30,
-                           40, 50, 60,
-                           300, 300, 300});
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
-}
-
-TEST(MathOpTest, Max_12_UInt16) {
-  OpTester test("Max", 12);
-  test.AddInput<uint16_t>("data_0", {1, 3},
-                          {1, 20, 30});
-  test.AddInput<uint16_t>("data_2", {3, 3},
-                          {10, 20, 30,
-                           40, 50, 60,
-                           70, 80, 90});
-  test.AddInput<uint16_t>("data_1", {3, 1},
-                          {100, 20, 30});
-  test.AddOutput<uint16_t>("max", {3, 3},
-                           {100, 100, 100,
-                            40, 50, 60,
-                            70, 80, 90});
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
-}
-
-TEST(MathOpTest, Min_12_Int16) {
-  OpTester test("Min", 12);
-  test.AddInput<int16_t>("data_0", {1, 3},
-                         {1, 2, 3});
-  test.AddInput<int16_t>("data_2", {3, 3},
-                         {10, 20, 30,
-                          40, 50, 60,
-                          -70, -80, -90});
-  test.AddInput<int16_t>("data_1", {3, 1},
-                         {-1, 20, 300});
-  test.AddOutput<int16_t>("min", {3, 3},
-                          {-1, -1, -1,
-                           1, 2, 3,
-                           -70, -80, -90});
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
-}
-
-TEST(MathOpTest, Min_12_UInt16) {
-  OpTester test("Min", 12);
-  test.AddInput<uint16_t>("data_0", {1, 3},
-                          {1, 20, 30});
-  test.AddInput<uint16_t>("data_2", {3, 3},
-                          {10, 20, 30,
-                           40, 50, 60,
-                           70, 80, 90});
-  test.AddInput<uint16_t>("data_1", {3, 1},
-                          {1, 20, 30});
-  test.AddOutput<uint16_t>("min", {3, 3},
-                           {1, 1, 1,
-                            1, 20, 20,
-                            1, 20, 30});
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
-}
-
 TEST(MathOpTest, Mean_6) {
   OpTester test("Mean", 6);
   std::vector<int64_t> dims{3, 3};
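For intuition, the MLTypeCallDispatcher lines both patches edit map a runtime dtype tag onto one instantiation of a templated functor, which is why enabling or disabling a dtype is just editing the dispatcher's type list. A simplified stand-in (the names Dispatcher, TagOf, and PrintSize are hypothetical; this is NOT ORT's real utils::MLTypeCallDispatcher) sketches the pattern:

```cpp
// Simplified tag-dispatch sketch: each type in the Dispatcher's list is a
// "registered" dtype; Invoke runs the functor instantiation whose tag
// matches at runtime and reports whether any type matched.
#include <cstdint>
#include <cstdio>
#include <stdexcept>

enum class DType { kInt8, kUInt8, kInt32 };

template <typename T> constexpr DType TagOf();
template <> constexpr DType TagOf<int8_t>() { return DType::kInt8; }
template <> constexpr DType TagOf<uint8_t>() { return DType::kUInt8; }
template <> constexpr DType TagOf<int32_t>() { return DType::kInt32; }

template <typename... Types>
struct Dispatcher {
  template <template <typename> class Fn, typename... Args>
  static bool Invoke(DType tag, Args&&... args) {
    // Fold over the enabled types; short-circuits on the first match.
    return (... || (tag == TagOf<Types>() && (Fn<Types>{}(args...), true)));
  }
};

template <typename T>
struct PrintSize {
  void operator()(const char* name) const {
    std::printf("%s dispatched to sizeof(T)=%zu\n", name, sizeof(T));
  }
};

int main() {
  using D = Dispatcher<int8_t, uint8_t, int32_t>;  // the "registered" types
  if (!D::Invoke<PrintSize>(DType::kUInt8, "uint8"))
    throw std::runtime_error("unsupported dtype");
}
```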