diff --git a/src/frontends/pytorch/src/op/histc.cpp b/src/frontends/pytorch/src/op/histc.cpp
new file mode 100644
index 00000000000000..eb5bd91cab34ae
--- /dev/null
+++ b/src/frontends/pytorch/src/op/histc.cpp
@@ -0,0 +1,80 @@
+// Copyright (C) 2018-2026 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/frontend/pytorch/node_context.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/clamp.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/divide.hpp"
+#include "openvino/op/floor.hpp"
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/scatter_elements_update.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/subtract.hpp"
+#include "utils.hpp"
+
+namespace ov {
+namespace frontend {
+namespace pytorch {
+namespace op {
+
+using namespace ov::op;
+
+OutputVector translate_histc(const NodeContext& context) {
+    num_inputs_check(context, 1, 4);
+    auto input = context.get_input(0);
+    int64_t bins = context.const_input<int64_t>(1);
+    double min_val = context.const_input<double>(2);
+    double max_val = context.const_input<double>(3);
+
+    // Flatten the input and convert it to f64
+    auto flat_shape = v0::Constant::create(element::i64, Shape{1}, {-1});
+    auto flat_input = context.mark_node(std::make_shared<v1::Reshape>(input, flat_shape, false));
+    auto f64_input = context.mark_node(std::make_shared<v0::Convert>(flat_input, element::f64));
+
+    // Calculate the bin width: (max - min) / bins
+    auto min_const = v0::Constant::create(element::f64, Shape{}, {min_val});
+    auto max_const = v0::Constant::create(element::f64, Shape{}, {max_val});
+    auto bins_const = v0::Constant::create(element::f64, Shape{}, {static_cast<double>(bins)});
+    auto range = context.mark_node(std::make_shared<v1::Subtract>(max_const, min_const));
+    auto bin_width = context.mark_node(std::make_shared<v1::Divide>(range, bins_const));
+
+    // Calculate bin indices: floor((x - min) / bin_width)
+    auto shift = context.mark_node(std::make_shared<v1::Subtract>(f64_input, min_const));
+    auto normalized = context.mark_node(std::make_shared<v1::Divide>(shift, bin_width));
+    auto floored = context.mark_node(std::make_shared<v0::Floor>(normalized));
+    auto bin_idxs = context.mark_node(std::make_shared<v0::Convert>(floored, element::i64));
+
+    // Clamp indices into the valid range [0, bins - 1]
+    auto bin_idx_range = context.mark_node(
+        std::make_shared<v0::Clamp>(bin_idxs, 0.0, static_cast<double>(bins - 1)));
+
+    // Initialize the histogram with zeros
+    auto zero_const = v0::Constant::create(element::f64, Shape{}, {0.0});
+    auto bins_shape = v0::Constant::create(element::i64, Shape{1}, {bins});
+    auto histogram = context.mark_node(std::make_shared<v3::Broadcast>(zero_const, bins_shape));
+
+    // Create ones for counting elements
+    auto one_const = v0::Constant::create(element::f64, Shape{}, {1.0});
+    auto input_shape = context.mark_node(std::make_shared<v3::ShapeOf>(flat_input, element::i64));
+    auto histogram_ones = context.mark_node(std::make_shared<v3::Broadcast>(one_const, input_shape));
+
+    // Count elements per bin using ScatterElementsUpdate with SUM reduction
+    auto axis = v0::Constant::create(element::i64, Shape{}, {0});
+    auto histogram_res = context.mark_node(std::make_shared<v12::ScatterElementsUpdate>(
+        histogram, bin_idx_range, histogram_ones, axis,
+        v12::ScatterElementsUpdate::Reduction::SUM));
+
+    // Cast the result back to the input type; fall back to f32 for dynamic types
+    auto dtype_input = input.get_element_type();
+    auto dtype_res = dtype_input.is_static() ? dtype_input : element::f32;
+
+    return {context.mark_node(std::make_shared<v0::Convert>(histogram_res, dtype_res))};
+}
+
+}  // namespace op
+}  // namespace pytorch
+}  // namespace frontend
+}  // namespace ov
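The converter above lowers aten::histc to flatten → convert to f64 → floor((x - min) / bin_width) → clamp → scatter-add of ones. A minimal numpy sketch of the same decomposition (the helper name histc_decomposition is ours, not part of the PR) can help sanity-check the lowering against torch.histc:

import numpy as np

def histc_decomposition(x, bins, min_val, max_val):
    # Flatten and widen to f64, mirroring the Reshape + Convert nodes
    flat = x.reshape(-1).astype(np.float64)
    # Bin width: (max - min) / bins
    bin_width = (max_val - min_val) / bins
    # floor((x - min) / bin_width), clamped so that x == max lands in the last bin
    idx = np.clip(np.floor((flat - min_val) / bin_width), 0, bins - 1).astype(np.int64)
    # Scatter-add of ones, i.e. ScatterElementsUpdate with SUM reduction
    hist = np.zeros(bins, dtype=np.float64)
    np.add.at(hist, idx, 1.0)
    return hist.astype(x.dtype)

One caveat: clamping counts out-of-range elements into the edge bins, whereas torch.histc ignores them; the tests below only generate data inside [min, max], where the two agree.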
diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp
index 89bb929581a20e..54ac5243a0b9de 100644
--- a/src/frontends/pytorch/src/op_table.cpp
+++ b/src/frontends/pytorch/src/op_table.cpp
@@ -127,6 +127,7 @@ OP_CONVERTER(translate_group_norm);
 OP_CONVERTER(translate_gru);
 OP_CONVERTER(translate_hann_window);
 OP_CONVERTER(translate_hardtanh);
+OP_CONVERTER(translate_histc);
 OP_CONVERTER(translate_hstack);
 OP_CONVERTER(translate_if);
 OP_CONVERTER(translate_cond_fx);
@@ -554,6 +555,7 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
         {"aten::hardsigmoid", op::quantizable_op<op::translate_1to1_match_1_inputs_with_fp32_type_alignment<v5::HSigmoid>>},
         {"aten::hardswish", op::quantizable_op<op::translate_1to1_match_1_inputs_with_fp32_type_alignment<v4::HSwish>>},
         {"aten::hardtanh", op::quantizable_op<op::translate_hardtanh>},
+        {"aten::histc", op::translate_histc},
         {"aten::hstack", op::translate_hstack},
         {"aten::im2col", op::translate_im2col},
         {"aten::imag", common_translators::translate_imag},
diff --git a/tests/layer_tests/pytorch_tests/test_histc.py b/tests/layer_tests/pytorch_tests/test_histc.py
new file mode 100644
index 00000000000000..582e26d790cb02
--- /dev/null
+++ b/tests/layer_tests/pytorch_tests/test_histc.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2018-2026 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import numpy as np
+import pytest
+import torch
+
+from pytorch_layer_test_class import PytorchLayerTest
+
+
+class TestHistc(PytorchLayerTest):
+
+    def _prepare_input(self, input_shape, input_dtype, min_val, max_val):
+        if min_val < max_val:
+            data = np.random.uniform(min_val, max_val, input_shape).astype(input_dtype)
+        else:
+            data = np.random.randn(*input_shape).astype(input_dtype)
+        return (data,)
+
+    def create_model(self, bins, min_val, max_val):
+        class aten_histc(torch.nn.Module):
+            def __init__(self, bins, min_val, max_val):
+                super().__init__()
+                self.bins = bins
+                self.min_val = min_val
+                self.max_val = max_val
+
+            def forward(self, input):
+                return torch.histc(input, bins=self.bins, min=self.min_val, max=self.max_val)
+
+        ref_net = None
+        return aten_histc(bins, min_val, max_val), ref_net, "aten::histc"
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.parametrize("input_shape", [[10], [5, 5], [2, 3, 4]])
+    @pytest.mark.parametrize("input_dtype", ["float32"])
+    @pytest.mark.parametrize("bins", [10, 50, 100])
+    @pytest.mark.parametrize("min_max", [(0, 10), (-5, 5), (0, 100)])
+    def test_histc(self, input_shape, input_dtype, bins, min_max, ie_device, precision, ir_version):
+        min_val, max_val = min_max
+        self._test(*self.create_model(bins, min_val, max_val), ie_device, precision, ir_version,
+                   kwargs_to_prepare_input={
+                       "input_shape": input_shape,
+                       "input_dtype": input_dtype,
+                       "min_val": min_val,
+                       "max_val": max_val
+                   })
+
+
+class TestHistcDefaultParams(PytorchLayerTest):
+
+    def _prepare_input(self, input_shape, input_dtype):
+        return (np.random.uniform(0, 100, input_shape).astype(input_dtype),)
+
+    def create_model(self):
+        class aten_histc_default(torch.nn.Module):
+            def forward(self, input):
+                # bins=100 matches the torch.histc default; min/max are passed explicitly
+                return torch.histc(input, bins=100, min=0, max=100)
+
+        ref_net = None
+        return aten_histc_default(), ref_net, "aten::histc"
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.parametrize("input_shape", [[100], [10, 10]])
+    @pytest.mark.parametrize("input_dtype", ["float32"])
+    def test_histc_default(self, input_shape, input_dtype, ie_device, precision, ir_version):
+        self._test(*self.create_model(), ie_device, precision, ir_version,
+                   kwargs_to_prepare_input={
+                       "input_shape": input_shape,
+                       "input_dtype": input_dtype
+                   })
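As a quick local cross-check of the sketch above (not part of the PR's test suite), the decomposition should reproduce torch.histc on in-range data; values landing exactly on a bin boundary could in principle differ due to f32-vs-f64 rounding:

import numpy as np
import torch

data = np.random.uniform(0, 10, (5, 5)).astype(np.float32)
expected = torch.histc(torch.from_numpy(data), bins=10, min=0, max=10).numpy()
actual = histc_decomposition(data, bins=10, min_val=0.0, max_val=10.0)
print(np.array_equal(expected, actual))  # expected: True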