80 changes: 80 additions & 0 deletions src/frontends/pytorch/src/op/histc.cpp
@@ -0,0 +1,80 @@
// Copyright (C) 2018-2026 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/frontend/pytorch/node_context.hpp"
#include "openvino/op/broadcast.hpp"
#include "openvino/op/clamp.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/divide.hpp"
#include "openvino/op/floor.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/scatter_elements_update.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/op/subtract.hpp"
#include "utils.hpp"

namespace ov {
namespace frontend {
namespace pytorch {
namespace op {

using namespace ov::op;

OutputVector translate_histc(const NodeContext& context) {
num_inputs_check(context, 1, 4);
auto input = context.get_input(0);
int64_t bins = context.const_input<int64_t>(1);
double min_val = context.const_input<double>(2);
double max_val = context.const_input<double>(3);

// Flatten the input and convert it to f64
auto flat_shape = v0::Constant::create(element::i64, Shape{1}, {-1});
auto flat_input = context.mark_node(std::make_shared<v1::Reshape>(input, flat_shape, false));
auto f64_input = context.mark_node(std::make_shared<v0::Convert>(flat_input, element::f64));

// Calculate bin width: (max - min) / bins
auto min_const = v0::Constant::create(element::f64, Shape{}, {min_val});
auto max_const = v0::Constant::create(element::f64, Shape{}, {max_val});
auto bins_const = v0::Constant::create(element::f64, Shape{}, {static_cast<double>(bins)});
auto range = context.mark_node(std::make_shared<v1::Subtract>(max_const, min_const));
auto bin_width = context.mark_node(std::make_shared<v1::Divide>(range, bins_const));

// Calculate bin indices: floor((x - min) / bin_width)
auto shift = context.mark_node(std::make_shared<v1::Subtract>(f64_input, min_const));
auto normalized = context.mark_node(std::make_shared<v1::Divide>(shift, bin_width));
auto floored = context.mark_node(std::make_shared<v0::Floor>(normalized));
auto bin_idxs = context.mark_node(std::make_shared<v0::Convert>(floored, element::i64));

// Clamp indices into the valid range [0, bins - 1] so that values equal to max land in the last bin.
// Note: out-of-range values are also clamped into the edge bins, whereas PyTorch's histc ignores
// them; inputs are therefore assumed to lie within [min, max].
auto bin_idx_range = context.mark_node(
std::make_shared<v0::Clamp>(bin_idxs, 0.0, static_cast<double>(bins - 1)));

// Init histogram with zeros
auto zero_const = v0::Constant::create(element::f64, Shape{}, {0.0});
auto bins_shape = v0::Constant::create(element::i64, Shape{1}, {bins});
auto histogram = context.mark_node(std::make_shared<v3::Broadcast>(zero_const, bins_shape));

// Create ones for counting elements
auto one_const = v0::Constant::create(element::f64, Shape{}, {1.0});
auto input_shape = context.mark_node(std::make_shared<v3::ShapeOf>(flat_input, element::i64));
auto histogram_ones = context.mark_node(std::make_shared<v3::Broadcast>(one_const, input_shape));

// Count elements per bin using ScatterElementsUpdate with SUM reduction
auto axis = v0::Constant::create(element::i64, Shape{}, {0});
auto histogram_res = context.mark_node(std::make_shared<v12::ScatterElementsUpdate>(
histogram, bin_idx_range, histogram_ones, axis,
v12::ScatterElementsUpdate::Reduction::SUM));

// Convert the result back to the input element type; fall back to f32 if the type is dynamic
auto dtype_input = input.get_element_type();
auto dtype_res = dtype_input.is_static() ? dtype_input : element::f32;

return {context.mark_node(std::make_shared<v0::Convert>(histogram_res, dtype_res))};
}

} // namespace op
} // namespace pytorch
} // namespace frontend
} // namespace ov
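
For reference, here is a minimal NumPy sketch (a hypothetical helper, not part of this change) that mirrors the decomposition step by step: flatten, compute floor((x - min) / bin_width), clamp into [0, bins - 1], then scatter-add ones into a zero-initialized histogram.

import numpy as np

def histc_decomposition(x, bins, min_val, max_val):
    # Reshape + Convert: flatten and promote to f64
    flat = x.reshape(-1).astype(np.float64)
    # Subtract + Divide: bin width = (max - min) / bins
    bin_width = (max_val - min_val) / bins
    # Subtract + Divide + Floor: bin index per element
    idx = np.floor((flat - min_val) / bin_width)
    # Convert + Clamp: keep indices in [0, bins - 1]
    idx = np.clip(idx.astype(np.int64), 0, bins - 1)
    # Broadcast + ScatterElementsUpdate(SUM): count elements per bin
    hist = np.zeros(bins, dtype=np.float64)
    np.add.at(hist, idx, 1.0)
    # Convert: cast the histogram back to the input dtype
    return hist.astype(x.dtype)

x = np.random.uniform(0, 10, size=(5, 5)).astype(np.float32)
print(histc_decomposition(x, bins=10, min_val=0.0, max_val=10.0))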
2 changes: 2 additions & 0 deletions src/frontends/pytorch/src/op_table.cpp
@@ -127,6 +127,7 @@ OP_CONVERTER(translate_group_norm);
OP_CONVERTER(translate_gru);
OP_CONVERTER(translate_hann_window);
OP_CONVERTER(translate_hardtanh);
OP_CONVERTER(translate_histc);
OP_CONVERTER(translate_hstack);
OP_CONVERTER(translate_if);
OP_CONVERTER(translate_cond_fx);
@@ -554,6 +555,7 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
{"aten::hardsigmoid", op::quantizable_op<op::translate_1to1_match_1_inputs<opset10::HSigmoid>>},
{"aten::hardswish", op::quantizable_op<op::translate_1to1_match_1_inputs<opset10::HSwish>>},
{"aten::hardtanh", op::quantizable_op<op::translate_hardtanh>},
{"aten::histc", op::translate_histc},
{"aten::hstack", op::translate_hstack},
{"aten::im2col", op::translate_im2col},
{"aten::imag", common_translators::translate_imag},
74 changes: 74 additions & 0 deletions tests/layer_tests/pytorch_tests/test_histc.py
@@ -0,0 +1,74 @@
# Copyright (C) 2018-2026 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np
import pytest
import torch

from pytorch_layer_test_class import PytorchLayerTest


class TestHistc(PytorchLayerTest):

def _prepare_input(self, input_shape, input_dtype, min_val, max_val):
if min_val < max_val:
data = np.random.uniform(min_val, max_val, input_shape).astype(input_dtype)
else:
data = np.random.randn(*input_shape).astype(input_dtype)
return (data,)

def create_model(self, bins, min_val, max_val):
class aten_histc(torch.nn.Module):
def __init__(self, bins, min_val, max_val):
super().__init__()
self.bins = bins
self.min_val = min_val
self.max_val = max_val

def forward(self, input):
return torch.histc(input, bins=self.bins, min=self.min_val, max=self.max_val)

ref_net = None
return aten_histc(bins, min_val, max_val), ref_net, "aten::histc"

@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.parametrize("input_shape", [[10], [5, 5], [2, 3, 4]])
@pytest.mark.parametrize("input_dtype", ["float32"])
@pytest.mark.parametrize("bins", [10, 50, 100])
@pytest.mark.parametrize("min_max", [(0, 10), (-5, 5), (0, 100)])
def test_histc(self, input_shape, input_dtype, bins, min_max, ie_device, precision, ir_version):
min_val, max_val = min_max
self._test(*self.create_model(bins, min_val, max_val), ie_device, precision, ir_version,
kwargs_to_prepare_input={
"input_shape": input_shape,
"input_dtype": input_dtype,
"min_val": min_val,
"max_val": max_val
})


class TestHistcDefaultParams(PytorchLayerTest):

def _prepare_input(self, input_shape, input_dtype):
return (np.random.uniform(0, 100, input_shape).astype(input_dtype),)

def create_model(self):
class aten_histc_default(torch.nn.Module):
def forward(self, input):
# bins=100 matches PyTorch's default; min and max are passed explicitly to cover the generated [0, 100) range
return torch.histc(input, bins=100, min=0, max=100)

ref_net = None
return aten_histc_default(), ref_net, "aten::histc"

@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.parametrize("input_shape", [[100], [10, 10]])
@pytest.mark.parametrize("input_dtype", ["float32"])
def test_histc_default(self, input_shape, input_dtype, ie_device, precision, ir_version):
self._test(*self.create_model(), ie_device, precision, ir_version,
kwargs_to_prepare_input={
"input_shape": input_shape,
"input_dtype": input_dtype
})
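
For completeness, a small PyTorch snippet (a sketch, not part of the test suite) showing the reference semantics the tests exercise. torch.histc ignores elements outside [min, max], which is why the test data above is drawn from within the requested range:

import torch

x = torch.tensor([-1.0, 0.5, 1.5, 2.5, 9.5, 11.0])
# -1.0 and 11.0 fall outside [0, 10] and are ignored by torch.histc;
# the clamp-based translation above would instead count them in the edge bins.
print(torch.histc(x, bins=10, min=0.0, max=10.0))
# -> tensor([1., 1., 1., 0., 0., 0., 0., 0., 0., 1.])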