diff --git a/ivy/functional/frontends/torch/tensor.py b/ivy/functional/frontends/torch/tensor.py
index 7798f5e4c59a0..e80786ca4e0e3 100644
--- a/ivy/functional/frontends/torch/tensor.py
+++ b/ivy/functional/frontends/torch/tensor.py
@@ -774,6 +774,10 @@ def is_cuda(self):
     def is_meta(self):
         return "meta" in ivy.dev(self.ivy_array)
 
+    @with_unsupported_dtypes({"2.1.0 and below": ("uint16", "bool")}, "torch")
+    def positive(self):
+        return torch_frontend.positive(self)
+
     @with_unsupported_dtypes({"2.1.0 and below": ("bfloat16",)}, "torch")
     def pow(self, exponent):
         return torch_frontend.pow(self, exponent)
diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
index bb2c45ac04085..7617ee64bee35 100644
--- a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
+++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
@@ -10370,6 +10370,44 @@ def test_torch_permute(
     )
 
 
+# positive
+@handle_frontend_method(
+    class_tree=CLASS_TREE,
+    init_tree="torch.tensor",
+    method_name="positive",
+    dtype_and_x=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("float"),
+        min_value=-1e04,
+        max_value=1e04,
+        allow_inf=False,
+    ),
+)
+def test_torch_tensor_positive(
+    dtype_and_x,
+    frontend_method_data,
+    init_flags,
+    method_flags,
+    frontend,
+    on_device,
+    backend_fw,
+):
+    input_dtype, x = dtype_and_x
+    helpers.test_frontend_method(
+        init_input_dtypes=input_dtype,
+        backend_to_test=backend_fw,
+        init_all_as_kwargs_np={
+            "data": x[0],
+        },
+        method_input_dtypes=input_dtype,
+        method_all_as_kwargs_np={},
+        frontend_method_data=frontend_method_data,
+        init_flags=init_flags,
+        method_flags=method_flags,
+        frontend=frontend,
+        on_device=on_device,
+    )
+
+
 # pow
 @handle_frontend_method(
     class_tree=CLASS_TREE,
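
For reference, a minimal usage sketch of the method this diff adds. The sketch assumes the numpy backend purely for illustration, and uses the frontend's own tensor constructor; `positive` mirrors `torch.Tensor.positive`, an elementwise unary plus that is numerically a no-op for numeric dtypes and rejected for bool, hence the unsupported-dtypes decorator above:

    import ivy
    import ivy.functional.frontends.torch as torch_frontend

    # Backend choice is an assumption for this sketch; any supported backend works.
    ivy.set_backend("numpy")

    t = torch_frontend.tensor([1.0, -2.0, 3.0])
    out = t.positive()  # delegates to torch_frontend.positive(self), as in the diff
    print(out)  # expected to print a frontend tensor containing [1., -2., 3.]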