chore: bug fixes #3065
base: main
Conversation
There are some changes that do not conform to Python style guidelines:
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/pool.py 2024-08-17 21:38:03.174815+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/pool.py 2024-08-17 21:38:24.979987+00:00
@@ -28,11 +28,11 @@
padding: Union[int, Sequence[int]] = 0,
ceil_mode: bool = False,
count_include_pad: bool = True,
divisor_override: Optional[int] = None,
) -> TRTTensor:
-
+
padding_mode = trt.PaddingMode.EXPLICIT_ROUND_DOWN
if ceil_mode:
padding_mode = trt.PaddingMode.EXPLICIT_ROUND_UP
if divisor_override is not None:
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/lowering/passes/replace_full_like_with_full.py 2024-08-17 21:38:03.178815+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/lowering/passes/replace_full_like_with_full.py 2024-08-17 21:38:25.214966+00:00
@@ -22,11 +22,11 @@
# Extract arguments from full_like
input_tensor = node.args[0]
fill_value = node.args[1]
shape = list(input_tensor.meta["tensor_meta"].shape)
-
+
new_kwargs = {}
for key, val in node.kwargs.items():
if key != "memory_format":
new_kwargs[key] = val
# Replace full_like with full, using the shape as a list
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py 2024-08-17 21:38:03.174815+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py 2024-08-17 21:38:27.095553+00:00
@@ -2699,11 +2699,11 @@
dilation = args_bounds_check(pool_node.args, 4, 1)
ceil_mode = args_bounds_check(pool_node.args, 5, False)
if not isinstance(dilation, (list, tuple)):
dilation = (dilation,)
-
+
for dil in dilation:
if dil != 1:
_LOGGER.debug("Currently we don't support dilation > 1 at any dimension.")
return False
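For readers following the diff: this dilation check lives in a converter capability validator, which returns False so the op falls back to PyTorch execution instead of being converted to TensorRT. A minimal, self-contained sketch of that pattern follows; the `args_bounds_check` signature and the validator wiring are assumptions inferred from the diff, not the repo's exact code.

```python
import logging
from typing import Any, Sequence

_LOGGER = logging.getLogger(__name__)


def args_bounds_check(args: Sequence[Any], i: int, replacement: Any = None) -> Any:
    # Hypothetical helper mirroring the one referenced in the diff: return
    # args[i] if the argument was supplied, otherwise fall back to the default.
    return args[i] if len(args) > i else replacement


def max_pool_param_validator(pool_node: Any) -> bool:
    # Capability-validator pattern: inspect the node's args and reject
    # configurations the converter cannot handle. Returning False lets the
    # op run in PyTorch rather than being lowered to TensorRT.
    dilation = args_bounds_check(pool_node.args, 4, 1)
    if not isinstance(dilation, (list, tuple)):
        dilation = (dilation,)

    for dil in dilation:
        if dil != 1:
            _LOGGER.debug("Currently we don't support dilation > 1 at any dimension.")
            return False

    return True
```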
There are some changes that do not conform to Python style guidelines:
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/full.py 2024-08-18 03:35:59.991813+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/full.py 2024-08-18 03:36:20.489931+00:00
@@ -20,11 +20,11 @@
target: Union[Target, str],
source_ir: Optional[SourceIR],
name: str,
shape: Union[List[int], TRTTensor],
fill_value: Union[int, float, bool],
- dtype: Union[torch.dtype, trt.DataType]
+ dtype: Union[torch.dtype, trt.DataType],
) -> TRTTensor:
output_dtype = _enums.dtype._from(dtype)
if isinstance(shape, List):
# in static shape scenario, shape is a list of int
if all(isinstance(dim, int) for dim in shape):
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/lowering/passes/replace_full_like_with_full.py 2024-08-18 03:35:59.995813+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/lowering/passes/replace_full_like_with_full.py 2024-08-18 03:36:21.060814+00:00
@@ -34,11 +34,11 @@
input_dtype = input_tensor.meta["tensor_meta"].dtype
input_device = input_tensor.meta["tensor_meta"].device
shape = list(input_tensor.meta["tensor_meta"].shape)
- # There's no memory format argument for torch.full.
+ # There's no memory format argument for torch.full.
# Set the input_device and dtype correspondingly.
new_kwargs = {}
for key, val in node.kwargs.items():
if key != "memory_format":
new_kwargs[key] = val
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py 2024-08-18 03:35:59.991813+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py 2024-08-18 03:36:22.843055+00:00
@@ -2702,11 +2702,11 @@
for dil in dilation:
if dil != 1:
_LOGGER.debug("Currently we don't support dilation > 1 at any dimension.")
return False
-
+
return True
# Note: MaxPool1d uses max_pool2d as it converts to 2D first.
@dynamo_tensorrt_converter(
@@ -3856,7 +3856,7 @@
target,
SourceIR.ATEN,
name,
shape=args[0],
fill_value=args[1],
- dtype=kwargs["dtype"]
- )
+ dtype=kwargs["dtype"],
+ )
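The `replace_full_like_with_full.py` hunks above are whitespace-only, but for context: a lowering pass of this shape rewrites `aten.full_like` into `aten.full`, reading the output shape from the input's traced `tensor_meta` and dropping the `memory_format` kwarg, which `torch.full` does not accept. Below is a minimal sketch under the assumption that shape propagation has populated `node.meta["tensor_meta"]`; names and wiring are illustrative, not the repo's exact implementation.

```python
import torch


def replace_full_like_with_full(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
    # Iterate over a copy since we mutate the graph while walking it.
    for node in list(gm.graph.nodes):
        if node.op == "call_function" and node.target == torch.ops.aten.full_like.default:
            input_tensor = node.args[0]
            fill_value = node.args[1]
            # Static shape read from the traced metadata of the input tensor.
            shape = list(input_tensor.meta["tensor_meta"].shape)

            # There's no memory_format argument for torch.full, so drop it.
            new_kwargs = {k: v for k, v in node.kwargs.items() if k != "memory_format"}

            with gm.graph.inserting_after(node):
                full_node = gm.graph.call_function(
                    torch.ops.aten.full.default,
                    args=(shape, fill_value),
                    kwargs=new_kwargs,
                )
            node.replace_all_uses_with(full_node)
            gm.graph.erase_node(node)

    gm.graph.eliminate_dead_code()
    gm.recompile()
    return gm
```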
There are some changes that do not conform to Python style guidelines:
--- /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/conversion/test_pool_aten.py 2024-08-18 06:00:03.175977+00:00
+++ /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/conversion/test_pool_aten.py 2024-08-18 06:00:28.087506+00:00
@@ -73,11 +73,13 @@
count_include_pad,
divisor_override,
)
inputs = [torch.randn(1, 3, 32, 32)]
- self.run_test(TestModule(), inputs, rtol=5e-03, atol=5e-03, use_dynamo_tracer=True)
+ self.run_test(
+ TestModule(), inputs, rtol=5e-03, atol=5e-03, use_dynamo_tracer=True
+ )
@parameterized.expand(
[
(3, 1, 0),
(3, 1, 1),
@@ -181,11 +183,11 @@
(3, 3, 3, 3),
torch.float,
(3, 3),
(1, 1),
(1, 1),
- True
+ True,
),
]
)
def test_dynamic_shape_pool2d(
self,
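The test hunks reformat calls to the repo's `run_test` harness and a `parameterized.expand` table. For anyone unfamiliar with the pattern, here is a self-contained sketch using plain `unittest` and the `parameterized` package; the pooling parameters are illustrative, and the real suite compares TensorRT output against PyTorch within the rtol/atol shown above rather than asserting on shape.

```python
import unittest

import torch
from parameterized import parameterized


class TestPool2d(unittest.TestCase):
    @parameterized.expand(
        [
            # (kernel_size, stride, padding) — each tuple runs as its own test case.
            (3, 1, 0),
            (3, 1, 1),
        ]
    )
    def test_avg_pool2d(self, kernel_size, stride, padding):
        inputs = torch.randn(1, 3, 32, 32)
        out = torch.nn.functional.avg_pool2d(inputs, kernel_size, stride, padding)
        self.assertEqual(out.dim(), 4)


if __name__ == "__main__":
    unittest.main()
```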