diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py
index 93aa71b88c17f0..78794d721f4b57 100644
--- a/python/paddle/nn/__init__.py
+++ b/python/paddle/nn/__init__.py
@@ -213,7 +213,11 @@
 Conv1d = Conv1D
 Conv2d = Conv2D
 Conv3d = Conv3D
-
+AdaptiveMaxPool1d = AdaptiveMaxPool1D
+AdaptiveMaxPool2d = AdaptiveMaxPool2D
+AdaptiveMaxPool3d = AdaptiveMaxPool3D
+LPPool2d = LPPool2D
+LPPool1d = LPPool1D
 
 __all__ = [
     'BatchNorm',
diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py
index 8a6cbc00767215..e8348dea06846f 100644
--- a/python/paddle/nn/layer/activation.py
+++ b/python/paddle/nn/layer/activation.py
@@ -18,6 +18,7 @@
 from typing import TYPE_CHECKING, Literal
 
 from paddle.framework import get_default_dtype
+from paddle.utils.decorator_utils import param_one_alias
 
 from .. import functional as F
 from ..initializer import Constant
@@ -159,11 +160,13 @@ class GLU(Layer):
              [-1.05778778, -0.46985325]])
     """
 
+    @param_one_alias(["axis", "dim"])
     def __init__(self, axis: int = -1, name: str | None = None) -> None:
         super().__init__()
         self._axis = axis
         self._name = name
 
+    @param_one_alias(["x", "input"])
     def forward(self, x: Tensor) -> Tensor:
         return F.glu(x, self._axis, self._name)
 
@@ -171,6 +174,14 @@ def extra_repr(self) -> str:
         name_str = f', name={self._name}' if self._name else ''
         return f'axis={self._axis}{name_str}'
 
+    @property
+    def dim(self) -> int:
+        return self._axis
+
+    @dim.setter
+    def dim(self, value: int) -> None:
+        self._axis = value
+
 
 class GELU(Layer):
     r"""
@@ -291,11 +302,13 @@ class Hardshrink(Layer):
            [-1.        ,  0.        ,  2.50000000])
     """
 
+    @param_one_alias(["threshold", "lambd"])
     def __init__(self, threshold: float = 0.5, name: str | None = None) -> None:
         super().__init__()
         self._threshold = threshold
         self._name = name
 
+    @param_one_alias(["x", "input"])
     def forward(self, x: Tensor) -> Tensor:
         return F.hardshrink(x, self._threshold, self._name)
 
@@ -303,6 +316,14 @@ def extra_repr(self) -> str:
         name_str = f', name={self._name}' if self._name else ''
         return f'threshold={self._threshold}{name_str}'
 
+    @property
+    def lambd(self) -> float:
+        return self._threshold
+
+    @lambd.setter
+    def lambd(self, value: float) -> None:
+        self._threshold = value
+
 
 class Hardswish(Layer):
     r"""
@@ -1011,11 +1032,13 @@ class Softshrink(Layer):
            [-0.39999998,  0.        ,  0.        ,  0.30000001])
     """
 
+    @param_one_alias(["threshold", "lambd"])
     def __init__(self, threshold: float = 0.5, name: str | None = None) -> None:
         super().__init__()
         self._threshold = threshold
         self._name = name
 
+    @param_one_alias(["x", "input"])
     def forward(self, x: Tensor) -> Tensor:
         return F.softshrink(x, self._threshold, self._name)
 
@@ -1023,6 +1046,14 @@ def extra_repr(self) -> str:
         name_str = f', name={self._name}' if self._name else ''
         return f'threshold={self._threshold}{name_str}'
 
+    @property
+    def lambd(self) -> float:
+        return self._threshold
+
+    @lambd.setter
+    def lambd(self, value: float) -> None:
+        self._threshold = value
+
 
 class Softsign(Layer):
     r"""
diff --git a/python/paddle/nn/layer/common.py b/python/paddle/nn/layer/common.py
index e7a1149edc934e..3f1932e022c3e9 100644
--- a/python/paddle/nn/layer/common.py
+++ b/python/paddle/nn/layer/common.py
@@ -18,7 +18,7 @@
 
 import paddle
 from paddle import in_dynamic_mode
-from paddle.utils.decorator_utils import param_one_alias
+from paddle.utils.decorator_utils import ParamAliasDecorator, param_one_alias
 
 from .. import functional as F
 from .layers import Layer
@@ -2247,6 +2247,7 @@ class CosineSimilarity(Layer):
            [0.65079135, 0.98058069, 1.        ])
     """
 
+    @param_one_alias(["axis", "dim"])
     def __init__(self, axis: int = 1, eps: float = 1e-8) -> None:
         super().__init__()
         self._axis = axis
@@ -2258,6 +2259,14 @@ def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
 
     def extra_repr(self) -> str:
         return 'axis={_axis}, eps={_eps}'.format(**self.__dict__)
 
+    @property
+    def dim(self) -> int:
+        return self._axis
+
+    @dim.setter
+    def dim(self, value: int) -> None:
+        self._axis = value
+
 
 class Embedding(Layer):
     r"""
@@ -2642,6 +2651,15 @@ class Fold(Layer):
     strides: Size2
     name: str | None
 
+    @ParamAliasDecorator(
+        {
+            "output_sizes": ["output_size"],
+            "kernel_sizes": ["kernel_size"],
+            "strides": ["stride"],
+            "paddings": ["padding"],
+            "dilations": ["dilation"],
+        }
+    )
     def __init__(
         self,
         output_sizes: Size2,
@@ -2675,6 +2693,46 @@ def extra_repr(self) -> str:
         name_str = f', name={self.name}' if self.name else ''
         return f'kernel_size={self.kernel_sizes}, dilation={self.dilations}, padding={self.paddings}, stride={self.strides}{name_str}'
 
+    @property
+    def output_size(self) -> Size2:
+        return self.output_sizes
+
+    @output_size.setter
+    def output_size(self, value: Size2) -> None:
+        self.output_sizes = value
+
+    @property
+    def kernel_size(self) -> Size2:
+        return self.kernel_sizes
+
+    @kernel_size.setter
+    def kernel_size(self, value: Size2) -> None:
+        self.kernel_sizes = value
+
+    @property
+    def stride(self) -> Size2:
+        return self.strides
+
+    @stride.setter
+    def stride(self, value: Size2) -> None:
+        self.strides = value
+
+    @property
+    def padding(self) -> Size2 | Size4:
+        return self.paddings
+
+    @padding.setter
+    def padding(self, value: Size2 | Size4) -> None:
+        self.paddings = value
+
+    @property
+    def dilation(self) -> Size2:
+        return self.dilations
+
+    @dilation.setter
+    def dilation(self, value: Size2) -> None:
+        self.dilations = value
+
 
 class Flatten(Layer):
     """
diff --git a/python/paddle/nn/layer/distance.py b/python/paddle/nn/layer/distance.py
index 872382b2addc42..3433215be8ff1d 100644
--- a/python/paddle/nn/layer/distance.py
+++ b/python/paddle/nn/layer/distance.py
@@ -16,6 +16,8 @@
 
 from typing import TYPE_CHECKING
 
+from paddle.utils.decorator_utils import param_one_alias, param_two_alias
+
 from .. import functional as F
 from .layers import Layer
@@ -68,6 +70,7 @@ class PairwiseDistance(Layer):
            [4.99999860, 4.99999860])
     """
 
+    @param_one_alias(["epsilon", "eps"])
     def __init__(
         self,
         p: float = 2.0,
@@ -81,6 +84,7 @@ def __init__(
         self.keepdim = keepdim
         self.name = name
 
+    @param_two_alias(["x", "x1"], ["y", "x2"])
     def forward(self, x: paddle.Tensor, y: paddle.Tensor) -> paddle.Tensor:
         return F.pairwise_distance(
             x, y, self.p, self.epsilon, self.keepdim, self.name
@@ -95,3 +99,19 @@ def extra_repr(self) -> str:
         if self.name is not None:
             main_str += ', name={name}'
         return main_str.format(**self.__dict__)
+
+    @property
+    def eps(self) -> float:
+        return self.epsilon
+
+    @eps.setter
+    def eps(self, value: float) -> None:
+        self.epsilon = value
+
+    @property
+    def norm(self) -> float:
+        return self.p
+
+    @norm.setter
+    def norm(self, value: float) -> None:
+        self.p = value
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index 6dfd78ba0b5258..6ad2f0f8694f40 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -20,6 +20,7 @@
 from paddle import base, in_dynamic_mode
 from paddle.base.framework import in_dynamic_or_pir_mode
 from paddle.utils.decorator_utils import ParamAliasDecorator
+from paddle.utils.decorator_utils import param_one_alias, param_two_alias
 
 from .. import functional as F
 from .layers import Layer
@@ -2458,6 +2459,7 @@ class GaussianNLLLoss(Layer):
     reduction: _ReduceMode
     name: str | None
 
+    @param_one_alias(["epsilon", "eps"])
     def __init__(
         self,
         full: bool = False,
@@ -2477,6 +2479,7 @@ def __init__(
         self.reduction = reduction
         self.name = name
 
+    @param_two_alias(["label", "target"], ["variance", "var"])
     def forward(self, input: Tensor, label: Tensor, variance: Tensor) -> Tensor:
         out = F.gaussian_nll_loss(
             input,
@@ -2489,6 +2492,14 @@ def forward(self, input: Tensor, label: Tensor, variance: Tensor) -> Tensor:
         )
         return out
 
+    @property
+    def eps(self) -> float:
+        return self.epsilon
+
+    @eps.setter
+    def eps(self, value: float) -> None:
+        self.epsilon = value
+
 
 class AdaptiveLogSoftmaxWithLoss(Layer):
     r"""Adaptive softmax is an approximate strategy for training models with large output spaces. It is most effective when
diff --git a/python/paddle/nn/layer/pooling.py b/python/paddle/nn/layer/pooling.py
index 2ed94798e30a7a..81e01b5f3620db 100755
--- a/python/paddle/nn/layer/pooling.py
+++ b/python/paddle/nn/layer/pooling.py
@@ -17,6 +17,11 @@
 
 from typing import TYPE_CHECKING
 
+from paddle.utils.decorator_utils import (
+    LPPool_decorator,
+    param_one_alias,
+)
+
 from .. import functional as F
 from .layers import Layer
@@ -439,6 +444,7 @@ class LPPool1D(Layer):
     data_format: DataLayout1D
     name: str | None
 
+    @LPPool_decorator()
     def __init__(
         self,
         norm_type: float,
@@ -458,6 +464,7 @@ def __init__(
         self.data_format = data_format
         self.name = name
 
+    @param_one_alias(["x", "input"])
     def forward(self, x: Tensor) -> Tensor:
         out = F.lp_pool1d(
             x,
@@ -554,6 +561,7 @@ class LPPool2D(Layer):
     data_format: DataLayout2D
     name: str | None
 
+    @LPPool_decorator()
     def __init__(
         self,
         norm_type: float,
@@ -573,6 +581,7 @@ def __init__(
         self.data_format = data_format
         self.name = name
 
+    @param_one_alias(["x", "input"])
     def forward(self, x: Tensor) -> Tensor:
         return F.lp_pool2d(
             x,
@@ -1253,6 +1262,7 @@ class AdaptiveMaxPool1D(Layer):
     return_mask: bool
     name: str | None
 
+    @param_one_alias(["return_mask", "return_indices"])
     def __init__(
         self,
         output_size: int,
@@ -1272,6 +1282,14 @@ def forward(self, input: Tensor) -> Tensor:
 
     def extra_repr(self) -> str:
         return f'output_size={self.output_size}, return_mask={self.return_mask}'
 
+    @property
+    def return_indices(self) -> bool:
+        return self.return_mask
+
+    @return_indices.setter
+    def return_indices(self, value: bool) -> None:
+        self.return_mask = value
+
 
 class AdaptiveMaxPool2D(Layer):
     """
@@ -1339,6 +1357,7 @@ class AdaptiveMaxPool2D(Layer):
             [2, 3, 3, 3]
     """
 
+    @param_one_alias(["return_mask", "return_indices"])
     def __init__(
         self,
         output_size: Size2,
@@ -1350,6 +1369,7 @@ def __init__(
         self._return_mask = return_mask
         self._name = name
 
+    @param_one_alias(["x", "input"])
     def forward(self, x: Tensor) -> Tensor:
         return F.adaptive_max_pool2d(
             x,
@@ -1363,6 +1383,14 @@ def extra_repr(self) -> str:
             f'output_size={self._output_size}, return_mask={self._return_mask}'
         )
 
+    @property
+    def return_indices(self) -> bool:
+        return self._return_mask
+
+    @return_indices.setter
+    def return_indices(self, value: bool) -> None:
+        self._return_mask = value
+
 
 class AdaptiveMaxPool3D(Layer):
     """
@@ -1441,6 +1469,7 @@ class AdaptiveMaxPool3D(Layer):
 
     """
 
+    @param_one_alias(["return_mask", "return_indices"])
     def __init__(
         self,
         output_size: Size3,
@@ -1452,6 +1481,7 @@ def __init__(
         self._return_mask = return_mask
         self._name = name
 
+    @param_one_alias(["x", "input"])
     def forward(self, x: Tensor) -> Tensor:
         return F.adaptive_max_pool3d(
             x,
@@ -1465,6 +1495,14 @@ def extra_repr(self) -> str:
             f'output_size={self._output_size}, return_mask={self._return_mask}'
         )
 
+    @property
+    def return_indices(self) -> bool:
+        return self._return_mask
+
+    @return_indices.setter
+    def return_indices(self, value: bool) -> None:
+        self._return_mask = value
+
 
 class MaxUnPool1D(Layer):
     r"""
diff --git a/python/paddle/utils/decorator_utils.py b/python/paddle/utils/decorator_utils.py
index 0b0df8c9958a9f..d74d8bd45c71eb 100644
--- a/python/paddle/utils/decorator_utils.py
+++ b/python/paddle/utils/decorator_utils.py
@@ -223,6 +223,23 @@ def wrapper(*args: _InputT.args, **kwargs: _InputT.kwargs) -> _RetT:
 
     return decorator
 
+def LPPool_decorator() -> Callable[
+    [Callable[_InputT, _RetT]], Callable[_InputT, _RetT]
+]:
+    def decorator(func: Callable[_InputT, _RetT]) -> Callable[_InputT, _RetT]:
+        @functools.wraps(func)
+        def wrapper(*args: _InputT.args, **kwargs: _InputT.kwargs) -> _RetT:
+            if len(args) == 5 and isinstance(args[4], bool):
+                kwargs["ceil_mode"] = args[4]
+                args = args[:4]
+            return func(*args, **kwargs)
+
+        wrapper.__signature__ = inspect.signature(func)
+        return wrapper
+
+    return decorator
+
+
 def tensor_split_decorator(
     func: Callable[_InputT, _RetT],
 ) -> Callable[_InputT, _RetT]:
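A minimal usage sketch of what this patch is expected to enable, for reviewers; it assumes the patch is applied on top of Paddle, and the tensor shapes and layer choices below are illustrative rather than taken from the patch's tests. `dim`, `lambd`, `eps`, `x1`/`x2`, and `return_indices` are the PyTorch-style aliases introduced here; the native Paddle names keep working unchanged.

```python
import paddle
import paddle.nn as nn

x = paddle.randn([2, 6])

# `dim` is accepted as an alias of `axis`, and the new property mirrors it.
glu = nn.GLU(dim=-1)
assert glu.dim == -1
print(glu(x).shape)  # [2, 3]

# `lambd` aliases `threshold` on Hardshrink / Softshrink.
shrink = nn.Hardshrink(lambd=0.3)
assert shrink.lambd == 0.3

# `eps` aliases `epsilon`; `x1`/`x2` alias the forward inputs `x`/`y`.
dist = nn.PairwiseDistance(eps=1e-6)
print(dist(x1=paddle.randn([4, 8]), x2=paddle.randn([4, 8])).shape)  # [4]

# `return_indices` aliases `return_mask` on the adaptive max pools.
pool = nn.AdaptiveMaxPool2D(output_size=3, return_indices=True)
out, mask = pool(paddle.randn([2, 3, 8, 8]))
print(out.shape, mask.shape)  # [2, 3, 3, 3] [2, 3, 3, 3]
```

`LPPool_decorator` covers the remaining positional mismatch: in a PyTorch-style call such as `LPPool1D(2.0, 3, 2, True)`, the fifth positional argument (counting `self`) is `ceil_mode`, whereas Paddle's signature has `padding` in that slot; the decorator detects the trailing bool, moves it into the `ceil_mode` keyword, and truncates the positionals before delegating to `__init__`.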