diff --git a/python/paddle/nn/clip.py b/python/paddle/nn/clip.py
index 0f551b1aa6c416..9e51bfd6e3b81b 100644
--- a/python/paddle/nn/clip.py
+++ b/python/paddle/nn/clip.py
@@ -30,28 +30,36 @@
     in_dynamic_or_pir_mode,
     in_pir_mode,
 )
-from paddle.tensor.layer_function_generator import templatedoc

 __all__ = []


-@templatedoc()
 def clip_by_norm(x, max_norm, name=None):
-    """
-    ${comment}
+    r"""
+
+    Limits the L2 norm of the input :math:`x` within :math:`max\_norm`.
+    If the L2 norm of :math:`x` is less than or equal to :math:`max\_norm`, :math:`out` will be
+    the same as :math:`x`. If the L2 norm of :math:`x` is greater than :math:`max\_norm`, :math:`x` will
+    be linearly scaled to make the L2 norm of :math:`out` equal to :math:`max\_norm`, as
+    shown in the following formula:
+
+    .. math::
+
+        out = \frac{max\_norm * x}{norm(x)}
+
+    where :math:`norm(x)` represents the L2 norm of :math:`x`.

     Args:
-        x(${x_type}): ${x_comment}
-        max_norm(${max_norm_type}): ${max_norm_comment}
+        x(Tensor): The input of clip_by_norm, whose data type is float32.
+            The number of dimensions must be between [1, 9].
+        max_norm(float): The maximum norm value.
         name(str, optional): For detailed information, please refer
             to :ref:`api_guide_Name`. Usually name is no need to set and
             None by default.

     Returns:
-        Tensor:
-
-        out(${out_type}): ${out_comment}
-
+        Tensor: The output of clip_by_norm with the same shape as the input.
+            The data type is float32.

     Examples:

@@ -96,17 +104,16 @@ def clip_by_norm(x, max_norm, name=None):
     return out


-@templatedoc()
 def merge_selected_rows(x, name=None):
     """
-    ${comment}
+    Merge duplicated rows in the input SelectedRows object by adding them.

     Args:
-        x(${x_type}): ${x_comment}
+        x(Tensor): The input selected rows to be merged.
         name(basestring|None): Name of the output.

     Returns:
-        out(${out_type}): ${out_comment}
+        Tensor, merged output.

     Examples:

@@ -135,7 +142,6 @@ def merge_selected_rows(x, name=None):
     return out


-@templatedoc()
 def get_tensor_from_selected_rows(x, name=None):
     """
     Get tensor data from input with SelectedRows type, and outputs a Tensor.
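Note: to make the scaling rule in the new clip_by_norm docstring concrete, here is a minimal NumPy sketch of the documented formula. It is illustrative only, not the Paddle kernel, and the helper name is hypothetical:

```python
import numpy as np

def clip_by_norm_reference(x, max_norm):
    # Documented rule: out = max_norm * x / norm(x) when norm(x) > max_norm,
    # otherwise out is x unchanged.
    norm = np.sqrt(np.sum(np.square(x)))  # L2 norm over all elements
    if norm <= max_norm:
        return x
    return max_norm * x / norm

x = np.array([[3.0, 4.0]], dtype=np.float32)     # L2 norm is 5.0
print(clip_by_norm_reference(x, max_norm=1.0))   # [[0.6 0.8]], rescaled to norm 1.0
```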
diff --git a/python/paddle/static/nn/common.py b/python/paddle/static/nn/common.py
index 1ee83d374b6979..ab315c6b5f0167 100644
--- a/python/paddle/static/nn/common.py
+++ b/python/paddle/static/nn/common.py
@@ -32,7 +32,6 @@
     program_guard,
     static_only,
 )
-from paddle.base.layers.layer_function_generator import templatedoc
 from paddle.base.param_attr import ParamAttr
 from paddle.base.wrapped_decorator import signature_safe_contextmanager
 from paddle.common_ops_import import (
@@ -665,7 +664,6 @@ def data_norm(
     return helper.append_activation(data_norm_out)


-@templatedoc()
 def group_norm(
     input,
     groups,
@@ -3117,7 +3115,6 @@ def __call__(self, *args):


 @static_only
-@templatedoc()
 def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
     """
     This is used to register customized Python OP to Paddle. The design
@@ -3334,23 +3331,57 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
     return out


-@templatedoc()
 def row_conv(input, future_context_size, param_attr=None, act=None):
-    """
+    r"""
     :api_attr: Static Graph

-    ${comment}
+    The row convolution is called lookahead convolution. It was
+    introduced in the following paper for DeepSpeech2:
+    http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf
+
+    The main motivation is that a bidirectional RNN, useful in DeepSpeech-like
+    speech models, learns representation for a sequence by performing a
+    forward and a backward pass through the entire sequence. However, unlike
+    unidirectional RNNs, bidirectional RNNs are challenging to deploy in an online
+    and low-latency setting. The lookahead convolution incorporates information
+    from future subsequences in a computationally efficient manner to improve
+    unidirectional recurrent neural networks. The row convolution is
+    different from the 1D sequence convolution, and is computed as follows:
+
+    Given an input sequence :math:`X` of length :math:`t` and input dimension :math:`D`,
+    and a filter (:math:`W`) of size :math:`context \times D`,
+    the output sequence is convolved as:
+
+    .. math::
+
+        Out_{i} = \sum_{j=i}^{i + context - 1} X_{j} \cdot W_{j-i}
+
+
+    In the above equation:
+
+    * :math:`Out_{i}`: The i-th row of the output variable with shape [1, D].
+
+    * :math:`context`: Future context size.
+
+    * :math:`X_{j}`: The j-th row of the input variable with shape [1, D].
+
+    * :math:`W_{j-i}`: The (j-i)-th row of the parameters with shape [1, D].
+
+    For more details about row_conv, please refer to the design document
+    https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .

     Args:
-        input (${x_type}): ${x_comment}.
+        input (Tensor): The input is a Tensor with shape (B x T x N),
+            where B is the batch size.
         future_context_size (int): Future context size. Please note, the shape
             of convolution kernel is [future_context_size + 1, D].
         param_attr (ParamAttr): Attributes of parameters, including
             name, initializer etc.
-        act (str): Non-linear activation to be applied to output variable.
+        act (str): Non-linear activation to be applied to the output Tensor.

     Returns:
-        ${out_comment}.
+        Tensor: The output is a Tensor with the same type and shape as the input.

     Examples:

@@ -3521,7 +3552,6 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
     py_func.registered_func_num = PyFuncRegistry.registered_func_num


-@templatedoc()
 def layer_norm(
     input,
     scale=True,
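Note: the lookahead formula above is easy to misread as a standard 1D convolution. Below is a minimal NumPy sketch of the documented computation for a single sequence. It is illustrative only, not Paddle's implementation; reading :math:`X_{j} \cdot W_{j-i}` as an element-wise product of the two [1, D] rows, and treating rows past the end of the sequence as zero, are both assumptions:

```python
import numpy as np

def row_conv_reference(x, w):
    # x: [T, D] input rows; w: [context, D] filter rows, where
    # context = future_context_size + 1 as stated in the docstring.
    # Out_i = sum over j in [i, i + context - 1] of X_j * W_{j-i} (element-wise).
    t, _ = x.shape
    context = w.shape[0]
    out = np.zeros_like(x)
    for i in range(t):
        for j in range(i, min(i + context, t)):  # clip at the sequence end
            out[i] += x[j] * w[j - i]
    return out

x = np.ones((6, 4), dtype=np.float32)  # T=6 time steps, D=4 features
w = np.ones((3, 4), dtype=np.float32)  # future_context_size=2, so 3 filter rows
print(row_conv_reference(x, w).shape)  # (6, 4), same shape as the input
```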
diff --git a/python/paddle/static/nn/loss.py b/python/paddle/static/nn/loss.py
index 870e2144fa86c8..d33ce86f0aa675 100644
--- a/python/paddle/static/nn/loss.py
+++ b/python/paddle/static/nn/loss.py
@@ -18,7 +18,6 @@

 # TODO: define loss functions of neural network
 from paddle.base.layer_helper import LayerHelper
-from paddle.base.layers.layer_function_generator import templatedoc
 from paddle.base.param_attr import ParamAttr
 from paddle.nn.initializer import Assign

@@ -31,7 +30,6 @@
 # For now, the comments in c++ use types like Tensor, but in python side
 # the type is often "Variable", and arguments may vary.
 @static_only
-@templatedoc(op_type="nce")
 def nce(
     input,
     label,
@@ -49,14 +47,16 @@ def nce(
     """
     :api_attr: Static Graph

-    ${comment}
+    Compute and return the noise-contrastive estimation training loss. See
+    `Noise-contrastive estimation: A new estimation principle
+    for unnormalized statistical models `_.
+    By default this operator uses a uniform distribution for sampling.

     Args:
         input (Tensor): Input tensor, 2-D tensor with shape [batch_size, dim],
             and data type is float32 or float64.
         label (Tensor): Input label, 2-D tensor with shape [batch_size, num_true_class],
             and data type is int64.
-        num_total_classes (int):${num_total_classes_comment}.
+        num_total_classes (int): Total number of classes in all samples.
         sample_weight (Tensor|None): A Tensor of shape [batch_size, 1]
             storing a weight for each sample. The default weight for each
             sample is 1.0.
@@ -66,7 +66,7 @@ def nce(
         bias_attr (ParamAttr|None): To specify the bias parameter attribute.
             Default: None, which means the default bias parameter property is used.
             See usage for details in :ref:`api_paddle_ParamAttr` .
-        num_neg_samples (int): ${num_neg_samples_comment}.
+        num_neg_samples (int): The number of negative classes. The default value is 10.
         name(str|None): For detailed information, please refer to
             :ref:`api_guide_Name` . Usually name is no need to set and None by default.
         sampler (str, optional): The sampler used to sample class from negative classes.
diff --git a/python/paddle/static/nn/sequence_lod.py b/python/paddle/static/nn/sequence_lod.py
index 3740a9be3dbbfe..b9837aee3de0b1 100644
--- a/python/paddle/static/nn/sequence_lod.py
+++ b/python/paddle/static/nn/sequence_lod.py
@@ -17,12 +17,10 @@

 from paddle.base.data_feeder import check_type, check_variable_and_dtype
 from paddle.base.framework import Variable, in_dygraph_mode
 from paddle.base.layer_helper import LayerHelper
-from paddle.base.layers.layer_function_generator import templatedoc

 __all__ = []


-@templatedoc()
 def sequence_conv(
     input,
     num_filters,
diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py
index a5a2ea7846578b..38855bd4221478 100755
--- a/python/paddle/tensor/logic.py
+++ b/python/paddle/tensor/logic.py
@@ -18,7 +18,6 @@

 from ..base.data_feeder import check_type, check_variable_and_dtype
 from ..common_ops_import import Variable
-from .layer_function_generator import templatedoc

 Tensor = paddle.base.framework.core.eager.Tensor

@@ -427,7 +426,6 @@ def equal_all(x, y, name=None):
     return out


-@templatedoc()
 def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     r"""
     Check if all :math:`x` and :math:`y` satisfy the condition:
@@ -443,7 +441,7 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
         y (Tensor): The input tensor, it's data type should be float16, float32, float64.
         rtol (rtoltype, optional): The relative tolerance. Default: :math:`1e-5` .
         atol (atoltype, optional): The absolute tolerance. Default: :math:`1e-8` .
-        equal_nan (equalnantype, optional): ${equal_nan_comment}. Default: False.
+        equal_nan (bool, optional): Whether to compare NaNs as equal. Default: False.
         name (str, optional): Name for the operation. For more information,
             please refer to :ref:`api_guide_Name`. Default: None.

@@ -503,7 +501,6 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     return out


-@templatedoc()
 def equal(x, y, name=None):
     """

@@ -605,7 +602,6 @@ def equal_(x, y, name=None):
     return _C_ops.equal_(x, y)


-@templatedoc()
 def greater_equal(x, y, name=None):
     """
     Returns the truth value of :math:`x >= y` elementwise, which is equivalent function to the overloaded operator `>=`.
@@ -697,7 +693,6 @@ def greater_equal_(x, y, name=None):
     return _C_ops.greater_equal_(x, y)


-@templatedoc()
 def greater_than(x, y, name=None):
     """
     Returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`.
@@ -789,7 +784,6 @@ def greater_than_(x, y, name=None):
     return _C_ops.greater_than_(x, y)


-@templatedoc()
 def less_equal(x, y, name=None):
     """
     Returns the truth value of :math:`x <= y` elementwise, which is equivalent function to the overloaded operator `<=`.
@@ -882,7 +876,6 @@ def less_equal_(x, y, name=None):
     return _C_ops.less_equal_(x, y)


-@templatedoc()
 def less_than(x, y, name=None):
     """
     Returns the truth value of :math:`x < y` elementwise, which is equivalent function to the overloaded operator `<`.
@@ -975,7 +968,6 @@ def less_than_(x, y, name=None):
     return _C_ops.less_than_(x, y)


-@templatedoc()
 def not_equal(x, y, name=None):
     """
     Returns the truth value of :math:`x != y` elementwise, which is equivalent function to the overloaded operator `!=`.
@@ -1365,7 +1357,6 @@ def bitwise_not_(x, name=None):
     return _C_ops.bitwise_not_(x)


-@templatedoc()
 def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     r"""
     Check if all :math:`x` and :math:`y` satisfy the condition:
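Note: allclose and isclose document the same element-wise condition, :math:`|x - y| \leq atol + rtol \times |y|`. A minimal NumPy sketch of that condition, including the equal_nan switch, is given below; it is an illustrative reference only, and the helper name is hypothetical:

```python
import numpy as np

def allclose_reference(x, y, rtol=1e-05, atol=1e-08, equal_nan=False):
    # Documented condition: |x - y| <= atol + rtol * |y|, reduced with all().
    close = np.abs(x - y) <= atol + rtol * np.abs(y)
    if equal_nan:
        close |= np.isnan(x) & np.isnan(y)  # treat NaN as equal to NaN
    return bool(np.all(close))

print(allclose_reference(np.array([10.0]), np.array([10.00001])))                  # True
print(allclose_reference(np.array([np.nan]), np.array([np.nan])))                  # False
print(allclose_reference(np.array([np.nan]), np.array([np.nan]), equal_nan=True))  # True
```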
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 24611628d08c63..9bde343f185fdc 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -43,7 +43,7 @@
     in_pir_mode,
 )
 from .creation import _complex_to_real_dtype
-from .layer_function_generator import generate_layer_fn, templatedoc
+from .layer_function_generator import generate_layer_fn
 from .manipulation import cast, cast_
 from .ops import (  # noqa: F401
     abs,
@@ -1993,7 +1993,6 @@ def count_nonzero(x, axis=None, keepdim=False, name=None):
     return paddle.sum(int_tensor, axis=axis, keepdim=keepdim, name=name)


-@templatedoc(op_type="sum")
 def add_n(inputs, name=None):
     """
     Sum one or more Tensor of the input.
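Note: add_n keeps its behavior of summing its input tensors element-wise; only the decorator is dropped. A short usage sketch, assuming the public paddle.add_n entry point:

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
y = paddle.to_tensor([[10.0, 20.0], [30.0, 40.0]])
out = paddle.add_n([x, y])  # element-wise sum of all input tensors
print(out.numpy())          # [[11. 22.] [33. 44.]]
```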