34 changes: 19 additions & 15 deletions python/paddle/nn/clip.py
@@ -30,28 +30,34 @@
in_dynamic_or_pir_mode,
in_pir_mode,
)
from paddle.tensor.layer_function_generator import templatedoc

__all__ = []


@templatedoc()
def clip_by_norm(x, max_norm, name=None):
"""
${comment}
r"""

Limits the L2 norm of the input :math:`x` within :math:`max\_norm`.
If the L2 norm of :math:`x` is less than or equal to :math:`max\_norm`, :math:`out` will be
the same as :math:`x`. If the L2 norm of :math:`x` is greater than :math:`max\_norm`, :math:`x` will
be linearly scaled to make the L2 norm of :math:`out` equal to :math:`max\_norm`, as
shown in the following formula:

:math:`out = \frac{max\_norm * x}{norm(x)}`

where :math:`norm(x)` represents the L2 norm of :math:`x`.

Args:
x(${x_type}): ${x_comment}
max_norm(${max_norm_type}): ${max_norm_comment}
x(Tensor): The input of clip_by_norm, whose data type is float32.
The number of dimensions must be in the range [1, 9].
max_norm(float): The maximum norm value.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name does not need to be set
and is None by default.

Returns:
Tensor:

out(${out_type}): ${out_comment}

Tensor: The output of clip_by_norm with the same shape as the input.
The data type is float32.

Examples:
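A minimal usage sketch of the scaling rule above (the exact import path for clip_by_norm may differ across Paddle versions):

    import paddle
    from paddle.nn.clip import clip_by_norm

    x = paddle.to_tensor([[3.0, 4.0]])   # L2 norm is 5.0
    # norm(x) > max_norm, so x is scaled by max_norm / norm(x) = 0.2
    out = clip_by_norm(x, max_norm=1.0)
    print(out)                           # approximately [[0.6, 0.8]]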

@@ -96,17 +102,16 @@ def clip_by_norm(x, max_norm, name=None):
return out


@templatedoc()
def merge_selected_rows(x, name=None):
"""
${comment}
Merge the duplicated rows in the input SelectedRows object by adding their values together.

Args:
x(${x_type}): ${x_comment}
x(Tensor): The input selected rows to be merged.
name(str|None): Name of the output.

Returns:
out(${out_type}): ${out_comment}
Tensor, merged output.
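To make the merge rule concrete, here is a small pure-Python sketch of the semantics (a hypothetical illustration, not Paddle's SelectedRows API): rows that share an index are combined by summing their values.

    # Hypothetical reference for the merge rule, not Paddle's implementation.
    rows = [0, 2, 0]                             # row index 0 is duplicated
    vals = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
    merged = {}
    for r, v in zip(rows, vals):
        acc = merged.get(r, [0.0] * len(v))
        merged[r] = [a + b for a, b in zip(acc, v)]
    # merged == {0: [4.0, 4.0], 2: [2.0, 2.0]}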

Examples:

@@ -135,7 +140,6 @@ def merge_selected_rows(x, name=None):
return out


@templatedoc()
def get_tensor_from_selected_rows(x, name=None):
"""
Get tensor data from an input with SelectedRows type, and output a Tensor.
48 changes: 38 additions & 10 deletions python/paddle/static/nn/common.py
@@ -32,7 +32,6 @@
program_guard,
static_only,
)
from paddle.base.layers.layer_function_generator import templatedoc
from paddle.base.param_attr import ParamAttr
from paddle.base.wrapped_decorator import signature_safe_contextmanager
from paddle.common_ops_import import (
@@ -665,7 +664,6 @@ def data_norm(
return helper.append_activation(data_norm_out)


@templatedoc()
def group_norm(
input,
groups,
@@ -3117,7 +3115,6 @@ def __call__(self, *args):


@static_only
@templatedoc()
def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
"""
This is used to register a customized Python OP with Paddle. The design
@@ -3334,23 +3331,55 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
return out


@templatedoc()
def row_conv(input, future_context_size, param_attr=None, act=None):
"""
r"""
:api_attr: Static Graph

${comment}
The row convolution is also called lookahead convolution. It was
introduced in the following paper for DeepSpeech2:
http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf

The main motivation is that a bidirectional RNN, useful in speech
models such as DeepSpeech, learns the representation for a sequence by
performing a forward and a backward pass through the entire sequence.
However, unlike unidirectional RNNs, bidirectional RNNs are challenging
to deploy in an online and low-latency setting. The lookahead convolution
incorporates information from future subsequences in a computationally
efficient manner to improve unidirectional recurrent neural networks. The
row convolution differs from the 1D sequence convolution and is computed
as follows:

Given an input sequence :math:`X` of length :math:`t` and input dimension :math:`D`,
and a filter (:math:`W`) of size :math:`context \times D`,
the output sequence is convolved as:

:math:`out_{i} = \sum_{j=i}^{i + context - 1} X_{j} \cdot W_{j-i}`


In the above equation:

* :math:`out_{i}`: The i-th row of the output variable with shape [1, D].

* :math:`context`: Future context size.

* :math:`X_{j}`: The j-th row of the input variable with shape [1, D].

* :math:`W_{j-i}`: The (j-i)-th row of the parameters with shape [1, D].

For more details about row_conv, please refer to the design document:
https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .
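As a reference for the formula above, the following NumPy sketch computes the row convolution directly (a minimal illustration, not Paddle's implementation):

    import numpy as np

    def row_conv_ref(x, w):
        # x: (T, D) input sequence; w: (context, D) lookahead filter.
        t, _ = x.shape
        context = w.shape[0]
        out = np.zeros_like(x)
        for i in range(t):
            # out_i = sum over j in [i, i + context - 1] of x_j * w_{j-i},
            # where each term is a row-wise (elementwise) product.
            for j in range(i, min(i + context, t)):
                out[i] += x[j] * w[j - i]
        return out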

Args:
input (${x_type}): ${x_comment}.
input (Tensor): The input Tensor, with shape (B x T x N), where B is the
batch size.
future_context_size (int): Future context size. Please note that the
shape of the convolution kernel is [future_context_size + 1, D].
param_attr (ParamAttr): Attributes of the parameters, including
name, initializer, etc.
act (str): Non-linear activation to be applied to output variable.
act (str): Non-linear activation to be applied to the output Tensor.

Returns:
${out_comment}.
Tensor: The output is a Tensor, which has the same type and shape as the input.

Examples:

@@ -3521,7 +3550,6 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
py_func.registered_func_num = PyFuncRegistry.registered_func_num


@templatedoc()
def layer_norm(
input,
scale=True,
10 changes: 5 additions & 5 deletions python/paddle/static/nn/loss.py
@@ -18,7 +18,6 @@

# TODO: define loss functions of neural network
from paddle.base.layer_helper import LayerHelper
from paddle.base.layers.layer_function_generator import templatedoc
from paddle.base.param_attr import ParamAttr
from paddle.nn.initializer import Assign

@@ -31,7 +30,6 @@
# For now, the comments in c++ use types like Tensor, but on the Python side
# the type is often "Variable", and arguments may vary.
@static_only
@templatedoc(op_type="nce")
def nce(
input,
label,
@@ -49,14 +47,16 @@ def nce(
"""
:api_attr: Static Graph

${comment}
Compute and return the noise-contrastive estimation training loss. See `Noise-contrastive estimation: A new estimation principle
for unnormalized statistical models <http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf>`_.
By default this operator uses a uniform distribution for sampling.
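To clarify what the loss computes, here is a minimal NumPy sketch of the NCE objective with a uniform noise distribution q(w) = 1 / num_total_classes (a hypothetical reference, not Paddle's implementation):

    import numpy as np

    def nce_loss_ref(true_logit, noise_logits, num_total_classes):
        # NCE turns classification into logistic regression that separates
        # the true class from k sampled noise classes.
        k = len(noise_logits)
        log_kq = np.log(k / num_total_classes)  # log(k * q(w)) for uniform q
        sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
        # P(data | w) = sigmoid(logit - log(k * q(w)))
        loss = -np.log(sigmoid(true_logit - log_kq))
        for s in noise_logits:
            loss -= np.log(1.0 - sigmoid(s - log_kq))
        return loss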

Args:
input (Tensor): Input tensor, 2-D tensor with shape [batch_size, dim],
and data type is float32 or float64.
label (Tensor): Input label, 2-D tensor with shape [batch_size, num_true_class],
and data type is int64.
num_total_classes (int):${num_total_classes_comment}.
num_total_classes (int): Total number of classes in all samples.
sample_weight (Tensor|None): A Tensor of shape [batch_size, 1]
storing a weight for each sample. The default weight for each
sample is 1.0.
@@ -66,7 +66,7 @@
bias_attr (ParamAttr|None): To specify the bias parameter attribute.
Default: None, which means the default bias parameter property is
used. See usage for details in :ref:`api_paddle_ParamAttr` .
num_neg_samples (int): ${num_neg_samples_comment}.
num_neg_samples (int): The number of negative classes. The default value is 10.
name(str|None): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name does not need to be set and is None by default.
sampler (str, optional): The sampler used to sample class from negative classes.
2 changes: 0 additions & 2 deletions python/paddle/static/nn/sequence_lod.py
@@ -17,12 +17,10 @@
from paddle.base.data_feeder import check_type, check_variable_and_dtype
from paddle.base.framework import Variable, in_dygraph_mode
from paddle.base.layer_helper import LayerHelper
from paddle.base.layers.layer_function_generator import templatedoc

__all__ = []


@templatedoc()
def sequence_conv(
input,
num_filters,
11 changes: 1 addition & 10 deletions python/paddle/tensor/logic.py
@@ -18,7 +18,6 @@

from ..base.data_feeder import check_type, check_variable_and_dtype
from ..common_ops_import import Variable
from .layer_function_generator import templatedoc

Tensor = paddle.base.framework.core.eager.Tensor

@@ -427,7 +426,6 @@ def equal_all(x, y, name=None):
return out


@templatedoc()
def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
r"""
Check whether all elements of :math:`x` and :math:`y` satisfy the condition:
@@ -443,7 +441,7 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
y (Tensor): The input tensor; its data type should be float16, float32 or float64.
rtol (float, optional): The relative tolerance. Default: :math:`1e-5` .
atol (float, optional): The absolute tolerance. Default: :math:`1e-8` .
equal_nan (equalnantype, optional): ${equal_nan_comment}. Default: False.
equal_nan (bool, optional): Whether to compare NaN as equal. Default: False.
name (str, optional): Name for the operation. For more information, please
refer to :ref:`api_guide_Name`. Default: None.
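A brief usage sketch of the tolerance check (a minimal illustration; exact output formatting may differ by version):

    import paddle

    x = paddle.to_tensor([1.0, 2.0])
    y = paddle.to_tensor([1.0, 2.0000001])
    # True: |x - y| <= atol + rtol * |y| holds elementwise
    print(paddle.allclose(x, y))
    # False: the second element exceeds the tolerance
    print(paddle.allclose(x, paddle.to_tensor([1.0, 2.1])))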

@@ -503,7 +501,6 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
return out


@templatedoc()
def equal(x, y, name=None):
"""

@@ -605,7 +602,6 @@ def equal_(x, y, name=None):
return _C_ops.equal_(x, y)


@templatedoc()
def greater_equal(x, y, name=None):
"""
Returns the truth value of :math:`x >= y` elementwise, which is equivalent to the overloaded operator `>=`.
@@ -697,7 +693,6 @@ def greater_equal_(x, y, name=None):
return _C_ops.greater_equal_(x, y)


@templatedoc()
def greater_than(x, y, name=None):
"""
Returns the truth value of :math:`x > y` elementwise, which is equivalent to the overloaded operator `>`.
@@ -789,7 +784,6 @@ def greater_than_(x, y, name=None):
return _C_ops.greater_than_(x, y)


@templatedoc()
def less_equal(x, y, name=None):
"""
Returns the truth value of :math:`x <= y` elementwise, which is equivalent to the overloaded operator `<=`.
@@ -882,7 +876,6 @@ def less_equal_(x, y, name=None):
return _C_ops.less_equal_(x, y)


@templatedoc()
def less_than(x, y, name=None):
"""
Returns the truth value of :math:`x < y` elementwise, which is equivalent to the overloaded operator `<`.
@@ -975,7 +968,6 @@ def less_than_(x, y, name=None):
return _C_ops.less_than_(x, y)


@templatedoc()
def not_equal(x, y, name=None):
"""
Returns the truth value of :math:`x != y` elementwise, which is equivalent to the overloaded operator `!=`.
@@ -1365,7 +1357,6 @@ def bitwise_not_(x, name=None):
return _C_ops.bitwise_not_(x)


@templatedoc()
def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
r"""
Check whether all elements of :math:`x` and :math:`y` satisfy the condition:
3 changes: 1 addition & 2 deletions python/paddle/tensor/math.py
@@ -43,7 +43,7 @@
in_pir_mode,
)
from .creation import _complex_to_real_dtype
from .layer_function_generator import generate_layer_fn, templatedoc
from .layer_function_generator import generate_layer_fn
from .manipulation import cast, cast_
from .ops import ( # noqa: F401
abs,
@@ -1993,7 +1993,6 @@ def count_nonzero(x, axis=None, keepdim=False, name=None):
return paddle.sum(int_tensor, axis=axis, keepdim=keepdim, name=name)


@templatedoc(op_type="sum")
def add_n(inputs, name=None):
"""
Sum one or more Tensors of the input.
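A short usage sketch (add_n sums the input tensors elementwise):

    import paddle

    a = paddle.to_tensor([[1.0, 2.0]])
    b = paddle.to_tensor([[3.0, 4.0]])
    print(paddle.add_n([a, b]))   # [[4.0, 6.0]]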