2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/dataset/dataset.py
@@ -1493,7 +1493,7 @@ def load_into_memory(self):
filelist = ["a.txt", "b.txt"]
dataset.set_filelist(filelist)
dataset.load_into_memory()
"""
"""
self._prepare_to_run()
self.boxps.load_into_memory()

2 changes: 1 addition & 1 deletion python/paddle/incubate/nn/layer/fused_transformer.py
@@ -684,7 +684,7 @@ class FusedTransformerEncoderLayer(Layer):

.. code-block:: python

-# required: gpu
+# required: gpu
import paddle
from paddle.incubate.nn import FusedTransformerEncoderLayer

2 changes: 1 addition & 1 deletion python/paddle/incubate/sparse/unary.py
@@ -511,7 +511,7 @@ def coalesce(x):
#[[0, 1], [1, 2]]
print(sp_x.values())
#[3.0, 3.0]
"""
"""
return _C_ops.sparse_coalesce(x)


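For readers of the `coalesce` hunk above: a minimal end-to-end sketch of reproducing the printed indices and values. It assumes `paddle.incubate.sparse.sparse_coo_tensor` is available in this release for building the input; the duplicate-index data below is chosen so that summing duplicates yields the `[3.0, 3.0]` shown in the docstring.

.. code-block:: python

    import paddle
    from paddle.incubate import sparse

    # Two entries share the index (0, 1); coalesce() sums their values.
    indices = [[0, 0, 1], [1, 1, 2]]
    values = [1.0, 2.0, 3.0]
    sp_x = sparse.sparse_coo_tensor(indices, values)
    sp_x = sparse.coalesce(sp_x)
    print(sp_x.indices())  # [[0, 1], [1, 2]]
    print(sp_x.values())   # [3.0, 3.0]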
188 changes: 94 additions & 94 deletions python/paddle/nn/functional/common.py
@@ -347,23 +347,23 @@ def interpolate(x,
Examples:
.. code-block:: python

-import paddle
-import paddle.nn.functional as F
-
-input_data = paddle.randn(shape=(2,3,6,10)).astype(paddle.float32)
-output_1 = F.interpolate(x=input_data, size=[12,12])
-print(output_1.shape)
-# [2L, 3L, 12L, 12L]
-
-# given scale
-output_2 = F.interpolate(x=input_data, scale_factor=[2,1])
-print(output_2.shape)
-# [2L, 3L, 12L, 10L]
-
-# bilinear interp
-output_3 = F.interpolate(x=input_data, scale_factor=[2,1], mode="bilinear")
-print(output_2.shape)
-# [2L, 3L, 12L, 10L]
+import paddle
+import paddle.nn.functional as F
+
+input_data = paddle.randn(shape=(2,3,6,10)).astype(paddle.float32)
+output_1 = F.interpolate(x=input_data, size=[12,12])
+print(output_1.shape)
+# [2L, 3L, 12L, 12L]
+
+# given scale
+output_2 = F.interpolate(x=input_data, scale_factor=[2,1])
+print(output_2.shape)
+# [2L, 3L, 12L, 10L]
+
+# bilinear interp
+output_3 = F.interpolate(x=input_data, scale_factor=[2,1], mode="bilinear")
+print(output_3.shape)
+# [2L, 3L, 12L, 10L]
"""
data_format = data_format.upper()
resample = mode.upper()
@@ -818,17 +818,17 @@ def upsample(x,
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).

Examples:
-.. code-block:: python
+.. code-block:: python

-import paddle
-import paddle.nn as nn
+import paddle
+import paddle.nn as nn

-input_data = paddle.randn(shape=(2,3,6,10)).astype(paddle.float32)
-upsample_out = paddle.nn.Upsample(size=[12,12])
+input_data = paddle.randn(shape=(2,3,6,10)).astype(paddle.float32)
+upsample_out = paddle.nn.Upsample(size=[12,12])

-output = upsample_out(x=input_data)
-print(output.shape)
-# [2L, 3L, 12L, 12L]
+output = upsample_out(x=input_data)
+print(output.shape)
+# [2L, 3L, 12L, 12L]

"""
return interpolate(x, size, scale_factor, mode, align_corners, align_mode,
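Note that the `upsample` docstring example above drives the `paddle.nn.Upsample` layer rather than the functional form being documented. A minimal sketch of calling `F.upsample` directly, assuming `'nearest'` as the default `mode` (the signature forwards straight to `interpolate`, as the `return` statement above shows):

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    input_data = paddle.randn(shape=(2, 3, 6, 10)).astype(paddle.float32)
    # upsample() forwards its arguments to interpolate().
    output = F.upsample(x=input_data, size=[12, 12])
    print(output.shape)  # [2, 3, 12, 12]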
@@ -842,30 +842,30 @@ def bilinear(x1, x2, weight, bias=None, name=None):
See :ref:`api_nn_Bilinear` for details and output shape.

Parameters:
-x1 (Tensor): the first input tensor, it's data type should be float32, float64.
-x2 (Tensor): the second input tensor, it's data type should be float32, float64.
-weight (Parameter): The learnable weights of this layer, shape is [out_features, in1_features, in2_features].
-bias (Parameter, optional): The learnable bias(Bias) of this layer, shape is [1, out_features]. If it is set to None, no bias will be added to the output units. The default value is None.
-name (str, optional): The default value is None. Normally there is no need for user
-to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.
+x1 (Tensor): the first input tensor; its data type should be float32 or float64.
+x2 (Tensor): the second input tensor; its data type should be float32 or float64.
+weight (Parameter): The learnable weights of this layer, shape is [out_features, in1_features, in2_features].
+bias (Parameter, optional): The learnable bias of this layer, shape is [1, out_features]. If set to None, no bias will be added to the output units. Default: None.
+name (str, optional): Normally there is no need for the user
+to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.

Returns:
-Tensor: A 2-D Tensor of shape [batch_size, out_features].
+Tensor: A 2-D Tensor of shape [batch_size, out_features].

Examples:
-.. code-block:: python
+.. code-block:: python

-import paddle
-import paddle.nn.functional as F
+import paddle
+import paddle.nn.functional as F

-x1 = paddle.randn((5, 5)).astype(paddle.float32)
-x2 = paddle.randn((5, 4)).astype(paddle.float32)
-w = paddle.randn((1000, 5, 4)).astype(paddle.float32)
-b = paddle.randn((1, 1000)).astype(paddle.float32)
+x1 = paddle.randn((5, 5)).astype(paddle.float32)
+x2 = paddle.randn((5, 4)).astype(paddle.float32)
+w = paddle.randn((1000, 5, 4)).astype(paddle.float32)
+b = paddle.randn((1, 1000)).astype(paddle.float32)

-result = F.bilinear(x1, x2, w, b)
-print(result.shape)
-# [5, 1000]
+result = F.bilinear(x1, x2, w, b)
+print(result.shape)
+# [5, 1000]
"""

if in_dygraph_mode():
@@ -1008,38 +1008,38 @@ def dropout(x,

.. code-block:: python

-import paddle
-
-x = paddle.to_tensor([[1,2,3], [4,5,6]]).astype(paddle.float32)
-y_train = paddle.nn.functional.dropout(x, 0.5)
-y_test = paddle.nn.functional.dropout(x, 0.5, training=False)
-y_0 = paddle.nn.functional.dropout(x, axis=0)
-y_1 = paddle.nn.functional.dropout(x, axis=1)
-y_01 = paddle.nn.functional.dropout(x, axis=[0,1])
-print(x)
-# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
-# [[1., 2., 3.],
-# [4., 5., 6.]])
-print(y_train)
-# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
-# [[2. , 0. , 6. ],
-# [8. , 0. , 12.]])
-print(y_test)
-# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
-# [[1., 2., 3.],
-# [4., 5., 6.]])
-print(y_0)
-# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
-# [[0. , 0. , 0. ],
-# [8. , 10., 12.]])
-print(y_1)
-# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
-# [[2. , 0. , 6. ],
-# [8. , 0. , 12.]])
-print(y_01)
-# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
-# [[0. , 0. , 0. ],
-# [8. , 0. , 12.]])
+import paddle
+
+x = paddle.to_tensor([[1,2,3], [4,5,6]]).astype(paddle.float32)
+y_train = paddle.nn.functional.dropout(x, 0.5)
+y_test = paddle.nn.functional.dropout(x, 0.5, training=False)
+y_0 = paddle.nn.functional.dropout(x, axis=0)
+y_1 = paddle.nn.functional.dropout(x, axis=1)
+y_01 = paddle.nn.functional.dropout(x, axis=[0,1])
+print(x)
+# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+# [[1., 2., 3.],
+# [4., 5., 6.]])
+print(y_train)
+# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+# [[2. , 0. , 6. ],
+# [8. , 0. , 12.]])
+print(y_test)
+# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+# [[1., 2., 3.],
+# [4., 5., 6.]])
+print(y_0)
+# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+# [[0. , 0. , 0. ],
+# [8. , 10., 12.]])
+print(y_1)
+# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+# [[2. , 0. , 6. ],
+# [8. , 0. , 12.]])
+print(y_01)
+# Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+# [[0. , 0. , 0. ],
+# [8. , 0. , 12.]])

"""
if not isinstance(p, (float, int, Variable)):
@@ -1239,14 +1239,14 @@ def dropout3d(x, p=0.5, training=True, data_format='NCDHW', name=None):
Examples:
.. code-block:: python

-import paddle
+import paddle

-x = paddle.randn(shape=(2, 3, 4, 5, 6)).astype(paddle.float32)
-y_train = paddle.nn.functional.dropout3d(x) #train
-y_test = paddle.nn.functional.dropout3d(x, training=False) #test
-print(x[0,0,:,:,:])
-print(y_train[0,0,:,:,:]) # may all 0
-print(y_test[0,0,:,:,:])
+x = paddle.randn(shape=(2, 3, 4, 5, 6)).astype(paddle.float32)
+y_train = paddle.nn.functional.dropout3d(x) #train
+y_test = paddle.nn.functional.dropout3d(x, training=False) #test
+print(x[0,0,:,:,:])
+print(y_train[0,0,:,:,:]) # may be all 0, since whole channels are dropped
+print(y_test[0,0,:,:,:])

"""

@@ -1287,19 +1287,19 @@ def alpha_dropout(x, p=0.5, training=True, name=None):
Examples:
.. code-block:: python

-import paddle
-
-x = paddle.to_tensor([[-1, 1], [-1, 1]]).astype(paddle.float32)
-y_train = paddle.nn.functional.alpha_dropout(x, 0.5)
-y_test = paddle.nn.functional.alpha_dropout(x, 0.5, training=False)
-print(y_train)
-# Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
-# [[-0.10721093, -0.77919382],
-# [-0.10721093, 1.66559887]]) (randomly)
-print(y_test)
-# Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
-# [[-1., 1.],
-# [-1., 1.]])
+import paddle
+
+x = paddle.to_tensor([[-1, 1], [-1, 1]]).astype(paddle.float32)
+y_train = paddle.nn.functional.alpha_dropout(x, 0.5)
+y_test = paddle.nn.functional.alpha_dropout(x, 0.5, training=False)
+print(y_train)
+# Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+# [[-0.10721093, -0.77919382],
+# [-0.10721093, 1.66559887]]) (randomly)
+print(y_test)
+# Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+# [[-1., 1.],
+# [-1., 1.]])
"""
if not isinstance(p, (float, int)):
raise TypeError("p argument should be a float or int")
51 changes: 25 additions & 26 deletions python/paddle/nn/functional/loss.py
@@ -472,10 +472,9 @@ def edit_distance(input,
NOTE: This Api is different from fluid.metrics.EditDistance

Returns:
-Tuple:
-
-distance(Tensor): edit distance result, its data type is float32, and its shape is (batch_size, 1).
-sequence_num(Tensor): sequence number, its data type is float32, and its shape is (1,).
+Tuple:
+distance(Tensor): edit distance result, its data type is float32, and its shape is (batch_size, 1).
+sequence_num(Tensor): sequence number, its data type is float32, and its shape is (1,).

Examples:
.. code-block:: python
@@ -2959,29 +2958,29 @@ def multi_label_soft_margin_loss(input,
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.

-Shape:
-input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operationoperates over all the elements.
-label: N-D Tensor, same shape as the input.
-weight:N-D Tensor, the shape is [N,1]
-output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input.
+Shape:
+input: N-D Tensor, the shape is [N, \*], where N is batch size and `\*` means number of classes; available dtypes are float32, float64. The sum operation operates over all the elements.
+label: N-D Tensor, same shape as the input.
+weight: N-D Tensor, the shape is [N, 1].
+output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input.

-Returns:
-Tensor, The tensor variable storing the multi_label_soft_margin_loss of input and label.
+Returns:
+Tensor, the tensor variable storing the multi_label_soft_margin_loss of input and label.

-Examples:
-.. code-block:: python
+Examples:
+.. code-block:: python

-import paddle
-import paddle.nn.functional as F
-input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
-# label elements in {1., -1.}
-label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)
-loss = F.multi_label_soft_margin_loss(input, label, reduction='none')
-print(loss)
-# Tensor([3.49625897, 0.71111226, 0.43989015])
-loss = F.multi_label_soft_margin_loss(input, label, reduction='mean')
-print(loss)
-# Tensor([1.54908717])
+import paddle
+import paddle.nn.functional as F
+input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
+# label elements in {1., -1.}
+label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)
+loss = F.multi_label_soft_margin_loss(input, label, reduction='none')
+print(loss)
+# Tensor([3.49625897, 0.71111226, 0.43989015])
+loss = F.multi_label_soft_margin_loss(input, label, reduction='mean')
+print(loss)
+# Tensor([1.54908717])
"""
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
@@ -3266,8 +3265,8 @@ def triplet_margin_with_distance_loss(input,

distance_function (callable, optional): Quantifies the distance between two tensors. if not specified, 2 norm functions will be used.

-margin (float, optional):Default: :math:`1`.A nonnegative margin representing the minimum difference
-between the positive and negative distances required for the loss to be 0.
+margin (float, optional): A nonnegative margin representing the minimum difference
+between the positive and negative distances required for the loss to be 0. Default value is :math:`1`.

swap (bool, optional):The distance swap changes the negative distance to the swap distance (distance between positive samples
and negative samples) if swap distance smaller than negative distance. Default: ``False``.
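No example appears in the `triplet_margin_with_distance_loss` hunk itself; a hedged usage sketch of the functional form, assuming the signature implied by the parameters documented above (`distance_function`, `margin`, `swap`) plus an assumed default reduction of `'mean'`:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    # Anchor, positive (same class), and negative (different class) embeddings.
    input = paddle.randn((4, 16)).astype(paddle.float32)
    positive = paddle.randn((4, 16)).astype(paddle.float32)
    negative = paddle.randn((4, 16)).astype(paddle.float32)

    # With no distance_function given, the 2-norm is used by default.
    loss = F.triplet_margin_with_distance_loss(input, positive, negative, margin=1.0)
    print(loss)  # scalar under the assumed 'mean' reduction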
12 changes: 6 additions & 6 deletions python/paddle/nn/layer/loss.py
@@ -1219,7 +1219,7 @@ class MultiLabelSoftMarginLoss(Layer):
:math:`y` and :math:`x` must have the same size.

Parameters:
-weight (Tensor,optional): a manual rescaling weight given to each class.
+weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size C and the data type is float32, float64.
Default is ``'None'`` .
reduction (str, optional): Indicate how to average the loss by batch_size,
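As an illustration of the `weight` parameter touched in this hunk, a sketch of the layer-style API; the constructor arguments are an assumption based on this docstring, and the input/label values reuse the functional example earlier in this diff:

.. code-block:: python

    import paddle

    input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
    label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)

    # Per-class rescaling weight: a Tensor of size C (here C = 3), per the docstring.
    weight = paddle.to_tensor([1.0, 0.5, 2.0])
    loss_fn = paddle.nn.MultiLabelSoftMarginLoss(weight=weight, reduction='mean')
    print(loss_fn(input, label))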
@@ -1482,7 +1482,7 @@ class TripletMarginWithDistanceLoss(Layer):
where the default `distance_function`

.. math::
-d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_2
+d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_2

or user can define their own distance function. `margin` is a nonnegative margin representing the minimum difference
between the positive and negative distances that is required for the loss to be 0. If `swap` is true, it will compare distance of (input, negative) with
@@ -1510,15 +1510,15 @@ class TripletMarginWithDistanceLoss(Layer):

Shapes:
input (Tensor):Input tensor, the data type is float32 or float64.
-the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64.
+the shape is [N, \*], where N is batch size and `\*` means any number of additional dimensions; available dtypes are float32, float64.

positive (Tensor):Positive tensor, the data type is float32 or float64.
-The shape of label is the same as the shape of input.
+The shape of positive is the same as the shape of input.

negative (Tensor):Negative tensor, the data type is float32 or float64.
-The shape of label is the same as the shape of input.
+The shape of negative is the same as the shape of input.

-output(Tensor): The tensor variable storing the triplet_margin_with_distance_loss of input and positive and negative.
+output (Tensor): The tensor variable storing the triplet_margin_with_distance_loss of input, positive and negative.

Return:
A callable object of TripletMarginWithDistanceLoss
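Finally, since the `Return:` section above promises a callable object, a sketch of layer-style usage with a user-defined `distance_function`, assuming the constructor mirrors the functional parameters (`distance_function`, `margin`, `swap`):

.. code-block:: python

    import paddle

    def l1_distance(x, y):
        # A user-defined distance, as permitted by distance_function.
        return paddle.abs(x - y).sum(axis=-1)

    input = paddle.randn((4, 16)).astype(paddle.float32)
    positive = paddle.randn((4, 16)).astype(paddle.float32)
    negative = paddle.randn((4, 16)).astype(paddle.float32)

    loss_fn = paddle.nn.TripletMarginWithDistanceLoss(
        distance_function=l1_distance, margin=1.0)
    print(loss_fn(input, positive, negative))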