
Commit 2cd10fc: fix 2.0 api docs (#28445)
Parent: a083c76

6 files changed: +154 −170 lines

python/paddle/fluid/layers/nn.py

Lines changed: 16 additions & 24 deletions
@@ -9730,15 +9730,13 @@ def swish(x, beta=1.0, name=None):
     return out
 
 
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.prelu")
+@deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
 def prelu(x, mode, param_attr=None, name=None):
     """
-    :api_attr: Static Graph
-
-    Equation:
+    prelu activation.
 
     .. math::
-        y = \max(0, x) + \\alpha * \min(0, x)
+        prelu(x) = max(0, x) + \\alpha * min(0, x)
 
     There are three modes for the activation:
 
@@ -9748,34 +9746,28 @@ def prelu(x, mode, param_attr=None, name=None):
         channel: Elements in same channel share same alpha.
         element: All elements do not share alpha. Each element has its own alpha.
 
-    Args:
-        x (Variable): The input Tensor or LoDTensor with data type float32.
+    Parameters:
+        x (Tensor): The input Tensor or LoDTensor with data type float32.
         mode (str): The mode for weight sharing.
-        param_attr(ParamAttr|None): The parameter attribute for the learnable
-            weight (alpha), it can be create by ParamAttr. None by default.
-            For detailed information, please refer to :ref:`api_fluid_ParamAttr`.
-        name(str|None): For detailed information, please refer
-            to :ref:`api_guide_Name`. Usually name is no need to set and
-            None by default.
+        param_attr (ParamAttr|None, optional): The parameter attribute for the learnable
+            weight (alpha), it can be create by ParamAttr. None by default.
+            For detailed information, please refer to :ref:`api_fluid_ParamAttr`.
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
 
     Returns:
-        Variable:
-
-        output(Variable): The tensor or LoDTensor with the same shape as input.
-        The data type is float32.
+        Tensor: A tensor with the same shape and data type as x.
 
     Examples:
 
         .. code-block:: python
 
-            import paddle.fluid as fluid
             import paddle
-            paddle.enable_static()
-            from paddle.fluid.param_attr import ParamAttr
-            x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32")
-            mode = 'channel'
-            output = fluid.layers.prelu(
-                     x,mode,param_attr=ParamAttr(name='alpha'))
+
+            x = paddle.to_tensor([-1., 2., 3.])
+            param = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.2))
+            out = paddle.static.nn.prelu(x, 'all', param)
+            # [-0.2, 2., 3.]
 
     """
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'prelu')
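
The updated deprecation target points at the static-graph wrapper. For the dygraph side of the same 2.0 API surface, a minimal sketch using paddle.nn.functional.prelu (assuming a Paddle 2.0 install; a single-element weight plays the role of mode='all'):

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([-1., 2., 3.])
    # one alpha shared by every element, analogous to mode='all' above
    weight = paddle.to_tensor([0.2])
    out = F.prelu(x, weight)
    print(out.numpy())  # [-0.2  2.   3. ], i.e. max(0, x) + 0.2 * min(0, x)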

python/paddle/nn/functional/activation.py

Lines changed: 20 additions & 25 deletions
@@ -79,9 +79,8 @@ def elu(x, alpha=1.0, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
+            x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
             out = F.elu(x, alpha=0.2)
             # [[-0.12642411  6.       ]
             #  [ 1.         15.6      ]]
@@ -131,11 +130,14 @@ def gelu(x, approximate=False, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([[-1, 0.5],[1, 1.5]]))
-            out1 = F.gelu(x) # [-0.158655 0.345731 0.841345 1.39979]
-            out2 = F.gelu(x, True) # [-0.158808 0.345714 0.841192 1.39957]
+            x = paddle.to_tensor([[-1, 0.5], [1, 1.5]])
+            out1 = F.gelu(x)
+            # [[-0.15865529,  0.34573123],
+            #  [ 0.84134471,  1.39978933]]
+            out2 = F.gelu(x, True)
+            # [[-0.15880799,  0.34571400],
+            #  [ 0.84119201,  1.39957154]]
     """
 
     if in_dygraph_mode():
@@ -181,11 +183,8 @@ def hardshrink(x, threshold=0.5, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
-
-            paddle.disable_static()
 
-            x = paddle.to_tensor(np.array([-1, 0.3, 2.5]))
+            x = paddle.to_tensor([-1, 0.3, 2.5])
             out = F.hardshrink(x) # [-1., 0., 2.5]
 
     """
@@ -385,11 +384,8 @@ def leaky_relu(x, negative_slope=0.01, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
-
-            paddle.disable_static()
 
-            x = paddle.to_tensor(np.array([-2, 0, 1], 'float32'))
+            x = paddle.to_tensor([-2., 0., 1.])
             out = F.leaky_relu(x) # [-0.02, 0., 1.]
 
     """
@@ -1147,8 +1143,10 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
 
     .. math::
 
-        log\\_softmax[i, j] = log(softmax(x))
-                            = log(\\frac{\exp(X[i, j])}{\\sum_j(exp(X[i, j])})
+        \\begin{aligned}
+        log\\_softmax[i, j] &= log(softmax(x)) \\\\
+                            &= log(\\frac{\\exp(X[i, j])}{\\sum_j(\\exp(X[i, j])})
+        \\end{aligned}
 
     Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
@@ -1174,16 +1172,13 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
 
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
-
-            paddle.disable_static()
 
-            x = np.array([[[-2.0, 3.0, -4.0, 5.0],
-                           [3.0, -4.0, 5.0, -6.0],
-                           [-7.0, -8.0, 8.0, 9.0]],
-                          [[1.0, -2.0, -3.0, 4.0],
-                           [-5.0, 6.0, 7.0, -8.0],
-                           [6.0, 7.0, 8.0, 9.0]]], 'float32')
+            x = [[[-2.0, 3.0, -4.0, 5.0],
+                  [3.0, -4.0, 5.0, -6.0],
+                  [-7.0, -8.0, 8.0, 9.0]],
+                 [[1.0, -2.0, -3.0, 4.0],
+                  [-5.0, 6.0, 7.0, -8.0],
+                  [6.0, 7.0, 8.0, 9.0]]]
             x = paddle.to_tensor(x)
             out1 = F.log_softmax(x)
             out2 = F.log_softmax(x, dtype='float64')
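
The aligned formula reads better, and it is equivalent to the numerically stable form log_softmax(x) = x - logsumexp(x) along the chosen axis. A stdlib sketch of that identity (an illustration, not Paddle's kernel):

    import math

    def log_softmax(row):
        # subtract max(row) first so the exponentials cannot overflow,
        # then apply log_softmax(x)[j] = x[j] - log(sum_k exp(x[k]))
        m = max(row)
        lse = m + math.log(sum(math.exp(v - m) for v in row))
        return [v - lse for v in row]

    out = log_softmax([-2.0, 3.0, -4.0, 5.0])
    print(out)                            # every entry is <= 0
    print(sum(math.exp(v) for v in out))  # ~1.0, as a softmax must sum to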

python/paddle/nn/layer/activation.py

Lines changed: 16 additions & 23 deletions
@@ -70,9 +70,8 @@ class ELU(layers.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
+            x = paddle.to_tensor([[-1. ,6.], [1., 15.6]])
             m = paddle.nn.ELU(0.2)
             out = m(x)
             # [[-0.12642411  6.       ]
@@ -166,11 +165,8 @@ class Hardshrink(layers.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            paddle.disable_static()
-
-            x = paddle.to_tensor(np.array([-1, 0.3, 2.5]))
+            x = paddle.to_tensor([-1, 0.3, 2.5])
             m = paddle.nn.Hardshrink()
             out = m(x) # [-1., 0., 2.5]
     """
@@ -293,11 +289,10 @@ class Hardtanh(layers.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
+            x = paddle.to_tensor([-1.5, 0.3, 2.5])
             m = paddle.nn.Hardtanh()
-            out = m(x) # # [-1., 0.3, 1.]
+            out = m(x) # [-1., 0.3, 1.]
     """
 
     def __init__(self, min=-1.0, max=1.0, name=None):
@@ -397,9 +392,8 @@ class ReLU(layers.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
 
-            x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
+            x = paddle.to_tensor([-2., 0., 1.])
             m = paddle.nn.ReLU()
             out = m(x) # [0., 0., 1.]
     """
@@ -613,7 +607,7 @@ class Hardsigmoid(layers.Layer):
 
             import paddle
 
-            m = paddle.nn.Sigmoid()
+            m = paddle.nn.Hardsigmoid()
             x = paddle.to_tensor([-4., 5., 1.])
             out = m(x) # [0., 1, 0.666667]
     """
@@ -1016,8 +1010,10 @@ class LogSoftmax(layers.Layer):
 
     .. math::
 
-        Out[i, j] = log(softmax(x))
-                  = log(\\frac{\exp(X[i, j])}{\\sum_j(exp(X[i, j])})
+        \\begin{aligned}
+        Out[i, j] &= log(softmax(x)) \\\\
+                  &= log(\\frac{\\exp(X[i, j])}{\\sum_j(\\exp(X[i, j])})
+        \\end{aligned}
 
     Parameters:
         axis (int, optional): The axis along which to perform log_softmax
@@ -1035,16 +1031,13 @@ class LogSoftmax(layers.Layer):
         .. code-block:: python
 
             import paddle
-            import numpy as np
-
-            paddle.disable_static()
 
-            x = np.array([[[-2.0, 3.0, -4.0, 5.0],
-                           [3.0, -4.0, 5.0, -6.0],
-                           [-7.0, -8.0, 8.0, 9.0]],
-                          [[1.0, -2.0, -3.0, 4.0],
-                           [-5.0, 6.0, 7.0, -8.0],
-                           [6.0, 7.0, 8.0, 9.0]]])
+            x = [[[-2.0, 3.0, -4.0, 5.0],
+                  [3.0, -4.0, 5.0, -6.0],
+                  [-7.0, -8.0, 8.0, 9.0]],
+                 [[1.0, -2.0, -3.0, 4.0],
+                  [-5.0, 6.0, 7.0, -8.0],
+                  [6.0, 7.0, 8.0, 9.0]]]
             m = paddle.nn.LogSoftmax()
             x = paddle.to_tensor(x)
             out = m(x)

python/paddle/tensor/creation.py

Lines changed: 19 additions & 35 deletions
@@ -300,9 +300,6 @@ def ones(shape, dtype=None, name=None):
 
 def ones_like(x, dtype=None, name=None):
     """
-    :alias_main: paddle.ones_like
-    :alias: paddle.tensor.ones_like, paddle.tensor.creation.ones_like
-
     This OP returns a Tensor filled with the value 1, with the same shape and
     data type (use ``dtype`` if ``dtype`` is not None) as ``x``.
 
@@ -323,18 +320,16 @@ def ones_like(x, dtype=None, name=None):
 
     Raise:
         TypeError: If ``dtype`` is not None and is not bool, float16, float32,
-        float64, int32 or int64.
+            float64, int32 or int64.
 
     Examples:
         .. code-block:: python
 
            import paddle
 
-           paddle.disable_static()
-
           x = paddle.to_tensor([1,2,3])
-           out1 = paddle.zeros_like(x) # [1., 1., 1.]
-           out2 = paddle.zeros_like(x, dtype='int32') # [1, 1, 1]
+           out1 = paddle.ones_like(x) # [1., 1., 1.]
+           out2 = paddle.ones_like(x, dtype='int32') # [1, 1, 1]
 
     """
     return full_like(x=x, fill_value=1, dtype=dtype, name=name)
@@ -380,9 +375,6 @@ def zeros(shape, dtype=None, name=None):
 
 def zeros_like(x, dtype=None, name=None):
     """
-    :alias_main: paddle.zeros_like
-    :alias: paddle.tensor.zeros_like, paddle.tensor.creation.zeros_like
-
     This OP returns a Tensor filled with the value 0, with the same shape and
     data type (use ``dtype`` if ``dtype`` is not None) as ``x``.
 
@@ -403,16 +395,14 @@ def zeros_like(x, dtype=None, name=None):
 
     Raise:
         TypeError: If ``dtype`` is not None and is not bool, float16, float32,
-        float64, int32 or int64.
+            float64, int32 or int64.
 
     Examples:
         .. code-block:: python
 
            import paddle
 
-           paddle.disable_static()
-
-           x = paddle.to_tensor([1,2,3])
+           x = paddle.to_tensor([1, 2, 3])
            out1 = paddle.zeros_like(x) # [0., 0., 0.]
           out2 = paddle.zeros_like(x, dtype='int32') # [0, 0, 0]
 
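
The trailing context line shows why these two docstrings stay so similar: ones_like is a thin wrapper over full_like with fill_value=1, and zeros_like appears to mirror it with fill_value=0. A short equivalence sketch (assuming a Paddle 2.0 install):

    import paddle

    x = paddle.to_tensor([1, 2, 3])
    a = paddle.ones_like(x, dtype='float32')
    b = paddle.full_like(x, fill_value=1, dtype='float32')
    # both routes reach the same fill logic, so the results are identical
    print(a.numpy(), b.numpy())  # [1. 1. 1.] [1. 1. 1.]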
@@ -519,9 +509,6 @@ def full(shape, fill_value, dtype=None, name=None):
 
 def arange(start=0, end=None, step=1, dtype=None, name=None):
     """
-    :alias_main: paddle.arange
-    :alias: paddle.tensor.arange, paddle.tensor.creation.arange
-
     This OP returns a 1-D Tensor with spaced values within a given interval.
 
     Values are generated into the half-open interval [``start``, ``end``) with
@@ -552,33 +539,30 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
 
     Returns:
         Tensor: A 1-D Tensor with values from the interval [``start``, ``end``)
-        taken with common difference ``step`` beginning from ``start``. Its
-        data type is set by ``dtype``.
+            taken with common difference ``step`` beginning from ``start``. Its
+            data type is set by ``dtype``.
 
     Raises:
         TypeError: If ``dtype`` is not int32, int64, float32, float64.
 
-    examples:
-
+    Examples:
         .. code-block:: python
 
-           import paddle
-
-           paddle.disable_static()
+            import paddle
 
-           out1 = paddle.arange(5)
-           # [0, 1, 2, 3, 4]
+            out1 = paddle.arange(5)
+            # [0, 1, 2, 3, 4]
 
-           out2 = paddle.arange(3, 9, 2.0)
-           # [3, 5, 7]
+            out2 = paddle.arange(3, 9, 2.0)
+            # [3, 5, 7]
 
-           # use 4.999 instead of 5.0 to avoid floating point rounding errors
-           out3 = paddle.arange(4.999, dtype='float32')
-           # [0., 1., 2., 3., 4.]
+            # use 4.999 instead of 5.0 to avoid floating point rounding errors
+            out3 = paddle.arange(4.999, dtype='float32')
+            # [0., 1., 2., 3., 4.]
 
-           start_var = paddle.to_tensor([3])
-           out4 = paddle.arange(start_var, 7)
-           # [3, 4, 5, 6]
+            start_var = paddle.to_tensor([3])
+            out4 = paddle.arange(start_var, 7)
+            # [3, 4, 5, 6]
 
     """
     if dtype is None:
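
The "use 4.999 instead of 5.0" comment is worth unpacking. Like numpy, an arange over floats conventionally computes its length as ceil((end - start) / step), and rounding in that division can manufacture a final element at or just past end. A stdlib illustration of the failure mode (the ceil-based length is an assumption about the implementation, but it is the common convention):

    import math

    start, end, step = 1.0, 1.3, 0.1
    print((end - start) / step)          # 3.0000000000000004, not 3.0
    n = math.ceil((end - start) / step)  # 4 elements instead of 3
    print([start + i * step for i in range(n)])
    # the last value lands at ~1.3000000000000003, even though [start, end)
    # is half-open -- hence the docstring's end=4.999 workaround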
