Commit 08b2d10

Remove redundant numpy input in Example code
1 parent c5d9913 commit 08b2d10

File tree

23 files changed: +488 -449 lines changed
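Every hunk below follows the same pattern: docstring examples that took a detour through NumPy (np.array / np.random plus paddle.to_tensor) now build tensors with Paddle's own creation ops. A minimal sketch of the substitution (variable names illustrative, not from any one file):

    import paddle

    # Old pattern (removed in this commit): build with numpy, then wrap.
    #   import numpy as np
    #   inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))

    # New pattern: create tensors with paddle directly.
    inp = paddle.rand([1, 10], dtype="float32")   # random input
    x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])  # literal data, no np.array needed
    print(inp.shape, x.dtype)                     # [1, 10] paddle.float32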

python/paddle/incubate/optimizer/lookahead.py

Lines changed: 3 additions & 4 deletions

@@ -158,8 +158,7 @@ def step(self):
         .. code-block:: python

             import paddle
-            import numpy as np
-            inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+            inp = paddle.rand([1,10], dtype="float32")
             linear = paddle.nn.Linear(10, 1)
             out = linear(inp)
             loss = paddle.mean(out)

@@ -272,8 +271,8 @@ def minimize(
         .. code-block:: python

             import paddle
-            import numpy as np
-            inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+
+            inp = paddle.rand([1, 10], dtype="float32")
             linear = paddle.nn.Linear(10, 1)
             out = linear(inp)
             loss = paddle.mean(out)
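The snippets above stop at computing the loss. For context, a hedged sketch of how the surrounding LookAhead example typically continues, assuming the paddle.incubate.LookAhead(inner_optimizer, alpha, k) signature and the usual dygraph training calls (alpha/k values are illustrative):

    import paddle

    inp = paddle.rand([1, 10], dtype="float32")
    linear = paddle.nn.Linear(10, 1)
    out = linear(inp)
    loss = paddle.mean(out)

    # Wrap a plain SGD optimizer in the LookAhead wrapper.
    sgd = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())
    lookahead = paddle.incubate.LookAhead(sgd, alpha=0.2, k=5)

    loss.backward()
    lookahead.step()        # fast weights update every call, slow weights every k steps
    lookahead.clear_grad()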

python/paddle/incubate/optimizer/modelaverage.py

Lines changed: 4 additions & 8 deletions

@@ -341,8 +341,7 @@ def minimize(
         .. code-block:: python

             import paddle
-            import numpy as np
-            inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+            inp = paddle.rand([1, 10], dtype="float32")
             linear = paddle.nn.Linear(10, 1)
             out = linear(inp)
             loss = paddle.mean(out)

@@ -377,8 +376,7 @@ def step(self):
         .. code-block:: python

             import paddle
-            import numpy as np
-            inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+            inp = paddle.rand([1, 10], dtype="float32")
             linear = paddle.nn.Linear(10, 1)
             out = linear(inp)
             loss = paddle.mean(out)

@@ -424,8 +422,7 @@ def apply(self, executor=None, need_restore=True):
         .. code-block:: python

             import paddle
-            import numpy as np
-            inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+            inp = paddle.rand([1, 10], dtype="float32")
             linear = paddle.nn.Linear(10, 1)
             out = linear(inp)
             loss = paddle.mean(out)

@@ -499,8 +496,7 @@ def restore(self, executor=None):
         .. code-block:: python

             import paddle
-            import numpy as np
-            inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+            inp = paddle.rand([1, 10], dtype="float32")
             linear = paddle.nn.Linear(10, 1)
             out = linear(inp)
             loss = paddle.mean(out)
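All four ModelAverage docstrings (minimize, step, apply, restore) receive the same one-line substitution. What those methods implement is a sliding-window average of parameter values that can be swapped in for evaluation and restored afterwards; here is a conceptual pure-Python sketch of that idea (not the Paddle API, all names illustrative):

    class ParamAverager:
        """Conceptual sketch of a sliding-window parameter average."""

        def __init__(self, max_window):
            self.max_window = max_window
            self.snapshots = []     # most recent parameter snapshots
            self.backup = None

        def step(self, params):
            # Record the current parameters after each optimizer step.
            self.snapshots.append(list(params))
            if len(self.snapshots) > self.max_window:
                self.snapshots.pop(0)

        def apply(self, params):
            # Swap averaged parameters in for evaluation, keeping a backup.
            self.backup = list(params)
            n = len(self.snapshots)
            for i in range(len(params)):
                params[i] = sum(s[i] for s in self.snapshots) / n

        def restore(self, params):
            # Put the live (non-averaged) parameters back for training.
            params[:] = self.backup

    w = [1.0, 2.0]
    avg = ParamAverager(max_window=2)
    avg.step(w)
    w[:] = [3.0, 4.0]
    avg.step(w)
    avg.apply(w)
    print(w)   # [2.0, 3.0]  (the window average)
    avg.restore(w)
    print(w)   # [3.0, 4.0]  (original values back)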

python/paddle/nn/functional/activation.py

Lines changed: 25 additions & 15 deletions

@@ -1286,10 +1286,12 @@ def softshrink(x, threshold=0.5, name=None):

             import paddle
             import paddle.nn.functional as F
-            import numpy as np

-            x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
-            out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
+            x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
+            out = F.softshrink(x)
+            print(out)
+            # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [-0.39999998, 0.        , 0.        , 0.30000001])
     """
     if threshold < 0:
         raise ValueError(

@@ -1337,10 +1339,12 @@ def softsign(x, name=None):

             import paddle
             import paddle.nn.functional as F
-            import numpy as np

-            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-            out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+            out = F.softsign(x)
+            print(out)
+            # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [-0.28571430, -0.16666666, 0.09090909, 0.23076925])
     """
     if in_dygraph_mode():
         return _C_ops.softsign(x)

@@ -1376,10 +1380,12 @@ def swish(x, name=None):

             import paddle
             import paddle.nn.functional as F
-            import numpy as np

-            x = paddle.to_tensor(np.array([-2., 0., 1.]))
-            out = F.swish(x) # [-0.238406, 0., 0.731059]
+            x = paddle.to_tensor([-2., 0., 1.])
+            out = F.swish(x)
+            print(out)
+            # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [-0.23840584, 0.        , 0.73105854])
     """
     if in_dygraph_mode():
         return _C_ops.swish(x, 1.0)

@@ -1456,10 +1462,12 @@ def tanhshrink(x, name=None):

             import paddle
             import paddle.nn.functional as F
-            import numpy as np

-            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-            out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+            out = F.tanhshrink(x)
+            print(out)
+            # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [-0.02005106, -0.00262468, 0.00033200, 0.00868741])
     """
     if in_dygraph_mode():
         return _C_ops.tanh_shrink(x)

@@ -1504,10 +1512,12 @@ def thresholded_relu(x, threshold=1.0, name=None):

             import paddle
             import paddle.nn.functional as F
-            import numpy as np

-            x = paddle.to_tensor(np.array([2., 0., 1.]))
-            out = F.thresholded_relu(x) # [2., 0., 0.]
+            x = paddle.to_tensor([2., 0., 1.])
+            out = F.thresholded_relu(x)
+            print(out)
+            # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [2., 0., 0.])
     """

     if in_dygraph_mode():
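The new expected-output comments can be sanity-checked against the standard closed-form definitions of these activations; a small pure-Python check (no paddle needed):

    import math

    def softshrink(x, t=0.5):
        return x - t if x > t else x + t if x < -t else 0.0

    def softsign(x):
        return x / (1 + abs(x))

    def swish(x):
        return x / (1 + math.exp(-x))   # x * sigmoid(x)

    def tanhshrink(x):
        return x - math.tanh(x)

    def thresholded_relu(x, t=1.0):
        return x if x > t else 0.0

    print([round(softshrink(v), 6) for v in [-0.9, -0.2, 0.1, 0.8]])   # [-0.4, 0.0, 0.0, 0.3]
    print([round(softsign(v), 6) for v in [-0.4, -0.2, 0.1, 0.3]])     # [-0.285714, -0.166667, 0.090909, 0.230769]
    print([round(swish(v), 6) for v in [-2.0, 0.0, 1.0]])              # [-0.238406, 0.0, 0.731059]
    print([round(tanhshrink(v), 6) for v in [-0.4, -0.2, 0.1, 0.3]])   # [-0.020051, -0.002625, 0.000332, 0.008687]
    print([round(thresholded_relu(v), 6) for v in [2.0, 0.0, 1.0]])    # [2.0, 0.0, 0.0]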

python/paddle/nn/functional/common.py

Lines changed: 7 additions & 9 deletions

@@ -1963,18 +1963,16 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
         .. code-block:: python

             import paddle
-            import numpy as np
-
-            x_data = np.array([[[0, 1, 0],
-                                [ 1, 0, 1]]]).astype("float32")
-            print(x_data.shape)
             paddle.disable_static()
-            x = paddle.to_tensor(x_data, stop_gradient=False)
+
+            x = paddle.to_tensor([[[0, 1, 0],
+                                   [ 1, 0, 1]]], dtype="float32", stop_gradient=False)
+
             output = paddle.nn.functional.label_smooth(x)
             print(output)
-
-            #[[[0.03333334 0.93333334 0.03333334]
-            # [0.93333334 0.03333334 0.93333334]]]
+            # Tensor(shape=[1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+            #        [[[0.03333334, 0.93333334, 0.03333334],
+            #          [0.93333334, 0.03333334, 0.93333334]]])
     """
     if epsilon > 1.0 or epsilon < 0.0:
         raise ValueError("The value of epsilon must be between 0 and 1.")
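The printed values follow directly from the uniform label-smoothing formula out = (1 - epsilon) * label + epsilon / K, with epsilon = 0.1 and K = 3 classes (the case with no prior_dist); a quick check:

    epsilon, K = 0.1, 3

    def smooth(y):
        return (1 - epsilon) * y + epsilon / K

    print(smooth(0.0))  # 0.0333... -> matches 0.03333334
    print(smooth(1.0))  # 0.9333... -> matches 0.93333334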

python/paddle/nn/functional/conv.py

Lines changed: 24 additions & 32 deletions

@@ -375,26 +375,22 @@ def conv1d(

             import paddle
             import paddle.nn.functional as F
-            import numpy as np
-            x = np.array([[[4, 8, 1, 9],
-                           [7, 2, 0, 9],
-                           [6, 9, 2, 6]]]).astype(np.float32)
-            w=np.array(
-            [[[9, 3, 4],
-              [0, 0, 7],
-              [2, 5, 6]],
-             [[0, 3, 4],
-              [2, 9, 7],
-              [5, 6, 8]]]).astype(np.float32)
-
-            x_var = paddle.to_tensor(x)
-            w_var = paddle.to_tensor(w)
-            y_var = F.conv1d(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np)

-            # [[[133. 238.]
-            #   [160. 211.]]]
+            x = paddle.to_tensor([[[4, 8, 1, 9],
+                                   [7, 2, 0, 9],
+                                   [6, 9, 2, 6]]], dtype="float32")
+            w = paddle.to_tensor([[[9, 3, 4],
+                                   [0, 0, 7],
+                                   [2, 5, 6]],
+                                  [[0, 3, 4],
+                                   [2, 9, 7],
+                                   [5, 6, 8]]], dtype="float32")
+
+            y = F.conv1d(x, w)
+            print(y)
+            # Tensor(shape=[1, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[[133., 238.],
+            #          [160., 211.]]])
     """
     cudnn_version = get_cudnn_version()
     if cudnn_version is not None:

@@ -912,24 +908,20 @@ def conv1d_transpose(
     Examples:
         .. code-block:: python

-
-
             import paddle
             import paddle.nn.functional as F
-            import numpy as np

             # shape: (1, 2, 4)
-            x=np.array([[[4, 0, 9, 7],
-                         [8, 0, 9, 2,]]]).astype(np.float32)
+            x = paddle.to_tensor([[[4, 0, 9, 7],
+                                   [8, 0, 9, 2,]]], dtype="float32")
             # shape: (2, 1, 2)
-            w=np.array([[[7, 0]],
-                        [[4, 2]]]).astype(np.float32)
-            x_var = paddle.to_tensor(x)
-            w_var = paddle.to_tensor(w)
-            y_var = F.conv1d_transpose(x_var, w_var)
-            print(y_var)
-
-            # [[[60. 16. 99. 75. 4.]]]
+            w = paddle.to_tensor([[[7, 0]],
+                                  [[4, 2]]], dtype="float32")
+
+            y = F.conv1d_transpose(x, w)
+            print(y)
+            # Tensor(shape=[1, 1, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[[60., 16., 99., 75., 4. ]]])
     """
     cudnn_version = get_cudnn_version()
     if cudnn_version is not None:
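The expected conv1d output can be reproduced by hand as a plain cross-correlation (stride 1, no padding); a pure-Python check of the first example's values:

    x = [[4, 8, 1, 9],
         [7, 2, 0, 9],
         [6, 9, 2, 6]]                          # (C_in=3, L=4)
    w = [[[9, 3, 4], [0, 0, 7], [2, 5, 6]],
         [[0, 3, 4], [2, 9, 7], [5, 6, 8]]]     # (C_out=2, C_in=3, K=3)

    K, L = 3, 4
    y = [[sum(x[c][i + k] * w[o][c][k] for c in range(3) for k in range(K))
          for i in range(L - K + 1)]
         for o in range(2)]
    print(y)  # [[133, 238], [160, 211]] -> matches the F.conv1d output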

python/paddle/nn/functional/loss.py

Lines changed: 41 additions & 39 deletions

@@ -251,16 +251,14 @@ def fluid_softmax_with_cross_entropy(
         .. code-block:: python

             import paddle
-            import numpy as np
-
-            data = np.random.rand(128).astype("float32")
-            label = np.random.rand(1).astype("int64")
-            data = paddle.to_tensor(data)
-            label = paddle.to_tensor(label)
-            linear = paddle.nn.Linear(128, 100)
-            x = linear(data)
-            out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
+
+            logits = paddle.to_tensor([0.4, 0.6, 0.9])
+            label = paddle.randint(high=2, shape=[1], dtype="int64")
+
+            out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
             print(out)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [1.15328646])
     """
     if _non_static_mode():
         if core.is_compiled_with_npu():
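The new expected value can be verified by hand: 1.15328646 is -log(softmax([0.4, 0.6, 0.9])[1]), i.e. it corresponds to label == 1. Note that the snippet above draws the label with an unseeded paddle.randint, so the printed comment only matches when that draw happens to be 1 (the softmax_with_cross_entropy hunk further down pins label to [1]). A quick pure-Python check:

    import math

    logits = [0.4, 0.6, 0.9]
    label = 1

    exps = [math.exp(v) for v in logits]
    p = exps[label] / sum(exps)       # softmax probability of the labeled class
    print(-math.log(p))               # 1.1532864... -> matches 1.15328646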
@@ -1772,7 +1770,6 @@ def ctc_loss(

             # declarative mode
             import paddle.nn.functional as F
-            import numpy as np
             import paddle

             # length of the longest logit sequence

@@ -1784,8 +1781,7 @@ def ctc_loss(
             # class num
             class_num = 3

-            np.random.seed(1)
-            log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
+            log_probs = paddle.to_tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
                                    [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],

                                   [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],

@@ -1798,30 +1794,30 @@ def ctc_loss(
                                   [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],

                                  [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
-                                  [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32")
-            labels = np.array([[1, 2, 2],
-                               [1, 2, 2]]).astype("int32")
-            input_lengths = np.array([5, 5]).astype("int64")
-            label_lengths = np.array([3, 3]).astype("int64")
-
-            log_probs = paddle.to_tensor(log_probs)
-            labels = paddle.to_tensor(labels)
-            input_lengths = paddle.to_tensor(input_lengths)
-            label_lengths = paddle.to_tensor(label_lengths)
+                                  [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]],
+                                 dtype="float32")
+            labels = paddle.to_tensor([[1, 2, 2],
+                                       [1, 2, 2]], dtype="int32")
+            input_lengths = paddle.to_tensor([5, 5], dtype="int64")
+            label_lengths = paddle.to_tensor([3, 3], dtype="int64")

             loss = F.ctc_loss(log_probs, labels,
                               input_lengths,
                               label_lengths,
                               blank=0,
                               reduction='none')
-            print(loss) #[3.9179852 2.9076521]
+            print(loss)
+            # Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [3.91798496, 2.90765190])

             loss = F.ctc_loss(log_probs, labels,
                               input_lengths,
                               label_lengths,
                               blank=0,
                               reduction='mean')
-            print(loss) #[1.1376063]
+            print(loss)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [1.13760614])

     """

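For orientation, F.ctc_loss expects log_probs shaped [max_logit_length, batch_size, class_num] and labels shaped [batch_size, max_label_length]; the example's literals line up with T=5, B=2, C=3. A hedged shape-only sketch (placeholder zeros stand in for the docstring's values, so the loss itself is not meaningful):

    import paddle
    import paddle.nn.functional as F

    T, B, C = 5, 2, 3                      # max logit length, batch, classes
    log_probs = paddle.zeros([T, B, C])    # placeholder values, shapes only
    labels = paddle.to_tensor([[1, 2, 2], [1, 2, 2]], dtype="int32")
    input_lengths = paddle.to_tensor([5, 5], dtype="int64")
    label_lengths = paddle.to_tensor([3, 3], dtype="int64")

    loss = F.ctc_loss(log_probs, labels, input_lengths, label_lengths,
                      blank=0, reduction='none')
    print(loss.shape)  # [2] -> one loss per batch element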
@@ -2257,16 +2253,14 @@ def softmax_with_cross_entropy(
         .. code-block:: python

             import paddle
-            import numpy as np
-
-            data = np.random.rand(128).astype("float32")
-            label = np.random.rand(1).astype("int64")
-            data = paddle.to_tensor(data)
-            label = paddle.to_tensor(label)
-            linear = paddle.nn.Linear(128, 100)
-            x = linear(data)
-            out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
+
+            logits = paddle.to_tensor([0.4, 0.6, 0.9], dtype="float32")
+            label = paddle.to_tensor([1], dtype="int64")
+
+            out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
             print(out)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [1.15328646])
     """
     return fluid_softmax_with_cross_entropy(
         logits,
@@ -4003,18 +3997,26 @@ def soft_margin_loss(input, label, reduction='mean', name=None):
         .. code-block:: python

             import paddle
-            import numpy as np

             input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
             label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
             output = paddle.nn.functional.soft_margin_loss(input, label)
+            print(output)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [0.64022040])
+
+            input = paddle.uniform(shape=(5, 5), dtype="float32", min=0.1, max=0.8)
+            label = paddle.randint(0, 2, shape=(5, 5), dtype="int64")
+            label[label==0]=-1

-            input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
-            label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
-            label_np[label_np==0]=-1
-            input = paddle.to_tensor(input_np)
-            label = paddle.to_tensor(label_np)
             output = paddle.nn.functional.soft_margin_loss(input, label, reduction='none')
+            print(output)
+            # Tensor(shape=[5, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[1.09917796, 0.52613139, 0.56263304, 0.82736146, 0.38776723],
+            #         [1.07179427, 1.11924267, 0.49877715, 1.10026348, 0.46184641],
+            #         [0.84367639, 0.74795729, 0.44629076, 0.55123353, 0.77659678],
+            #         [0.39465919, 0.76651484, 0.54485321, 0.76609844, 0.77166790],
+            #         [0.51283568, 0.84757161, 0.78913331, 1.05268764, 0.45318675]])
     """
     if reduction not in ['sum', 'mean', 'none']:
         raise ValueError(
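The first expected value follows from the soft-margin definition loss = mean(log(1 + exp(-label * input))); the 5x5 'none' example, by contrast, uses unseeded paddle.uniform/paddle.randint, so its printed tensor is a sample rather than a reproducible value. A quick check of the mean case:

    import math

    inputs = [0.5, 0.6, 0.7, 0.3, 0.5, 0.2]
    labels = [1.0, -1.0, 1.0, -1.0, 1.0, 1.0]

    losses = [math.log(1 + math.exp(-y * x)) for x, y in zip(inputs, labels)]
    print(sum(losses) / len(losses))   # 0.64022... -> matches 0.64022040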
