Skip to content

Commit b43239f

Browse files
kevinng77 authored and Ligoml committed
1 parent 7bef960 commit b43239f

File tree

3 files changed

+70
-39
lines changed

3 files changed

+70
-39
lines changed

python/paddle/distributed/fleet/fleet.py

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1120,7 +1120,6 @@ def amp_init(
11201120
Examples:
11211121
.. code-block:: python
11221122
1123-
import numpy as np
11241123
import paddle
11251124
import paddle.nn.functional as F
11261125
paddle.enable_static()

python/paddle/regularizer.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -43,7 +43,7 @@ class L1Decay(fluid.regularizer.L1Decay):
4343
# Example1: set Regularizer in optimizer
4444
import paddle
4545
from paddle.regularizer import L1Decay
46-
import numpy as np
46+
4747
linear = paddle.nn.Linear(10, 10)
4848
inp = paddle.rand(shape=[10, 10], dtype="float32")
4949
out = linear(inp)

python/paddle/tensor/manipulation.py

Lines changed: 69 additions & 37 deletions
Original file line number | Diff line number | Diff line change
@@ -2276,21 +2276,33 @@ def unique_consecutive(
22762276
22772277
x = paddle.to_tensor([1, 1, 2, 2, 3, 1, 1, 2])
22782278
output = paddle.unique_consecutive(x) #
2279-
np_output = output.numpy() # [1 2 3 1 2]
2279+
print(output)
2280+
# Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
2281+
# [1, 2, 3, 1, 2])
2282+
22802283
_, inverse, counts = paddle.unique_consecutive(x, return_inverse=True, return_counts=True)
2281-
np_inverse = inverse.numpy() # [0 0 1 1 2 3 3 4]
2282-
np_counts = inverse.numpy() # [2 2 1 2 1]
2284+
print(inverse)
2285+
# Tensor(shape=[8], dtype=int64, place=Place(gpu:0), stop_gradient=True,
2286+
# [0, 0, 1, 1, 2, 3, 3, 4])
2287+
print(counts)
2288+
# Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
2289+
# [2, 2, 1, 2, 1])
22832290
22842291
x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3], [2, 1, 3]])
22852292
output = paddle.unique_consecutive(x, axis=0) #
2286-
np_output = output.numpy() # [2 1 3 0 1 2 1 3 2 1 3]
2293+
print(output)
2294+
# Tensor(shape=[3, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
2295+
# [[2, 1, 3],
2296+
# [3, 0, 1],
2297+
# [2, 1, 3]])
22872298
22882299
x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3], [2, 1, 3]])
22892300
output = paddle.unique_consecutive(x, axis=0) #
2290-
np_output = output.numpy()
2291-
# [[2 1 3]
2292-
# [3 0 1]
2293-
# [2 1 3]]
2301+
print(output)
2302+
# Tensor(shape=[3, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
2303+
# [[2, 1, 3],
2304+
# [3, 0, 1],
2305+
# [2, 1, 3]])
22942306
"""
22952307

22962308
if axis is None:
@@ -2411,18 +2423,33 @@ def unique(
24112423
unique = paddle.unique(x)
24122424
np_unique = unique.numpy() # [1 2 3 5]
24132425
_, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
2414-
np_indices = indices.numpy() # [3 0 1 4]
2415-
np_inverse = inverse.numpy() # [1 2 2 0 3 2]
2416-
np_counts = counts.numpy() # [1 1 3 1]
2426+
print(indices)
2427+
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
2428+
# [3, 0, 1, 4])
2429+
print(inverse)
2430+
# Tensor(shape=[6], dtype=int64, place=Place(gpu:0), stop_gradient=True,
2431+
# [1, 2, 2, 0, 3, 2])
2432+
print(counts)
2433+
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
2434+
# [1, 1, 3, 1])
24172435
24182436
x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]])
24192437
unique = paddle.unique(x)
2420-
np_unique = unique.numpy() # [0 1 2 3]
2438+
print(unique)
2439+
# Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
2440+
# [0, 1, 2, 3])
24212441
24222442
unique = paddle.unique(x, axis=0)
2423-
np_unique = unique.numpy()
2424-
# [[2 1 3]
2425-
# [3 0 1]]
2448+
print(unique)
2449+
# Tensor(shape=[2, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
2450+
#        [[2, 1, 3],
2451+
#        [3, 0, 1]])
24262453
"""
24272454
if axis is None:
24282455
axis = []
@@ -3032,12 +3059,10 @@ def scatter_nd(index, updates, shape, name=None):
30323059
.. code-block:: python
30333060
30343061
import paddle
3035-
import numpy as np
30363062
3037-
index_data = np.array([[1, 1],
3038-
[0, 1],
3039-
[1, 3]]).astype(np.int64)
3040-
index = paddle.to_tensor(index_data)
3063+
index = paddle.to_tensor([[1, 1],
3064+
[0, 1],
3065+
[1, 3]], dtype="int64")
30413066
updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
30423067
shape = [3, 5, 9, 10]
30433068
@@ -3109,19 +3134,22 @@ def tile(x, repeat_times, name=None):
31093134
31103135
data = paddle.to_tensor([1, 2, 3], dtype='int32')
31113136
out = paddle.tile(data, repeat_times=[2, 1])
3112-
np_out = out.numpy()
3113-
# [[1, 2, 3]
3114-
# [1, 2, 3]]
3137+
print(out)
3138+
# Tensor(shape=[2, 3], dtype=int32, place=Place(gpu:0), stop_gradient=True,
3139+
# [[1, 2, 3],
3140+
# [1, 2, 3]])
31153141
31163142
out = paddle.tile(data, repeat_times=(2, 2))
3117-
np_out = out.numpy()
3118-
# [[1, 2, 3, 1, 2, 3]
3119-
# [1, 2, 3, 1, 2, 3]]
3143+
print(out)
3144+
# Tensor(shape=[2, 6], dtype=int32, place=Place(gpu:0), stop_gradient=True,
3145+
# [[1, 2, 3, 1, 2, 3],
3146+
# [1, 2, 3, 1, 2, 3]])
31203147
31213148
repeat_times = paddle.to_tensor([1, 2], dtype='int32')
31223149
out = paddle.tile(data, repeat_times=repeat_times)
3123-
np_out = out.numpy()
3124-
# [[1, 2, 3, 1, 2, 3]]
3150+
print(out)
3151+
# Tensor(shape=[1, 6], dtype=int32, place=Place(gpu:0), stop_gradient=True,
3152+
# [[1, 2, 3, 1, 2, 3]])
31253153
"""
31263154
if in_dygraph_mode():
31273155
if isinstance(repeat_times, core.eager.Tensor):
@@ -3221,8 +3249,10 @@ def expand_as(x, y, name=None):
32213249
data_x = paddle.to_tensor([1, 2, 3], 'int32')
32223250
data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')
32233251
out = paddle.expand_as(data_x, data_y)
3224-
np_out = out.numpy()
3225-
# [[1, 2, 3], [1, 2, 3]]
3252+
print(out)
3253+
# Tensor(shape=[2, 3], dtype=int32, place=Place(gpu:0), stop_gradient=True,
3254+
# [[1, 2, 3],
3255+
# [1, 2, 3]])
32263256
"""
32273257
if in_dygraph_mode():
32283258
return _C_ops.expand_as(x, None, y.shape)
@@ -4232,10 +4262,11 @@ def as_complex(x, name=None):
42324262
import paddle
42334263
x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
42344264
y = paddle.as_complex(x)
4235-
print(y.numpy())
4265+
print(y)
42364266
4237-
# [[ 0. +1.j 2. +3.j 4. +5.j]
4238-
# [ 6. +7.j 8. +9.j 10.+11.j]]
4267+
# Tensor(shape=[2, 3], dtype=complex64, place=Place(gpu:0), stop_gradient=True,
4268+
# [[1j , (2+3j) , (4+5j) ],
4269+
# [(6+7j) , (8+9j) , (10+11j)]])
42394270
"""
42404271
if in_dygraph_mode():
42414272
return _C_ops.as_complex(x)
@@ -4279,15 +4310,16 @@ def as_real(x, name=None):
42794310
x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
42804311
y = paddle.as_complex(x)
42814312
z = paddle.as_real(y)
4282-
print(z.numpy())
4313+
print(z)
42834314
4284-
# [[[ 0. 1.]
4285-
# [ 2. 3.]
4286-
# [ 4. 5.]]
4315+
# Tensor(shape=[2, 3, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
4316+
# [[[0. , 1. ],
4317+
# [2. , 3. ],
4318+
# [4. , 5. ]],
42874319
4288-
# [[ 6. 7.]
4289-
# [ 8. 9.]
4290-
# [10. 11.]]]
4320+
# [[6. , 7. ],
4321+
# [8. , 9. ],
4322+
# [10., 11.]]])
42914323
"""
42924324
if in_dygraph_mode():
42934325
return _C_ops.as_real(x)

0 commit comments

Comments
 (0)