31 changes: 15 additions & 16 deletions test/dygraph_to_static/test_for_enumerate.py
@@ -24,14 +24,13 @@
)

import paddle
from paddle import base
from paddle.static import InputSpec


# 0. for in range var.numpy()[0]
def for_in_range(x):
z = paddle.tensor.fill_constant([1], 'int32', 0)
x = base.dygraph.to_variable(x)
x = paddle.to_tensor(x)
for i in range(x.numpy().item()):
z = z + i
return z
@@ -56,7 +55,7 @@ def for_enumerate_list(x_array):
# 3. for iter var.numpy()
def for_iter_var_numpy(x_array):
z = paddle.tensor.fill_constant([1], 'int32', 0)
x_array = base.dygraph.to_variable(x_array)
x_array = paddle.to_tensor(x_array)
for x in x_array.numpy():
z = z + x
return z
@@ -66,7 +65,7 @@ def for_iter_var_numpy(x_array):
def for_enumerate_var_numpy(x_array):
y = paddle.tensor.fill_constant([1], 'int32', 0)
z = paddle.tensor.fill_constant([1], 'int32', 0)
x_array = base.dygraph.to_variable(x_array)
x_array = paddle.to_tensor(x_array)
for i, x in enumerate(x_array.numpy()):
y = y + i
z = z + x
@@ -77,7 +76,7 @@ def for_enumerate_var_numpy(x_array):
def for_enumerate_var_numpy_with_start(x_array):
y = paddle.tensor.fill_constant([1], 'int32', 0)
z = paddle.tensor.fill_constant([1], 'int32', 0)
x_array = base.dygraph.to_variable(x_array)
x_array = paddle.to_tensor(x_array)
for i, x in enumerate(x_array.numpy(), 1):
y = y + i
z = z + x
@@ -87,7 +86,7 @@ def for_enumerate_var_numpy_with_start(x_array):
# 6. for in range with break
def for_in_range_with_break(x):
z = paddle.tensor.fill_constant([1], 'int32', 0)
x = base.dygraph.to_variable(x)
x = paddle.to_tensor(x)
for i in range(x.numpy()[0]):
z = z + i
if i > 2:
@@ -99,7 +98,7 @@ def for_in_range_with_break(x):
def for_enumerate_var_numpy_with_break(x_array):
y = paddle.tensor.fill_constant([1], 'int32', 0)
z = paddle.tensor.fill_constant([1], 'int32', 0)
x_array = base.dygraph.to_variable(x_array)
x_array = paddle.to_tensor(x_array)
for i, x in enumerate(x_array.numpy()):
y = y + i
z = z + x
@@ -112,7 +111,7 @@ def for_enumerate_var_numpy_with_break(x_array):
def for_enumerate_var_numpy_with_continue(x_array):
y = paddle.tensor.fill_constant([1], 'int32', 0)
z = paddle.tensor.fill_constant([1], 'int32', 0)
x_array = base.dygraph.to_variable(x_array)
x_array = paddle.to_tensor(x_array)
for i, x in enumerate(x_array.numpy()):
y = y + i
if i > 2:
@@ -125,7 +124,7 @@ def for_enumerate_var_numpy_with_continue(x_array):
def for_enumerate_var_numpy_with_start_break(x_array):
y = paddle.tensor.fill_constant([1], 'int32', 0)
z = paddle.tensor.fill_constant([1], 'int32', 0)
x_array = base.dygraph.to_variable(x_array)
x_array = paddle.to_tensor(x_array)
for i, x in enumerate(x_array.numpy(), 1):
y = y + i
z = z + x
@@ -138,7 +137,7 @@ def for_enumerate_var_numpy_with_start_break(x_array):
def for_enumerate_var_numpy_with_start_continue(x_array):
y = paddle.tensor.fill_constant([1], 'int32', 0)
z = paddle.tensor.fill_constant([1], 'int32', 0)
x_array = base.dygraph.to_variable(x_array)
x_array = paddle.to_tensor(x_array)
for i, x in enumerate(x_array.numpy(), 1):
y = y + i
if i > 2:
@@ -150,7 +149,7 @@ def for_enumerate_var_numpy_with_start_continue(x_array):
# 11. for iter var
def for_iter_var(x_array):
z = paddle.tensor.fill_constant([1], 'int32', 0)
x_array = base.dygraph.to_variable(x_array)
x_array = paddle.to_tensor(x_array)

for x in x_array:
z = z + x
@@ -161,7 +160,7 @@ def for_iter_var(x_array):
def for_enumerate_var(x_array):
y = paddle.tensor.fill_constant([1], 'int32', 0)
z = paddle.tensor.fill_constant([1], 'int32', 0)
x_array = base.dygraph.to_variable(x_array)
x_array = paddle.to_tensor(x_array)
for i, x in enumerate(x_array):
y = y + i
z = z + x
@@ -171,7 +170,7 @@ def for_enumerate_var(x_array):
# 13. for iter list[var]
def for_iter_var_list(x):
# 1. prepare data, ref test_list.py
x = base.dygraph.to_variable(x)
x = paddle.to_tensor(x)
iter_num = paddle.tensor.fill_constant(shape=[1], value=5, dtype="int32")
a = []
for i in range(iter_num):
@@ -186,7 +185,7 @@ def for_iter_var_list(x):
# 14. for enumerate list[var]
def for_enumerate_var_list(x):
# 1. prepare data, ref test_list.py
x = base.dygraph.to_variable(x)
x = paddle.to_tensor(x)
iter_num = paddle.tensor.fill_constant(shape=[1], value=5, dtype="int32")
a = []
for i in range(iter_num):
@@ -203,7 +202,7 @@ def for_enumerate_var_list(x):
# 15. for enumerate list[var] with a nested for range
def for_enumerate_var_with_nested_range(x_array):
x = paddle.tensor.fill_constant([1], 'int32', 0)
x_array = base.dygraph.to_variable(x_array)
x_array = paddle.to_tensor(x_array)
for i, num in enumerate(x_array):
for idx in range(num):
x = x + num
@@ -213,7 +212,7 @@ def for_enumerate_var_with_nested_range(x_array):
# 16. for iter var[idx]
def for_iter_var_idx(x_array):
z = paddle.tensor.fill_constant([1], 'int32', 0)
x_array = base.dygraph.to_variable(x_array)
x_array = paddle.to_tensor(x_array)

for x in x_array[0:]:
z = z + x
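A minimal sketch of the conversion pattern applied throughout this file (not part of the diff; assumes Paddle 2.x eager mode): paddle.to_tensor takes over the role of base.dygraph.to_variable for numpy inputs.

import numpy as np
import paddle

# Convert a numpy array to a Tensor directly; previously done via
# base.dygraph.to_variable(x_array).
x_array = np.array([1, 2, 3]).astype('int32')
x = paddle.to_tensor(x_array)

z = paddle.tensor.fill_constant([1], 'int32', 0)
for v in x.numpy():
    z = z + v
print(z.numpy())  # [6]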
4 changes: 2 additions & 2 deletions test/legacy_test/test_elementwise_pow_op.py
@@ -230,8 +230,8 @@ def test_grad(self):
places.append(base.CUDAPlace(0))
for place in places:
with base.dygraph.guard(place):
x = base.dygraph.to_variable(self.x, zero_copy=False)
y = base.dygraph.to_variable(self.y, zero_copy=False)
x = paddle.to_tensor(self.x)
y = paddle.to_tensor(self.y)
x.stop_gradient = False
y.stop_gradient = False
res = x**y
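Note on the change above: paddle.to_tensor does not expose a zero_copy argument, so the flag from to_variable is simply dropped. A minimal sketch of the same gradient path (not part of the diff; assumes Paddle 2.x eager mode):

import numpy as np
import paddle

x = paddle.to_tensor(np.array([2.0, 3.0]), stop_gradient=False)
y = paddle.to_tensor(np.array([1.0, 2.0]), stop_gradient=False)
loss = paddle.sum(x**y)
loss.backward()
# d(x**y)/dx = y * x**(y-1) -> [1., 6.]
print(x.grad.numpy())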
4 changes: 2 additions & 2 deletions test/legacy_test/test_elementwise_sub_op.py
@@ -930,8 +930,8 @@ def test_dygraph(self):
with base.dygraph.guard():
np_x = np.array([2, 3, 4]).astype('float64')
np_y = np.array([1, 5, 2]).astype('float64')
x = base.dygraph.to_variable(np_x)
y = base.dygraph.to_variable(np_y)
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
z = self._executed_api(x, y)
np_z = z.numpy(False)
z_expected = np.array([1.0, -2.0, 2.0])
2 changes: 1 addition & 1 deletion test/legacy_test/test_erf_op.py
@@ -68,7 +68,7 @@ def _test_case(self, place):
x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float64)
y_ref = erf(x)
with dg.guard(place) as g:
x_var = dg.to_variable(x)
x_var = paddle.to_tensor(x)
y_var = paddle.erf(x_var)
y_test = y_var.numpy()
np.testing.assert_allclose(y_ref, y_test, rtol=1e-05)
2 changes: 1 addition & 1 deletion test/legacy_test/test_exception.py
@@ -84,7 +84,7 @@ def test_exception_in_dynamic_mode(self):
with base.dygraph.guard(place):
x = numpy.random.random(size=(10, 2)).astype('float32')
linear = paddle.nn.Linear(1, 10)
data = base.dygraph.to_variable(x)
data = paddle.to_tensor(x)
with self.assertRaises(ValueError):
res = linear(data)

6 changes: 3 additions & 3 deletions test/legacy_test/test_fill_constant_op.py
@@ -373,9 +373,9 @@ def test_api(self):
data1 = np.array([1, 2]).astype('int32')
data2 = np.array([1.1]).astype('float32')
data3 = np.array([88]).astype('int32')
shape = base.dygraph.to_variable(data1)
val = base.dygraph.to_variable(data2)
value = base.dygraph.to_variable(data3)
shape = paddle.to_tensor(data1)
val = paddle.to_tensor(data2)
value = paddle.to_tensor(data3)
res1 = paddle.tensor.fill_constant(
shape=[1, 2], dtype='float32', value=1.1
)
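For reference, a short sketch (not part of the diff; assumes eager mode) of what the converted test exercises: paddle.tensor.fill_constant accepting Tensors for shape and value.

import numpy as np
import paddle

shape = paddle.to_tensor(np.array([1, 2]).astype('int32'))  # Tensor-valued shape
value = paddle.to_tensor(np.array([88]).astype('int32'))    # 1-D Tensor value
out = paddle.tensor.fill_constant(shape=shape, dtype='int32', value=value)
print(out.numpy())  # [[88 88]]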
2 changes: 1 addition & 1 deletion test/legacy_test/test_fleet_base.py
@@ -161,7 +161,7 @@ def setUp(self):
def test_dygraph_method(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = base.dygraph.to_variable(value)
a = paddle.to_tensor(value)
layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(
learning_rate=0.01, parameters=layer.parameters()
2 changes: 1 addition & 1 deletion test/legacy_test/test_flip.py
@@ -58,7 +58,7 @@ def test_static_graph(self):
def test_dygraph(self):
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
with base.dygraph.guard():
inputs = base.dygraph.to_variable(img)
inputs = paddle.to_tensor(img)
ret = paddle.flip(inputs, [0])
ret = ret.flip(0)
ret = paddle.flip(ret, 1)
6 changes: 3 additions & 3 deletions test/legacy_test/test_functional_conv1d.py
@@ -35,12 +35,12 @@ def setUp(self):

def dygraph_case(self):
with dg.guard():
x = dg.to_variable(self.input, dtype=paddle.float32)
w = dg.to_variable(self.filter, dtype=paddle.float32)
x = paddle.to_tensor(self.input, dtype=paddle.float32)
w = paddle.to_tensor(self.filter, dtype=paddle.float32)
b = (
None
if self.bias is None
else dg.to_variable(self.bias, dtype=paddle.float32)
else paddle.to_tensor(self.bias, dtype=paddle.float32)
)
y = F.conv1d(
x,
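A small sketch of the converted dygraph path above (not part of the diff; shapes are illustrative), showing paddle.to_tensor with an explicit dtype feeding F.conv1d:

import numpy as np
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor(np.random.rand(2, 3, 8), dtype=paddle.float32)  # NCL input
w = paddle.to_tensor(np.random.rand(4, 3, 3), dtype=paddle.float32)  # [out, in, kernel]
b = paddle.to_tensor(np.random.rand(4), dtype=paddle.float32)
y = F.conv1d(x, w, bias=b, padding=1)
print(y.shape)  # [2, 4, 8]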
4 changes: 2 additions & 2 deletions test/xpu/test_elementwise_add_op_xpu_kp.py
@@ -338,8 +338,8 @@ def test_dygraph(self):
with base.dygraph.guard():
np_x = np.array([2, 3, 4]).astype('float32')
np_y = np.array([1, 5, 2]).astype('float32')
x = base.dygraph.to_variable(np_x)
y = base.dygraph.to_variable(np_y)
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
z = paddle.add(x, y)
np_z = z.numpy()
z_expected = np.array([3.0, 8.0, 6.0])