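Merged: this PR migrates the remaining legacy and XPU dygraph tests off the old imperative-mode helper `base.dygraph.to_variable` and onto the public `paddle.to_tensor` API. Every hunk follows the same mechanical pattern; the minimal sketch below illustrates it, borrowing the values from the test_dot_op.py hunk that follows.

import numpy as np
import paddle

# Old pattern (removed throughout this PR) -- tensor creation went through
# the dygraph-specific helper, usually inside a base.dygraph.guard() block:
#     x1 = base.dygraph.to_variable(np.array([1, 3]).astype(np.float32))
#
# New pattern -- paddle.to_tensor creates the equivalent eager-mode tensor
# and preserves the numpy dtype, so the surrounding test logic is untouched:
x1 = paddle.to_tensor(np.array([1, 3]).astype(np.float32))
y1 = paddle.to_tensor(np.array([2, 5]).astype(np.float32))
print(paddle.dot(x1, y1).numpy())  # 1*2 + 3*5 = 17.0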
12 changes: 4 additions & 8 deletions test/legacy_test/test_dot_op.py
@@ -157,18 +157,14 @@ def test_errors(self):
 class TestDygraph(unittest.TestCase):
     def test_dygraph(self):
         with base.dygraph.guard():
-            x1 = base.dygraph.to_variable(np.array([1, 3]).astype(np.float32))
-            y1 = base.dygraph.to_variable(np.array([2, 5]).astype(np.float32))
+            x1 = paddle.to_tensor(np.array([1, 3]).astype(np.float32))
+            y1 = paddle.to_tensor(np.array([2, 5]).astype(np.float32))
             np.testing.assert_allclose(
                 paddle.dot(x1, y1).numpy(), np.array([17]), rtol=1e-05
             )

-            x1 = base.dygraph.to_variable(
-                np.array([[1, 3], [3, 5]]).astype(np.float32)
-            )
-            y1 = base.dygraph.to_variable(
-                np.array([[2, 5], [6, 8]]).astype(np.float32)
-            )
+            x1 = paddle.to_tensor(np.array([[1, 3], [3, 5]]).astype(np.float32))
+            y1 = paddle.to_tensor(np.array([[2, 5], [6, 8]]).astype(np.float32))
             np.testing.assert_array_equal(
                 paddle.dot(x1, y1).numpy(), np.array([17, 58])
             )
16 changes: 8 additions & 8 deletions test/legacy_test/test_dropout_op.py
@@ -690,7 +690,7 @@ def test_dygraph(self):
                 in_np = np.random.random([40, 40]).astype("float32")
                 res_np = in_np
                 res_np2 = np.zeros_like(in_np)
-                input = base.dygraph.to_variable(in_np)
+                input = paddle.to_tensor(in_np)

                 res1 = paddle.nn.functional.dropout(
                     x=input, p=0.0, training=False
@@ -907,7 +907,7 @@ def test_dygraph(self):
             with base.dygraph.guard(place):
                 input_np = np.random.random([40, 40]).astype("float32")
                 result_np = input_np
-                input = base.dygraph.to_variable(input_np)
+                input = paddle.to_tensor(input_np)
                 m = paddle.nn.Dropout(p=0.0)
                 m.eval()
                 result = m(input)
@@ -961,7 +961,7 @@ def test_dygraph(self):
             with base.dygraph.guard(place):
                 in_np = np.random.random([2, 3, 4, 5]).astype("float32")
                 res_np = in_np
-                input = base.dygraph.to_variable(in_np)
+                input = paddle.to_tensor(in_np)

                 res1 = paddle.nn.functional.dropout2d(
                     x=input, p=0.0, training=False, data_format='NCHW'
@@ -1014,7 +1014,7 @@ def test_dygraph(self):
             with base.dygraph.guard(place):
                 input_np = np.random.random([2, 3, 4, 5]).astype("float32")
                 result_np = input_np
-                input = base.dygraph.to_variable(input_np)
+                input = paddle.to_tensor(input_np)
                 m = paddle.nn.Dropout2D(p=0.0)
                 m.eval()
                 result = m(input)
@@ -1093,7 +1093,7 @@ def test_dygraph(self):
             with base.dygraph.guard(place):
                 in_np = np.random.random([2, 3, 4, 5, 6]).astype("float32")
                 res_np = in_np
-                input = base.dygraph.to_variable(in_np)
+                input = paddle.to_tensor(in_np)

                 res1 = paddle.nn.functional.dropout3d(
                     x=input, p=0.0, training=False, data_format='NCDHW'
@@ -1146,7 +1146,7 @@ def test_dygraph(self):
             with base.dygraph.guard(place):
                 input_np = np.random.random([2, 3, 4, 5, 6]).astype("float32")
                 result_np = input_np
-                input = base.dygraph.to_variable(input_np)
+                input = paddle.to_tensor(input_np)
                 m = paddle.nn.Dropout3D(p=0.0)
                 m.eval()
                 result = m(input)
@@ -1202,7 +1202,7 @@ def test_dygraph(self):
                 in_np = np.random.random([40, 40]).astype("float32")
                 res_np = in_np
                 res_np3 = np.zeros_like(in_np)
-                input = base.dygraph.to_variable(in_np)
+                input = paddle.to_tensor(in_np)

                 res1 = paddle.nn.functional.alpha_dropout(x=input, p=0.0)
                 res2 = paddle.nn.functional.alpha_dropout(
@@ -1276,7 +1276,7 @@ def test_dygraph(self):
             with base.dygraph.guard(place):
                 input_np = np.random.random([40, 40]).astype("float32")
                 result_np = input_np
-                input = base.dygraph.to_variable(input_np)
+                input = paddle.to_tensor(input_np)
                 m = paddle.nn.AlphaDropout(p=0.0)
                 m.eval()
                 result = m(input)
4 changes: 2 additions & 2 deletions test/legacy_test/test_dygraph_mnist_fp16.py
@@ -126,8 +126,8 @@ def func_mnist_fp16(self):
         y = np.random.randint(10, size=[1, 1], dtype="int64")
         with base.dygraph.guard(base.CUDAPlace(0)):
             model = MNIST(dtype="float32")
-            x = base.dygraph.to_variable(x)
-            y = base.dygraph.to_variable(y)
+            x = paddle.to_tensor(x)
+            y = paddle.to_tensor(y)

             # using amp.auto_cast because paddle.nn.Conv2D doesn't suppport setting dtype
             with paddle.amp.auto_cast(dtype='float16'):
5 changes: 2 additions & 3 deletions test/legacy_test/test_dygraph_multi_forward.py
@@ -20,7 +20,6 @@
 import paddle
 from paddle import base
 from paddle.base import core
-from paddle.base.dygraph.base import to_variable
 from paddle.nn import Linear

 SEED = 123123111
@@ -132,8 +131,8 @@ def test_mnist_forward_float32(self):
                 .reshape(128, 1)
             )

-            img = to_variable(dy_x_data)
-            label = to_variable(y_data)
+            img = paddle.to_tensor(dy_x_data)
+            label = paddle.to_tensor(y_data)
             label.stop_gradient = True

             cost = mnist(img)
2 changes: 1 addition & 1 deletion test/legacy_test/test_dygraph_weight_norm.py
@@ -132,7 +132,7 @@ def test_check_output(self):
         wn = weight_norm(linear, dim=self.dim)
         outputs = []
         for name, data in self.data.items():
-            output = linear(base.dygraph.to_variable(data))
+            output = linear(paddle.to_tensor(data))
             outputs.append(output.numpy())
         after_weight = linear.weight
         self.actual_outputs = [linear.weight_g.numpy(), linear.weight_v.numpy()]
4 changes: 2 additions & 2 deletions test/legacy_test/test_egr_python_api.py
@@ -953,10 +953,10 @@ def func_layer_helper_base(self, value):
         base = paddle.base.layer_helper_base.LayerHelperBase(
             "test_layer", "test_layer"
         )
-        return base.to_variable(value).numpy()
+        return paddle.to_tensor(value).numpy()

     def func_base_to_variable(self, value):
-        paddle.base.dygraph.base.to_variable(value)
+        paddle.to_tensor(value)

     def test_backward_with_single_tensor(self):
         arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
2 changes: 1 addition & 1 deletion test/legacy_test/test_eig_op.py
@@ -329,7 +329,7 @@ def test_check_grad(self):
         grad_x = eig_backward(real_w, real_v, grad_w, grad_v)

         with base.dygraph.guard():
-            x = base.dygraph.to_variable(input_np)
+            x = paddle.to_tensor(input_np)
             x.stop_gradient = False
             w, v = paddle.linalg.eig(x)
             (w.sum() + v.sum()).backward()
4 changes: 2 additions & 2 deletions test/legacy_test/test_elementwise_add_op.py
@@ -650,8 +650,8 @@ def test_dygraph(self):
         with base.dygraph.guard():
             np_x = np.array([2, 3, 4]).astype('float64')
             np_y = np.array([1, 5, 2]).astype('float64')
-            x = base.dygraph.to_variable(np_x)
-            y = base.dygraph.to_variable(np_y)
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
             z = self._executed_api(x, y)
             np_z = z.numpy()
             z_expected = np.array([3.0, 8.0, 6.0])
2 changes: 1 addition & 1 deletion test/xpu/test_dropout_op_xpu.py
@@ -155,7 +155,7 @@ def test_dygraph(self):
             with base.dygraph.guard(place):
                 input_np = np.random.random([40, 40]).astype(self.in_type)
                 result_np = input_np
-                input = base.dygraph.to_variable(input_np)
+                input = paddle.to_tensor(input_np)
                 m = paddle.nn.Dropout(p=0.0)
                 m.eval()
                 result = m(input)
4 changes: 2 additions & 2 deletions test/xpu/test_elementwise_add_op_xpu.py
@@ -291,8 +291,8 @@ def test_dygraph(self):
         with base.dygraph.guard():
             np_x = np.array([2, 3, 4]).astype('float32')
             np_y = np.array([1, 5, 2]).astype('float32')
-            x = base.dygraph.to_variable(np_x)
-            y = base.dygraph.to_variable(np_y)
+            x = paddle.to_tensor(np_x)
+            y = paddle.to_tensor(np_y)
             z = paddle.add(x, y)
             np_z = z.numpy()
             z_expected = np.array([3.0, 8.0, 6.0])