24 changes: 11 additions & 13 deletions test/dygraph_to_static/test_bmn.py
@@ -27,9 +27,7 @@
 from predictor_utils import PredictorTools
 
 import paddle
-from paddle import base
 from paddle.base import ParamAttr
-from paddle.base.dygraph import to_variable
 from paddle.base.framework import unique_name
 from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
 
@@ -220,7 +218,7 @@ def __init__(self, cfg):
             self.num_sample,
             self.num_sample_perbin,
         )
-        self.sample_mask = base.dygraph.base.to_variable(sample_mask)
+        self.sample_mask = paddle.to_tensor(sample_mask)
         self.sample_mask.stop_gradient = True
 
         self.p_conv3d1 = paddle.nn.Conv3D(
@@ -604,10 +602,10 @@ def val_bmn(model, args):
         gt_start = np.array([item[2] for item in data]).astype(DATATYPE)
         gt_end = np.array([item[3] for item in data]).astype(DATATYPE)
 
-        x_data = to_variable(video_feat)
-        gt_iou_map = to_variable(gt_iou_map)
-        gt_start = to_variable(gt_start)
-        gt_end = to_variable(gt_end)
+        x_data = paddle.to_tensor(video_feat)
+        gt_iou_map = paddle.to_tensor(gt_iou_map)
+        gt_start = paddle.to_tensor(gt_start)
+        gt_end = paddle.to_tensor(gt_end)
         gt_iou_map.stop_gradient = True
         gt_start.stop_gradient = True
         gt_end.stop_gradient = True
@@ -679,10 +677,10 @@ def train_bmn(self, args, to_static):
                 DATATYPE
             )
 
-            x_data = to_variable(video_feat)
-            gt_iou_map = to_variable(gt_iou_map)
-            gt_start = to_variable(gt_start)
-            gt_end = to_variable(gt_end)
+            x_data = paddle.to_tensor(video_feat)
+            gt_iou_map = paddle.to_tensor(gt_iou_map)
+            gt_start = paddle.to_tensor(gt_start)
+            gt_end = paddle.to_tensor(gt_end)
             gt_iou_map.stop_gradient = True
             gt_start.stop_gradient = True
             gt_end.stop_gradient = True
@@ -826,7 +824,7 @@ def predict_dygraph(self, data):
         bmn.set_dict(model_dict)
         bmn.eval()
 
-        x = to_variable(data)
+        x = paddle.to_tensor(data)
         pred_res = bmn(x)
         pred_res = [var.numpy() for var in pred_res]
 
@@ -857,7 +855,7 @@ def predict_dygraph_jit(self, data):
         bmn = paddle.jit.load(self.model_save_prefix)
         bmn.eval()
 
-        x = to_variable(data)
+        x = paddle.to_tensor(data)
         pred_res = bmn(x)
         pred_res = [var.numpy() for var in pred_res]
 
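Note: the edit pattern in this file, and across the PR, is mechanical: every to_variable(...) call becomes paddle.to_tensor(...). A minimal sketch of the equivalence (mine, not part of the diff), assuming paddle >= 2.0:

import numpy as np
import paddle

data = np.random.random([2, 3]).astype("float32")

# Old style, removed by this PR:
#   from paddle.base.dygraph import to_variable
#   x = to_variable(data)
x = paddle.to_tensor(data)  # public replacement; returns a dygraph Tensor
x.stop_gradient = True      # the attribute set throughout these tests is unchanged
print(x.shape, x.dtype)     # [2, 3] paddle.float32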
9 changes: 4 additions & 5 deletions test/dygraph_to_static/test_cast.py
@@ -22,26 +22,25 @@
 )
 
 import paddle
-from paddle.base.dygraph import to_variable
 
 SEED = 2020
 np.random.seed(SEED)
 
 
 def test_bool_cast(x):
-    x = to_variable(x)
+    x = paddle.to_tensor(x)
     x = bool(x)
     return x
 
 
 def test_int_cast(x):
-    x = to_variable(x)
+    x = paddle.to_tensor(x)
     x = int(x)
     return x
 
 
 def test_float_cast(x):
-    x = to_variable(x)
+    x = paddle.to_tensor(x)
     x = float(x)
     return x
 
@@ -52,7 +51,7 @@ def test_not_var_cast(x):
 
 
 def test_mix_cast(x):
-    x = to_variable(x)
+    x = paddle.to_tensor(x)
     x = int(x)
     x = float(x)
     x = bool(x)
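These cast helpers are exercised by dygraph-to-static tests; the change only swaps the tensor constructor. As a hedged aside (not from the diff): Python scalar casts work directly on single-element dygraph tensors, which is what the helpers rely on:

import numpy as np
import paddle

x = paddle.to_tensor(np.array([3.7], dtype="float32"))
print(bool(x), int(x), float(x))  # True 3 3.6999999... (float32 rounding)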
2 changes: 1 addition & 1 deletion test/legacy_test/test_batch_norm_op.py
@@ -974,7 +974,7 @@ def compute(x, is_test, trainable_statistics):
                     is_test=is_test,
                     trainable_statistics=trainable_statistics,
                 )
-                y = bn(base.dygraph.to_variable(x))
+                y = bn(paddle.to_tensor(x))
                 return y.numpy()
 
         x = np.random.randn(*shape).astype("float32")
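For context, a small usage sketch (my reduction, not shown in the diff): paddle.nn.BatchNorm takes the is_test and trainable_statistics flags seen above, and in dygraph mode consumes tensors built with paddle.to_tensor:

import numpy as np
import paddle

bn = paddle.nn.BatchNorm(3, is_test=True, trainable_statistics=False)
x = np.random.randn(2, 3, 4, 4).astype("float32")
y = bn(paddle.to_tensor(x))  # test path: normalize with running statistics
print(y.shape)  # [2, 3, 4, 4]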
2 changes: 1 addition & 1 deletion test/legacy_test/test_bicubic_interp_op.py
@@ -351,7 +351,7 @@ def test_case(self):
         np.testing.assert_allclose(res, expect_res, rtol=1e-05)
 
         with base.dygraph.guard():
-            x = base.dygraph.to_variable(x_data)
+            x = paddle.to_tensor(x_data)
             interp = interpolate(
                 x, size=[12, 12], mode='bicubic', align_corners=False
             )
2 changes: 1 addition & 1 deletion test/legacy_test/test_bicubic_interp_v2_op.py
@@ -661,7 +661,7 @@ def test_case(self):
         np.testing.assert_allclose(res, expect_res, rtol=1e-05)
 
         with base.dygraph.guard():
-            x = base.dygraph.to_variable(x_data)
+            x = paddle.to_tensor(x_data)
             interp = interpolate(
                 x, size=[12, 12], mode='bicubic', align_corners=False
             )
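Both bicubic tests above make the same one-line change. A hedged sketch of the call under test, assuming paddle >= 2.0: paddle.nn.functional.interpolate resizes an NCHW tensor with bicubic sampling:

import numpy as np
import paddle
from paddle.nn.functional import interpolate

x = paddle.to_tensor(np.random.random([2, 3, 6, 6]).astype("float32"))
out = interpolate(x, size=[12, 12], mode='bicubic', align_corners=False)
print(out.shape)  # [2, 3, 12, 12]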
2 changes: 1 addition & 1 deletion test/legacy_test/test_bincount_op.py
@@ -63,7 +63,7 @@ def test_static_graph(self):
     def test_dygraph(self):
         with base.dygraph.guard():
             inputs_np = np.array([0, 1, 1, 3, 2, 1, 7]).astype(np.int64)
-            inputs = base.dygraph.to_variable(inputs_np)
+            inputs = paddle.to_tensor(inputs_np)
             actual = paddle.bincount(inputs)
             expected = np.bincount(inputs)
             self.assertTrue(
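A runnable reduction of the test body above (sketch, not part of the diff): paddle.bincount agrees with np.bincount on non-negative integer input:

import numpy as np
import paddle

inputs_np = np.array([0, 1, 1, 3, 2, 1, 7]).astype(np.int64)
actual = paddle.bincount(paddle.to_tensor(inputs_np))
np.testing.assert_array_equal(actual.numpy(), np.bincount(inputs_np))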
4 changes: 2 additions & 2 deletions test/legacy_test/test_bmm_op.py
@@ -129,8 +129,8 @@ def test_out(self):
             ]
         )
         with base.dygraph.guard():
-            x = base.dygraph.to_variable(input1)
-            y = base.dygraph.to_variable(input2)
+            x = paddle.to_tensor(input1)
+            y = paddle.to_tensor(input2)
             out = paddle.bmm(x, y)
             out_np = out.numpy()
             expected_result = np.matmul(input1, input2)
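Likewise, a sketch of what test_out checks (mine, not from the diff): paddle.bmm batch-multiplies [B, M, K] x [B, K, N] tensors and matches np.matmul on the same data:

import numpy as np
import paddle

input1 = np.random.random([10, 3, 4]).astype("float64")
input2 = np.random.random([10, 4, 5]).astype("float64")
out = paddle.bmm(paddle.to_tensor(input1), paddle.to_tensor(input2))
np.testing.assert_allclose(out.numpy(), np.matmul(input1, input2), rtol=1e-12)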
2 changes: 1 addition & 1 deletion test/legacy_test/test_cholesky_op.py
@@ -209,7 +209,7 @@ def test_dygraph(self):
                 [[10, 11, 12], [13, 14, 15], [16, 17, 18]],
             ]
         ).astype("float64")
-        input = base.dygraph.to_variable(input_np)
+        input = paddle.to_tensor(input_np)
         try:
             result = paddle.cholesky(input)
         except RuntimeError as ex:
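The test above deliberately feeds a non-positive-definite matrix to hit the error path. For the happy path, a hedged sketch (assumed, not in the diff): paddle.cholesky returns a lower-triangular factor whose product with its transpose reproduces the input:

import numpy as np
import paddle

a = np.random.rand(3, 3)
spd = a @ a.T + 3 * np.eye(3)  # symmetric positive definite by construction
chol = paddle.cholesky(paddle.to_tensor(spd))
np.testing.assert_allclose(
    paddle.matmul(chol, chol, transpose_y=True).numpy(), spd, rtol=1e-6
)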
6 changes: 3 additions & 3 deletions test/legacy_test/test_chunk_op.py
@@ -112,7 +112,7 @@ def test_out1(self):
         with base.dygraph.guard():
             input_1 = np.random.random([4, 6, 6]).astype("int32")
             # input is a variable which shape is [4, 6, 6]
-            input = base.dygraph.to_variable(input_1)
+            input = paddle.to_tensor(input_1)
             x0, x1, x2 = paddle.chunk(input, chunks=3, axis=1)
             x0_out = x0.numpy()
             x1_out = x1.numpy()
@@ -126,7 +126,7 @@ def test_out2(self):
         with base.dygraph.guard():
             input_1 = np.random.random([4, 6, 6]).astype("bool")
             # input is a variable which shape is [4, 6, 6]
-            input = base.dygraph.to_variable(input_1)
+            input = paddle.to_tensor(input_1)
             x0, x1, x2 = paddle.chunk(input, chunks=3, axis=1)
             x0_out = x0.numpy()
             x1_out = x1.numpy()
@@ -140,7 +140,7 @@ def test_axis_tensor_input(self):
         with base.dygraph.guard():
             input_1 = np.random.random([4, 6, 6]).astype("int32")
             # input is a variable which shape is [4, 6, 6]
-            input = base.dygraph.to_variable(input_1)
+            input = paddle.to_tensor(input_1)
             num1 = paddle.full(shape=[1], fill_value=1, dtype='int32')
             x0, x1, x2 = paddle.chunk(input, chunks=3, axis=num1)
             x0_out = x0.numpy()
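All three chunk tests swap only the constructor. A compact sketch of the API they exercise (mine, not from the diff): paddle.chunk splits a tensor into equal pieces along an axis, and the axis may also be given as a one-element integer tensor:

import numpy as np
import paddle

x = paddle.to_tensor(np.random.random([4, 6, 6]).astype("float32"))
x0, x1, x2 = paddle.chunk(x, chunks=3, axis=1)
print(x0.shape)  # [4, 2, 6]

axis = paddle.full(shape=[1], fill_value=1, dtype='int32')
y0, y1, y2 = paddle.chunk(x, chunks=3, axis=axis)  # same split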
35 changes: 19 additions & 16 deletions test/legacy_test/test_coalesce_tensor_op.py
@@ -116,29 +116,32 @@ def init_output(self, input_list, set_constant, constant):
     def verify_output(self, place):
         with base.dygraph.base.guard(place=place):
             tensor_input = [
-                base.dygraph.base.to_variable(value=data[1])
-                for data in self.inputs["Input"]
+                paddle.to_tensor(data[1]) for data in self.inputs["Input"]
             ]
             eager_outputs, eager_fused_output = coalesce_tensor_eager_api(
                 tensor_input,
                 datatype=self.attrs["dtype"],
-                copy_data=self.attrs["copy_data"]
-                if "copy_data" in self.attrs
-                else False,
-                set_constant=self.attrs["set_constant"]
-                if "set_constant" in self.attrs
-                else False,
+                copy_data=(
+                    self.attrs["copy_data"]
+                    if "copy_data" in self.attrs
+                    else False
+                ),
+                set_constant=(
+                    self.attrs["set_constant"]
+                    if "set_constant" in self.attrs
+                    else False
+                ),
                 persist_output=False,
-                constant=self.attrs["constant"]
-                if "constant" in self.attrs
-                else 0.0,
+                constant=(
+                    self.attrs["constant"] if "constant" in self.attrs else 0.0
+                ),
                 use_align=True,
                 align_size=-1,
-                user_defined_size_of_dtype=self.attrs[
-                    "user_defined_size_of_dtype"
-                ]
-                if "user_defined_size_of_dtype" in self.attrs
-                else -1,
+                user_defined_size_of_dtype=(
+                    self.attrs["user_defined_size_of_dtype"]
+                    if "user_defined_size_of_dtype" in self.attrs
+                    else -1
+                ),
                 concated_shapes=[],
                 concated_ranks=[],
             )
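Besides the to_tensor swap, this hunk only re-wraps the conditional expressions in parentheses, the formatter-preferred layout. As an aside and an assumption of mine, each of those defaults could equivalently be written with dict.get:

attrs = {"copy_data": True}  # stand-in for self.attrs in the test above

copy_data = attrs["copy_data"] if "copy_data" in attrs else False
assert copy_data == attrs.get("copy_data", False)  # same lookup, shorter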