2 changes: 1 addition & 1 deletion test/legacy_test/test_adadelta_op.py
@@ -236,7 +236,7 @@ def test_adadelta_dygraph(self):
adam.clear_gradients()


-class TestAdadeltaOpMultiPrecison(unittest.TestCase):
+class TestAdadeltaOpMultiPrecision(unittest.TestCase):
def _test_adadelta_op_dygraph_place_amp(self, place, use_amp=False):
import paddle

4 changes: 2 additions & 2 deletions test/legacy_test/test_adagrad_op.py
@@ -130,7 +130,7 @@ def check_with_place(self, place):
param_array = np.full((height, row_numel), 5.0).astype("float32")
param.set(param_array, place)

-# create and initialize LeraningRate Variable
+# create and initialize LearningRate Variable
lr = scope.var('LearningRate').get_tensor()
lr_array = np.full((1), 2.0).astype("float32")
lr.set(lr_array, place)
@@ -211,7 +211,7 @@ def test_sparse_adagrad(self):
self.check_with_place(place)


-class TestAdagradOpMultiPrecison(unittest.TestCase):
+class TestAdagradOpMultiPrecision(unittest.TestCase):
def _test_adagrad_op_dygraph_place_amp(self, place, use_amp=False):
import paddle

2 changes: 1 addition & 1 deletion test/legacy_test/test_adam_op.py
@@ -758,7 +758,7 @@ def test_adam_op_with_state_dict(self):
state_dict = adam.state_dict()
adam.set_state_dict(state_dict)

-# leanrning_rate is Tensor
+# learning_rate is Tensor
with self.assertRaises(TypeError):
learning_rate = np.array([0.01]).astype("float32")
learning_rate = paddle.to_tensor(learning_rate)
2 changes: 1 addition & 1 deletion test/legacy_test/test_adamax_op.py
@@ -239,7 +239,7 @@ def test_adamax_op_invalid_input(self):
)


-class TestAdamaxOpMultiPrecison(unittest.TestCase):
+class TestAdamaxOpMultiPrecision(unittest.TestCase):
def _test_adamax_op_dygraph_place_amp(self, place, use_amp=False):
import paddle

4 changes: 2 additions & 2 deletions test/legacy_test/test_adamw_op.py
@@ -404,7 +404,7 @@ def test_adamw_op_dygraph_bypassing_step(self):
adam.clear_gradients()


-class TestAdamWOpMultiPrecisonWithMainGrad(unittest.TestCase):
+class TestAdamWOpMultiPrecisionWithMainGrad(unittest.TestCase):
def _test_adamw_op_dygraph_place_amp_with_maingrad(
self, place, shape, use_main_grad
):
@@ -543,7 +543,7 @@ def test_main(self):
)


-class TestAdamWOpMultiPrecison(unittest.TestCase):
+class TestAdamWOpMultiPrecision(unittest.TestCase):
def _test_adamw_op_dygraph_place_amp(self, place, use_amp=False):
paddle.disable_static()
paddle.seed(10)
10 changes: 5 additions & 5 deletions test/legacy_test/test_add_n_op.py
@@ -70,15 +70,15 @@ def test_add_n_api(self):
if not paddle.is_compiled_with_cuda():
return
dtypes = ['float32', 'complex64', 'complex128']
-for dtyte in dtypes:
-if dtyte == 'complex64' or dtyte == 'complex128':
+for dtype in dtypes:
+if dtype == 'complex64' or dtype == 'complex128':
self.x_np = (
np.random.random([self.l, 16, 256])
+ 1j * np.random.random([self.l, 16, 256])
-).astype(dtyte)
+).astype(dtype)

-y_np_32, x_g_np_32 = self.check_main(self.x_np, dtyte)
-y_np_gt = np.sum(self.x_np, axis=0).astype(dtyte)
+y_np_32, x_g_np_32 = self.check_main(self.x_np, dtype)
+y_np_gt = np.sum(self.x_np, axis=0).astype(dtype)
np.testing.assert_allclose(y_np_32, y_np_gt, rtol=1e-06)


2 changes: 1 addition & 1 deletion test/legacy_test/test_arange.py
@@ -59,7 +59,7 @@ def init_config(self):
self.case = (0, 5, 1)


-class TestFloa16ArangeOp(TestArangeOp):
+class TestFloat16ArangeOp(TestArangeOp):
def init_config(self):
self.dtype = np.float16
self.python_api = paddle.arange
6 changes: 3 additions & 3 deletions test/legacy_test/test_backward.py
@@ -378,12 +378,12 @@ def callback(block, context):


class TestGradientsWithOptimizer(unittest.TestCase):
-def _check_grad_op_name(self, forward_list, optimiezed_list):
+def _check_grad_op_name(self, forward_list, optimized_list):
backward_list = [op + "_grad" for op in reversed(forward_list)]
-idx = optimiezed_list.index(backward_list[0], len(backward_list))
+idx = optimized_list.index(backward_list[0], len(backward_list))

self.assertListEqual(
-backward_list, optimiezed_list[idx : idx + len(backward_list)]
+backward_list, optimized_list[idx : idx + len(backward_list)]
)

def test_gradient_with_optimizer(self):
8 changes: 4 additions & 4 deletions test/legacy_test/test_bicubic_interp_v2_op.py
@@ -674,7 +674,7 @@ def test_case(self):

class TestBicubicOpError(unittest.TestCase):
def test_imperative_errors(self):
-# the input of interpoalte must be Variable.
+# the input of interpolate must be Variable.
x1 = base.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace()
)
@@ -687,7 +687,7 @@ def test_mode_type():
)

out = interpolate(
-x, size=[12, 12], mode='UNKONWN', align_corners=False
+x, size=[12, 12], mode='UNKNOWN', align_corners=False
)

def test_input_shape():
@@ -696,7 +696,7 @@ def test_input_shape():
x, size=[12, 12], mode='BICUBIC', align_corners=False
)

-def test_align_corcers():
+def test_align_corners():
x = paddle.static.data(
name="x", shape=[2, 3, 6, 6], dtype="float32"
)
@@ -887,7 +887,7 @@ def test_input_shape_1():

self.assertRaises(ValueError, test_mode_type)
self.assertRaises(ValueError, test_input_shape)
-self.assertRaises(TypeError, test_align_corcers)
+self.assertRaises(TypeError, test_align_corners)
self.assertRaises(ValueError, test_attr_data_format)
self.assertRaises(TypeError, test_actual_shape)
self.assertRaises(ValueError, test_scale_value)
2 changes: 1 addition & 1 deletion test/legacy_test/test_checkpoint_saver.py
@@ -18,7 +18,7 @@
from paddle.distributed.fleet.utils.fs import HDFSClient


-class CheckpointerSaverTest(unittest.TestCase):
+class CheckpointSaverTest(unittest.TestCase):
def test(self):
fs = HDFSClient("/usr/local/hadoop-2.7.7", None)
dir_path = "./checkpointsaver_test"
6 changes: 3 additions & 3 deletions test/legacy_test/test_collective_api_base.py
@@ -611,19 +611,19 @@ def convertbf16(origin):
result1 = []
result2 = []

-def is_empyt_list(x):
+def is_empty_list(x):
if isinstance(x, list) and len(x) == 0:
return True
return False

for i in range(tot_expert):
for arr in output1[i]:
-if is_empyt_list(arr):
+if is_empty_list(arr):
continue
result1.append(arr)
for i in range(tot_expert):
for arr in output2[i]:
-if is_empyt_list(arr):
+if is_empty_list(arr):
continue
result2.append(arr)

2 changes: 1 addition & 1 deletion test/legacy_test/test_conv2d_api.py
@@ -201,7 +201,7 @@ def run_5():

self.assertRaises(ValueError, run_5)

-# ValueError: channel dimmention
+# ValueError: channel dimension
x = paddle.static.data(
name="x",
shape=[2, 5, 5, -1],
8 changes: 4 additions & 4 deletions test/legacy_test/test_conv3d_layer.py
@@ -27,7 +27,7 @@ def __init__(
self,
methodName='runTest',
batch_size=4,
-spartial_shape=(8, 8, 8),
+spatial_shape=(8, 8, 8),
num_channels=6,
num_filters=8,
filter_size=3,
@@ -43,7 +43,7 @@ def __init__(
self.batch_size = batch_size
self.num_channels = num_channels
self.num_filters = num_filters
-self.spartial_shape = spartial_shape
+self.spatial_shape = spatial_shape
self.filter_size = filter_size

self.padding = padding
@@ -58,13 +58,13 @@ def setUp(self):
self.channel_last = self.data_format == "NDHWC"
if self.channel_last:
input_shape = (
-(self.batch_size,) + self.spartial_shape + (self.num_channels,)
+(self.batch_size,) + self.spatial_shape + (self.num_channels,)
)
else:
input_shape = (
self.batch_size,
self.num_channels,
-) + self.spartial_shape
+) + self.spatial_shape
self.input = np.random.randn(*input_shape).astype(self.dtype)

if isinstance(self.filter_size, int):
2 changes: 1 addition & 1 deletion test/legacy_test/test_conv3d_op.py
@@ -1212,7 +1212,7 @@ def run_5():

self.assertRaises(ValueError, run_5)

-# ValueError: channel dimmention
+# ValueError: channel dimension
x = paddle.static.data(
name="x",
shape=[2, 5, 5, 5, -1],
8 changes: 4 additions & 4 deletions test/legacy_test/test_conv3d_transpose_layer.py
@@ -27,7 +27,7 @@ def __init__(
self,
methodName='runTest',
batch_size=2,
-spartial_shape=(8, 8, 8),
+spatial_shape=(8, 8, 8),
num_channels=6,
num_filters=8,
filter_size=3,
@@ -44,7 +44,7 @@ def __init__(
self.batch_size = batch_size
self.num_channels = num_channels
self.num_filters = num_filters
-self.spartial_shape = spartial_shape
+self.spatial_shape = spatial_shape
self.filter_size = filter_size
self.output_size = output_size

@@ -60,13 +60,13 @@ def setUp(self):
self.channel_last = self.data_format == "NDHWC"
if self.channel_last:
input_shape = (
-(self.batch_size,) + self.spartial_shape + (self.num_channels,)
+(self.batch_size,) + self.spatial_shape + (self.num_channels,)
)
else:
input_shape = (
self.batch_size,
self.num_channels,
-) + self.spartial_shape
+) + self.spatial_shape
self.input = np.random.randn(*input_shape).astype(self.dtype)

if isinstance(self.filter_size, int):
4 changes: 2 additions & 2 deletions test/legacy_test/test_fused_attention_op_api.py
@@ -53,9 +53,9 @@ def layer_norm(x, has_scale, has_bias, weight, bias, epsilon=1e-05):
batch_size, src_len, d_model = x.shape
x = x.reshape((batch_size * src_len, d_model))
mu = np.mean(x, axis=1, keepdims=True)
-sigma_squar = np.sum(np.square(x - mu), axis=1) / d_model
+sigma_square = np.sum(np.square(x - mu), axis=1) / d_model
x1_up = x - mu
-x1_down_1 = sigma_squar + epsilon
+x1_down_1 = sigma_square + epsilon
x1_down = np.sqrt(x1_down_1)
x1_down = x1_down.reshape((x1_down.shape[0], 1))
x1 = x1_up / x1_down
@@ -27,9 +27,9 @@ def layer_norm(x, has_scale, has_bias, weight, bias, epsilon=1e-05):
batch_size, src_len, d_model = x.shape
x = x.reshape((batch_size * src_len, d_model))
mu = np.mean(x, axis=1, keepdims=True)
-sigma_squar = np.sum(np.square(x - mu), axis=1) / d_model
+sigma_square = np.sum(np.square(x - mu), axis=1) / d_model
x1_up = x - mu
-x1_down_1 = sigma_squar + epsilon
+x1_down_1 = sigma_square + epsilon
x1_down = np.sqrt(x1_down_1)
x1_down = x1_down.reshape((x1_down.shape[0], 1))
x1 = x1_up / x1_down
2 changes: 1 addition & 1 deletion test/legacy_test/test_log_softmax.py
@@ -194,7 +194,7 @@ def check_api(self, axis=-1):
out = exe.run(feed={'x': self.x}, fetch_list=[y])
np.testing.assert_allclose(out[0], ref_out, rtol=1e-05)

-# test dygrapg api
+# test dygraph api
paddle.disable_static()
x = paddle.to_tensor(self.x)
y = logsoftmax(x)
2 changes: 1 addition & 1 deletion test/legacy_test/test_paddle_save_load_binary.py
@@ -94,7 +94,7 @@ def test_replace_save_load_vars(self):
t = np.array(
base.global_scope().find_var(var.name).get_tensor()
)
-# make sure all the paramerter or optimizer var have been update
+# make sure all the parameter or optimizer var have been update
self.assertTrue(np.sum(np.abs(t)) != 0)
base_map[var.name] = t
# test for replace_save_vars/io.load_vars
8 changes: 4 additions & 4 deletions test/legacy_test/test_scatter_nd_op.py
@@ -29,16 +29,16 @@ def numpy_scatter_nd(ref, index, updates, fun):
index_shape = index.shape

end_size = index_shape[-1]
-remain_numl = 1
+remain_numel = 1
for i in range(len(index_shape) - 1):
-remain_numl *= index_shape[i]
+remain_numel *= index_shape[i]

slice_size = 1
for i in range(end_size, len(ref_shape)):
slice_size *= ref_shape[i]

-flat_index = index.reshape([remain_numl] + list(index_shape[-1:]))
-flat_updates = updates.reshape((remain_numl, slice_size))
+flat_index = index.reshape([remain_numel] + list(index_shape[-1:]))
+flat_updates = updates.reshape((remain_numel, slice_size))
flat_output = ref.reshape(list(ref_shape[:end_size]) + [slice_size])

for i_up, i_out in enumerate(flat_index):