Merged
4 changes: 2 additions & 2 deletions test/legacy_test/test_zero_dim_tensor.py
@@ -2589,7 +2589,7 @@ def test_unbind(self):
self.assertEqual(out2_2.numpy(), 2)
self.assertEqual(x2.grad.shape, [2])

-def test_maseked_select(self):
+def test_masked_select(self):
x = paddle.rand([])
x.stop_gradient = False
mask = paddle.full([], True, dtype='bool')
@@ -4860,7 +4860,7 @@ def test_unbind(self):
self.assertEqual(res[1].shape, (2,))

@prog_scope()
-def test_maseked_select(self):
+def test_masked_select(self):
x = paddle.rand([])
x.stop_gradient = False
mask = paddle.full([], True, dtype='bool')
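The hunks above show only the setup lines of the renamed test. For orientation, here is a minimal dygraph sketch (not the actual test body) of the 0-D masked_select pattern the test exercises; the expected shapes in the comments are assumptions, not copied from the suite.

```python
import paddle

# Hedged sketch of the pattern test_masked_select covers; the expected
# shapes in the comments are assumptions, not taken from the test file.
x = paddle.rand([])                         # 0-D input tensor
x.stop_gradient = False
mask = paddle.full([], True, dtype='bool')  # 0-D boolean mask
y = paddle.masked_select(x, mask)           # masked_select returns a 1-D tensor
y.sum().backward()
print(y.shape)       # expected: [1]
print(x.grad.shape)  # expected: []
```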
4 changes: 2 additions & 2 deletions test/xpu/test_reduce_amin_op_xpu.py
@@ -45,10 +45,10 @@ def set_case(self):
'X': np.random.randint(0, 100, self.shape).astype("float32")
}

-expect_intput = self.inputs['X']
+expect_input = self.inputs['X']
self.outputs = {
'Out': np.amin(
-expect_intput,
+expect_input,
axis=self.attrs['dim'],
keepdims=self.attrs['keep_dim'],
)
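For reference, a standalone sketch of the reference computation the renamed expect_input feeds into; the shape and reduction attributes below are illustrative stand-ins for self.shape and self.attrs, not values from the test.

```python
import numpy as np

# Illustrative stand-ins for self.shape, self.attrs['dim'], self.attrs['keep_dim'].
expect_input = np.random.randint(0, 100, (2, 3, 4)).astype("float32")
expected_out = np.amin(expect_input, axis=(0,), keepdims=False)
print(expected_out.shape)  # (3, 4)
```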
34 changes: 17 additions & 17 deletions test/xpu/test_set_value_op_xpu.py
@@ -308,7 +308,7 @@ def _get_answer(self):
self.data[0:, 1:2:2, :] = self.value

# 1.2.3 step < 0
-class XPUTestSetValueItemSliceNegetiveStep(XPUTestSetValueApi):
+class XPUTestSetValueItemSliceNegativeStep(XPUTestSetValueApi):
def set_dtype(self):
if self.in_type == np.float16:
self.dtype = "float32"
@@ -333,8 +333,8 @@ def _call_setitem_static_api(self, x):
def _get_answer(self):
self.data[5:2:-1] = self.value

-class XPUTestSetValueItemSliceNegetiveStep2(
-XPUTestSetValueItemSliceNegetiveStep
+class XPUTestSetValueItemSliceNegativeStep2(
+XPUTestSetValueItemSliceNegativeStep
):
def set_shape(self):
self.shape = [5]
@@ -353,8 +353,8 @@ def _call_setitem_static_api(self, x):
def _get_answer(self):
self.data[1::-1] = self.value

-class XPUTestSetValueItemSliceNegetiveStep3(
-XPUTestSetValueItemSliceNegetiveStep
+class XPUTestSetValueItemSliceNegativeStep3(
+XPUTestSetValueItemSliceNegativeStep
):
def set_shape(self):
self.shape = [3]
@@ -372,7 +372,7 @@ def _call_setitem_static_api(self, x):
def _get_answer(self):
self.data[::-1] = self.value

-class XPUTestSetValueItemSliceNegetiveStep4(XPUTestSetValueApi):
+class XPUTestSetValueItemSliceNegativeStep4(XPUTestSetValueApi):
def set_dtype(self):
if self.in_type == np.float16:
self.dtype = "float32"
@@ -400,7 +400,7 @@ def _get_answer(self):

# 1.2.3 step < 0 and stride < -1

-class XPUTestSetValueItemSliceNegetiveStep5(XPUTestSetValueApi):
+class XPUTestSetValueItemSliceNegativeStep5(XPUTestSetValueApi):
def set_dtype(self):
if self.in_type == np.float16:
self.dtype = "float32"
@@ -1230,14 +1230,14 @@ def set_value(t, value):
np.testing.assert_array_equal(
inps.grad.numpy(),
input_grad,
-err_msg='The gradient of value should be \n{},\n but reveived {}'.format(
+err_msg='The gradient of value should be \n{},\n but received {}'.format(
input_grad, inps.grad.numpy()
),
)
np.testing.assert_array_equal(
value.grad.numpy(),
value_grad,
-err_msg='The gradient of input should be \n{},\n but reveived {}'.format(
+err_msg='The gradient of input should be \n{},\n but received {}'.format(
value_grad, value.grad.numpy()
),
)
@@ -1266,14 +1266,14 @@ def set_value(t, value):
np.testing.assert_array_equal(
inps2.grad.numpy(),
input_grad2,
-err_msg='The gradient of value should be \n{},\n but reveived {}'.format(
+err_msg='The gradient of value should be \n{},\n but received {}'.format(
input_grad, inps2.grad.numpy()
),
)
np.testing.assert_array_equal(
value2.grad.numpy(),
value_grad2,
-err_msg='The gradient of input should be \n{},\n but reveived {}'.format(
+err_msg='The gradient of input should be \n{},\n but received {}'.format(
value_grad, value2.grad.numpy()
),
)
@@ -1324,14 +1324,14 @@ def set_value3(t, value):
np.testing.assert_array_equal(
inps.grad.numpy(),
input_grad,
-err_msg='The gradient of value should be \n{},\n but reveived {}'.format(
+err_msg='The gradient of value should be \n{},\n but received {}'.format(
input_grad, inps.grad.numpy()
),
)
np.testing.assert_array_equal(
value.grad.numpy(),
value_grad,
-err_msg='The gradient of input should be \n{},\n but reveived {}'.format(
+err_msg='The gradient of input should be \n{},\n but received {}'.format(
value_grad, value.grad.numpy()
),
)
@@ -1372,14 +1372,14 @@ def set_value4(t, value):
np.testing.assert_array_equal(
inps.grad.numpy(),
input_grad,
-err_msg='The gradient of value should be \n{},\n but reveived {}'.format(
+err_msg='The gradient of value should be \n{},\n but received {}'.format(
input_grad, inps.grad.numpy()
),
)
np.testing.assert_array_equal(
value.grad.numpy(),
value_grad,
-err_msg='The gradient of input should be \n{},\n but reveived {}'.format(
+err_msg='The gradient of input should be \n{},\n but received {}'.format(
value_grad, value.grad.numpy()
),
)
@@ -1426,14 +1426,14 @@ def set_value5(t, value):
np.testing.assert_array_equal(
inps.grad.numpy(),
input_grad,
-err_msg='The gradient of value should be \n{},\n but reveived {}'.format(
+err_msg='The gradient of value should be \n{},\n but received {}'.format(
input_grad, inps.grad.numpy()
),
)
np.testing.assert_array_equal(
value.grad.numpy(),
value_grad,
-err_msg='The gradient of input should be \n{},\n but reveived {}'.format(
+err_msg='The gradient of input should be \n{},\n but received {}'.format(
value_grad, value.grad.numpy()
),
)
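The renamed XPUTestSetValueItemSliceNegativeStep* cases cover set_value with negative-step slices. A plain-NumPy illustration of the slice-assignment semantics they check against (the arrays and values are arbitrary examples, not test data):

```python
import numpy as np

# data[5:2:-1] writes to indices 5, 4, 3 in that order.
data = np.arange(6)
data[5:2:-1] = np.array([10, 11, 12])
print(data)  # [ 0  1  2 12 11 10]

# data[::-1] writes over the whole array in reverse index order.
data2 = np.zeros(3, dtype=int)
data2[::-1] = np.array([1, 2, 3])
print(data2)  # [3 2 1]
```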
4 changes: 2 additions & 2 deletions test/xpu/test_sum_op_xpu.py
@@ -213,7 +213,7 @@ def setUp(self):
self.init_kernel_type()

def check_with_place(self, place, inplace):
-self.check_input_and_optput(place, inplace, True, True, True)
+self.check_input_and_output(place, inplace, True, True, True)

def init_kernel_type(self):
pass
@@ -224,7 +224,7 @@ def _get_array(self, rows, row_numel):
array[i] *= rows[i]
return array

-def check_input_and_optput(
+def check_input_and_output(
self,
place,
inplace,
2 changes: 1 addition & 1 deletion test/xpu/test_zero_dim_tensor_xpu.py
@@ -2127,7 +2127,7 @@ def test_unbind(self):
self.assertEqual(out2_2.numpy(), 2)
self.assertEqual(x2.grad.shape, [2])

-def test_maseked_select(self):
+def test_masked_select(self):
x = paddle.rand([])
x.stop_gradient = False
mask = paddle.full([], True, dtype='bool')