Skip to content

Commit 59b453f

Browse files
authored
Fix typo: rename `maseked_select` to `masked_select` (#61419)
* Fix * ci
1 parent 596ca89 commit 59b453f

5 files changed

Lines changed: 24 additions & 24 deletions

File tree

test/legacy_test/test_zero_dim_tensor.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2589,7 +2589,7 @@ def test_unbind(self):
25892589
self.assertEqual(out2_2.numpy(), 2)
25902590
self.assertEqual(x2.grad.shape, [2])
25912591

2592-
def test_maseked_select(self):
2592+
def test_masked_select(self):
25932593
x = paddle.rand([])
25942594
x.stop_gradient = False
25952595
mask = paddle.full([], True, dtype='bool')
@@ -4860,7 +4860,7 @@ def test_unbind(self):
48604860
self.assertEqual(res[1].shape, (2,))
48614861

48624862
@prog_scope()
4863-
def test_maseked_select(self):
4863+
def test_masked_select(self):
48644864
x = paddle.rand([])
48654865
x.stop_gradient = False
48664866
mask = paddle.full([], True, dtype='bool')

test/xpu/test_reduce_amin_op_xpu.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,10 +45,10 @@ def set_case(self):
4545
'X': np.random.randint(0, 100, self.shape).astype("float32")
4646
}
4747

48-
expect_intput = self.inputs['X']
48+
expect_input = self.inputs['X']
4949
self.outputs = {
5050
'Out': np.amin(
51-
expect_intput,
51+
expect_input,
5252
axis=self.attrs['dim'],
5353
keepdims=self.attrs['keep_dim'],
5454
)

test/xpu/test_set_value_op_xpu.py

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -308,7 +308,7 @@ def _get_answer(self):
308308
self.data[0:, 1:2:2, :] = self.value
309309

310310
# 1.2.3 step < 0
311-
class XPUTestSetValueItemSliceNegetiveStep(XPUTestSetValueApi):
311+
class XPUTestSetValueItemSliceNegativeStep(XPUTestSetValueApi):
312312
def set_dtype(self):
313313
if self.in_type == np.float16:
314314
self.dtype = "float32"
@@ -333,8 +333,8 @@ def _call_setitem_static_api(self, x):
333333
def _get_answer(self):
334334
self.data[5:2:-1] = self.value
335335

336-
class XPUTestSetValueItemSliceNegetiveStep2(
337-
XPUTestSetValueItemSliceNegetiveStep
336+
class XPUTestSetValueItemSliceNegativeStep2(
337+
XPUTestSetValueItemSliceNegativeStep
338338
):
339339
def set_shape(self):
340340
self.shape = [5]
@@ -353,8 +353,8 @@ def _call_setitem_static_api(self, x):
353353
def _get_answer(self):
354354
self.data[1::-1] = self.value
355355

356-
class XPUTestSetValueItemSliceNegetiveStep3(
357-
XPUTestSetValueItemSliceNegetiveStep
356+
class XPUTestSetValueItemSliceNegativeStep3(
357+
XPUTestSetValueItemSliceNegativeStep
358358
):
359359
def set_shape(self):
360360
self.shape = [3]
@@ -372,7 +372,7 @@ def _call_setitem_static_api(self, x):
372372
def _get_answer(self):
373373
self.data[::-1] = self.value
374374

375-
class XPUTestSetValueItemSliceNegetiveStep4(XPUTestSetValueApi):
375+
class XPUTestSetValueItemSliceNegativeStep4(XPUTestSetValueApi):
376376
def set_dtype(self):
377377
if self.in_type == np.float16:
378378
self.dtype = "float32"
@@ -400,7 +400,7 @@ def _get_answer(self):
400400

401401
# 1.2.3 step < 0 and stride < -1
402402

403-
class XPUTestSetValueItemSliceNegetiveStep5(XPUTestSetValueApi):
403+
class XPUTestSetValueItemSliceNegativeStep5(XPUTestSetValueApi):
404404
def set_dtype(self):
405405
if self.in_type == np.float16:
406406
self.dtype = "float32"
@@ -1230,14 +1230,14 @@ def set_value(t, value):
12301230
np.testing.assert_array_equal(
12311231
inps.grad.numpy(),
12321232
input_grad,
1233-
err_msg='The gradient of value should be \n{},\n but reveived {}'.format(
1233+
err_msg='The gradient of value should be \n{},\n but received {}'.format(
12341234
input_grad, inps.grad.numpy()
12351235
),
12361236
)
12371237
np.testing.assert_array_equal(
12381238
value.grad.numpy(),
12391239
value_grad,
1240-
err_msg='The gradient of input should be \n{},\n but reveived {}'.format(
1240+
err_msg='The gradient of input should be \n{},\n but received {}'.format(
12411241
value_grad, value.grad.numpy()
12421242
),
12431243
)
@@ -1266,14 +1266,14 @@ def set_value(t, value):
12661266
np.testing.assert_array_equal(
12671267
inps2.grad.numpy(),
12681268
input_grad2,
1269-
err_msg='The gradient of value should be \n{},\n but reveived {}'.format(
1269+
err_msg='The gradient of value should be \n{},\n but received {}'.format(
12701270
input_grad, inps2.grad.numpy()
12711271
),
12721272
)
12731273
np.testing.assert_array_equal(
12741274
value2.grad.numpy(),
12751275
value_grad2,
1276-
err_msg='The gradient of input should be \n{},\n but reveived {}'.format(
1276+
err_msg='The gradient of input should be \n{},\n but received {}'.format(
12771277
value_grad, value2.grad.numpy()
12781278
),
12791279
)
@@ -1324,14 +1324,14 @@ def set_value3(t, value):
13241324
np.testing.assert_array_equal(
13251325
inps.grad.numpy(),
13261326
input_grad,
1327-
err_msg='The gradient of value should be \n{},\n but reveived {}'.format(
1327+
err_msg='The gradient of value should be \n{},\n but received {}'.format(
13281328
input_grad, inps.grad.numpy()
13291329
),
13301330
)
13311331
np.testing.assert_array_equal(
13321332
value.grad.numpy(),
13331333
value_grad,
1334-
err_msg='The gradient of input should be \n{},\n but reveived {}'.format(
1334+
err_msg='The gradient of input should be \n{},\n but received {}'.format(
13351335
value_grad, value.grad.numpy()
13361336
),
13371337
)
@@ -1372,14 +1372,14 @@ def set_value4(t, value):
13721372
np.testing.assert_array_equal(
13731373
inps.grad.numpy(),
13741374
input_grad,
1375-
err_msg='The gradient of value should be \n{},\n but reveived {}'.format(
1375+
err_msg='The gradient of value should be \n{},\n but received {}'.format(
13761376
input_grad, inps.grad.numpy()
13771377
),
13781378
)
13791379
np.testing.assert_array_equal(
13801380
value.grad.numpy(),
13811381
value_grad,
1382-
err_msg='The gradient of input should be \n{},\n but reveived {}'.format(
1382+
err_msg='The gradient of input should be \n{},\n but received {}'.format(
13831383
value_grad, value.grad.numpy()
13841384
),
13851385
)
@@ -1426,14 +1426,14 @@ def set_value5(t, value):
14261426
np.testing.assert_array_equal(
14271427
inps.grad.numpy(),
14281428
input_grad,
1429-
err_msg='The gradient of value should be \n{},\n but reveived {}'.format(
1429+
err_msg='The gradient of value should be \n{},\n but received {}'.format(
14301430
input_grad, inps.grad.numpy()
14311431
),
14321432
)
14331433
np.testing.assert_array_equal(
14341434
value.grad.numpy(),
14351435
value_grad,
1436-
err_msg='The gradient of input should be \n{},\n but reveived {}'.format(
1436+
err_msg='The gradient of input should be \n{},\n but received {}'.format(
14371437
value_grad, value.grad.numpy()
14381438
),
14391439
)

test/xpu/test_sum_op_xpu.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -213,7 +213,7 @@ def setUp(self):
213213
self.init_kernel_type()
214214

215215
def check_with_place(self, place, inplace):
216-
self.check_input_and_optput(place, inplace, True, True, True)
216+
self.check_input_and_output(place, inplace, True, True, True)
217217

218218
def init_kernel_type(self):
219219
pass
@@ -224,7 +224,7 @@ def _get_array(self, rows, row_numel):
224224
array[i] *= rows[i]
225225
return array
226226

227-
def check_input_and_optput(
227+
def check_input_and_output(
228228
self,
229229
place,
230230
inplace,

test/xpu/test_zero_dim_tensor_xpu.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2127,7 +2127,7 @@ def test_unbind(self):
21272127
self.assertEqual(out2_2.numpy(), 2)
21282128
self.assertEqual(x2.grad.shape, [2])
21292129

2130-
def test_maseked_select(self):
2130+
def test_masked_select(self):
21312131
x = paddle.rand([])
21322132
x.stop_gradient = False
21332133
mask = paddle.full([], True, dtype='bool')

0 commit comments

Comments
 (0)