diff --git a/test/cinn/passes/test_transpose_floding_output_pass.py b/test/cinn/passes/test_transpose_floding_output_pass.py
index 306e3c1380714f..b2176e28851760 100644
--- a/test/cinn/passes/test_transpose_floding_output_pass.py
+++ b/test/cinn/passes/test_transpose_floding_output_pass.py
@@ -79,7 +79,9 @@ def trans_out_func(self, builder, out):
         return builder.transpose(out_s, [0, 2, 1])
 
 
-class TestTransposeFoldingOutputPassInvlidTrans(TestTransposeFoldingOutputPass):
+class TestTransposeFoldingOutputPassInvalidTrans(
+    TestTransposeFoldingOutputPass
+):
     def expect_folding_number(self):
         return 1
 
@@ -88,7 +90,9 @@ def trans_out_func(self, builder, out):
         return builder.scale(out_t, scale=2.0)
 
 
-class TestTransposeFoldingOutputPassInvlidScale(TestTransposeFoldingOutputPass):
+class TestTransposeFoldingOutputPassInvalidScale(
+    TestTransposeFoldingOutputPass
+):
     def expect_folding_number(self):
         return 1
 
diff --git a/test/dygraph_to_static/test_pylayer.py b/test/dygraph_to_static/test_pylayer.py
index 43aeede333f181..bf09ba3db8a8cd 100644
--- a/test/dygraph_to_static/test_pylayer.py
+++ b/test/dygraph_to_static/test_pylayer.py
@@ -322,7 +322,7 @@ def _run_and_compare(self, *args, **kwargs):
             dygraph_inp_kwargs[k].stop_gradient = False
             static_inp_kwargs[k].stop_gradient = False
 
-        # Step2. Run the dygraph and the static seperately
+        # Step2. Run the dygraph and the static separately
         dygraph_res = self._run_dygraph(*dygraph_inp_args, **dygraph_inp_kwargs)
         static_res = self._run_static(*static_inp_args, **static_inp_kwargs)
 
diff --git a/test/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py b/test/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py
index cdfe819ce69af2..0b09c218ea9b22 100644
--- a/test/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py
+++ b/test/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py
@@ -25,7 +25,7 @@ class TestReshapeTransposeMatmulV2OneDNNFusePass(InferencePassTest):
     def setUp(self):
         self.set_params()
-        self.tranpose_perm = [0, 2, 1, 3]
+        self.transpose_perm = [0, 2, 1, 3]
         self.pass_name = 'reshape_transpose_matmul_mkldnn_fuse_pass'
 
         with base.program_guard(self.main_program, self.startup_program):
@@ -37,7 +37,7 @@ def setUp(self):
             )
 
             reshape = paddle.reshape(data, shape=self.reshape_shape)
-            transpose = paddle.transpose(reshape, self.tranpose_perm)
+            transpose = paddle.transpose(reshape, self.transpose_perm)
 
             matmul = paddle.matmul(
                 transpose,
diff --git a/test/ir/inference/test_trt_multiclass_nms3_op.py b/test/ir/inference/test_trt_multiclass_nms3_op.py
index a38a597cf856f2..33e823f8fe4acb 100644
--- a/test/ir/inference/test_trt_multiclass_nms3_op.py
+++ b/test/ir/inference/test_trt_multiclass_nms3_op.py
@@ -48,10 +48,10 @@ def multiclass_nms(
     In the NMS step, this operator greedily selects a subset of detection
     bounding boxes that have high scores larger than score_threshold, if providing this
     threshold, then selects the largest nms_top_k confidences scores if nms_top_k
-    is larger than -1. Then this operator pruns away boxes that have high IOU
+    is larger than -1. Then this operator prunes away boxes that have high IOU
     (intersection over union) overlap with already selected boxes by adaptive
     threshold NMS based on parameters of nms_threshold and nms_eta.
-    Aftern NMS step, at most keep_top_k number of total bboxes are to be kept
+    After NMS step, at most keep_top_k number of total bboxes are to be kept
     per image if keep_top_k is larger than -1.
     Args:
         bboxes (Tensor): Two types of bboxes are supported:
@@ -332,7 +332,7 @@ def test_background(self):
         self.run_test()
 
     def test_disable_varseqlen(self):
-        self.diable_tensorrt_varseqlen = False
+        self.disable_tensorrt_varseqlen = False
         self.run_test()
 
diff --git a/test/legacy_test/test_model.py b/test/legacy_test/test_model.py
index a34c1404de45e5..d7d55a7c8b6ead 100644
--- a/test/legacy_test/test_model.py
+++ b/test/legacy_test/test_model.py
@@ -69,14 +69,14 @@ def forward(self, x):
         return y, 0
 
 
-class ModelOutter(paddle.nn.Layer):
+class ModelOuter(paddle.nn.Layer):
     def __init__(self):
         super().__init__()
         self.module1 = ModelInner()
         self.module2 = paddle.nn.Linear(4, 5)
 
     def forward(self, x):
-        y, dummpy = self.module1(x)
+        y, _ = self.module1(x)
         y = self.module2(y)
         return y, 3
 
@@ -704,7 +704,7 @@ def _get_param_from_state_dict(state_dict):
         model.summary(input_size=(20), dtype='float32')
 
     def test_summary_non_tensor(self):
-        paddle.summary(ModelOutter(), input_size=(-1, 3))
+        paddle.summary(ModelOuter(), input_size=(-1, 3))
 
     def test_summary_nlp(self):
         def _get_param_from_state_dict(state_dict):
@@ -1020,7 +1020,7 @@ def test_fit_by_epoch(self):
         base_lr = 1e-3
         boundaries = [5, 8]
         epochs = 10
-        wamup_epochs = 4
+        warmup_epochs = 4
 
         def make_optimizer(parameters=None):
             momentum = 0.9
@@ -1031,7 +1031,7 @@ def make_optimizer(parameters=None):
             )
             learning_rate = paddle.optimizer.lr.LinearWarmup(
                 learning_rate=learning_rate,
-                warmup_steps=wamup_epochs,
+                warmup_steps=warmup_epochs,
                 start_lr=base_lr / 5.0,
                 end_lr=base_lr,
                 verbose=True,
@@ -1071,7 +1071,7 @@ def make_optimizer(parameters=None):
         cnt = 0
         for b in boundaries:
-            if b + wamup_epochs <= epochs:
+            if b + warmup_epochs <= epochs:
                 cnt += 1
 
         np.testing.assert_allclose(
@@ -1104,7 +1104,7 @@ def make_optimizer(parameters=None):
         cnt = 0
         for b in boundaries:
-            if b + wamup_epochs <= epochs:
+            if b + warmup_epochs <= epochs:
                 cnt += 1
 
         np.testing.assert_allclose(
diff --git a/test/legacy_test/test_sparse_model.py b/test/legacy_test/test_sparse_model.py
index 9e71757f903423..96e538ef979b9e 100644
--- a/test/legacy_test/test_sparse_model.py
+++ b/test/legacy_test/test_sparse_model.py
@@ -22,9 +22,9 @@ class TestGradientAdd(unittest.TestCase):
     def sparse(self, sp_x):
-        indentity = sp_x
+        identity = sp_x
         out = nn.functional.relu(sp_x)
-        values = out.values() + indentity.values()
+        values = out.values() + identity.values()
         out = paddle.sparse.sparse_coo_tensor(
             out.indices(),
             values,
@@ -34,9 +34,9 @@ def sparse(self, sp_x):
         return out
 
     def dense(self, x):
-        indentity = x
+        identity = x
         out = paddle.nn.functional.relu(x)
-        out = out + indentity
+        out = out + identity
         return out
 
     def test(self):
diff --git a/test/mkldnn/test_requantize_mkldnn_op.py b/test/mkldnn/test_requantize_mkldnn_op.py
index 4ad8966c33307f..43daa9baadb8ca 100644
--- a/test/mkldnn/test_requantize_mkldnn_op.py
+++ b/test/mkldnn/test_requantize_mkldnn_op.py
@@ -296,7 +296,7 @@ def set_shifts(self):
         self.shift_out = 128.0
 
 
-# ---------------test non-four dimentional formats--------------------------
+# ---------------test non-four dimensional formats--------------------------
 
 
 class TestReQuantizeOp_2DimFormat(TestReQuantizeOp):
diff --git a/test/prim/model/bert.py b/test/prim/model/bert.py
index fe54de520f88f5..2ec9e8924cd6c4 100644
--- a/test/prim/model/bert.py
+++ b/test/prim/model/bert.py
@@ -175,12 +175,12 @@ def forward(
         if token_type_ids is None:
             token_type_ids = paddle.zeros_like(input_ids, dtype="int64")
 
-        input_embedings = self.word_embeddings(input_ids)
+        input_embeddings = self.word_embeddings(input_ids)
         position_embeddings = self.position_embeddings(position_ids)
         token_type_embeddings = self.token_type_embeddings(token_type_ids)
 
         embeddings = (
-            input_embedings + position_embeddings + token_type_embeddings
+            input_embeddings + position_embeddings + token_type_embeddings
         )
         embeddings = self.layer_norm(embeddings)
         embeddings = self.dropout(embeddings)
diff --git a/test/tokenizer/bert_tokenizer.py b/test/tokenizer/bert_tokenizer.py
index f8c7f4c55293a3..47aeae8bafa3f3 100755
--- a/test/tokenizer/bert_tokenizer.py
+++ b/test/tokenizer/bert_tokenizer.py
@@ -466,7 +466,7 @@ def get_special_tokens_mask(
         Args:
            token_ids_0 (List[int]):
                A list of `inputs_ids` for the first sequence.
-           token_ids_1 (List[int], optinal):
+           token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to None.
            already_has_special_tokens (bool, optional): Whether or not the token list is already
                formatted with special tokens for the model. Defaults to None.
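
Note for reviewers: the docstring touched in test/ir/inference/test_trt_multiclass_nms3_op.py describes the greedy NMS loop behind the multiclass_nms operator. Below is a minimal single-class NumPy sketch of that loop for reference; the function name greedy_nms, its default thresholds, and the omission of the adaptive nms_eta step are illustrative assumptions, not Paddle's API.

import numpy as np


def greedy_nms(boxes, scores, score_threshold=0.05, nms_top_k=400,
               nms_threshold=0.5, keep_top_k=100):
    """Single-class greedy NMS; boxes is (N, 4) [xmin, ymin, xmax, ymax]."""
    # Keep candidates above score_threshold, visited best score first,
    # truncated to the nms_top_k highest-scoring boxes if nms_top_k > -1.
    idxs = np.where(scores > score_threshold)[0]
    idxs = idxs[np.argsort(-scores[idxs])]
    if nms_top_k > -1:
        idxs = idxs[:nms_top_k]

    keep = []
    while idxs.size > 0 and (keep_top_k < 0 or len(keep) < keep_top_k):
        cur, rest = idxs[0], idxs[1:]
        keep.append(cur)
        # IOU of the selected box against every remaining candidate.
        x1 = np.maximum(boxes[cur, 0], boxes[rest, 0])
        y1 = np.maximum(boxes[cur, 1], boxes[rest, 1])
        x2 = np.minimum(boxes[cur, 2], boxes[rest, 2])
        y2 = np.minimum(boxes[cur, 3], boxes[rest, 3])
        inter = np.maximum(0.0, x2 - x1) * np.maximum(0.0, y2 - y1)
        area_cur = (boxes[cur, 2] - boxes[cur, 0]) * (boxes[cur, 3] - boxes[cur, 1])
        area_rest = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])
        iou = inter / (area_cur + area_rest - inter)
        # Prune candidates that overlap the selected box too heavily.
        idxs = rest[iou <= nms_threshold]
    return np.array(keep, dtype=np.int64)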