From c58738082b247e7893964af1ae8ff2d5c4326d2f Mon Sep 17 00:00:00 2001
From: co63oc
Date: Fri, 16 Feb 2024 18:04:36 +0800
Subject: [PATCH] Fix

---
 test/custom_op/test_custom_relu_model.py      |  4 +-
 test/custom_op/test_inference_gap_setup.py    |  2 +-
 test/custom_runtime/extension_header_test.cc  |  2 +-
 test/dataset/test_image.py                    |  8 +--
 .../auto_parallel_pass_test_base.py           |  2 +-
 .../test_auto_parallel_amp_pass.py            |  4 +-
 ...arallel_data_parallel_optimization_pass.py |  4 +-
 .../test_auto_parallel_fp16_pass.py           |  4 +-
 ...to_parallel_fused_linear_promotion_pass.py |  4 +-
 .../test_auto_parallel_gradient_merge_pass.py |  4 +-
 .../test_auto_parallel_recompute_pass.py      |  4 +-
 .../test_auto_parallel_sharding_pass.py       |  4 +-
 test/distribution/config.py                   |  2 +-
 test/fft/test_fft.py                          | 58 +++++++++----------
 test/ipu/distributed/test_dist_sample.py      |  2 +-
 15 files changed, 54 insertions(+), 54 deletions(-)

diff --git a/test/custom_op/test_custom_relu_model.py b/test/custom_op/test_custom_relu_model.py
index afed82a8c1150f..0e7d2c41257c74 100644
--- a/test/custom_op/test_custom_relu_model.py
+++ b/test/custom_op/test_custom_relu_model.py
@@ -53,7 +53,7 @@
 
 class Net(nn.Layer):
     """
-    A simple exmaple for Regression Model.
+    A simple example for Regression Model.
     """
 
     def __init__(self, in_dim, out_dim, use_custom_op=False):
@@ -102,7 +102,7 @@ def setUp(self):
         self.temp_dir = tempfile.TemporaryDirectory()
         self.model_save_dir = os.path.join(self.temp_dir.name, 'infer_model')
         self.model_path_template = os.path.join(
-            self.model_save_dir, 'custom_relu_dygaph_model_{}.pdparams'
+            self.model_save_dir, 'custom_relu_dygraph_model_{}.pdparams'
         )
         self.model_dy2stat_path = os.path.join(
             self.model_save_dir, 'infer_model/custom_relu_model_dy2sta'
diff --git a/test/custom_op/test_inference_gap_setup.py b/test/custom_op/test_inference_gap_setup.py
index 6ff300717621db..976da045b61dc1 100644
--- a/test/custom_op/test_inference_gap_setup.py
+++ b/test/custom_op/test_inference_gap_setup.py
@@ -45,7 +45,7 @@ def forward(self, x):
 
 class TestNewCustomOpSetUpInstall(unittest.TestCase):
     def setUp(self):
-        # TODO(ming1753): skip window CI beacuse run_cmd(cmd) filed
+        # TODO(ming1753): skip window CI because run_cmd(cmd) filed
         if os.name != 'nt':
             cur_dir = os.path.dirname(os.path.abspath(__file__))
             # compile, install the custom op egg into site-packages under background
diff --git a/test/custom_runtime/extension_header_test.cc b/test/custom_runtime/extension_header_test.cc
index f3aeb92d906e9b..7f11661d6ec2dd 100644
--- a/test/custom_runtime/extension_header_test.cc
+++ b/test/custom_runtime/extension_header_test.cc
@@ -19,6 +19,6 @@
 // !!! do not fix this ut by adding other header files (PR#60842) !!!
 #include "paddle/phi/extension.h"
 
-TEST(CustomDevce, extension_header) {
+TEST(CustomDevice, extension_header) {
   VLOG(1) << "check extension header support compile only";
 }
diff --git a/test/dataset/test_image.py b/test/dataset/test_image.py
index 54858da9da123e..e6bd63785ff1a0 100644
--- a/test/dataset/test_image.py
+++ b/test/dataset/test_image.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """
-Fliename:
+Filename:
     test_image.py
 Description:
-    This scipt test image resize,flip and chw.
+    This script test image resize,flip and chw.
 """
 import os
 import unittest
@@ -34,10 +34,10 @@
 class Image(unittest.TestCase):
     def test_resize_flip_chw(self):
         """resize"""
-        imgdir = os.path.join(
+        img_dir = os.path.join(
             os.path.dirname(os.path.abspath(__file__)), 'cat.jpg'
         )
-        images = image.load_image(imgdir)
+        images = image.load_image(img_dir)
         images = image.resize_short(images, 256)
         self.assertEqual(256, min(images.shape[:2]))
         self.assertEqual(3, images.shape[2])
diff --git a/test/distributed_passes/auto_parallel_pass_test_base.py b/test/distributed_passes/auto_parallel_pass_test_base.py
index 90173e43de5722..c52e6c1a4976d4 100644
--- a/test/distributed_passes/auto_parallel_pass_test_base.py
+++ b/test/distributed_passes/auto_parallel_pass_test_base.py
@@ -33,7 +33,7 @@
 )
 
 
-class AutoPallelPassTestBase(DistPassTestBase):
+class AutoParallelPassTestBase(DistPassTestBase):
     def setUp(self):
         paddle.enable_static()
         seed = int(os.environ.get('SEED', -1))
diff --git a/test/distributed_passes/test_auto_parallel_amp_pass.py b/test/distributed_passes/test_auto_parallel_amp_pass.py
index d373e58307433d..35a3764e4b3be4 100755
--- a/test/distributed_passes/test_auto_parallel_amp_pass.py
+++ b/test/distributed_passes/test_auto_parallel_amp_pass.py
@@ -16,13 +16,13 @@
 import unittest
 
 import numpy as np
-from auto_parallel_pass_test_base import AutoPallelPassTestBase
+from auto_parallel_pass_test_base import AutoParallelPassTestBase
 
 import paddle
 from paddle.distributed import fleet
 
 
-class TestAMPPass(AutoPallelPassTestBase):
+class TestAMPPass(AutoParallelPassTestBase):
     def init(self):
         if paddle.is_compiled_with_cuda():
             paddle.set_flags({'FLAGS_cudnn_deterministic': 1})
diff --git a/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py b/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py
index 46d4e812334b7e..bcd09ec04b33a2 100644
--- a/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py
+++ b/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py
@@ -17,7 +17,7 @@
 import unittest
 
 import numpy as np
-from auto_parallel_pass_test_base import AutoPallelPassTestBase
+from auto_parallel_pass_test_base import AutoParallelPassTestBase
 
 import paddle
 from paddle.distributed import fleet
@@ -32,7 +32,7 @@
 sys.path.append("..")
 
 
-class TestDataParallelPassWithScale1(AutoPallelPassTestBase):
+class TestDataParallelPassWithScale1(AutoParallelPassTestBase):
     def init(self):
         if paddle.is_compiled_with_cuda():
             paddle.set_flags({'FLAGS_cudnn_deterministic': 1})
diff --git a/test/distributed_passes/test_auto_parallel_fp16_pass.py b/test/distributed_passes/test_auto_parallel_fp16_pass.py
index 9a542075996a9e..e0f6cc3ef81f0d 100644
--- a/test/distributed_passes/test_auto_parallel_fp16_pass.py
+++ b/test/distributed_passes/test_auto_parallel_fp16_pass.py
@@ -16,13 +16,13 @@
 import unittest
 
 import numpy as np
-from auto_parallel_pass_test_base import AutoPallelPassTestBase
+from auto_parallel_pass_test_base import AutoParallelPassTestBase
 
 import paddle
 from paddle.distributed import fleet
 
 
-class TestPF16Pass(AutoPallelPassTestBase):
+class TestPF16Pass(AutoParallelPassTestBase):
     def init(self):
         if paddle.is_compiled_with_cuda():
             paddle.set_flags({'FLAGS_cudnn_deterministic': 1})
diff --git a/test/distributed_passes/test_auto_parallel_fused_linear_promotion_pass.py b/test/distributed_passes/test_auto_parallel_fused_linear_promotion_pass.py
index 77b75489f9c512..d42beec5f4cbda 100644
--- a/test/distributed_passes/test_auto_parallel_fused_linear_promotion_pass.py
+++ b/test/distributed_passes/test_auto_parallel_fused_linear_promotion_pass.py
@@ -181,7 +181,7 @@ def test_fused_linear_promotion_mp(self):
             rank=0, enable_fused_linear_promotion=False, enable_sp=False
         )
         ops_without_promotion = dist_main_prog.global_block().ops
-        oringin_fused_gemm_epilogue_ops = [
+        origin_fused_gemm_epilogue_ops = [
             op
             for op in ops_without_promotion
             if op.type == "fused_gemm_epilogue"
@@ -196,7 +196,7 @@ def test_fused_linear_promotion_mp(self):
         ]
         self.assertEqual(
             len(fused_gemm_epilogue_ops),
-            len(oringin_fused_gemm_epilogue_ops) + 2,
+            len(origin_fused_gemm_epilogue_ops) + 2,
         )
 
 
diff --git a/test/distributed_passes/test_auto_parallel_gradient_merge_pass.py b/test/distributed_passes/test_auto_parallel_gradient_merge_pass.py
index f1aef2b30ca7c0..9b2280d2432843 100644
--- a/test/distributed_passes/test_auto_parallel_gradient_merge_pass.py
+++ b/test/distributed_passes/test_auto_parallel_gradient_merge_pass.py
@@ -17,7 +17,7 @@
 import unittest
 
 import numpy as np
-from auto_parallel_pass_test_base import AutoPallelPassTestBase
+from auto_parallel_pass_test_base import AutoParallelPassTestBase
 
 import paddle
 import paddle.nn.functional as F
@@ -101,7 +101,7 @@ def mlp_forward(input, label, hidden_size):
     return loss
 
 
-class TestGradientMergePass(AutoPallelPassTestBase):
+class TestGradientMergePass(AutoParallelPassTestBase):
     def init(self):
         paddle.seed(2022)
         random.seed(2022)
diff --git a/test/distributed_passes/test_auto_parallel_recompute_pass.py b/test/distributed_passes/test_auto_parallel_recompute_pass.py
index 6e98d9faaf1bb5..c2904b35d11223 100644
--- a/test/distributed_passes/test_auto_parallel_recompute_pass.py
+++ b/test/distributed_passes/test_auto_parallel_recompute_pass.py
@@ -16,13 +16,13 @@
 import unittest
 
 import numpy as np
-from auto_parallel_pass_test_base import AutoPallelPassTestBase
+from auto_parallel_pass_test_base import AutoParallelPassTestBase
 
 import paddle
 from paddle.distributed import fleet
 
 
-class TestRecomputePass(AutoPallelPassTestBase):
+class TestRecomputePass(AutoParallelPassTestBase):
     def init(self):
         if paddle.is_compiled_with_cuda():
             paddle.set_flags({'FLAGS_cudnn_deterministic': 1})
diff --git a/test/distributed_passes/test_auto_parallel_sharding_pass.py b/test/distributed_passes/test_auto_parallel_sharding_pass.py
index 6b653f9c209f56..e93f526d04e5ee 100644
--- a/test/distributed_passes/test_auto_parallel_sharding_pass.py
+++ b/test/distributed_passes/test_auto_parallel_sharding_pass.py
@@ -16,13 +16,13 @@
 import unittest
 
 import numpy as np
-from auto_parallel_pass_test_base import AutoPallelPassTestBase
+from auto_parallel_pass_test_base import AutoParallelPassTestBase
 
 import paddle
 from paddle.distributed import fleet
 
 
-class TestShardingPass(AutoPallelPassTestBase):
+class TestShardingPass(AutoParallelPassTestBase):
     def init(self):
         if paddle.is_compiled_with_cuda():
             paddle.set_flags({'FLAGS_cudnn_deterministic': 1})
diff --git a/test/distribution/config.py b/test/distribution/config.py
index 29a27890bad2c5..cfc93aab75d3bf 100644
--- a/test/distribution/config.py
+++ b/test/distribution/config.py
@@ -20,7 +20,7 @@
 DEFAULT_DTYPE = 'float64'
 
 TEST_CASE_NAME = 'suffix'
-# All test case will use float64 for compare percision, refs:
+# All test case will use float64 for compare precision, refs:
 # https://github.com/PaddlePaddle/Paddle/wiki/Upgrade-OP-Precision-to-Float64
 RTOL = {
     'float32': 1e-03,
diff --git a/test/fft/test_fft.py b/test/fft/test_fft.py
index a49e030fa63530..3ca810a26ab84b 100644
--- a/test/fft/test_fft.py
+++ b/test/fft/test_fft.py
@@ -25,7 +25,7 @@
     DEVICES.append(paddle.CUDAPlace(0))
 
 TEST_CASE_NAME = 'suffix'
-# All test case will use float64 for compare percision, refs:
+# All test case will use float64 for compare precision, refs:
 # https://github.com/PaddlePaddle/Paddle/wiki/Upgrade-OP-Precision-to-Float64
 RTOL = {
     'float32': 1e-03,
@@ -171,7 +171,7 @@ def test_fft(self):
 @parameterize(
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [
-        ('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError),
+        ('test_n_negative', rand_x(2), -1, -1, 'backward', ValueError),
         ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError),
         ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError),
         (
@@ -194,7 +194,7 @@
 )
 class TestFftException(unittest.TestCase):
     def test_fft(self):
-        """Test fft with buoudary condition
+        """Test fft with boundary condition
         Test case include:
         - n out of range
         - axis out of range
@@ -266,7 +266,7 @@ def test_fft2(self):
             ValueError,
         ),
         ('test_x_1dim_tensor', rand_x(1), None, (0, 1), None, ValueError),
-        ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError),
+        ('test_n_negative', rand_x(2), -1, (0, 1), 'backward', ValueError),
         (
             'test_n_len_not_equal_axis',
             rand_x(5, max_dim_len=5),
@@ -305,7 +305,7 @@
 )
 class TestFft2Exception(unittest.TestCase):
     def test_fft2(self):
-        """Test fft2 with buoudary condition
+        """Test fft2 with boundary condition
         Test case include:
         - input type error
         - input dim error
@@ -785,7 +785,7 @@ def test_irfft2(self):
             RuntimeError,
         ),
         (
-            'test_n_nagative',
+            'test_n_negative',
             np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
             -1,
             -1,
@@ -836,7 +836,7 @@
 )
 class TestHfftException(unittest.TestCase):
     def test_hfft(self):
-        """Test hfft with buoudary condition
+        """Test hfft with boundary condition
         Test case include:
         Test case include:
         - n out of range
@@ -857,7 +857,7 @@ def test_hfft(self):
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [
         (
-            'test_n_nagative',
+            'test_n_negative',
             np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
             -1,
             -1,
@@ -909,7 +909,7 @@
 class TestIrfftException(unittest.TestCase):
     def test_irfft(self):
         """
-        Test irfft with buoudary condition
+        Test irfft with boundary condition
         Test case include:
         - n out of range
         - n type error
@@ -939,7 +939,7 @@ def test_irfft(self):
             RuntimeError,
         ),
         (
-            'test_n_nagative',
+            'test_n_negative',
             np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
             (-1, -2),
             (-2, -1),
@@ -999,7 +999,7 @@
 class TestHfft2Exception(unittest.TestCase):
     def test_hfft2(self):
         """
-        Test hfft2 with buoudary condition
+        Test hfft2 with boundary condition
         Test case include:
         - input type error
         - n type error
@@ -1020,7 +1020,7 @@ def test_hfft2(self):
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [
         (
-            'test_n_nagative',
+            'test_n_negative',
             np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
             (-1, -2),
             (-2, -1),
@@ -1088,7 +1088,7 @@
 class TestIrfft2Exception(unittest.TestCase):
     def test_irfft2(self):
         """
-        Test irfft2 with buoudary condition
+        Test irfft2 with boundary condition
         Test case include:
         - input type error
         - n type error
@@ -1119,7 +1119,7 @@ def test_irfft2(self):
             RuntimeError,
         ),
         (
-            'test_n_nagative',
+            'test_n_negative',
             np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
             (-1, -2),
             (-2, -1),
@@ -1178,7 +1178,7 @@
 )
 class TestHfftnException(unittest.TestCase):
     def test_hfftn(self):
-        """Test hfftn with buoudary condition
+        """Test hfftn with boundary condition
         Test case include:
         - input type error
         - n type error
@@ -1199,7 +1199,7 @@ def test_hfftn(self):
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [
         (
-            'test_n_nagative',
+            'test_n_negative',
             np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
             (-1, -2),
             (-2, -1),
@@ -1258,7 +1258,7 @@
 )
 class TestIrfftnException(unittest.TestCase):
     def test_irfftn(self):
-        """Test irfftn with buoudary condition
+        """Test irfftn with boundary condition
         Test case include:
         - n out of range
         - n type error
@@ -1315,7 +1315,7 @@ def test_rfft(self):
 @parameterize(
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [
-        ('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError),
+        ('test_n_negative', rand_x(2), -1, -1, 'backward', ValueError),
         ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError),
         ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError),
         (
@@ -1338,7 +1338,7 @@
 )
 class TestRfftException(unittest.TestCase):
     def test_rfft(self):
-        """Test rfft with buoudary condition
+        """Test rfft with boundary condition
         Test case include:
         - n out of range
         - axis out of range
@@ -1404,7 +1404,7 @@ def test_rfft2(self):
             RuntimeError,
         ),
         ('test_x_1dim_tensor', rand_x(1), None, (0, 1), 'backward', ValueError),
-        ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError),
+        ('test_n_negative', rand_x(2), -1, (0, 1), 'backward', ValueError),
         ('test_n_zero', rand_x(2), 0, (0, 1), 'backward', ValueError),
         (
             'test_axis_out_of_range',
@@ -1435,7 +1435,7 @@
 )
 class TestRfft2Exception(unittest.TestCase):
     def test_rfft2(self):
-        """Test rfft2 with buoudary condition
+        """Test rfft2 with boundary condition
         Test case include:
         - input type error
         - input dim error
@@ -1502,7 +1502,7 @@ def test_rfftn(self):
             RuntimeError,
         ),
         (
-            'test_n_nagative',
+            'test_n_negative',
             rand_x(4),
             (-1, -1),
             (1, 2),
@@ -1524,7 +1524,7 @@
 )
 class TestRfftnException(unittest.TestCase):
     def test_rfftn(self):
-        """Test rfftn with buoudary condition
+        """Test rfftn with boundary condition
         Test case include:
         - n out of range
         - axis out of range
@@ -1580,7 +1580,7 @@ def test_ihfft(self):
 @parameterize(
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [
-        ('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError),
+        ('test_n_negative', rand_x(2), -1, -1, 'backward', ValueError),
         ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError),
         ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError),
         (
@@ -1603,7 +1603,7 @@
 )
 class TestIhfftException(unittest.TestCase):
     def test_ihfft(self):
-        """Test ihfft with buoudary condition
+        """Test ihfft with boundary condition
         Test case include:
         - axis type error
         - axis out of range
@@ -1668,7 +1668,7 @@ def test_ihfft2(self):
             ValueError,
         ),
         ('test_x_1dim_tensor', rand_x(1), None, (0, 1), None, ValueError),
-        ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError),
+        ('test_n_negative', rand_x(2), -1, (0, 1), 'backward', ValueError),
         (
             'test_n_len_not_equal_axis',
             rand_x(5, max_dim_len=5),
@@ -1707,7 +1707,7 @@
 )
 class TestIhfft2Exception(unittest.TestCase):
     def test_ihfft2(self):
-        """Test ihfft2 with buoudary condition
+        """Test ihfft2 with boundary condition
         Test case include:
         - input type error
         - input dim error
@@ -1773,7 +1773,7 @@ def test_ihfftn(self):
             'backward',
             RuntimeError,
         ),
-        ('test_n_nagative', rand_x(4), -1, None, 'backward', ValueError),
+        ('test_n_negative', rand_x(4), -1, None, 'backward', ValueError),
         ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError),
         (
             'test_axis_out_of_range',
@@ -1788,7 +1788,7 @@
 )
 class TestIhfftnException(unittest.TestCase):
     def test_ihfftn(self):
-        """Test ihfftn with buoudary condition
+        """Test ihfftn with boundary condition
         Test case include:
         - input type error
         - n out of range
diff --git a/test/ipu/distributed/test_dist_sample.py b/test/ipu/distributed/test_dist_sample.py
index 2f771d78310dd6..2e849d1eec59f6 100644
--- a/test/ipu/distributed/test_dist_sample.py
+++ b/test/ipu/distributed/test_dist_sample.py
@@ -185,7 +185,7 @@ def _get_comm():
     global DISTRIBUTED_COMM
     if DISTRIBUTED_COMM is None:
         raise RuntimeError(
-            "Distributed Commumication not setup. Please run setup_comm(MPI.COMM_WORLD) first."
+            "Distributed Communication not setup. Please run setup_comm(MPI.COMM_WORLD) first."
         )
     return DISTRIBUTED_COMM