4 changes: 2 additions & 2 deletions test/custom_op/test_custom_relu_model.py
@@ -53,7 +53,7 @@

class Net(nn.Layer):
"""
- A simple exmaple for Regression Model.
+ A simple example for Regression Model.
"""

def __init__(self, in_dim, out_dim, use_custom_op=False):
@@ -102,7 +102,7 @@ def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
self.model_save_dir = os.path.join(self.temp_dir.name, 'infer_model')
self.model_path_template = os.path.join(
- self.model_save_dir, 'custom_relu_dygaph_model_{}.pdparams'
+ self.model_save_dir, 'custom_relu_dygraph_model_{}.pdparams'
)
self.model_dy2stat_path = os.path.join(
self.model_save_dir, 'infer_model/custom_relu_model_dy2sta'
2 changes: 1 addition & 1 deletion test/custom_op/test_inference_gap_setup.py
@@ -45,7 +45,7 @@ def forward(self, x):

class TestNewCustomOpSetUpInstall(unittest.TestCase):
def setUp(self):
- # TODO(ming1753): skip window CI beacuse run_cmd(cmd) filed
+ # TODO(ming1753): skip window CI because run_cmd(cmd) filed
if os.name != 'nt':
cur_dir = os.path.dirname(os.path.abspath(__file__))
# compile, install the custom op egg into site-packages under background
2 changes: 1 addition & 1 deletion test/custom_runtime/extension_header_test.cc
@@ -19,6 +19,6 @@
// !!! do not fix this ut by adding other header files (PR#60842) !!!
#include "paddle/phi/extension.h"

- TEST(CustomDevce, extension_header) {
+ TEST(CustomDevice, extension_header) {
VLOG(1) << "check extension header support compile only";
}
8 changes: 4 additions & 4 deletions test/dataset/test_image.py
@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""
- Fliename:
+ Filename:
test_image.py
Description:
- This scipt test image resize,flip and chw.
+ This script test image resize,flip and chw.
"""
import os
import unittest
@@ -34,10 +34,10 @@ class Image(unittest.TestCase):

def test_resize_flip_chw(self):
"""resize"""
- imgdir = os.path.join(
+ img_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'cat.jpg'
)
- images = image.load_image(imgdir)
+ images = image.load_image(img_dir)
images = image.resize_short(images, 256)
self.assertEqual(256, min(images.shape[:2]))
self.assertEqual(3, images.shape[2])
2 changes: 1 addition & 1 deletion test/distributed_passes/auto_parallel_pass_test_base.py
@@ -33,7 +33,7 @@
)


- class AutoPallelPassTestBase(DistPassTestBase):
+ class AutoParallelPassTestBase(DistPassTestBase):
def setUp(self):
paddle.enable_static()
seed = int(os.environ.get('SEED', -1))
4 changes: 2 additions & 2 deletions test/distributed_passes/test_auto_parallel_amp_pass.py
@@ -16,13 +16,13 @@
import unittest

import numpy as np
- from auto_parallel_pass_test_base import AutoPallelPassTestBase
+ from auto_parallel_pass_test_base import AutoParallelPassTestBase

import paddle
from paddle.distributed import fleet


- class TestAMPPass(AutoPallelPassTestBase):
+ class TestAMPPass(AutoParallelPassTestBase):
def init(self):
if paddle.is_compiled_with_cuda():
paddle.set_flags({'FLAGS_cudnn_deterministic': 1})
@@ -17,7 +17,7 @@
import unittest

import numpy as np
- from auto_parallel_pass_test_base import AutoPallelPassTestBase
+ from auto_parallel_pass_test_base import AutoParallelPassTestBase

import paddle
from paddle.distributed import fleet
@@ -32,7 +32,7 @@
sys.path.append("..")


- class TestDataParallelPassWithScale1(AutoPallelPassTestBase):
+ class TestDataParallelPassWithScale1(AutoParallelPassTestBase):
def init(self):
if paddle.is_compiled_with_cuda():
paddle.set_flags({'FLAGS_cudnn_deterministic': 1})
4 changes: 2 additions & 2 deletions test/distributed_passes/test_auto_parallel_fp16_pass.py
@@ -16,13 +16,13 @@
import unittest

import numpy as np
- from auto_parallel_pass_test_base import AutoPallelPassTestBase
+ from auto_parallel_pass_test_base import AutoParallelPassTestBase

import paddle
from paddle.distributed import fleet


- class TestPF16Pass(AutoPallelPassTestBase):
+ class TestPF16Pass(AutoParallelPassTestBase):
def init(self):
if paddle.is_compiled_with_cuda():
paddle.set_flags({'FLAGS_cudnn_deterministic': 1})
@@ -181,7 +181,7 @@ def test_fused_linear_promotion_mp(self):
rank=0, enable_fused_linear_promotion=False, enable_sp=False
)
ops_without_promotion = dist_main_prog.global_block().ops
- oringin_fused_gemm_epilogue_ops = [
+ origin_fused_gemm_epilogue_ops = [
op
for op in ops_without_promotion
if op.type == "fused_gemm_epilogue"
@@ -196,7 +196,7 @@ def test_fused_linear_promotion_mp(self):
]
self.assertEqual(
len(fused_gemm_epilogue_ops),
- len(oringin_fused_gemm_epilogue_ops) + 2,
+ len(origin_fused_gemm_epilogue_ops) + 2,
)


@@ -17,7 +17,7 @@
import unittest

import numpy as np
- from auto_parallel_pass_test_base import AutoPallelPassTestBase
+ from auto_parallel_pass_test_base import AutoParallelPassTestBase

import paddle
import paddle.nn.functional as F
@@ -101,7 +101,7 @@ def mlp_forward(input, label, hidden_size):
return loss


- class TestGradientMergePass(AutoPallelPassTestBase):
+ class TestGradientMergePass(AutoParallelPassTestBase):
def init(self):
paddle.seed(2022)
random.seed(2022)
4 changes: 2 additions & 2 deletions test/distributed_passes/test_auto_parallel_recompute_pass.py
@@ -16,13 +16,13 @@
import unittest

import numpy as np
- from auto_parallel_pass_test_base import AutoPallelPassTestBase
+ from auto_parallel_pass_test_base import AutoParallelPassTestBase

import paddle
from paddle.distributed import fleet


- class TestRecomputePass(AutoPallelPassTestBase):
+ class TestRecomputePass(AutoParallelPassTestBase):
def init(self):
if paddle.is_compiled_with_cuda():
paddle.set_flags({'FLAGS_cudnn_deterministic': 1})
4 changes: 2 additions & 2 deletions test/distributed_passes/test_auto_parallel_sharding_pass.py
@@ -16,13 +16,13 @@
import unittest

import numpy as np
- from auto_parallel_pass_test_base import AutoPallelPassTestBase
+ from auto_parallel_pass_test_base import AutoParallelPassTestBase

import paddle
from paddle.distributed import fleet


- class TestShardingPass(AutoPallelPassTestBase):
+ class TestShardingPass(AutoParallelPassTestBase):
def init(self):
if paddle.is_compiled_with_cuda():
paddle.set_flags({'FLAGS_cudnn_deterministic': 1})
2 changes: 1 addition & 1 deletion test/distribution/config.py
@@ -20,7 +20,7 @@
DEFAULT_DTYPE = 'float64'

TEST_CASE_NAME = 'suffix'
- # All test case will use float64 for compare percision, refs:
+ # All test case will use float64 for compare precision, refs:
# https://github.com/PaddlePaddle/Paddle/wiki/Upgrade-OP-Precision-to-Float64
RTOL = {
'float32': 1e-03,