diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index c70e8b2d729fa5..33d00de4d0d1f9 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -19,7 +19,7 @@
 import numpy as np
 
 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.base.libpaddle import DataType
 from paddle.common_ops_import import VarDesc, dygraph_utils
 from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
@@ -3860,8 +3860,8 @@ def kron(x, y, name=None):
             [12, 15, 18, 16, 20, 24],
             [21, 24, 27, 28, 32, 36]])
     """
-    if in_dynamic_mode():
-        return _legacy_C_ops.kron(x, y)
+    if in_dynamic_or_pir_mode():
+        return _C_ops.kron(x, y)
     else:
         helper = LayerHelper('kron', **locals())
         check_variable_and_dtype(
@@ -5149,7 +5149,7 @@ def atan2(x, y, name=None):
 
     """
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.atan2(x, y)
     else:
         check_variable_and_dtype(
@@ -5279,7 +5279,7 @@ def lerp(x, y, weight, name=None):
     if isinstance(weight, float):
         weight = paddle.full(shape=[], fill_value=weight, dtype=x.dtype)
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.lerp(x, y, weight)
     else:
         check_variable_and_dtype(
@@ -5478,7 +5478,7 @@ def deg2rad(x, name=None):
             3.14159274)
     """
     deg2rad_scale = np.pi / 180.0
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, deg2rad_scale, 0.0, True)
@@ -6026,7 +6026,7 @@ def heaviside(x, y, name=None):
             [[0.        , 0.20000000, 1.        ],
              [0.        , 1.        , 0.30000001]])
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.heaviside(x, y)
     else:
         op_type = 'elementwise_heaviside'
diff --git a/test/legacy_test/test_atan2_op.py b/test/legacy_test/test_atan2_op.py
index 3d2abef9393d0f..35365d766ea34d 100644
--- a/test/legacy_test/test_atan2_op.py
+++ b/test/legacy_test/test_atan2_op.py
@@ -19,6 +19,7 @@
 
 import paddle
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
 np.random.seed(0)
@@ -45,10 +46,12 @@ def setUp(self):
         self.outputs = {'Out': out}
 
     def test_check_grad(self):
-        self.check_grad(['X1', 'X2'], 'Out', check_cinn=self.check_cinn)
+        self.check_grad(
+            ['X1', 'X2'], 'Out', check_cinn=self.check_cinn, check_pir=True
+        )
 
     def test_check_output(self):
-        self.check_output(check_cinn=self.check_cinn)
+        self.check_output(check_cinn=self.check_cinn, check_pir=True)
 
     def init_dtype(self):
         self.dtype = np.float64
@@ -69,6 +72,7 @@ def test_check_grad(self):
                 1 / self.inputs['X1'].size,
             ),
             check_cinn=self.check_cinn,
+            check_pir=True,
         )
 
 
@@ -100,6 +104,7 @@ def setUp(self):
         if core.is_compiled_with_cuda():
             self.place.append(paddle.CUDAPlace(0))
 
+    @test_with_pir_api
     def test_static_api(self):
         paddle.enable_static()
 
@@ -154,16 +159,23 @@ def setUp(self):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_cinn=self.check_cinn)
+        self.check_output_with_place(
+            place, check_cinn=self.check_cinn, check_pir=True
+        )
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X1', 'X2'], 'Out', check_cinn=self.check_cinn
+            place,
+            ['X1', 'X2'],
+            'Out',
+            check_cinn=self.check_cinn,
+            check_pir=True,
         )
 
 
 class TestAtan2Error(unittest.TestCase):
+    @test_with_pir_api
     def test_mismatch(self):
         paddle.enable_static()
 
diff --git a/test/legacy_test/test_elementwise_floordiv_op.py b/test/legacy_test/test_elementwise_floordiv_op.py
index f85ffe2252e02b..ccfab0b9adf56a 100644
--- a/test/legacy_test/test_elementwise_floordiv_op.py
+++ b/test/legacy_test/test_elementwise_floordiv_op.py
@@ -45,7 +45,7 @@ def setUp(self):
         self.outputs = {'Out': self.out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def init_input_output(self):
         self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
@@ -105,6 +105,7 @@ def device_guard(device=None):
 
 class TestFloorDivideOp(unittest.TestCase):
     def test_name(self):
+        paddle.enable_static()
         with paddle_static_guard():
             with base.program_guard(base.Program()):
                 x = paddle.static.data(name="x", shape=[2, 3], dtype="int64")
@@ -112,6 +113,7 @@ def test_name(self):
                 y_1 = paddle.floor_divide(x, y, name='div_res')
                 self.assertEqual(('div_res' in y_1.name), True)
+        paddle.disable_static()
 
     def test_dygraph(self):
         paddle.disable_static()
diff --git a/test/legacy_test/test_elementwise_heaviside_op.py b/test/legacy_test/test_elementwise_heaviside_op.py
index be1a241849e417..66cfe24d8853c4 100644
--- a/test/legacy_test/test_elementwise_heaviside_op.py
+++ b/test/legacy_test/test_elementwise_heaviside_op.py
@@ -19,6 +19,7 @@
 
 import paddle
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 
 def Heaviside_grad(x, y, dout, astype="float16", is_bfloat16=False):
@@ -41,16 +42,16 @@ def setUp(self):
         self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_pir=True)
 
     def test_check_grad_ingore_x(self):
-        self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
+        self.check_grad(['Y'], 'Out', no_grad_set=set("X"), check_pir=True)
 
     def test_check_grad_ingore_y(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
+        self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_pir=True)
 
 
 class TestHeavisideBroadcast(unittest.TestCase):
@@ -98,6 +99,7 @@ def setUp(self):
         self.out_np = np.heaviside(self.x_np, self.y_np)
         self.dtype = "float64"
 
+    @test_with_pir_api
    def test_static(self):
         for use_cuda in (
             [False, True] if paddle.device.is_compiled_with_cuda() else [False]
@@ -177,7 +179,7 @@ def setUp(self):
         self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         self.check_grad(
@@ -186,6 +188,7 @@ def test_check_grad(self):
             user_defined_grads=Heaviside_grad(
                 self.inputs['X'], self.inputs['Y'], 1 / self.inputs['X'].size
             ),
+            check_pir=True,
         )
 
 
@@ -212,7 +215,7 @@ def setUp(self):
         self.outputs['Out'] = convert_float_to_uint16(self.outputs['Out'])
 
     def test_check_output(self):
-        self.check_output_with_place(self.place)
+        self.check_output_with_place(self.place, check_pir=True)
 
     def test_check_grad(self):
         self.check_grad_with_place(
@@ -226,6 +229,7 @@ def test_check_grad(self):
                 self.np_dtype,
                 True,
             ),
+            check_pir=True,
         )
 
diff --git a/test/legacy_test/test_kron_op.py b/test/legacy_test/test_kron_op.py
index be91861825a74e..64c17a6aab6802 100644
--- a/test/legacy_test/test_kron_op.py
+++ b/test/legacy_test/test_kron_op.py
@@ -21,6 +21,7 @@
 import paddle.base.dygraph as dg
 from paddle import base
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 
 class TestKronOp(OpTest):
@@ -38,16 +39,16 @@ def _init_dtype(self):
         return "float64"
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_pir=True)
 
     def test_check_grad_ignore_x(self):
-        self.check_grad(['Y'], 'Out', no_grad_set=set('X'))
+        self.check_grad(['Y'], 'Out', no_grad_set=set('X'), check_pir=True)
 
     def test_check_grad_ignore_y(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
+        self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_pir=True)
 
 
 class TestKronOp2(TestKronOp):
@@ -102,19 +103,21 @@ def setUp(self):
         self.place = core.CUDAPlace(0)
 
     def test_check_output(self):
-        self.check_output_with_place(self.place)
+        self.check_output_with_place(self.place, check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
+        self.check_grad_with_place(
+            self.place, ['X', 'Y'], 'Out', check_pir=True
+        )
 
     def test_check_grad_ignore_x(self):
         self.check_grad_with_place(
-            self.place, ['Y'], 'Out', no_grad_set=set('X')
+            self.place, ['Y'], 'Out', no_grad_set=set('X'), check_pir=True
         )
 
     def test_check_grad_ignore_y(self):
         self.check_grad_with_place(
-            self.place, ['X'], 'Out', no_grad_set=set('Y')
+            self.place, ['X'], 'Out', no_grad_set=set('Y'), check_pir=True
         )
 
 
@@ -122,7 +125,6 @@ class TestKronLayer(unittest.TestCase):
     def test_case(self):
         a = np.random.randn(10, 10).astype(np.float64)
         b = np.random.randn(10, 10).astype(np.float64)
-
         place = base.CPUPlace()
         with dg.guard(place):
             a_var = dg.to_variable(a)
@@ -130,23 +132,22 @@ def test_case(self):
             c_var = paddle.kron(a_var, b_var)
             np.testing.assert_allclose(c_var.numpy(), np.kron(a, b))
 
+    @test_with_pir_api
     def test_case_with_output(self):
+        place = base.CPUPlace()
         a = np.random.randn(10, 10).astype(np.float64)
         b = np.random.randn(10, 10).astype(np.float64)
-
-        main = base.Program()
-        start = base.Program()
+        out_np = np.kron(a, b)
+        paddle.enable_static()
+        prog = paddle.static.Program()
         with base.unique_name.guard():
-            with base.program_guard(main, start):
+            with paddle.static.program_guard(prog, prog):
                 a_var = paddle.static.data("a", [-1, -1], dtype="float64")
                 b_var = paddle.static.data("b", [-1, -1], dtype="float64")
                 out_var = paddle.kron(a_var, b_var)
-
-        place = base.CPUPlace()
-        exe = base.Executor(place)
-        exe.run(start)
-        (c,) = exe.run(main, feed={'a': a, 'b': b}, fetch_list=[out_var])
-        np.testing.assert_allclose(c, np.kron(a, b))
+        exe = paddle.static.Executor(place=place)
+        (res,) = exe.run(prog, feed={'a': a, 'b': b}, fetch_list=[out_var])
+        np.testing.assert_allclose(res, out_np)
 
 
 class TestComplexKronOp(OpTest):
@@ -179,12 +180,13 @@ def init_input_output(self):
         self.out = np.kron(self.x, self.y)
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad_normal(self):
         self.check_grad(
             ['X', 'Y'],
             'Out',
+            check_pir=True,
         )
 
     def test_check_grad_ingore_x(self):
@@ -192,6 +194,7 @@ def test_check_grad_ingore_x(self):
             ['Y'],
             'Out',
             no_grad_set=set("X"),
+            check_pir=True,
         )
 
     def test_check_grad_ingore_y(self):
@@ -199,6 +202,7 @@ def test_check_grad_ingore_y(self):
             ['X'],
             'Out',
             no_grad_set=set('Y'),
+            check_pir=True,
         )
 
diff --git a/test/legacy_test/test_lerp_op.py b/test/legacy_test/test_lerp_op.py
index 9fc3ada88a4e86..99f0838b4aedff 100644
--- a/test/legacy_test/test_lerp_op.py
+++ b/test/legacy_test/test_lerp_op.py
@@ -19,6 +19,7 @@
 
 import paddle
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
 np.random.seed(0)
@@ -52,10 +53,10 @@ def init_wshape(self):
         self.wshape = [1]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_pir=True)
 
 
 class TestLerpWithDim2(TestLerp):
@@ -139,6 +140,7 @@ def setUp(self):
         if core.is_compiled_with_cuda():
             self.place.append(paddle.CUDAPlace(0))
 
+    @test_with_pir_api
     def test_static_api(self):
         paddle.enable_static()
 
@@ -268,7 +270,7 @@ def init_grad(self, w):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_pir=True)
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
@@ -277,6 +279,7 @@ def test_check_grad(self):
             ['X', 'Y'],
             'Out',
             user_defined_grads=[self.x_grad, self.y_grad],
+            check_pir=True,
        )
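For reviewers who want a quick local sanity check, the sketch below (not part of the patch; it assumes a Paddle build that includes these changes) exercises every API this diff moves onto the _C_ops path in dynamic mode and compares each result against its NumPy counterpart:

# Hypothetical smoke test, not included in this PR: runs the migrated
# dynamic-mode code paths (kron, atan2, heaviside, lerp, deg2rad) and
# checks them against NumPy references.
import numpy as np

import paddle

paddle.disable_static()

x_np = np.random.randn(3, 4)
y_np = np.random.randn(3, 4)
x = paddle.to_tensor(x_np)
y = paddle.to_tensor(y_np)

np.testing.assert_allclose(paddle.kron(x, y).numpy(), np.kron(x_np, y_np))
np.testing.assert_allclose(paddle.atan2(x, y).numpy(), np.arctan2(x_np, y_np))
np.testing.assert_allclose(
    paddle.heaviside(x, y).numpy(), np.heaviside(x_np, y_np)
)
# lerp(x, y, 0.5) == x + 0.5 * (y - x); a float weight is promoted to a
# 0-D tensor inside paddle.lerp, as the math.py context above shows.
np.testing.assert_allclose(
    paddle.lerp(x, y, 0.5).numpy(), x_np + 0.5 * (y_np - x_np), rtol=1e-6
)
np.testing.assert_allclose(paddle.deg2rad(x).numpy(), np.deg2rad(x_np), rtol=1e-6)
print("all dynamic-mode checks passed")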