diff --git a/test/legacy_test/test_activation_op.py b/test/legacy_test/test_activation_op.py
index 7549b68dc336b0..dd7b24cbf95738 100644
--- a/test/legacy_test/test_activation_op.py
+++ b/test/legacy_test/test_activation_op.py
@@ -1221,6 +1221,7 @@ def test_dygraph_api(self):
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
+    @test_with_pir_api
     def test_errors(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
@@ -1232,10 +1233,11 @@ def test_errors(self):
                 )
                 self.assertRaises(TypeError, F.tanhshrink, x_int32)
                 # support the input dtype is float16
-                x_fp16 = paddle.static.data(
-                    name='x_fp16', shape=[12, 10], dtype='float16'
-                )
-                F.tanhshrink(x_fp16)
+                if paddle.is_compiled_with_cuda():
+                    x_fp16 = paddle.static.data(
+                        name='x_fp16', shape=[12, 10], dtype='float16'
+                    )
+                    F.tanhshrink(x_fp16)
 
 
 def ref_hardshrink(x, threshold):
@@ -4411,6 +4413,7 @@ def test_dygraph_api(self):
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
+    @test_with_pir_api
     def test_errors(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
@@ -4422,10 +4425,11 @@ def test_errors(self):
                 )
                 self.assertRaises(TypeError, F.thresholded_relu, x_int32)
                 # support the input dtype is float16
-                x_fp16 = paddle.static.data(
-                    name='x_fp16', shape=[12, 10], dtype='float16'
-                )
-                F.thresholded_relu(x_fp16)
+                if paddle.is_compiled_with_cuda():
+                    x_fp16 = paddle.static.data(
+                        name='x_fp16', shape=[12, 10], dtype='float16'
+                    )
+                    F.thresholded_relu(x_fp16)
 
 
 def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
@@ -4642,6 +4646,7 @@ def test_base_api(self):
         out_ref = ref_swish(self.x_np)
         np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
+    @test_with_pir_api
     def test_errors(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
@@ -4653,10 +4658,11 @@ def test_errors(self):
                 )
                 self.assertRaises(TypeError, F.swish, x_int32)
                 # support the input dtype is float16
-                x_fp16 = paddle.static.data(
-                    name='x_fp16', shape=[12, 10], dtype='float16'
-                )
-                F.swish(x_fp16)
+                if paddle.is_compiled_with_cuda():
+                    x_fp16 = paddle.static.data(
+                        name='x_fp16', shape=[12, 10], dtype='float16'
+                    )
+                    F.swish(x_fp16)
 
 
 def ref_mish(x, threshold=20.0):
@@ -4943,7 +4949,7 @@ def test_check_grad(self):
 create_test_act_fp16_class(TestSTanh)
 create_test_act_fp16_class(TestSoftplus, check_pir=True)
 create_test_act_fp16_class(TestSoftsign, check_pir=True)
-create_test_act_fp16_class(TestThresholdedRelu)
+create_test_act_fp16_class(TestThresholdedRelu, check_pir=True)
 create_test_act_fp16_class(TestHardSigmoid, check_pir=True)
 create_test_act_fp16_class(TestSwish)
 create_test_act_fp16_class(TestHardSwish, check_prim=True, check_pir=True)
@@ -5112,7 +5118,7 @@ def test_check_grad(self):
 create_test_act_bf16_class(TestSTanh)
 create_test_act_bf16_class(TestSoftplus, check_pir=True)
 create_test_act_bf16_class(TestSoftsign, check_pir=True)
-create_test_act_bf16_class(TestThresholdedRelu)
+create_test_act_bf16_class(TestThresholdedRelu, check_pir=True)
 create_test_act_bf16_class(TestHardSigmoid, check_pir=True)
 create_test_act_bf16_class(TestSwish)
 create_test_act_bf16_class(TestHardSwish, check_prim=True, check_pir=True)
diff --git a/test/legacy_test/test_temporal_shift_op.py b/test/legacy_test/test_temporal_shift_op.py
index bb29055bfc9463..10065d471d1c45 100644
--- a/test/legacy_test/test_temporal_shift_op.py
+++ b/test/legacy_test/test_temporal_shift_op.py
@@ -173,6 +173,7 @@ def test_static_fp16_gpu(self):
                     fetch_list=[y],
                 )
 
+    @test_with_pir_api
     def test_error(self):
         def attr_data_format():
             input = paddle.randn([6, 4, 2, 2])
diff --git a/test/legacy_test/test_triangular_solve_op.py b/test/legacy_test/test_triangular_solve_op.py
index d4aecda8780ce5..d813292f9590b1 100644
--- a/test/legacy_test/test_triangular_solve_op.py
+++ b/test/legacy_test/test_triangular_solve_op.py
@@ -23,6 +23,7 @@
 import paddle
 from paddle import base
 from paddle.base import Program, core, program_guard
+from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
 
@@ -749,7 +750,9 @@ def setUp(self):
             self.place.append(paddle.CUDAPlace(0))
 
     def check_static_result(self, place):
-        with base.program_guard(base.Program(), base.Program()):
+        with paddle.static.program_guard(
+            paddle.static.Program(), paddle.static.Program()
+        ):
             x = paddle.static.data(name="x", shape=[3, 3], dtype=self.dtype)
             y = paddle.static.data(name="y", shape=[3, 2], dtype=self.dtype)
             z = paddle.linalg.triangular_solve(x, y)
@@ -760,12 +763,13 @@ def check_static_result(self, place):
 
             exe = base.Executor(place)
             fetches = exe.run(
-                base.default_main_program(),
+                paddle.static.default_main_program(),
                 feed={"x": x_np, "y": y_np},
                 fetch_list=[z],
             )
             np.testing.assert_allclose(fetches[0], z_np, rtol=1e-05)
 
+    @test_with_pir_api
     def test_static(self):
         for place in self.place:
             self.check_static_result(place=place)
@@ -790,7 +794,7 @@ def run(place):
 
 
 class TestTriangularSolveOpError(unittest.TestCase):
-    def test_errors(self):
+    def test_errors1(self):
         with program_guard(Program(), Program()):
             # The input type of solve_op must be Variable.
             x1 = base.create_lod_tensor(
@@ -801,6 +805,11 @@ def test_errors(self):
             )
             self.assertRaises(TypeError, paddle.linalg.triangular_solve, x1, y1)
 
+    @test_with_pir_api
+    def test_errors2(self):
+        with paddle.static.program_guard(
+            paddle.static.Program(), paddle.static.Program()
+        ):
             # The data type of input must be float32 or float64.
             x2 = paddle.static.data(name="x2", shape=[30, 30], dtype="bool")
             y2 = paddle.static.data(name="y2", shape=[30, 10], dtype="bool")
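
For reference, below is a minimal, self-contained sketch (not part of the patch) of the pattern these hunks apply: `test_with_pir_api` from `paddle.pir_utils` wraps an error-checking test so it runs under both the legacy static-graph API and the PIR API, and the float16 check is guarded by `paddle.is_compiled_with_cuda()` since CPU builds may not register fp16 kernels. The class name and the use of `F.swish` here are illustrative assumptions, not taken from the test files verbatim.

```python
import unittest

import paddle
import paddle.nn.functional as F
from paddle.pir_utils import test_with_pir_api


class ActivationErrorPatternSketch(unittest.TestCase):
    # Illustrative test class; mirrors the pattern the hunks above apply.
    @test_with_pir_api
    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # int32 input is rejected with a TypeError.
            x_int32 = paddle.static.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.swish, x_int32)
            # float16 is only exercised on CUDA builds.
            if paddle.is_compiled_with_cuda():
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.swish(x_fp16)
        paddle.disable_static()


if __name__ == '__main__':
    unittest.main()
```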