diff --git a/test/legacy_test/test_activation_op.py b/test/legacy_test/test_activation_op.py
index 7b2b70d7b515f6..93b3f807d53453 100644
--- a/test/legacy_test/test_activation_op.py
+++ b/test/legacy_test/test_activation_op.py
@@ -1491,6 +1491,7 @@ def test_dygraph_api(self):
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
+    @test_with_pir_api
     def test_errors(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
@@ -1507,10 +1508,11 @@ def test_errors(self):
                 )
                 self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
                 # support the input dtype is float16
-                x_fp16 = paddle.static.data(
-                    name='x_fp16', shape=[12, 10], dtype='float16'
-                )
-                F.softshrink(x_fp16)
+                if core.is_compiled_with_cuda():
+                    x_fp16 = paddle.static.data(
+                        name='x_fp16', shape=[12, 10], dtype='float16'
+                    )
+                    F.softshrink(x_fp16)
 
 
 class TestSqrt(TestActivation, TestParameter):
@@ -4307,6 +4309,7 @@ def test_dygraph_api(self):
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
+    @test_with_pir_api
     def test_errors(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
@@ -4318,10 +4321,11 @@ def test_errors(self):
                 )
                 self.assertRaises(TypeError, F.softsign, x_int32)
                 # support the input dtype is float16
-                x_fp16 = paddle.static.data(
-                    name='x_fp16', shape=[12, 10], dtype='float16'
-                )
-                F.softsign(x_fp16)
+                if core.is_compiled_with_cuda():
+                    x_fp16 = paddle.static.data(
+                        name='x_fp16', shape=[12, 10], dtype='float16'
+                    )
+                    F.softsign(x_fp16)
 
 
 def ref_thresholded_relu(x, threshold=1.0):