diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index 7360c14a5214c9..a4c80da11957b7 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -712,7 +712,7 @@ def rrelu(x, lower=1.0 / 8.0, upper=1.0 / 3.0, training=True, name=None):
 
     is_test = not training
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.rrelu(x, lower, upper, is_test)
     else:
         check_variable_and_dtype(
@@ -886,7 +886,7 @@ def maxout(x, groups, axis=1, name=None):
             [0.42400089, 0.40641287, 0.97020894, 0.74437362],
             [0.51785129, 0.73292869, 0.97786582, 0.92382854]]]])
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.maxout(x, groups, axis)
     else:
         check_variable_and_dtype(
@@ -1007,7 +1007,7 @@ def selu(
             f"The alpha must be no less than zero. Received: {alpha}."
         )
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.selu(x, scale, alpha)
     else:
         check_variable_and_dtype(
@@ -1533,7 +1533,7 @@ def tanhshrink(x, name=None):
             Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
             [-0.02005100, -0.00262472, 0.00033201, 0.00868741])
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.tanh_shrink(x)
     else:
         check_variable_and_dtype(
@@ -1583,7 +1583,7 @@ def thresholded_relu(x, threshold=1.0, name=None):
             [2., 0., 0.])
     """
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.thresholded_relu(x, threshold)
     else:
         check_variable_and_dtype(
diff --git a/test/legacy_test/test_activation_op.py b/test/legacy_test/test_activation_op.py
index 1e7550f35dd80a..dbf0b1a22974fe 100644
--- a/test/legacy_test/test_activation_op.py
+++ b/test/legacy_test/test_activation_op.py
@@ -1155,7 +1155,10 @@ def setUp(self):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_pir=True)
+
+    def test_check_output(self):
+        self.check_output(check_pir=True)
 
 
 class TestTanhshrink_ZeroDim(TestTanhshrink):
@@ -1174,6 +1177,7 @@ def setUp(self):
             else paddle.CPUPlace()
         )
 
+    @test_with_pir_api
     def test_static_api(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
@@ -4302,7 +4306,10 @@ def init_shape(self):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_pir=True)
+
+    def test_check_output(self):
+        self.check_output(check_pir=True)
 
 
 class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
@@ -4323,6 +4330,7 @@ def setUp(self):
             else paddle.CPUPlace()
         )
 
+    @test_with_pir_api
     def test_static_api(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
@@ -4790,7 +4798,7 @@ def test_check_grad(self):
 create_test_act_fp16_class(
     TestTanh, check_prim=True, check_prim_pir=True, enable_cinn=True
 )
-create_test_act_fp16_class(TestTanhshrink)
+create_test_act_fp16_class(TestTanhshrink, check_pir=True)
 create_test_act_fp16_class(TestHardShrink, check_pir=True)
 create_test_act_fp16_class(TestSoftshrink, check_pir=True)
 create_test_act_fp16_class(
@@ -4965,7 +4973,7 @@ def test_check_grad(self):
 create_test_act_bf16_class(TestSilu, check_prim=True, check_prim_pir=True)
 create_test_act_bf16_class(TestLogSigmoid)
 create_test_act_bf16_class(TestTanh, check_prim=True, check_prim_pir=True)
-create_test_act_bf16_class(TestTanhshrink)
+create_test_act_bf16_class(TestTanhshrink, check_pir=True)
 create_test_act_bf16_class(TestHardShrink, check_pir=True)
 create_test_act_bf16_class(TestSoftshrink, check_pir=True)
 create_test_act_bf16_class(
diff --git a/test/legacy_test/test_maxout_op.py b/test/legacy_test/test_maxout_op.py
index 7f067ef62e0586..4b7f0cbfba6a1d 100644
--- a/test/legacy_test/test_maxout_op.py
+++ b/test/legacy_test/test_maxout_op.py
@@ -20,6 +20,7 @@
 import paddle
 import paddle.nn.functional as F
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
 np.random.seed(1)
@@ -57,10 +58,10 @@ def set_attrs(self):
         pass
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_pir=True)
 
 
 class TestMaxOutOpAxis0(TestMaxOutOp):
@@ -95,6 +96,7 @@ def setUp(self):
             else paddle.CPUPlace()
         )
 
+    @test_with_pir_api
     def test_static_api(self):
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
@@ -161,6 +163,7 @@ def setUp(self):
         self.axis = 1
         self.place = paddle.CUDAPlace(0)
 
+    @test_with_pir_api
     def test_static_api(self):
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
diff --git a/test/legacy_test/test_rrelu_op.py b/test/legacy_test/test_rrelu_op.py
index e3b5e073d719a4..79ab7a65f3e03b 100644
--- a/test/legacy_test/test_rrelu_op.py
+++ b/test/legacy_test/test_rrelu_op.py
@@ -21,6 +21,7 @@
 import paddle.nn.functional as F
 from paddle import base
 from paddle.base import core, dygraph
+from paddle.pir_utils import test_with_pir_api
 
 paddle.seed(102)
 np.random.seed(102)
@@ -56,32 +57,44 @@ def setUp(self):
             else base.CPUPlace()
         ]
 
+    @test_with_pir_api
     def check_static_result(self, place):
-        with base.program_guard(base.Program(), base.Program()):
+        with paddle.static.program_guard(
+            paddle.static.Program(), paddle.static.Program()
+        ):
             input = paddle.static.data(
                 name="input", shape=[2, 3, 4, 5], dtype="float32"
             )
             res1 = F.rrelu(
                 x=input, lower=self.lower_0, upper=self.upper_0, training=False
             )
-            res2 = F.rrelu(
-                x=input, lower=self.lower_1, upper=self.upper_1, training=False
-            )
             in_np = np.random.uniform(-1.0, 1.0, [2, 3, 4, 5]).astype("float32")
             res_np1 = ref_rrelu(in_np, self.lower_0, self.upper_0)
 
             exe = base.Executor(place)
             fetches = exe.run(
-                base.default_main_program(),
                 feed={"input": in_np},
                 fetch_list=[res1],
             )
             np.testing.assert_allclose(fetches[0], res_np1, rtol=1e-05)
 
+        with paddle.static.program_guard(
+            paddle.static.Program(), paddle.static.Program()
+        ):
+            input = paddle.static.data(
+                name="input", shape=[2, 3, 4, 5], dtype="float32"
+            )
+
+            res2 = F.rrelu(
+                x=input, lower=self.lower_1, upper=self.upper_1, training=False
+            )
+            in_np = np.random.uniform(-1.0, 1.0, [2, 3, 4, 5]).astype("float32")
+
+            exe = base.Executor(place)
+
             res_np2 = ref_rrelu(in_np, self.lower_1, self.upper_1)
             fetches = exe.run(
-                base.default_main_program(),
                 feed={"input": in_np},
                 fetch_list=[res2],
             )
@@ -91,86 +104,130 @@ def test_static(self):
         for place in self.places:
             self.check_static_result(place=place)
 
+    @test_with_pir_api
     def test_static_graph_functional(self):
         '''test_static_graph_functional'''
         for place in self.places:
-            paddle.enable_static()
-            x_1 = paddle.static.data(
-                name="x", shape=self.x_np.shape, dtype="float64"
-            )
-            x_2 = paddle.static.data(
-                name="x2", shape=self.x_np.shape, dtype="float64"
-            )
-            out_1 = F.rrelu(x_1, self.lower_0, self.upper_0, training=False)
-            out_2 = F.rrelu(x_2, self.lower_1, self.upper_1, training=False)
-            out_3 = F.rrelu(x_2, self.lower_1, self.upper_1, training=True)
-
-            exe = paddle.static.Executor(place=place)
-            (res_1,) = exe.run(
-                base.default_main_program(),
-                feed={"x": self.x_np},
-                fetch_list=out_1,
-                use_prune=True,
-            )
-            (res_2,) = exe.run(
-                base.default_main_program(),
-                feed={"x2": self.x_np},
-                fetch_list=out_2,
-                use_prune=True,
-            )
-            (res_3,) = exe.run(
-                base.default_main_program(),
-                feed={"x2": self.x_np},
-                fetch_list=out_3,
-                use_prune=True,
-            )
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                paddle.enable_static()
+                x_1 = paddle.static.data(
+                    name="x", shape=self.x_np.shape, dtype="float64"
+                )
+                out_1 = F.rrelu(x_1, self.lower_0, self.upper_0, training=False)
 
-            out_ref_1 = ref_rrelu(self.x_np, self.lower_0, self.upper_0)
-            out_ref_2 = ref_rrelu(self.x_np, self.lower_1, self.upper_1)
-            np.testing.assert_allclose(out_ref_1, res_1, rtol=1e-05)
-            np.testing.assert_allclose(out_ref_2, res_2, rtol=1e-05)
-            self.assertTrue(
-                check_output(self.x_np, res_3[0], self.lower_1, self.upper_1)
-            )
+                exe = paddle.static.Executor(place=place)
+                (res_1,) = exe.run(
+                    feed={"x": self.x_np},
+                    fetch_list=out_1,
+                    use_prune=True,
+                )
+
+                out_ref_1 = ref_rrelu(self.x_np, self.lower_0, self.upper_0)
+                np.testing.assert_allclose(out_ref_1, res_1, rtol=1e-05)
+
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                paddle.enable_static()
+
+                x_2 = paddle.static.data(
+                    name="x2", shape=self.x_np.shape, dtype="float64"
+                )
+                out_2 = F.rrelu(x_2, self.lower_1, self.upper_1, training=False)
+
+                exe = paddle.static.Executor(place=place)
+
+                (res_2,) = exe.run(
+                    feed={"x2": self.x_np},
+                    fetch_list=out_2,
+                    use_prune=True,
+                )
+
+                out_ref_2 = ref_rrelu(self.x_np, self.lower_1, self.upper_1)
+                np.testing.assert_allclose(out_ref_2, res_2, rtol=1e-05)
+
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                paddle.enable_static()
+
+                x_2 = paddle.static.data(
+                    name="x2", shape=self.x_np.shape, dtype="float64"
+                )
+
+                out_3 = F.rrelu(x_2, self.lower_1, self.upper_1, training=True)
+
+                exe = paddle.static.Executor(place=place)
+
+                (res_3,) = exe.run(
+                    feed={"x2": self.x_np},
+                    fetch_list=out_3,
+                    use_prune=True,
+                )
+
+                self.assertTrue(
+                    check_output(
+                        self.x_np, res_3[0], self.lower_1, self.upper_1
+                    )
+                )
+
+    @test_with_pir_api
     def test_static_graph_layer(self):
         '''test_static_graph_layer'''
+        paddle.enable_static()
         for place in self.places:
-            paddle.enable_static()
-            x_1 = paddle.static.data(
-                name="x", shape=self.x_np.shape, dtype="float64"
-            )
-            x_2 = paddle.static.data(
-                name="x2", shape=self.x_np.shape, dtype="float64"
-            )
-            # init instance
-            rrelu_1 = paddle.nn.RReLU(self.lower_0, self.upper_0)
-            rrelu_2 = paddle.nn.RReLU(self.lower_1, self.upper_1)
-            out_1 = rrelu_1(x_1)
-            out_2 = rrelu_2(x_2)
-
-            exe = paddle.static.Executor(place=place)
-            res_1 = exe.run(
-                base.default_main_program(),
-                feed={"x": self.x_np},
-                fetch_list=out_1,
-                use_prune=True,
-            )
-            res_2 = exe.run(
-                base.default_main_program(),
-                feed={"x2": self.x_np},
-                fetch_list=out_2,
-                use_prune=True,
-            )
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                x_1 = paddle.static.data(
+                    name="x", shape=self.x_np.shape, dtype="float64"
+                )
 
-            self.assertTrue(
-                check_output(self.x_np, res_1[0], self.lower_0, self.upper_0)
-            )
-            self.assertTrue(
-                check_output(self.x_np, res_2[0], self.lower_1, self.upper_1)
-            )
+                # init instance
+                rrelu_1 = paddle.nn.RReLU(self.lower_0, self.upper_0)
+                out_1 = rrelu_1(x_1)
+
+                exe = paddle.static.Executor(place=place)
+                res_1 = exe.run(
+                    feed={"x": self.x_np},
+                    fetch_list=out_1,
+                    use_prune=True,
+                )
+
+                self.assertTrue(
+                    check_output(
+                        self.x_np, res_1[0], self.lower_0, self.upper_0
+                    )
+                )
+
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                x_2 = paddle.static.data(
+                    name="x2", shape=self.x_np.shape, dtype="float64"
+                )
+                # init instance
+                rrelu_2 = paddle.nn.RReLU(self.lower_1, self.upper_1)
+                out_2 = rrelu_2(x_2)
+
+                exe = paddle.static.Executor(place=place)
+
+                res_2 = exe.run(
+                    feed={"x2": self.x_np},
+                    fetch_list=out_2,
+                    use_prune=True,
+                )
+
+                self.assertTrue(
+                    check_output(
+                        self.x_np, res_2[0], self.lower_1, self.upper_1
+                    )
+                )
 
     def dygraph_check(self, lower, upper):
         for place in self.places:
@@ -351,10 +408,10 @@ def convert_input_output(self):
         pass
 
     def test_check_output(self):
-        self.check_output(no_check_set=['Noise'])
+        self.check_output(no_check_set=['Noise'], check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_pir=True)
 
 
 class RReluTrainingTest(RReluTest):
@@ -394,11 +451,13 @@ def convert_input_output(self):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, no_check_set=['Noise'])
+        self.check_output_with_place(
+            place, no_check_set=['Noise'], check_pir=True
+        )
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, ['X'], 'Out')
+        self.check_grad_with_place(place, ['X'], 'Out', check_pir=True)
 
 
 class RReluTrainingTestFP16OP(RReluTrainingTest):
@@ -425,11 +484,13 @@ def convert_input_output(self):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, no_check_set=['Noise'])
+        self.check_output_with_place(
+            place, no_check_set=['Noise'], check_pir=True
+        )
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, ['X'], 'Out')
+        self.check_grad_with_place(place, ['X'], 'Out', check_pir=True)
 
 
 if __name__ == "__main__":
diff --git a/test/legacy_test/test_selu_op.py b/test/legacy_test/test_selu_op.py
index 52162111b85d86..4a3c86fba4bf9d 100644
--- a/test/legacy_test/test_selu_op.py
+++ b/test/legacy_test/test_selu_op.py
@@ -21,6 +21,7 @@
 import paddle.nn.functional as F
 from paddle import base
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 
 def ref_selu(
@@ -79,10 +80,10 @@ def init_dtype(self):
         self.dtype = np.float64
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_pir=True)
 
 
 class SeluTestFP16OP(SeluTest):
@@ -100,10 +101,12 @@ def init_dtype(self):
         self.dtype = np.uint16
 
     def test_check_output(self):
-        self.check_output_with_place(core.CUDAPlace(0))
+        self.check_output_with_place(core.CUDAPlace(0), check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad_with_place(core.CUDAPlace(0), ['X'], 'Out')
+        self.check_grad_with_place(
+            core.CUDAPlace(0), ['X'], 'Out', check_pir=True
+        )
 
 
 class TestSeluAPI(unittest.TestCase):
@@ -121,6 +124,7 @@ def setUp(self):
             else paddle.CPUPlace()
         )
 
+    @test_with_pir_api
     def test_static_api(self):
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
@@ -144,6 +148,7 @@ def test_dygraph_api(self):
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
+    @test_with_pir_api
     def test_base_api(self):
         with base.program_guard(base.Program()):
             x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)