diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index 5faba8b2f31310..3a2e31f8c97254 100644
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -22,7 +22,11 @@
 from paddle.utils import deprecated
 
 from ...base.data_feeder import check_variable_and_dtype
-from ...base.framework import _current_expected_place, in_pir_mode
+from ...base.framework import (
+    _current_expected_place,
+    in_dynamic_or_pir_mode,
+    in_pir_mode,
+)
 from ...base.layer_helper import LayerHelper
 from ...common_ops_import import Variable
 from ...tensor.manipulation import reshape
@@ -267,7 +271,7 @@ def base_softmax_with_cross_entropy(
         )
     if input_dims - 1 == label_dims:
         label = paddle.unsqueeze(label, axis=axis)
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         softmax, loss = _C_ops.cross_entropy_with_softmax(
             logits,
             label,
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 86a2188c332255..c136fac33c255d 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -1466,7 +1466,7 @@ def cross(x, y, axis=9, name=None):
              [0., 0., 0.],
              [0., 0., 0.]])
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         axis = K_DEFAULT_DIM if axis is None else axis
         return _C_ops.cross(x, y, axis)
     else:
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 40856399238ae2..e659136ec4f16f 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -780,10 +780,16 @@ def crop(x, shape=None, offsets=None, name=None):
         x, 'x', ['float32', 'float64', 'int32', 'int64'], 'crop_tensor'
     )
     check_type(
-        shape, 'shape', (list, tuple, Variable, type(None)), 'crop_tensor'
+        shape,
+        'shape',
+        (list, tuple, Variable, type(None), paddle.pir.OpResult),
+        'crop_tensor',
     )
     check_type(
-        offsets, 'offsets', (list, tuple, Variable, type(None)), 'crop_tensor'
+        offsets,
+        'offsets',
+        (list, tuple, Variable, type(None), paddle.pir.OpResult),
+        'crop_tensor',
     )
 
     if offsets is None:
@@ -792,7 +798,7 @@ def crop(x, shape=None, offsets=None, name=None):
     if shape is None:
         shape = x.shape
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.crop(x, shape, offsets)
 
     out = helper.create_variable_for_type_inference(x.dtype)
diff --git a/test/legacy_test/test_crop_tensor_op.py b/test/legacy_test/test_crop_tensor_op.py
index 8d9743a55171c4..ab8a6466d97a52 100644
--- a/test/legacy_test/test_crop_tensor_op.py
+++ b/test/legacy_test/test_crop_tensor_op.py
@@ -81,10 +81,10 @@ def initTestCase(self):
         self.offsets = [1, 2]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_pir=True)
 
 
 class TestCase1(TestCropTensorOp):
@@ -182,10 +182,10 @@ def initTestCase(self):
         self.shape_attr = [0, 0]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_pir=True)
 
 
 class TestCropTensorOpTensorAttrCase1(TestCropTensorOpTensorAttr):
diff --git a/test/legacy_test/test_cross_op.py b/test/legacy_test/test_cross_op.py
index cd13ea10f45106..6aeab30d6c42f7 100644
--- a/test/legacy_test/test_cross_op.py
+++ b/test/legacy_test/test_cross_op.py
@@ -19,7 +19,8 @@
 
 import paddle
 from paddle import base
-from paddle.base import Program, core, program_guard
+from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 
 class TestCrossOp(OpTest):
@@ -47,10 +48,10 @@ def init_output(self):
         self.outputs = {'Out': np.array(z_list).reshape(self.shape)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_pir=True)
 
 
 class TestCrossOpCase1(TestCrossOp):
@@ -116,13 +117,15 @@ def test_check_output(self):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
             if core.is_bfloat16_supported(place):
-                self.check_output_with_place(place)
+                self.check_output_with_place(place, check_pir=True)
 
     def test_check_grad_normal(self):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
             if core.is_bfloat16_supported(place):
-                self.check_grad_with_place(place, ['X', 'Y'], 'Out')
+                self.check_grad_with_place(
+                    place, ['X', 'Y'], 'Out', check_pir=True
+                )
 
 
 class TestCrossAPI(unittest.TestCase):
@@ -134,18 +137,22 @@ def input_data(self):
             [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
         ).astype('float32')
 
+    @test_with_pir_api
     def test_cross_api(self):
         self.input_data()
 
+        main = paddle.static.Program()
+        startup = paddle.static.Program()
         # case 1:
-        with program_guard(Program(), Program()):
+        with paddle.static.program_guard(main, startup):
             x = paddle.static.data(name='x', shape=[-1, 3], dtype="float32")
             y = paddle.static.data(name='y', shape=[-1, 3], dtype="float32")
             z = paddle.cross(x, y, axis=1)
             exe = base.Executor(base.CPUPlace())
             (res,) = exe.run(
+                main,
                 feed={'x': self.data_x, 'y': self.data_y},
-                fetch_list=[z.name],
+                fetch_list=[z],
                 return_numpy=False,
             )
             expect_out = np.array(
@@ -153,15 +160,18 @@ def test_cross_api(self):
             )
             np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05)
 
+        main = paddle.static.Program()
+        startup = paddle.static.Program()
         # case 2:
-        with program_guard(Program(), Program()):
+        with paddle.static.program_guard(main, startup):
             x = paddle.static.data(name='x', shape=[-1, 3], dtype="float32")
             y = paddle.static.data(name='y', shape=[-1, 3], dtype="float32")
             z = paddle.cross(x, y)
             exe = base.Executor(base.CPUPlace())
             (res,) = exe.run(
+                main,
                 feed={'x': self.data_x, 'y': self.data_y},
-                fetch_list=[z.name],
+                fetch_list=[z],
                 return_numpy=False,
             )
             expect_out = np.array(
@@ -169,8 +179,14 @@ def test_cross_api(self):
             )
             np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05)
 
-        # case 3:
-        with program_guard(Program(), Program()):
+    def test_cross_api1(self):
+        self.input_data()
+
+        main = paddle.static.Program()
+        startup = paddle.static.Program()
+
+        # case 1:
+        with paddle.static.program_guard(main, startup):
             x = paddle.static.data(name="x", shape=[-1, 3], dtype="float32")
             y = paddle.static.data(name='y', shape=[-1, 3], dtype='float32')
 
diff --git a/test/legacy_test/test_softmax_with_cross_entropy_op.py b/test/legacy_test/test_softmax_with_cross_entropy_op.py
index 0515c7c78e9d70..e2d512707e57de 100644
--- a/test/legacy_test/test_softmax_with_cross_entropy_op.py
+++ b/test/legacy_test/test_softmax_with_cross_entropy_op.py
@@ -153,27 +153,30 @@ def setUp(self):
 
     def test_check_output(self):
         if self.python_api is not None:
-            self.check_output()
-        self.check_output()
+            self.check_output(check_pir=True)
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         if core.is_compiled_with_rocm():
             if self.python_api is not None:
                 self.check_grad(
-                    ["Logits"],
-                    "Loss",
-                    max_relative_error=5e-1,
+                    ["Logits"], "Loss", max_relative_error=5e-1, check_pir=False
                 )
             # HIP will have accuracy fail when using float32 in CPU place
-            self.check_grad(["Logits"], "Loss", max_relative_error=5e-1)
+            self.check_grad(
+                ["Logits"], "Loss", max_relative_error=5e-1, check_pir=False
+            )
         else:
             if self.python_api is not None:
                 self.check_grad(
                     ["Logits"],
                     "Loss",
                     numeric_grad_delta=0.001,
+                    check_pir=False,
                 )
-            self.check_grad(["Logits"], "Loss", numeric_grad_delta=0.001)
+            self.check_grad(
+                ["Logits"], "Loss", numeric_grad_delta=0.001, check_pir=False
+            )
 
 
 class TestSoftmaxWithCrossEntropyOpInt32(TestSoftmaxWithCrossEntropyOp):
@@ -509,13 +512,15 @@ def setUp(self):
 
     def test_check_output(self):
         if self.python_api is not None:
-            self.check_output()
-        self.check_output()
+            self.check_output(check_pir=True)
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         if self.python_api is not None:
-            self.check_grad(["Logits"], "Loss")
-        self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
+            self.check_grad(["Logits"], "Loss", check_pir=False)
+        self.check_grad(
+            ["Logits"], "Loss", max_relative_error=0.1, check_pir=False
+        )
 
 
 class TestSoftmaxWithCrossEntropyOpNoCudnnFp16(
@@ -534,8 +539,12 @@ def initParams(self):
 
     def test_check_grad(self):
         if self.python_api is not None:
-            self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
-        self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
+            self.check_grad(
+                ["Logits"], "Loss", max_relative_error=0.1, check_pir=False
+            )
+        self.check_grad(
+            ["Logits"], "Loss", max_relative_error=0.1, check_pir=False
+        )
 
 
 class TestSoftmaxWithCrossEntropyOp2(TestSoftmaxWithCrossEntropyOp):
@@ -557,19 +566,23 @@ def initParams(self):
 
     def test_check_output(self):
         if self.python_api is not None:
-            self.check_output()
-        self.check_output()
+            self.check_output(check_pir=True)
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         if core.is_compiled_with_rocm():
             # HIP will have accuracy fail when using float32 in CPU place
             if self.python_api is not None:
-                self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
-            self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
+                self.check_grad(
+                    ["Logits"], "Loss", max_relative_error=0.1, check_pir=False
+                )
+            self.check_grad(
+                ["Logits"], "Loss", max_relative_error=0.1, check_pir=False
+            )
         else:
             if self.python_api is not None:
-                self.check_grad(["Logits"], "Loss")
-            self.check_grad(["Logits"], "Loss")
+                self.check_grad(["Logits"], "Loss", check_pir=False)
+                self.check_grad(["Logits"], "Loss", check_pir=False)
 
 
 class TestSoftmaxWithCrossEntropyOp3(TestSoftmaxWithCrossEntropyOp):
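
For reference only, and not part of the patch itself: a minimal, self-contained sketch of the test pattern the migrated tests above rely on. The paddle.pir_utils.test_with_pir_api decorator runs the decorated body once under the legacy static-graph IR and once under PIR, and under PIR the fetch_list takes the value handle directly rather than z.name, exactly as the test_cross_op.py changes show. The class name CrossPirSmokeTest, the input values, and the tolerance are illustrative assumptions.

# Hypothetical standalone smoke test mirroring the TestCrossAPI pattern above.
import unittest

import numpy as np

import paddle
from paddle.pir_utils import test_with_pir_api


class CrossPirSmokeTest(unittest.TestCase):
    @test_with_pir_api
    def test_cross_static(self):
        paddle.enable_static()
        main = paddle.static.Program()
        startup = paddle.static.Program()
        with paddle.static.program_guard(main, startup):
            x = paddle.static.data(name='x', shape=[-1, 3], dtype='float32')
            y = paddle.static.data(name='y', shape=[-1, 3], dtype='float32')
            z = paddle.cross(x, y, axis=1)
            exe = paddle.static.Executor(paddle.CPUPlace())
            (res,) = exe.run(
                main,
                feed={
                    'x': np.ones([3, 3], dtype='float32'),
                    'y': np.ones([3, 3], dtype='float32'),
                },
                # Under PIR, fetch the value handle itself, not z.name.
                fetch_list=[z],
            )
        # The cross product of identical rows is the zero vector.
        np.testing.assert_allclose(res, np.zeros([3, 3]), atol=1e-6)


if __name__ == '__main__':
    unittest.main()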