34 changes: 20 additions & 14 deletions test/legacy_test/test_activation_op.py
@@ -1221,6 +1221,7 @@ def test_dygraph_api(self):
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

@test_with_pir_api
def test_errors(self):
with static_guard():
with paddle.static.program_guard(paddle.static.Program()):
@@ -1232,10 +1233,11 @@ def test_errors(self):
)
self.assertRaises(TypeError, F.tanhshrink, x_int32)
# support the input dtype is float16
x_fp16 = paddle.static.data(
name='x_fp16', shape=[12, 10], dtype='float16'
)
F.tanhshrink(x_fp16)
if paddle.is_compiled_with_cuda():
x_fp16 = paddle.static.data(
name='x_fp16', shape=[12, 10], dtype='float16'
)
F.tanhshrink(x_fp16)


def ref_hardshrink(x, threshold):
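
For reference, a minimal self-contained sketch of the guarded float16 check introduced in the hunk above (the shape, tensor names, and F.tanhshrink come from the test; it assumes a Paddle install, and the fp16 branch only runs when Paddle was built with CUDA):

import paddle
import paddle.nn.functional as F

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    # an int32 input must be rejected with a TypeError
    x_int32 = paddle.static.data(name='x_int32', shape=[12, 10], dtype='int32')
    try:
        F.tanhshrink(x_int32)
    except TypeError:
        pass
    # float16 is only exercised on CUDA builds, where the fp16 kernel is available
    if paddle.is_compiled_with_cuda():
        x_fp16 = paddle.static.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.tanhshrink(x_fp16)

The same guard is applied to the thresholded_relu and swish error tests below.
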
@@ -4411,6 +4413,7 @@ def test_dygraph_api(self):
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

@test_with_pir_api
def test_errors(self):
with static_guard():
with paddle.static.program_guard(paddle.static.Program()):
@@ -4422,10 +4425,11 @@ def test_errors(self):
)
self.assertRaises(TypeError, F.thresholded_relu, x_int32)
# support the input dtype is float16
x_fp16 = paddle.static.data(
name='x_fp16', shape=[12, 10], dtype='float16'
)
F.thresholded_relu(x_fp16)
if paddle.is_compiled_with_cuda():
x_fp16 = paddle.static.data(
name='x_fp16', shape=[12, 10], dtype='float16'
)
F.thresholded_relu(x_fp16)


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
@@ -4642,6 +4646,7 @@ def test_base_api(self):
out_ref = ref_swish(self.x_np)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

@test_with_pir_api
def test_errors(self):
with static_guard():
with paddle.static.program_guard(paddle.static.Program()):
@@ -4653,10 +4658,11 @@ def test_errors(self):
)
self.assertRaises(TypeError, F.swish, x_int32)
# support the input dtype is float16
x_fp16 = paddle.static.data(
name='x_fp16', shape=[12, 10], dtype='float16'
)
F.swish(x_fp16)
if paddle.is_compiled_with_cuda():
x_fp16 = paddle.static.data(
name='x_fp16', shape=[12, 10], dtype='float16'
)
F.swish(x_fp16)


def ref_mish(x, threshold=20.0):
@@ -4943,7 +4949,7 @@ def test_check_grad(self):
create_test_act_fp16_class(TestSTanh)
create_test_act_fp16_class(TestSoftplus, check_pir=True)
create_test_act_fp16_class(TestSoftsign, check_pir=True)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestThresholdedRelu, check_pir=True)
create_test_act_fp16_class(TestHardSigmoid, check_pir=True)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish, check_prim=True, check_pir=True)
@@ -5112,7 +5118,7 @@ def test_check_grad(self):
create_test_act_bf16_class(TestSTanh)
create_test_act_bf16_class(TestSoftplus, check_pir=True)
create_test_act_bf16_class(TestSoftsign, check_pir=True)
create_test_act_bf16_class(TestThresholdedRelu)
create_test_act_bf16_class(TestThresholdedRelu, check_pir=True)
create_test_act_bf16_class(TestHardSigmoid, check_pir=True)
create_test_act_bf16_class(TestSwish)
create_test_act_bf16_class(TestHardSwish, check_prim=True, check_pir=True)
1 change: 1 addition & 0 deletions test/legacy_test/test_temporal_shift_op.py
@@ -173,6 +173,7 @@ def test_static_fp16_gpu(self):
fetch_list=[y],
)

@test_with_pir_api
def test_error(self):
def attr_data_format():
input = paddle.randn([6, 4, 2, 2])
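
The same decorator-driven pattern applies here; a hedged sketch of how @test_with_pir_api attaches to the error test (the class name and the exact body of attr_data_format are illustrative, and the decorator is assumed, as elsewhere in this PR, to re-run the test body under the PIR program as well as the legacy one):

import unittest
import paddle
import paddle.nn.functional as F
from paddle.pir_utils import test_with_pir_api


class TemporalShiftErrorExample(unittest.TestCase):  # illustrative class name
    @test_with_pir_api
    def test_error(self):
        def attr_data_format():
            input = paddle.randn([6, 4, 2, 2])
            # an unsupported layout string should be rejected
            F.temporal_shift(x=input, seg_num=2, shift_ratio=0.2, data_format="HWC")

        self.assertRaises(ValueError, attr_data_format)


if __name__ == '__main__':
    unittest.main()
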
15 changes: 12 additions & 3 deletions test/legacy_test/test_triangular_solve_op.py
@@ -23,6 +23,7 @@
import paddle
from paddle import base
from paddle.base import Program, core, program_guard
from paddle.pir_utils import test_with_pir_api

paddle.enable_static()

@@ -749,7 +750,9 @@ def setUp(self):
self.place.append(paddle.CUDAPlace(0))

def check_static_result(self, place):
with base.program_guard(base.Program(), base.Program()):
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
x = paddle.static.data(name="x", shape=[3, 3], dtype=self.dtype)
y = paddle.static.data(name="y", shape=[3, 2], dtype=self.dtype)
z = paddle.linalg.triangular_solve(x, y)
@@ -760,12 +763,13 @@ def check_static_result(self, place):

exe = base.Executor(place)
fetches = exe.run(
base.default_main_program(),
paddle.static.default_main_program(),
feed={"x": x_np, "y": y_np},
fetch_list=[z],
)
np.testing.assert_allclose(fetches[0], z_np, rtol=1e-05)

@test_with_pir_api
def test_static(self):
for place in self.place:
self.check_static_result(place=place)
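
Pulled together, the updated check_static_result path looks roughly like this as a standalone script (the dtype, shapes, and paddle.static spellings follow the diff; the input data and CPU place are illustrative, with the diagonal shifted to keep the triangular system well conditioned):

import numpy as np
import paddle

paddle.enable_static()

with paddle.static.program_guard(
    paddle.static.Program(), paddle.static.Program()
):
    x = paddle.static.data(name="x", shape=[3, 3], dtype="float64")
    y = paddle.static.data(name="y", shape=[3, 2], dtype="float64")
    z = paddle.linalg.triangular_solve(x, y)

    # upper-triangular system with a well-conditioned diagonal
    x_np = (np.triu(np.random.random((3, 3))) + np.eye(3)).astype("float64")
    y_np = np.random.random((3, 2)).astype("float64")
    z_np = np.linalg.solve(x_np, y_np)

    exe = paddle.static.Executor(paddle.CPUPlace())
    fetches = exe.run(
        paddle.static.default_main_program(),
        feed={"x": x_np, "y": y_np},
        fetch_list=[z],
    )
    np.testing.assert_allclose(fetches[0], z_np, rtol=1e-05)
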
@@ -790,7 +794,7 @@ def run(place):


class TestTriangularSolveOpError(unittest.TestCase):
def test_errors(self):
def test_errors1(self):
with program_guard(Program(), Program()):
# The input type of solve_op must be Variable.
x1 = base.create_lod_tensor(
@@ -801,6 +805,11 @@ def test_errors(self):
)
self.assertRaises(TypeError, paddle.linalg.triangular_solve, x1, y1)

@test_with_pir_api
def test_errors2(self):
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
# The data type of input must be float32 or float64.
x2 = paddle.static.data(name="x2", shape=[30, 30], dtype="bool")
y2 = paddle.static.data(name="y2", shape=[30, 10], dtype="bool")