15 changes: 9 additions & 6 deletions test/legacy_test/test_device_guard.py
@@ -16,7 +16,8 @@
 import warnings
 
 import paddle
-from paddle.base import core
+from paddle.base import core, in_pir_mode
+from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
 
@@ -143,6 +144,7 @@ def test_cpu_only_op(self):
 
         execute(main_program, startup_program)
 
+    @test_with_pir_api
     def test_without_kernel_op(self):
         main_program = paddle.static.Program()
         startup_program = paddle.static.Program()
@@ -158,15 +160,16 @@ def test_without_kernel_op(self):
                     with while_op.block():
                         i = paddle.increment(x=i, value=1)
                         paddle.assign(paddle.less_than(x=i, y=loop_len), cond)
 
-            warning = "The Op(while) is not support to set device."
-            warning_num = get_vaild_warning_num(warning, w)
-            assert warning_num == 1
+            if not in_pir_mode():
+                warning = "The Op(while) is not support to set device."
+                warning_num = get_vaild_warning_num(warning, w)
+                assert warning_num == 1
 
         all_ops = main_program.global_block().ops
         device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
         for op in all_ops:
-            if op.type == 'while':
+            op_name = op.name() if in_pir_mode() else op.type
+            if op_name == 'while':
                 self.assertEqual(op.desc.attr(device_attr_name), "")
 
         execute(main_program, startup_program)
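A note on the pattern above: `test_with_pir_api` runs the decorated test twice, once against the legacy program IR and once against the new PIR representation, while `in_pir_mode()` lets the body branch on whichever is active. In PIR an operator reports its name through `op.name()`, whereas a legacy operator exposes it as the `op.type` attribute. Below is a minimal sketch of the gating, assuming a hypothetical class and toy program (only the decorator and the two helpers come from the diff):

```python
import paddle
from paddle.base import in_pir_mode
from paddle.pir_utils import test_with_pir_api

paddle.enable_static()


class ExampleOpNameCheck:  # hypothetical class, for illustration only
    @test_with_pir_api  # runs the body once per IR: legacy and PIR
    def check_op_names(self):
        main_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        with paddle.static.program_guard(main_program, startup_program):
            i = paddle.full(shape=[1], dtype='int64', fill_value=0)
            i = paddle.increment(x=i, value=1)
        for op in main_program.global_block().ops:
            # PIR ops report their name via op.name(); legacy ops via op.type.
            op_name = op.name() if in_pir_mode() else op.type
            print(op_name)


ExampleOpNameCheck().check_op_names()
```

Gating the warning check with `in_pir_mode()` is exactly what the hunk above does, since the legacy-only device warning is never emitted under PIR.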
73 changes: 57 additions & 16 deletions test/legacy_test/test_zero_dim_tensor.py
@@ -25,6 +25,8 @@
 
 import paddle
 import paddle.nn.functional as F
+from paddle import base, core
+from paddle.framework import in_dynamic_mode
 from paddle.pir_utils import test_with_pir_api
 
 unary_api_list = [
@@ -2674,6 +2676,7 @@ def test_prelu(self):
         self.assertEqual(x2.grad.shape, [])
         self.assertEqual(x2.grad.numpy(), 0.25)
 
+    @test_with_pir_api
     def test_while_loop(self):
         def cond(i, x):
             return paddle.less_than(i, eleven)
@@ -2685,20 +2688,43 @@ def body(i, x):
 
         i = paddle.full([], 1.0, dtype='float32')
         i.stop_gradient = False
+        i.persistable = True
         eleven = paddle.full([], 11, dtype='float32')
         x = paddle.full([], 0.0, dtype='float32')
         x.stop_gradient = False
+        x.persistable = True
         out_i, out_x = paddle.static.nn.while_loop(cond, body, [i, x])
-        out_x.backward()
 
-        self.assertEqual(out_i.shape, [])
+        if in_dynamic_mode():
+            out_x.backward()
+            di = i.grad
+            dx = x.grad
+        else:
+            grad_list = paddle.static.append_backward(out_x)
+            for p, g in grad_list:
+                if p.is_same(i):
+                    di = g
+                elif p.is_same(x):
+                    dx = g
+            place = (
+                base.CUDAPlace(0)
+                if core.is_compiled_with_cuda()
+                else base.CPUPlace()
+            )
+            exe = base.Executor(place)
+            main_program = paddle.static.default_main_program()
+            out_i, out_x, di, dx = exe.run(
+                main_program, feed={}, fetch_list=[out_i, out_x, di, dx]
+            )
+
+        self.assertEqual(np.asarray(out_i).shape, ())
         np.testing.assert_allclose(out_i, np.array(11))
-        self.assertEqual(out_x.shape, [])
+        self.assertEqual(np.asarray(out_x).shape, ())
         np.testing.assert_allclose(out_x, np.array(55))
-        self.assertEqual(i.grad.shape, [])
-        np.testing.assert_allclose(i.grad, np.array(10))
-        self.assertEqual(x.grad.shape, [])
-        np.testing.assert_allclose(x.grad, np.array(1.0))
+        self.assertEqual(np.asarray(di).shape, ())
+        np.testing.assert_allclose(di, np.array(10))
+        self.assertEqual(np.asarray(dx).shape, ())
+        np.testing.assert_allclose(dx, np.array(1.0))
 
     def test_to_tensor(self):
         out1 = paddle.to_tensor(1)
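The static branch above relies on two details: `paddle.static.append_backward(out_x)` returns a list of `(parameter, gradient)` pairs, and under PIR the loop inputs are matched by identity with `is_same()` because PIR values carry no stable string names. The fetched results are then checked as true 0-D arrays via `np.asarray(...).shape == ()`. Here is a minimal sketch of that gradient lookup on a toy `x * x` graph; using `paddle.pir_utils.IrGuard` to force PIR mode is my assumption (it is the guard that `test_with_pir_api` applies), and the graph itself is illustrative:

```python
import numpy as np
import paddle
from paddle.pir_utils import IrGuard  # assumed guard; test_with_pir_api wraps it

paddle.enable_static()

with IrGuard():  # run the toy graph under PIR
    main_program = paddle.static.Program()
    with paddle.static.program_guard(main_program):
        x = paddle.full([], 3.0, dtype='float32')
        x.stop_gradient = False
        x.persistable = True
        out = x * x

        # append_backward returns (parameter, gradient) pairs; pick the
        # gradient belonging to x by identity, not by name.
        grad_list = paddle.static.append_backward(out)
        dx = next(g for p, g in grad_list if p.is_same(x))

    exe = paddle.static.Executor(paddle.CPUPlace())
    res_out, res_dx = exe.run(main_program, fetch_list=[out, dx])

# 0-D results come back as shape-() numpy arrays.
assert np.asarray(res_dx).shape == ()
np.testing.assert_allclose(res_out, np.array(9.0))  # x * x at x = 3
np.testing.assert_allclose(res_dx, np.array(6.0))   # d(x*x)/dx = 2x = 6
```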
@@ -5025,6 +5051,7 @@ def test_static_nn_prelu(self):
         np.testing.assert_allclose(res[0], np.array(1))
         np.testing.assert_allclose(res[1], np.array(1))
 
+    @test_with_pir_api
     @prog_scope()
     def test_while_loop(self):
         def cond(i, x):
@@ -5039,20 +5066,34 @@ def body(i, x):
         with paddle.static.program_guard(main_program, paddle.static.Program()):
             i = paddle.static.data(name='i', shape=[], dtype='float32')
             i.stop_gradient = False
+            i.persistable = True
             eleven = paddle.full([], 11, 'float32')
             x = paddle.static.data(name='x', shape=[], dtype='float32')
             x.stop_gradient = False
+            x.persistable = True
             out_i, out_x = paddle.static.nn.while_loop(cond, body, [i, x])
-            paddle.static.append_backward(out_x)
+            grad_list = paddle.static.append_backward(out_x)
 
-        res = self.exe.run(
-            main_program,
-            feed={
-                'i': np.array(1.0, dtype='float32'),
-                'x': np.array(0.0, dtype='float32'),
-            },
-            fetch_list=[out_i.name, out_x.name, i.grad_name, x.grad_name],
-        )
+        feed = {
+            'i': np.array(1.0, dtype='float32'),
+            'x': np.array(0.0, dtype='float32'),
+        }
+        if paddle.framework.in_pir_mode():
+            fetch_list = [out_i, out_x]
+            for _, g in grad_list:
+                fetch_list.append(g)
+            res = self.exe.run(
+                main_program,
+                feed=feed,
+                fetch_list=fetch_list,
+            )
+        else:
+            res = self.exe.run(
+                main_program,
+                feed=feed,
+                fetch_list=[out_i.name, out_x.name, i.grad_name, x.grad_name],
+            )
+
         self.assertEqual(res[0].shape, ())
         np.testing.assert_allclose(res[0], np.array(11))
         self.assertEqual(res[1].shape, ())
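The second `test_while_loop` hunk shows that the only mode-dependent piece of the static test is the fetch list: legacy variables are fetched by their `name`/`grad_name` strings, while PIR values must be fetched as the objects returned by `append_backward`. A condensed sketch of that split, reusing the same illustrative `x * x` graph (run as-is it exercises the legacy branch; under PIR it takes the first):

```python
import numpy as np
import paddle

paddle.enable_static()

main_program = paddle.static.Program()
with paddle.static.program_guard(main_program):
    x = paddle.static.data(name='x', shape=[], dtype='float32')
    x.stop_gradient = False
    x.persistable = True
    out = x * x
    grad_list = paddle.static.append_backward(out)

feed = {'x': np.array(3.0, dtype='float32')}
if paddle.framework.in_pir_mode():
    # PIR: fetch the Value objects themselves.
    fetch_list = [out] + [g for _, g in grad_list]
else:
    # Legacy IR: fetch by the variables' string names.
    fetch_list = [out.name, x.grad_name]

exe = paddle.static.Executor(paddle.CPUPlace())
res = exe.run(main_program, feed=feed, fetch_list=fetch_list)
np.testing.assert_allclose(res[0], np.array(9.0))  # x * x at x = 3
```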