13 changes: 7 additions & 6 deletions paddle/phi/api/yaml/legacy_backward.yaml
@@ -326,12 +326,7 @@
forward : cast (Tensor x, DataType out_dtype) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : cast_grad
data_type : out_grad
invoke : cast (out_grad, x.dtype())
no_need_buffer : x
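
cast is a linear, element-wise dtype conversion, so its VJP is simply the incoming gradient cast back to the input's dtype; that is what `invoke : cast (out_grad, x.dtype())` expresses, and it is also why x's data is no longer needed in the backward (hence no_need_buffer). A minimal dygraph sketch of that equivalence, assuming a stock Paddle build (tensor values are illustrative only):

import paddle

# cast is linear, so the backward only needs to cast out_grad back to the
# input dtype; no data from x itself is required (only x.dtype).
x = paddle.to_tensor([1.0, 2.0, 3.0], dtype='float32', stop_gradient=False)
out = paddle.cast(x, 'float64')
(x_grad,) = paddle.grad(out, x, grad_outputs=paddle.ones_like(out))
print(x_grad.dtype)  # paddle.float32, i.e. the gradient comes back in x's dtype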

- backward_op : ceil_grad
@@ -2108,6 +2103,12 @@
optional : grad_grad_out_grad
inplace : (grad_grad_x -> fwd_grad_out_grad)

- backward_op : sign_grad
forward : sign (Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
invoke : scale(out_grad, 0.0, 0.0, true)
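
sign(x) is piecewise constant, so its gradient is zero wherever it is defined; `scale(out_grad, 0.0, 0.0, true)` computes 0 * out_grad + 0, i.e. an all-zero tensor with out_grad's shape and dtype, so no dedicated sign_grad kernel is needed. With this change applied, a dygraph check would look roughly like the sketch below (values are illustrative):

import paddle

# The derivative of sign is zero almost everywhere, so the registered
# backward simply returns zeros shaped like out_grad.
x = paddle.to_tensor([-2.0, 0.5, 3.0], stop_gradient=False)
out = paddle.sign(x)
(x_grad,) = paddle.grad(out, x, grad_outputs=paddle.ones_like(out))
print(x_grad.numpy())  # [0. 0. 0.]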

- backward_op : silu_grad
forward : silu (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
1 change: 1 addition & 0 deletions paddle/phi/api/yaml/legacy_ops.yaml
@@ -2377,6 +2377,7 @@
func : UnchangedInferMeta
kernel :
func : sign
backward : sign_grad

- op : silu
args : (Tensor x)
20 changes: 18 additions & 2 deletions python/paddle/fluid/tests/unittests/gradient_checker.py
@@ -268,6 +268,9 @@ def fail_test(msg):
for v in x:
v.stop_gradient = False
v.persistable = True
for u in y:
u.stop_gradient = False
u.persistable = True
if place is None:
place = fluid.CPUPlace()
if program is None:
@@ -364,6 +367,9 @@ def double_grad_check(x,
v.stop_gradient = False
v.persistable = True
y = _as_list(y)
for u in y:
u.stop_gradient = False
u.persistable = True

if program is None:
program = fluid.default_main_program()
@@ -445,6 +451,9 @@ def triple_grad_check(x,
v.stop_gradient = False
v.persistable = True
y = _as_list(y)
for u in y:
u.stop_gradient = False
u.persistable = True

if program is None:
program = fluid.default_main_program()
@@ -578,6 +587,9 @@ def get_static_double_grad(x,
for v in x:
v.stop_gradient = False
v.persistable = True
for u in y:
u.stop_gradient = False
u.persistable = True
if place is None:
place = fluid.CPUPlace()
if program is None:
@@ -736,7 +748,9 @@ def fail_test(msg):
v.stop_gradient = False
v.persistable = True
y = _as_list(y)

for u in y:
u.stop_gradient = False
u.persistable = True

[Review comment, Contributor]: fix this in the right way
[Reply, Contributor Author]: Due to the inconsistency of the stop_gradient behavior between dynamic and static graphs, we should consider refactoring the dynamic-graph unit test framework in the future.

y_grads_init = []
for yi in y:
np_type = dtype_to_np_dtype(yi.dtype)
@@ -903,7 +917,9 @@ def fail_test(msg):
v.stop_gradient = False
v.persistable = True
y = _as_list(y)

for u in y:
u.stop_gradient = False
u.persistable = True
y_grads_init = []
for yi in y:
np_type = dtype_to_np_dtype(yi.dtype)
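For context on what the tests below exercise: double_grad_check (and its triple counterpart) compare analytic higher-order gradients against finite-difference estimates of the gradients one order lower. The numpy-only sketch below shows the central-difference idea; it is a conceptual illustration, not the checker's actual implementation, and `f`, `eps`, and the sample function are placeholders:

import numpy as np

def numeric_jacobian(f, x, eps=5e-3):
    # Central-difference Jacobian of f (flat array -> flat array) at x.
    # double_grad_check applies this idea to the analytic first-order
    # gradient and compares the result against the analytic second-order one.
    x = x.astype(np.float64)
    y0 = np.asarray(f(x), dtype=np.float64)
    jac = np.zeros((y0.size, x.size))
    for i in range(x.size):
        step = np.zeros_like(x)
        step[i] = eps
        jac[:, i] = (np.asarray(f(x + step)) - np.asarray(f(x - step))) / (2.0 * eps)
    return jac

print(numeric_jacobian(lambda v: v ** 2, np.array([1.0, 2.0])))  # ~diag([2., 4.])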
77 changes: 77 additions & 0 deletions python/paddle/fluid/tests/unittests/test_cast_op.py
@@ -23,6 +23,9 @@
from paddle.fluid import compiler, Program, program_guard
from op_test import OpTest, convert_uint16_to_float, convert_float_to_uint16
from paddle.fluid.framework import _test_eager_guard
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers


class TestCastOpFp32ToFp64(OpTest):
@@ -137,6 +140,80 @@ def test_eager(self):
self.assertTrue(x.gradient().dtype == np.float16)


class TestCastDoubleGradCheck(unittest.TestCase):

def cast_wrapper(self, x):
return paddle.cast(x[0], 'float64')

@prog_scope()
def func(self, place):
# the shape of the input variable should be specified explicitly and must not include -1.
eps = 0.005
dtype = np.float32

data = layers.data('data', [2, 3, 4], False, dtype)
data.persistable = True
out = paddle.cast(data, 'float64')
data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

gradient_checker.double_grad_check([data],
out,
x_init=[data_arr],
place=place,
eps=eps)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
gradient_checker.double_grad_check_for_dygraph(self.cast_wrapper,
[data],
out,
x_init=[data_arr],
place=place)

def test_grad(self):
paddle.enable_static()
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)


class TestCastTripleGradCheck(unittest.TestCase):

def cast_wrapper(self, x):
return paddle.cast(x[0], 'float64')

@prog_scope()
def func(self, place):
# the shape of the input variable should be specified explicitly and must not include -1.
eps = 0.005
dtype = np.float32

data = layers.data('data', [2, 3, 4], False, dtype)
data.persistable = True
out = paddle.cast(data, 'float64')
data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

gradient_checker.triple_grad_check([data],
out,
x_init=[data_arr],
place=place,
eps=eps)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
gradient_checker.triple_grad_check_for_dygraph(self.cast_wrapper,
[data],
out,
x_init=[data_arr],
place=place)

def test_grad(self):
paddle.enable_static()
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)


if __name__ == '__main__':
paddle.enable_static()
unittest.main()
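
A note on the FLAGS_retain_grad_for_all_tensor calls in these tests: the dygraph double/triple-grad checkers need gradients of intermediate (non-leaf) tensors, which dygraph normally discards; the flag keeps them. A minimal stand-alone sketch, under the assumption that the flag behaves this way in the Paddle version this PR targets:

import paddle
import paddle.fluid as fluid

# With the flag on, gradients are retained for every tensor, so y.grad is
# available after backward even though y is an intermediate result.
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
y = paddle.cast(x, 'float64')      # intermediate (non-leaf) tensor
z = (y * y).sum()
z.backward()
print(y.grad)                      # retained because of the flag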
78 changes: 78 additions & 0 deletions python/paddle/fluid/tests/unittests/test_sign_op.py
@@ -19,7 +19,11 @@
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers


class TestSignOp(OpTest):
@@ -91,6 +95,80 @@ def test_static(self):
paddle.sign(input4)


class TestSignDoubleGradCheck(unittest.TestCase):

def sign_wrapper(self, x):
return paddle.sign(x[0])

@prog_scope()
def func(self, place):
# the shape of the input variable should be specified explicitly and must not include -1.
eps = 0.005
dtype = np.float32

data = layers.data('data', [1, 4], False, dtype)
data.persistable = True
out = paddle.sign(data)
data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

gradient_checker.double_grad_check([data],
out,
x_init=[data_arr],
place=place,
eps=eps)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
gradient_checker.double_grad_check_for_dygraph(self.sign_wrapper,
[data],
out,
x_init=[data_arr],
place=place)

def test_grad(self):
paddle.enable_static()
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)


class TestSignTripleGradCheck(unittest.TestCase):

def sign_wrapper(self, x):
return paddle.sign(x[0])

@prog_scope()
def func(self, place):
# the shape of the input variable should be specified explicitly and must not include -1.
eps = 0.005
dtype = np.float32

data = layers.data('data', [1, 4], False, dtype)
data.persistable = True
out = paddle.sign(data)
data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

gradient_checker.triple_grad_check([data],
out,
x_init=[data_arr],
place=place,
eps=eps)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
gradient_checker.triple_grad_check_for_dygraph(self.sign_wrapper,
[data],
out,
x_init=[data_arr],
place=place)

def test_grad(self):
paddle.enable_static()
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)


if __name__ == "__main__":
paddle.enable_static()
unittest.main()