diff --git a/test/dygraph_to_static/test_lac.py b/test/dygraph_to_static/test_lac.py index 62d842a3ed6c65..0eb17e85152642 100644 --- a/test/dygraph_to_static/test_lac.py +++ b/test/dygraph_to_static/test_lac.py @@ -22,7 +22,7 @@ os.environ["CUDA_VISIBLE_DEVICES"] = "2" -from dygraph_to_static_utils import Dy2StTestBase +from dygraph_to_static_utils import Dy2StTestBase, enable_to_static_guard import paddle from paddle import _legacy_C_ops, base @@ -531,7 +531,6 @@ def setUp(self): self.dy_param_path = os.path.join(self.temp_dir.name, 'lac_dy_param') def train(self, args, to_static): - paddle.jit.enable_to_static(to_static) place = ( base.CUDAPlace(0) if base.is_compiled_with_cuda() @@ -580,7 +579,9 @@ def train(self, args, to_static): num_label_chunks, num_correct_chunks, ) = chunk_eval( - input=crf_decode, label=targets, seq_length=length + input=crf_decode, + label=targets, + seq_length=length, ) outputs = [avg_cost, precision, recall, f1_score] avg_cost, precision, recall, f1_score = ( @@ -619,9 +620,13 @@ def train(self, args, to_static): return np.array(loss_data) + def _train(self, to_static: bool): + with enable_to_static_guard(to_static): + return self.train(self.args, to_static) + def test_train(self): - st_out = self.train(self.args, to_static=True) - dy_out = self.train(self.args, to_static=False) + st_out = self._train(to_static=True) + dy_out = self._train(to_static=False) np.testing.assert_allclose( dy_out, st_out, @@ -645,19 +650,21 @@ def verify_predict(self): def predict_dygraph(self, batch): words, targets, length = batch - paddle.jit.enable_to_static(False) - with base.dygraph.guard(self.place): - model = LexNet(self.args) - # load dygraph trained parameters - model_dict = paddle.load(self.dy_param_path + ".pdparams") - model.set_dict(model_dict) - model.eval() - - _, pred_res = model( - to_variable(words), to_variable(targets), to_variable(length) - ) + with enable_to_static_guard(False): + with base.dygraph.guard(self.place): + model = 
LexNet(self.args) + # load dygraph trained parameters + model_dict = paddle.load(self.dy_param_path + ".pdparams") + model.set_dict(model_dict) + model.eval() + + _, pred_res = model( + to_variable(words), + to_variable(targets), + to_variable(length), + ) - return pred_res.numpy() + return pred_res.numpy() def predict_static(self, batch): """ diff --git a/test/legacy_test/test_base_layer.py b/test/legacy_test/test_base_layer.py index 28b8d0cac762fd..e19a3b1ced2cfa 100644 --- a/test/legacy_test/test_base_layer.py +++ b/test/legacy_test/test_base_layer.py @@ -12,15 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. +import sys import unittest import numpy as np import paddle from paddle import base -from paddle.base.dygraph import to_variable from paddle.base.framework import EagerParamBase +sys.path.append("../dygraph_to_static") +from dygraph_to_static_utils import enable_to_static_guard + class L1(paddle.nn.Layer): def __init__(self): @@ -61,70 +64,67 @@ def forward(self): class TestBaseLayer(unittest.TestCase): def test_one_level(self): - with base.dygraph.guard(): - l = L1() - ret = l() - expected_names = ['l1.w1', 'l1.w2'] - idx = 0 - for name, _ in l.named_parameters(prefix='l1'): - self.assertEqual(name, expected_names[idx]) - idx += 1 - np.testing.assert_allclose( - ret.numpy(), 0.2 * np.ones([2, 2]), rtol=1e-05 - ) + l = L1() + ret = l() + expected_names = ['l1.w1', 'l1.w2'] + idx = 0 + for name, _ in l.named_parameters(prefix='l1'): + self.assertEqual(name, expected_names[idx]) + idx += 1 + np.testing.assert_allclose( + ret.numpy(), 0.2 * np.ones([2, 2]), rtol=1e-05 + ) def test_three_level(self): - with base.dygraph.guard(): - l = L3() - expected_names = [ - 'l3.layer1.layer1.w1', - 'l3.layer1.layer1.w2', - 'l3.layer1.layer2.w1', - 'l3.layer1.layer2.w2', - 'l3.layer2.layer1.w1', - 'l3.layer2.layer1.w2', - 'l3.layer2.layer2.w1', - 'l3.layer2.layer2.w2', - ] - idx = 0 - for name, _ in 
l.named_parameters(prefix='l3'): - self.assertEqual(name, expected_names[idx]) - idx += 1 - ret = l() - np.testing.assert_allclose( - ret.numpy(), 0.8 * np.ones([2, 2]), rtol=1e-05 - ) + l = L3() + expected_names = [ + 'l3.layer1.layer1.w1', + 'l3.layer1.layer1.w2', + 'l3.layer1.layer2.w1', + 'l3.layer1.layer2.w2', + 'l3.layer2.layer1.w1', + 'l3.layer2.layer1.w2', + 'l3.layer2.layer2.w1', + 'l3.layer2.layer2.w2', + ] + idx = 0 + for name, _ in l.named_parameters(prefix='l3'): + self.assertEqual(name, expected_names[idx]) + idx += 1 + ret = l() + np.testing.assert_allclose( + ret.numpy(), 0.8 * np.ones([2, 2]), rtol=1e-05 + ) def test_add_parameter_with_error(self): - with base.dygraph.guard(): - net = paddle.nn.Layer() - param = net.create_parameter(shape=[1]) + net = paddle.nn.Layer() + param = net.create_parameter(shape=[1]) - with self.assertRaises(TypeError): - net.add_parameter(10, param) + with self.assertRaises(TypeError): + net.add_parameter(10, param) - with self.assertRaises(KeyError): - net.add_parameter("param.name", param) + with self.assertRaises(KeyError): + net.add_parameter("param.name", param) - with self.assertRaises(KeyError): - net.add_parameter("", param) + with self.assertRaises(KeyError): + net.add_parameter("", param) - with self.assertRaises(KeyError): - net.test_param = 10 - net.add_parameter("test_param", param) + with self.assertRaises(KeyError): + net.test_param = 10 + net.add_parameter("test_param", param) - with self.assertRaises(TypeError): - net.add_parameter("no_param", 10) + with self.assertRaises(TypeError): + net.add_parameter("no_param", 10) - load_param = net.create_parameter(shape=[1]) - net._loaddict_holder[load_param.name] = load_param - net.add_parameter("load_param", load_param) + load_param = net.create_parameter(shape=[1]) + net._loaddict_holder[load_param.name] = load_param + net.add_parameter("load_param", load_param) class BufferLayer(paddle.nn.Layer): def __init__(self): super().__init__() - buffer_var = 
to_variable(np.zeros([2, 4]).astype('int32')) + buffer_var = paddle.to_tensor(np.zeros([2, 4]).astype('int32')) self.register_buffer("layer_buffer", buffer_var) def forward(self): @@ -138,10 +138,10 @@ def __init__(self): self.w1 = self.create_parameter( shape=[2, 2], dtype='float32', is_bias=False ) - buffer_var = to_variable(np.ones([2, 4]).astype('int32')) + buffer_var = paddle.to_tensor(np.ones([2, 4]).astype('int32')) self.register_buffer("net_buffer", buffer_var) - self.new_buffer = to_variable(np.ones([4, 2]).astype('int32')) + self.new_buffer = paddle.to_tensor(np.ones([4, 2]).astype('int32')) def forward(self): pass @@ -152,156 +152,145 @@ def test_buffers_and_named_buffers(self): def names(named_buffers): return [name for name, _ in named_buffers] - with base.dygraph.guard(): - layer = BufferLayer() - net = BufferNet() + layer = BufferLayer() + net = BufferNet() - self.assertEqual(len(layer.buffers()), 1) - self.assertEqual(names(layer.named_buffers()), ['layer_buffer']) + self.assertEqual(len(layer.buffers()), 1) + self.assertEqual(names(layer.named_buffers()), ['layer_buffer']) - self.assertEqual(len(net.buffers()), 3) - self.assertEqual( - names(net.named_buffers()), - ['net_buffer', 'new_buffer', 'buffer_layer.layer_buffer'], - ) + self.assertEqual(len(net.buffers()), 3) + self.assertEqual( + names(net.named_buffers()), + ['net_buffer', 'new_buffer', 'buffer_layer.layer_buffer'], + ) - self.assertEqual(len(net.buffers(include_sublayers=False)), 2) - self.assertEqual( - names(net.named_buffers(include_sublayers=False)), - ['net_buffer', 'new_buffer'], - ) + self.assertEqual(len(net.buffers(include_sublayers=False)), 2) + self.assertEqual( + names(net.named_buffers(include_sublayers=False)), + ['net_buffer', 'new_buffer'], + ) def test_register_buffer_with_error(self): - with base.dygraph.guard(): - net = paddle.nn.Layer() - var = to_variable(np.zeros([1])) - - with self.assertRaisesRegex( - TypeError, "name of buffer should be a string" - ): - 
net.register_buffer(12, var) - - with self.assertRaisesRegex( - TypeError, "buffer should be a Paddle.Tensor" - ): - net.register_buffer( - "buffer_name", EagerParamBase([2, 2], 'float32') - ) + net = paddle.nn.Layer() + var = paddle.to_tensor(np.zeros([1])) + + with self.assertRaisesRegex( + TypeError, "name of buffer should be a string" + ): + net.register_buffer(12, var) + + with self.assertRaisesRegex( + TypeError, "buffer should be a Paddle.Tensor" + ): + net.register_buffer( + "buffer_name", EagerParamBase([2, 2], 'float32') + ) - with self.assertRaisesRegex( - KeyError, "name of buffer can not contain" - ): - net.register_buffer("buffer.name", var) + with self.assertRaisesRegex(KeyError, "name of buffer can not contain"): + net.register_buffer("buffer.name", var) - with self.assertRaisesRegex( - KeyError, "name of buffer can not be empty" - ): - net.register_buffer("", var) + with self.assertRaisesRegex( + KeyError, "name of buffer can not be empty" + ): + net.register_buffer("", var) - net.attr_name = 10 - with self.assertRaisesRegex(KeyError, "already exists"): - net.register_buffer("attr_name", var) + net.attr_name = 10 + with self.assertRaisesRegex(KeyError, "already exists"): + net.register_buffer("attr_name", var) - del net.attr_name - net.attr_name = EagerParamBase([2, 2], 'float32') - with self.assertRaisesRegex(KeyError, "already exists"): - net.register_buffer("attr_name", var) + del net.attr_name + net.attr_name = EagerParamBase([2, 2], 'float32') + with self.assertRaisesRegex(KeyError, "already exists"): + net.register_buffer("attr_name", var) def test_register_buffer_same_name(self): - with base.dygraph.guard(): - net = paddle.nn.Layer() - var1 = to_variable(np.zeros([1])) - var2 = to_variable(np.zeros([2])) - var3 = to_variable(np.zeros([3])) - - net.register_buffer("buffer_name", var1) - self.assert_var_base_equal(net.buffer_name, var1) - net.register_buffer("buffer_name", var2) - self.assert_var_base_equal(net.buffer_name, var2) - 
net.register_buffer("buffer_name", var3) - self.assert_var_base_equal(net.buffer_name, var3) + net = paddle.nn.Layer() + var1 = paddle.to_tensor(np.zeros([1])) + var2 = paddle.to_tensor(np.zeros([2])) + var3 = paddle.to_tensor(np.zeros([3])) + + net.register_buffer("buffer_name", var1) + self.assert_var_base_equal(net.buffer_name, var1) + net.register_buffer("buffer_name", var2) + self.assert_var_base_equal(net.buffer_name, var2) + net.register_buffer("buffer_name", var3) + self.assert_var_base_equal(net.buffer_name, var3) def test_buffer_not_persistable(self): - with base.dygraph.guard(): - net = paddle.nn.Layer() - var1 = to_variable(np.zeros([1])) + net = paddle.nn.Layer() + var1 = paddle.to_tensor(np.zeros([1])) - net.register_buffer("buffer_name", var1, persistable=False) - self.assertEqual(len(net.buffers()), 1) - self.assertEqual(len(net.state_dict()), 0) + net.register_buffer("buffer_name", var1, persistable=False) + self.assertEqual(len(net.buffers()), 1) + self.assertEqual(len(net.state_dict()), 0) def test_buffer_not_persistable_del(self): - with base.dygraph.guard(): - net = paddle.nn.Layer() - var1 = to_variable(np.zeros([1])) - net.register_buffer("buffer_name", var1, persistable=False) - del net.buffer_name - self.assertEqual(len(net.buffers()), 0) + net = paddle.nn.Layer() + var1 = paddle.to_tensor(np.zeros([1])) + net.register_buffer("buffer_name", var1, persistable=False) + del net.buffer_name + self.assertEqual(len(net.buffers()), 0) def test_buffer_not_persistable_overwrite(self): - with base.dygraph.guard(): - net = paddle.nn.Layer() - var1 = to_variable(np.zeros([1])) - var2 = to_variable(np.zeros([2])) - net.register_buffer("buffer_name", var1, persistable=False) - net.register_buffer("buffer_name", var2) + net = paddle.nn.Layer() + var1 = paddle.to_tensor(np.zeros([1])) + var2 = paddle.to_tensor(np.zeros([2])) + net.register_buffer("buffer_name", var1, persistable=False) + net.register_buffer("buffer_name", var2) - # Allow to overwrite a 
non-persistable buffer with a persistable var. - self.assertEqual(len(net.buffers()), 1) - self.assertEqual(len(net.state_dict()), 1) + # Allow to overwrite a non-persistable buffer with a persistable var. + self.assertEqual(len(net.buffers()), 1) + self.assertEqual(len(net.state_dict()), 1) - net.register_buffer("buffer_name", var1, persistable=False) - self.assertEqual(len(net.buffers()), 1) - self.assertEqual(len(net.state_dict()), 0) + net.register_buffer("buffer_name", var1, persistable=False) + self.assertEqual(len(net.buffers()), 1) + self.assertEqual(len(net.state_dict()), 0) def test_buffer_not_persistable_assign(self): - with base.dygraph.guard(): - net = paddle.nn.Layer() - var1 = to_variable(np.zeros([1])) - net.register_buffer("buffer_name", var1, persistable=False) - - # Assigning Nones will remove the buffer, but allow to re-assign - # to remark it as buffer. - net.buffer_name = None - self.assertEqual(len(net.buffers()), 0) - self.assertEqual(len(net.state_dict()), 0) - - net.buffer_name = var1 - self.assertEqual(len(net.buffers()), 1) - self.assertEqual(len(net.state_dict()), 0) - - # Re-assign a EagerParamBase will remove the buffer. - net.buffer_name = EagerParamBase([2, 2], 'float32') - self.assertEqual(len(net.buffers()), 0) - self.assertEqual(len(net.state_dict()), 1) + net = paddle.nn.Layer() + var1 = paddle.to_tensor(np.zeros([1])) + net.register_buffer("buffer_name", var1, persistable=False) + + # Assigning Nones will remove the buffer, but allow to re-assign + # to remark it as buffer. + net.buffer_name = None + self.assertEqual(len(net.buffers()), 0) + self.assertEqual(len(net.state_dict()), 0) + + net.buffer_name = var1 + self.assertEqual(len(net.buffers()), 1) + self.assertEqual(len(net.state_dict()), 0) + + # Re-assign a EagerParamBase will remove the buffer. 
+ net.buffer_name = EagerParamBase([2, 2], 'float32') + self.assertEqual(len(net.buffers()), 0) + self.assertEqual(len(net.state_dict()), 1) def test_buffer_not_persistable_load(self): - with base.dygraph.guard(): - net = paddle.nn.Layer() - var1 = to_variable(np.zeros([1])) - net.register_buffer("buffer_name", var1, persistable=False) - net.load_dict({}) + net = paddle.nn.Layer() + var1 = paddle.to_tensor(np.zeros([1])) + net.register_buffer("buffer_name", var1, persistable=False) + net.load_dict({}) def test_buffer_state_dict(self): - with base.dygraph.guard(): - net = paddle.nn.Layer() - var1 = to_variable(np.zeros([2, 3])) - var2 = to_variable(np.zeros([3, 2])) - net.register_buffer("buffer_var1", var1) - net.register_buffer("buffer_var2", var2, persistable=False) - - self.assertEqual(len(net.state_dict()), 1) - self.assertEqual( - [name for name, _ in net.state_dict().items()], ["buffer_var1"] - ) + net = paddle.nn.Layer() + var1 = paddle.to_tensor(np.zeros([2, 3])) + var2 = paddle.to_tensor(np.zeros([3, 2])) + net.register_buffer("buffer_var1", var1) + net.register_buffer("buffer_var2", var2, persistable=False) + + self.assertEqual(len(net.state_dict()), 1) + self.assertEqual( + [name for name, _ in net.state_dict().items()], ["buffer_var1"] + ) - # load state_dict - net_load = paddle.nn.Layer() - var = to_variable(np.ones([2, 3])) - net_load.register_buffer("buffer_var1", var) - net_load.load_dict(net.state_dict()) + # load state_dict + net_load = paddle.nn.Layer() + var = paddle.to_tensor(np.ones([2, 3])) + net_load.register_buffer("buffer_var1", var) + net_load.load_dict(net.state_dict()) - self.assert_var_base_equal(net_load.buffer_var1, var1) + self.assert_var_base_equal(net_load.buffer_var1, var1) def assert_var_base_equal(self, var1, var2): np.testing.assert_array_equal(var1.numpy(), var2.numpy()) @@ -326,17 +315,15 @@ def forward(self, x): class TestModifiedBuffer(unittest.TestCase): def funcsetUp(self): - paddle.disable_static() self.shape = [10, 16] 
def _run(self, to_static=False): - paddle.jit.enable_to_static(to_static) - - x = paddle.ones([1], 'int32') - net = BufferNetWithModification(self.shape) - out = net(x) + with enable_to_static_guard(to_static): + x = paddle.ones([1], 'int32') + net = BufferNetWithModification(self.shape) + out = net(x) - return out, net.buffer1, net.buffer2 + return out, net.buffer1, net.buffer2 def test_modified(self): self.funcsetUp() @@ -351,7 +338,6 @@ def test_modified(self): class TestLayerTo(unittest.TestCase): def funcsetUp(self): - paddle.disable_static() self.linear = paddle.nn.Linear(2, 2) self.new_grad = np.random.random([2, 2]) self.linear.weight._set_grad_ivar(paddle.to_tensor(self.new_grad)) @@ -519,5 +505,4 @@ def test_main(self): if __name__ == '__main__': - paddle.enable_static() unittest.main() diff --git a/test/legacy_test/test_jit_layer.py b/test/legacy_test/test_jit_layer.py index e4889957645399..9cc628e336d2df 100644 --- a/test/legacy_test/test_jit_layer.py +++ b/test/legacy_test/test_jit_layer.py @@ -13,6 +13,7 @@ # limitations under the License. 
import os +import sys import tempfile import unittest @@ -23,6 +24,9 @@ from paddle.jit.layer import Layer from paddle.static import InputSpec +sys.path.append("../dygraph_to_static") +from dygraph_to_static_utils import enable_to_static_guard + paddle.seed(1) @@ -59,11 +63,9 @@ def tearDown(self): def test_multi_load(self): x = paddle.full([2, 4], 2) model = Net() - paddle.jit.enable_to_static(False) - forward_out1 = model.forward(x) - infer_out1 = model.infer(x) - paddle.jit.enable_to_static(True) - + with enable_to_static_guard(False): + forward_out1 = model.forward(x) + infer_out1 = model.infer(x) model_path = os.path.join(self.temp_dir.name, 'multi_program') paddle.jit.save(model, model_path, combine_params=True) place = paddle.CPUPlace() diff --git a/test/legacy_test/test_model.py b/test/legacy_test/test_model.py index 683b002e058159..a34c1404de45e5 100644 --- a/test/legacy_test/test_model.py +++ b/test/legacy_test/test_model.py @@ -824,7 +824,6 @@ def test_export_deploy_model(self): for dynamic in [True, False]: paddle.disable_static() if dynamic else None - paddle.jit.enable_to_static(False) if not dynamic else None net = LeNet() inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')] diff --git a/test/legacy_test/test_paddlescience.py b/test/legacy_test/test_paddlescience.py index 06c5abe80df89e..96cf2ea510a9f7 100644 --- a/test/legacy_test/test_paddlescience.py +++ b/test/legacy_test/test_paddlescience.py @@ -17,7 +17,6 @@ import paddle from paddle import base, jit, nn -paddle.jit.enable_to_static(True) base.core._set_prim_all_enabled(True) x = paddle.randn([4, 1])