Skip to content
Merged
Show file tree
Hide file tree
Changes from 9 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 7 additions & 27 deletions python/paddle/hapi/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -799,8 +799,7 @@ class Model(object):
paddle.nn.Layer.
inputs (InputSpec|list|dict|None): `inputs`, entry points of network,
could be an InputSpec instance, or a list of InputSpec instances,
or dict ({name: InputSpec}), or None. For static graph,
inputs must be set. For dynamic graph, it could be None.
or dict ({name: InputSpec}), and it couldn't be None.
labels (InputSpec|list|None): `labels`, entry points of network,
could be an InputSpec instance or a list of InputSpec instances,
or None. For static graph, if labels is required in loss,
Expand Down Expand Up @@ -849,10 +848,9 @@ def __init__(self, network, inputs=None, labels=None):
self._optimizer = None
self._test_dataloader = None

if not in_dygraph_mode():
if not isinstance(inputs, (list, dict, Input)):
raise TypeError(
"'inputs' must be list or dict in static graph mode")
if not isinstance(inputs, (list, dict, Input)):
raise TypeError(
"'inputs' must be list or dict, and couldn't be None.")
self._inputs = self._verify_spec(inputs, True)
self._labels = self._verify_spec(labels)

Expand Down Expand Up @@ -1649,10 +1647,6 @@ def _save_inference_model(self,
model_only=False):
"""
Save inference model can be in static or dynamic mode.
It should be noted that before using `save_inference_model`, you should
run the model, and the shape you saved is as same as the input of its
running. `@paddle.jit.to_static` must be added on `forward` function of
your layer in dynamic mode now and these will be optimized later.

Args:
save_dir (str): The directory path to save the inference model.
Expand All @@ -1678,14 +1672,11 @@ def get_inout_spec(all_vars, return_name=False):

return result_list

# TODO:
# 1. Make it Unnecessary to run model before calling `save_inference_model` for users in dygraph.
# 2. Save correct shape of input, now the interface stores the shape that the user sent to
# the inputs of the model in running.
# 3. Make it Unnecessary to add `@paddle.jit.to_static` for users in dynamic mode.
if fluid.in_dygraph_mode():
with fluid.framework._dygraph_guard(None):
layer = self.network
layer.forward = paddle.jit.to_static(
layer.forward, input_spec=self._inputs)

# 1. input check
prog_translator = ProgramTranslator()
Expand Down Expand Up @@ -1879,18 +1870,7 @@ def summary(self, input_size=None, batch_size=None, dtype=None):
def _verify_spec(self, specs, is_input=False):
out_specs = []

if specs is None:
# Note(Aurelius84): If not specific specs of `Input`, using argument names of `forward` function
# to generate `Input`. But how can we know the actual shape of each input tensor?
if is_input:
out_specs = [
Input(
name=n, shape=[None])
for n in extract_args(self.network.forward) if n != 'self'
]
else:
out_specs = to_list(specs)
elif isinstance(specs, dict):
if isinstance(specs, dict):
assert is_input == False
out_specs = [specs[n] \
for n in extract_args(self.network.forward) if n != 'self']
Expand Down
50 changes: 16 additions & 34 deletions python/paddle/tests/test_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,35 +67,6 @@ def forward(self, inputs):
return x


class LeNetDeclarative(fluid.dygraph.Layer):
    """LeNet variant whose ``forward`` is decorated with ``@declarative``
    so it can be traced to a static graph for inference export.

    Args:
        num_classes (int): Width of the classifier head. The head is only
            built when ``num_classes > 0``. Default: 10.
        classifier_activation: Accepted for interface compatibility; the
            head currently always ends in ``Softmax`` (see TODO below).
    """

    def __init__(self, num_classes=10, classifier_activation=None):
        super(LeNetDeclarative, self).__init__()
        self.num_classes = num_classes
        # Feature extractor: two conv -> relu -> max-pool stages,
        # mapping a (N, 1, 28, 28) input to a 400-feature volume.
        self.features = Sequential(
            Conv2d(
                1, 6, 3, stride=1, padding=1),
            ReLU(),
            Pool2D(2, 'max', 2),
            Conv2d(
                6, 16, 5, stride=1, padding=0),
            ReLU(),
            Pool2D(2, 'max', 2))

        if num_classes > 0:
            # Fix: the last Linear previously hard-coded 10 outputs even
            # though ``num_classes`` is configurable; use the parameter so
            # non-default class counts actually take effect (default
            # behavior is unchanged).
            self.fc = Sequential(
                Linear(400, 120),
                Linear(120, 84),
                Linear(84, num_classes),
                Softmax())  # TODO: accept any activation

    @declarative
    def forward(self, inputs):
        x = self.features(inputs)

        if self.num_classes > 0:
            # Flatten all dims after the batch axis before the head.
            x = fluid.layers.flatten(x, 1)
            x = self.fc(x)
        return x


class MnistDataset(MNIST):
def __init__(self, mode, return_label=True, sample_num=None):
super(MnistDataset, self).__init__(mode=mode)
Expand Down Expand Up @@ -444,7 +415,9 @@ def test_dynamic_save_static_load(self):
# dynamic saving
device = paddle.set_device('cpu')
fluid.enable_dygraph(device)
model = Model(MyModel(classifier_activation=None))
inputs = [InputSpec([None, 20], 'float32', 'x')]
labels = [InputSpec([None, 1], 'int64', 'label')]
model = Model(MyModel(classifier_activation=None), inputs, labels)
optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=model.parameters())
model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
Expand Down Expand Up @@ -543,11 +516,10 @@ def test_summary_error(self):

def test_export_deploy_model(self):
for dynamic in [True, False]:
fluid.enable_dygraph() if dynamic else None
# paddle.disable_static() if dynamic else None
paddle.disable_static() if dynamic else None
prog_translator = ProgramTranslator()
prog_translator.enable(False) if not dynamic else None
net = LeNetDeclarative()
net = LeNet()
inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
model = Model(net, inputs)
model.prepare()
Expand All @@ -556,8 +528,9 @@ def test_export_deploy_model(self):
os.makedirs(save_dir)
tensor_img = np.array(
np.random.random((1, 1, 28, 28)), dtype=np.float32)
ori_results = model.test_batch(tensor_img)

model.save(save_dir, training=False)
ori_results = model.test_batch(tensor_img)
fluid.disable_dygraph() if dynamic else None

place = fluid.CPUPlace() if not fluid.is_compiled_with_cuda(
Expand All @@ -574,6 +547,7 @@ def test_export_deploy_model(self):
np.testing.assert_allclose(
results, ori_results, rtol=1e-5, atol=1e-7)
shutil.rmtree(save_dir)
paddle.enable_static()


class TestRaiseError(unittest.TestCase):
Expand All @@ -585,6 +559,14 @@ def test_input_without_name(self):
with self.assertRaises(ValueError):
model = Model(net, inputs, labels)

def test_input_without_input_spec(self):
for dynamic in [True, False]:
paddle.disable_static() if dynamic else None
net = MyModel(classifier_activation=None)
with self.assertRaises(TypeError):
model = Model(net)
paddle.enable_static()


# Run the full unittest suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()