Skip to content
Merged
Show file tree
Hide file tree
Changes from 28 commits
Commits
Show all changes
51 commits
Select commit Hold shift + click to select a range
463359e
update save_inference_model for hapi
LiuChiachi Aug 3, 2020
4709a25
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 3, 2020
b595db4
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 3, 2020
db663c4
update save_inference_model to support dygraph
LiuChiachi Aug 3, 2020
b7850ed
fix comments
LiuChiachi Aug 3, 2020
314e3e2
fix comments
LiuChiachi Aug 3, 2020
2ffcddc
test=develop
LiuChiachi Aug 3, 2020
196df6a
test, test=develop
LiuChiachi Aug 3, 2020
6caa61b
fix dim test, test=develop
LiuChiachi Aug 3, 2020
c625a50
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 3, 2020
8f6cb8f
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 4, 2020
c7cc35f
test, test=develop
LiuChiachi Aug 4, 2020
753aef9
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 4, 2020
86adb71
add test_export_deploy_model_dynamic
LiuChiachi Aug 4, 2020
43bd7d2
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 4, 2020
08688f5
fix unittest for hapi: save_inference_model
LiuChiachi Aug 5, 2020
c61da24
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 5, 2020
0939851
fix code style
LiuChiachi Aug 5, 2020
25efe73
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 5, 2020
cca0c0a
accept review by guoshengCS
LiuChiachi Aug 6, 2020
92ab5d9
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 6, 2020
87aaf3c
fix coverage rate
LiuChiachi Aug 7, 2020
19f5ec1
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 7, 2020
d45488e
update doc for save_inference_model and copyright
LiuChiachi Aug 11, 2020
4f6fda6
change test model back to LeNet() in test_export_deploy_model
LiuChiachi Aug 11, 2020
7d3bf0f
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 11, 2020
743f249
copy jit.save, use LeNet() to test export deploy model
LiuChiachi Aug 12, 2020
103f8bf
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 12, 2020
5777091
add return value for dygraph, and fix doc error
LiuChiachi Aug 13, 2020
9d1be86
corrected the doc writing
LiuChiachi Aug 13, 2020
5fef294
Delete redundant import and correct import order in sample code.
LiuChiachi Aug 14, 2020
0ee2da7
remove 'fluid' and add prepare() and fit() in sample code
LiuChiachi Aug 18, 2020
f569de7
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 18, 2020
5cd1d8a
correct usage of API 2.0 in sample code
LiuChiachi Aug 19, 2020
1453d03
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 19, 2020
a4e44af
solve conflicts and change name of save_inference_model for hapi
LiuChiachi Aug 21, 2020
fa11228
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 21, 2020
ead375a
fix sample code bugs
LiuChiachi Aug 21, 2020
d01442d
fix code style bugs
LiuChiachi Aug 21, 2020
eeb08bb
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 21, 2020
4f1223d
fix test_model.py bugs
LiuChiachi Aug 21, 2020
aa96150
set for_inference=True
LiuChiachi Aug 21, 2020
869bc19
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 21, 2020
26fcde3
correct usage for static.InputSpec
LiuChiachi Aug 21, 2020
2d29fac
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 21, 2020
04b2d5a
update doc for model.save
LiuChiachi Aug 24, 2020
3777dc7
correct usage of API 2.0
LiuChiachi Aug 24, 2020
3e6f384
Merge branch 'develop' into update_save_inference_model_to_support_dy…
LiuChiachi Aug 24, 2020
9dffc17
rename param name for model.save
LiuChiachi Aug 25, 2020
1e4aea5
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
LiuChiachi Aug 25, 2020
92d280c
correct for_inference as training
LiuChiachi Aug 25, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
172 changes: 148 additions & 24 deletions python/paddle/incubate/hapi/model.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
Expand All @@ -25,15 +25,19 @@
from collections import Iterable

from paddle import fluid
from paddle.fluid.framework import in_dygraph_mode, Variable
from paddle.fluid import core
from paddle.fluid.framework import in_dygraph_mode, Variable, ParamBase, _current_expected_place
from paddle.fluid.executor import global_scope
from paddle.fluid.io import is_belong_to_optimizer
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, FunctionSpec
from paddle.fluid.layers.utils import flatten
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
from paddle.fluid.incubate.fleet.base import role_maker
from paddle.fluid.executor import scope_guard, Executor
from paddle.io import DataLoader, Dataset
from paddle.fluid.dygraph.layers import Layer

from .distributed import DistributedBatchSampler, _all_gather, prepare_distributed_context, _parallel_context_initialized
from .metrics import Metric
Expand Down Expand Up @@ -1525,7 +1529,10 @@ def save_inference_model(self,
params_filename=None,
model_only=False):
"""
Save inference model must in static mode.
Save inference model can be in static or dynamic mode.
It should be noted that before using `save_inference_model`,
`forward` must be called and `@paddle.jit.to_static` should
be added in dygraph now and these will be optimized later.

Args:
save_dir (str): The directory path to save the inference model.
Expand All @@ -1544,37 +1551,154 @@ def save_inference_model(self,

Examples:
.. code-block:: python

import paddle.fluid as fluid
import paddle.incubate.hapi as hapi

input = hapi.Input('image', [-1, 1, 28, 28], 'float32')
model = hapi.Model(hapi.vision.LeNet(), input)
from paddle.nn import Conv2D, Pool2D, Linear, ReLU, Sequential
import numpy as np
from paddle.incubate.hapi import Model, Input
import os
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator

class LeNet(fluid.dygraph.Layer):
def __init__(self, num_classes=10, classifier_activation=None):
super(LeNet, self).__init__()
self.num_classes = num_classes
self.features = Sequential(
Conv2D(
1, 6, 3, stride=1, padding=1),
ReLU(),
Pool2D(2, 'max', 2),
Conv2D(
6, 16, 5, stride=1, padding=0),
ReLU(),
Pool2D(2, 'max', 2))

if num_classes > 0:
self.fc = Sequential(
Linear(400, 120),
Linear(120, 84),
Linear(
84, 10, act=classifier_activation))

@paddle.jit.to_static
def forward(self, inputs):
x = self.features(inputs)

if self.num_classes > 0:
x = fluid.layers.flatten(x, 1)
x = self.fc(x)
return x

dynamic = True # False
fluid.enable_dygraph() if dynamic else None
prog_translator = ProgramTranslator()
prog_translator.enable(False) if not dynamic else None
inputs = hapi.Input('image', [-1, 1, 28, 28], 'float32')
model = hapi.Model(LeNet(), inputs)
model.prepare()
tensor_img = np.array(np.random.random((1, 1, 28, 28)), dtype=np.float32)
results = model.test_batch(tensor_img) # Now it's necessary in a dygraph

model.save_inference_model('inference_model')

TODO:
1. Save correct shape of input, now it can not save `None` in shape.
2. Make it Unnecessary to call `forward` before call `save_inference_model` for users.
3. Make it Unnecessary to add `@paddle.jit.to_static` for users.
"""
assert not fluid.in_dygraph_mode(
), 'Save inference model must in static mode!'

prog = self._adapter._progs.get('test', None)
assert prog, \
"Model is not ready, please call `model.prepare()` first"
def get_inout_spec(all_vars, return_name=False):
result_list = []
valid_vars = [var for var in all_vars if isinstance(var, Variable)]
result_list = valid_vars
if return_name:
result_list = [var.name for var in result_list]

infer_prog = prog.clone(for_test=True)
return result_list

input_names = [v.name for v in self._adapter._input_vars['test']]
endpoints = self._adapter._endpoints['test']['output']
if fluid.in_dygraph_mode():
layer = self.network
fluid.disable_dygraph()

# 1. input check
prog_translator = ProgramTranslator()
if not prog_translator.enable_declarative:
raise RuntimeError(
"save_inference_model doesn't work when setting ProgramTranslator.enable=False."
)
if not isinstance(layer, Layer):
raise TypeError(
"The input layer should be 'Layer', but received layer type is %s."
% type(layer))

# 2. get program of declarative Layer.forward
prog_cache = prog_translator.get_program_cache()
# make dummy args & kwargs, to get excepted FunctionSpec
layer_func = FunctionSpec(type(layer).forward, [layer], {})
concrete_program, _ = prog_cache.get_program(layer_func)

# NOTE: we maintain the mapping of variable name to
# structured name, the buffer variable (non-persistable)
# saved to inference program may not need by dygraph Layer,
# we only record the state_dict variable's structured name
state_names_dict = dict()
for structured_name, var in layer.state_dict().items():
state_names_dict[var.name] = structured_name

# 3. share parameters from Layer to scope & record var info
scope = core.Scope()
extra_var_info = dict()
for param_or_buffer in concrete_program.parameters:
# share to scope
param_or_buffer_tensor = scope.var(
param_or_buffer.name).get_tensor()
src_tensor = param_or_buffer.value().get_tensor()
param_or_buffer_tensor._share_data_with(src_tensor)
# record var info
extra_info_dict = dict()
if param_or_buffer.name in state_names_dict:
extra_info_dict['structured_name'] = state_names_dict[
param_or_buffer.name]
extra_info_dict['stop_gradient'] = param_or_buffer.stop_gradient
if isinstance(param_or_buffer, ParamBase):
extra_info_dict['trainable'] = param_or_buffer.trainable
extra_var_info[param_or_buffer.name] = extra_info_dict

# 4. build input & output spec
input_var_names = get_inout_spec(concrete_program.inputs, True)
output_vars = get_inout_spec(concrete_program.outputs)

# 5. save inference model
with scope_guard(scope):
fluid.io.save_inference_model(
dirname=save_dir,
feeded_var_names=input_var_names,
target_vars=output_vars,
executor=Executor(_current_expected_place()),
main_program=concrete_program.main_program.clone(),
model_filename=model_filename,
params_filename=params_filename,
program_only=model_only)

return fluid.io.save_inference_model(
save_dir,
input_names,
endpoints,
self._adapter._executor,
main_program=infer_prog,
model_filename=model_filename,
params_filename=params_filename,
program_only=model_only)
else:
prog = self._adapter._progs.get('test', None)
assert prog, \
"Model is not ready, please call `model.prepare()` first"

infer_prog = prog.clone(for_test=True)

input_names = [v.name for v in self._adapter._input_vars['test']]
endpoints = self._adapter._endpoints['test']['output']

return fluid.io.save_inference_model(
save_dir,
input_names,
endpoints,
self._adapter._executor,
main_program=infer_prog,
model_filename=model_filename,
params_filename=params_filename,
program_only=model_only)

def _run_one_epoch(self, data_loader, callbacks, mode, logs={}):
outputs = []
Expand Down
92 changes: 64 additions & 28 deletions python/paddle/incubate/hapi/tests/test_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@
from paddle.incubate.hapi.datasets import MNIST
from paddle.incubate.hapi.vision.models import LeNet
from paddle.incubate.hapi.distributed import DistributedBatchSampler, prepare_distributed_context
from paddle.fluid.dygraph.jit import declarative
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator


class LeNetDygraph(fluid.dygraph.Layer):
Expand Down Expand Up @@ -65,6 +67,37 @@ def forward(self, inputs):
return x


class LeNetDeclarative(fluid.dygraph.Layer):
    """LeNet-style CNN whose ``forward`` is wrapped with ``@declarative``.

    Used to exercise dygraph-to-static conversion: the declarative forward
    lets ``save_inference_model`` trace the network in dynamic mode.

    Args:
        num_classes (int): size of the classifier head; a value <= 0
            disables the fully-connected head entirely.
        classifier_activation (str|None): activation applied by the final
            Linear layer (passed straight through as ``act``).
    """

    def __init__(self, num_classes=10, classifier_activation=None):
        super(LeNetDeclarative, self).__init__()
        self.num_classes = num_classes
        # Convolutional feature extractor: two conv->relu->maxpool stages.
        backbone = [
            Conv2D(1, 6, 3, stride=1, padding=1),
            ReLU(),
            Pool2D(2, 'max', 2),
            Conv2D(6, 16, 5, stride=1, padding=0),
            ReLU(),
            Pool2D(2, 'max', 2),
        ]
        self.features = Sequential(*backbone)

        if num_classes > 0:
            # Classifier head; input is the 400-dim flattened feature map.
            head = [
                Linear(400, 120),
                Linear(120, 84),
                Linear(84, 10, act=classifier_activation),
            ]
            self.fc = Sequential(*head)

    @declarative
    def forward(self, inputs):
        out = self.features(inputs)
        if self.num_classes > 0:
            # Flatten all dims after the batch axis before the FC head.
            out = fluid.layers.flatten(out, 1)
            out = self.fc(out)
        return out


class MnistDataset(MNIST):
def __init__(self, mode, return_label=True, sample_num=None):
super(MnistDataset, self).__init__(mode=mode)
Expand Down Expand Up @@ -322,7 +355,6 @@ def get_expect():
model.prepare(
optim2, loss_function=CrossEntropyLoss(reduction="sum"))
loss, = model.train_batch([data], [label])

np.testing.assert_allclose(loss.flatten(), ref.flatten())
fluid.disable_dygraph() if dynamic else None

Expand Down Expand Up @@ -437,33 +469,37 @@ def test_parameters(self):
fluid.disable_dygraph() if dynamic else None

def test_export_deploy_model(self):
net = LeNet()
inputs = [Input('image', [-1, 1, 28, 28], 'float32')]
model = Model(net, inputs)
model.prepare()
save_dir = tempfile.mkdtemp()
if not os.path.exists(save_dir):
os.makedirs(save_dir)

tensor_img = np.array(
np.random.random((1, 1, 28, 28)), dtype=np.float32)
ori_results = model.test_batch(tensor_img)

model.save_inference_model(save_dir)

place = fluid.CPUPlace() if not fluid.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
exe = fluid.Executor(place)
[inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=save_dir, executor=exe))

results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)

np.testing.assert_allclose(results, ori_results, rtol=1e-6)
shutil.rmtree(save_dir)
for dynamic in [True, False]:
fluid.enable_dygraph() if dynamic else None
prog_translator = ProgramTranslator()
prog_translator.enable(False) if not dynamic else None
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
net = LeNetDeclarative()
inputs = [Input('X', [None, 1, 28, 28], 'float32')]
model = Model(net, inputs)
model.prepare()
save_dir = tempfile.mkdtemp()
if not os.path.exists(save_dir):
os.makedirs(save_dir)
tensor_img = np.array(
np.random.random((1, 1, 28, 28)), dtype=np.float32)
ori_results = model.test_batch(tensor_img)
model.save_inference_model(save_dir)
fluid.disable_dygraph() if dynamic else None

place = fluid.CPUPlace() if not fluid.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
exe = fluid.Executor(place)
[inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=save_dir, executor=exe))
results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
np.testing.assert_allclose(
results, ori_results, rtol=1e-5, atol=1e-7)
shutil.rmtree(save_dir)


if __name__ == '__main__':
Expand Down