From adf8c959a945ca6508a3431d34a086b9e762e0a7 Mon Sep 17 00:00:00 2001
From: jiweibo
Date: Thu, 24 Oct 2019 05:23:46 +0000
Subject: [PATCH] update paddle-lite subgraph

---
 .../analysis/ir_passes/lite_subgraph_pass.cc  |  6 +-
 paddle/fluid/inference/lite/tensor_utils.cc   |  4 +
 paddle/fluid/inference/lite/test_predictor.cc | 78 ++++++++++++++++++-
 3 files changed, 85 insertions(+), 3 deletions(-)

diff --git a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
index 2d1ec7a9d28e26..1da8a27f11a3e2 100644
--- a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
+++ b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
@@ -226,7 +226,11 @@ void LiteSubgraphPass::SetUpEngine(
   paddle::lite_api::PrecisionType precision_type =
       enable_int8 ? PRECISION(kInt8) : PRECISION(kFloat);
   paddle::lite::Place prefer_place = {target_type, precision_type};
-
+  std::set<std::string> param_names_set(repetitive_params.begin(),
+                                        repetitive_params.end());
+  const_cast<std::vector<std::string>&>(repetitive_params)
+      .assign(param_names_set.begin(), param_names_set.end());
+
   serialize_params(&config.param, scope, repetitive_params);
   config.model = program->Proto()->SerializeAsString();
   config.prefer_place = prefer_place;
diff --git a/paddle/fluid/inference/lite/tensor_utils.cc b/paddle/fluid/inference/lite/tensor_utils.cc
index 8021768ce389bb..c047d0189d7f32 100644
--- a/paddle/fluid/inference/lite/tensor_utils.cc
+++ b/paddle/fluid/inference/lite/tensor_utils.cc
@@ -46,6 +46,8 @@ PrecisionType GetLitePrecisionType(framework::proto::VarType::Type type) {
       return PrecisionType::kFloat;
     case framework::proto::VarType_Type_INT8:
       return PrecisionType::kInt8;
+    case framework::proto::VarType_Type_INT32:
+      return PrecisionType::kInt32;
     default:
       LOG(FATAL) << "Error precision type.";
       return PrecisionType::kUnk;
@@ -59,6 +61,8 @@ framework::proto::VarType::Type GetNativePrecisionType(
       return framework::proto::VarType_Type_FP32;
     case PrecisionType::kInt8:
       return framework::proto::VarType_Type_INT8;
+    case PrecisionType::kInt32:
+      return framework::proto::VarType_Type_INT32;
     default:
       LOG(FATAL) << "Error precision type.";
       return static_cast<framework::proto::VarType::Type>(-1);
diff --git a/paddle/fluid/inference/lite/test_predictor.cc b/paddle/fluid/inference/lite/test_predictor.cc
index 7cb1ee71118869..d428db1e6d705b 100644
--- a/paddle/fluid/inference/lite/test_predictor.cc
+++ b/paddle/fluid/inference/lite/test_predictor.cc
@@ -23,15 +23,89 @@
 #include "paddle/fluid/platform/enforce.h"
 
 int main() {
-  LOG(INFO) << "Hello World!";
+  LOG(INFO) << "leaky_relu";
   paddle::AnalysisConfig config;
   config.SetModel("/shixiaowei02/Paddle_lite/xingzhaolong/leaky_relu_model");
+  // config.SetModel("/Paddle/models/lite/leaky_relu");
   config.SwitchUseFeedFetchOps(false);
-  config.EnableUseGpu(10, 1);
+  config.EnableUseGpu(10, 0);
   config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32);
   config.pass_builder()->TurnOnDebug();
 
   auto predictor = CreatePaddlePredictor(config);
   PADDLE_ENFORCE_NOT_NULL(predictor.get());
+
+  const int batch_size = 1;
+  const int channels = 1;
+  const int height = 3;
+  const int width = 3;
+  // float *data = new float[batch_size * channels * height * width];
+  float data[batch_size * channels * height * width] = {0.5, -0.5, 0, -0, 1,
+                                                        -1, 2, -2, 3};
+
+  auto input_names = predictor->GetInputNames();
+  auto input_t = predictor->GetInputTensor(input_names[0]);
+  input_t->Reshape({batch_size, channels, height, width});
+  input_t->copy_from_cpu(data);
+
+  CHECK(predictor->ZeroCopyRun());
+
+  std::vector<float> out_data;
+  auto output_names = predictor->GetOutputNames();
+  auto output_t = predictor->GetOutputTensor(output_names[0]);
+  std::vector<int> output_shape = output_t->shape();
+  int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
+                                std::multiplies<int>());
+  LOG(INFO) << "out_num is " << out_num;
+  out_data.resize(out_num);
+  output_t->copy_to_cpu(out_data.data());
   return 0;
+
+  /*
+  // for yolov3
+  LOG(INFO) << "yolo_v3";
+  paddle::AnalysisConfig config;
+  config.SetModel("/Paddle/models/lite/yolov3_infer/__model__",
+                  "/Paddle/models/lite/yolov3_infer/__params__");
+  config.SwitchUseFeedFetchOps(false);
+  config.EnableUseGpu(10, 1);
+  config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32);
+  config.pass_builder()->TurnOnDebug();
+
+  auto predictor = CreatePaddlePredictor(config);
+  PADDLE_ENFORCE_NOT_NULL(predictor.get());
+
+  const int batch_size = 1;
+  const int channels = 3;
+  const int height = 608;
+  const int width = 608;
+  // float *data = new float[batch_size * channels * height * width];
+  float data[batch_size * channels * height * width];
+  memset(data, 0, sizeof(float) * batch_size * channels * height * width);
+
+  auto input_names = predictor->GetInputNames();
+  LOG(INFO) << input_names[0];
+  LOG(INFO) << input_names[1];
+  auto input_image = predictor->GetInputTensor(input_names[0]);
+  input_image->Reshape({batch_size, channels, height, width});
+  input_image->copy_from_cpu(data);
+
+  int im_size_data[2] = {608, 608};
+  auto input_size = predictor->GetInputTensor(input_names[1]);
+  input_size->Reshape({1, 2});
+  input_size->copy_from_cpu(im_size_data);
+
+  CHECK(predictor->ZeroCopyRun());
+
+  std::vector<float> out_data;
+  auto output_names = predictor->GetOutputNames();
+  auto output_t = predictor->GetOutputTensor(output_names[0]);
+  std::vector<int> output_shape = output_t->shape();
+  int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
+                                std::multiplies<int>());
+  LOG(INFO) << "out_num is " << out_num;
+  out_data.resize(out_num);
+  output_t->copy_to_cpu(out_data.data());
+  return 0;
+  */
 }
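
Note on the lite_subgraph_pass.cc hunk: repetitive_params may carry the same
parameter name more than once, so the names are round-tripped through a
std::set before serialize_params runs, which both removes duplicates and
sorts them, letting each parameter be serialized exactly once. A minimal
standalone sketch of the same idiom (file and variable names here are
illustrative, not part of the patch):

    // dedup_sketch.cc - the set-based dedup idiom used in SetUpEngine.
    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> params = {"conv1_w", "fc_b", "conv1_w"};
      // Copying into a std::set collapses duplicates and orders the names;
      // assigning back leaves the vector with each name exactly once.
      std::set<std::string> unique_names(params.begin(), params.end());
      params.assign(unique_names.begin(), unique_names.end());
      for (const auto& name : params) {
        std::cout << name << "\n";  // prints: conv1_w, fc_b
      }
      return 0;
    }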
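The leaky_relu path and the commented-out yolov3 path in test_predictor.cc
follow the same feed/run/fetch sequence. A sketch that factors that sequence
into one helper, assuming only the ZeroCopyTensor calls the test already
uses (Reshape, copy_from_cpu, ZeroCopyRun, copy_to_cpu); the helper name
RunSingleInput is hypothetical:

    // Sketch only: feed one float input, run the predictor, fetch output 0.
    #include <functional>
    #include <numeric>
    #include <vector>
    #include "paddle/fluid/inference/api/paddle_inference_api.h"

    std::vector<float> RunSingleInput(paddle::PaddlePredictor* predictor,
                                      const std::vector<int>& shape,
                                      const float* input) {
      auto input_names = predictor->GetInputNames();
      auto input_t = predictor->GetInputTensor(input_names[0]);
      input_t->Reshape(shape);
      input_t->copy_from_cpu(input);  // copy the feed onto the device

      CHECK(predictor->ZeroCopyRun());

      auto output_names = predictor->GetOutputNames();
      auto output_t = predictor->GetOutputTensor(output_names[0]);
      std::vector<int> out_shape = output_t->shape();
      // Element count is the product of the output dimensions.
      int out_num = std::accumulate(out_shape.begin(), out_shape.end(), 1,
                                    std::multiplies<int>());
      std::vector<float> out_data(out_num);
      output_t->copy_to_cpu(out_data.data());  // fetch back to the host
      return out_data;
    }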