Commit 1c31d9d

add det_mv3_db & LeViT test case in pr-ci-inference (#34803)
* add det_mv3_db & LeViT test case in pr-ci-inference
* fix LeViT model dir bugs
* fix grammar error
1 parent 0a5c99e commit 1c31d9d

5 files changed: +391 −2 lines changed


paddle/fluid/inference/tests/infer_ut/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ else()
   if(WITH_MKL)
     set(FLAG_OPENMP "-fopenmp")
   endif()
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 ${FLAG_OPENMP}")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 ${FLAG_OPENMP}")
 endif()
 
 if(WITH_GPU)
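Note: the standard bump means the infer_ut demos now require a C++17-capable toolchain. A quick local sanity check (a hedged one-liner, assuming g++ is the host compiler):

echo 'int main() { return 0; }' | g++ -std=c++17 -x c++ - -o /dev/null && echo 'C++17 OK'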

paddle/fluid/inference/tests/infer_ut/run.sh

Lines changed: 52 additions & 0 deletions
@@ -65,6 +65,18 @@ for model_name in $download_list; do
   download $url_prefix $model_name
 done
 
+ocr_download_list='ocr_det_mv3_db'
+for model_name in $ocr_download_list; do
+  url_prefix="https://paddle-qa.bj.bcebos.com/inference_model/2.1.1/ocr"
+  download $url_prefix $model_name
+done
+
+clas_download_list='LeViT'
+for model_name in $clas_download_list; do
+  url_prefix="https://paddle-qa.bj.bcebos.com/inference_model/2.1.1/class"
+  download $url_prefix $model_name
+done
+
 # compile and run test
 cd $current_dir
 mkdir -p build
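The new loops reuse the `download` helper defined earlier in run.sh. A minimal sketch of what such a helper typically does (hypothetical; the real definition lives above this hunk and may differ):

download() {
  url_prefix=$1
  model_name=$2
  # fetch and unpack the model archive into $DATA_DIR, skipping re-downloads
  mkdir -p $DATA_DIR && cd $DATA_DIR
  if [ ! -d $model_name ]; then
    wget -q ${url_prefix}/${model_name}.tgz
    tar xzf ${model_name}.tgz
  fi
  cd -
}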
@@ -92,6 +104,46 @@ if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
   fi
 fi
 
+# ---------tensorrt det_mv3_db on linux---------
+if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
+    -DWITH_MKL=$TURN_ON_MKL \
+    -DDEMO_NAME=test_det_mv3_db \
+    -DWITH_GPU=$TEST_GPU_CPU \
+    -DWITH_STATIC_LIB=OFF \
+    -DUSE_TENSORRT=$USE_TENSORRT \
+    -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
+    -DWITH_GTEST=ON
+  make -j$(nproc)
+  ./test_det_mv3_db \
+    --modeldir=$DATA_DIR/ocr_det_mv3_db/ocr_det_mv3_db \
+    --gtest_output=xml:test_det_mv3_db.xml
+  if [ $? -ne 0 ]; then
+    echo "test_det_mv3_db runs failed" >> ${current_dir}/build/test_summary.txt
+    EXIT_CODE=1
+  fi
+fi
+
+# ---------tensorrt LeViT on linux---------
+if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
+    -DWITH_MKL=$TURN_ON_MKL \
+    -DDEMO_NAME=test_LeViT \
+    -DWITH_GPU=$TEST_GPU_CPU \
+    -DWITH_STATIC_LIB=OFF \
+    -DUSE_TENSORRT=$USE_TENSORRT \
+    -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
+    -DWITH_GTEST=ON
+  make -j$(nproc)
+  ./test_LeViT \
+    --modeldir=$DATA_DIR/LeViT/LeViT \
+    --gtest_output=xml:test_LeViT.xml
+  if [ $? -ne 0 ]; then
+    echo "test_LeViT runs failed" >> ${current_dir}/build/test_summary.txt
+    EXIT_CODE=1
+  fi
+fi
+
 if [[ -f ${current_dir}/build/test_summary.txt ]];then
   echo "=====================test summary======================"
   cat ${current_dir}/build/test_summary.txt
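Both new blocks repeat the same configure/build/run pattern and append to test_summary.txt on failure. A hedged sketch of how the pattern could be factored into a helper (hypothetical; not part of this commit):

run_infer_ut_demo() {
  demo_name=$1
  model_dir=$2
  # reconfigure the demo build for one gtest binary, rebuild, and run it
  cmake .. -DPADDLE_LIB=${inference_install_dir} \
    -DWITH_MKL=$TURN_ON_MKL \
    -DDEMO_NAME=$demo_name \
    -DWITH_GPU=$TEST_GPU_CPU \
    -DWITH_STATIC_LIB=OFF \
    -DUSE_TENSORRT=$USE_TENSORRT \
    -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
    -DWITH_GTEST=ON
  make -j$(nproc)
  ./$demo_name --modeldir=$model_dir --gtest_output=xml:${demo_name}.xml
  if [ $? -ne 0 ]; then
    echo "$demo_name runs failed" >> ${current_dir}/build/test_summary.txt
    EXIT_CODE=1
  fi
}
# usage: run_infer_ut_demo test_LeViT $DATA_DIR/LeViT/LeViT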
paddle/fluid/inference/tests/infer_ut/test_LeViT.cc

Lines changed: 179 additions & 0 deletions
@@ -0,0 +1,179 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "test_suite.h"  // NOLINT

DEFINE_string(modeldir, "", "Directory of the inference model.");

namespace paddle_infer {

paddle::test::Record PrepareInput(int batch_size) {
  // init input data: an all-ones NCHW float tensor of shape
  // [batch_size, 3, 224, 224]
  int channel = 3;
  int width = 224;
  int height = 224;
  paddle::test::Record image_Record;
  int input_num = batch_size * channel * width * height;
  std::vector<float> input_data(input_num, 1);
  image_Record.data = input_data;
  image_Record.shape = std::vector<int>{batch_size, channel, width, height};
  image_Record.type = paddle::PaddleDType::FLOAT32;
  return image_Record;
}

TEST(test_LeViT, analysis_gpu_bz1) {
  // init input data
  std::map<std::string, paddle::test::Record> my_input_data_map;
  my_input_data_map["x"] = PrepareInput(1);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare ground-truth config (IR optimization disabled)
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                        FLAGS_modeldir + "/inference.pdiparams");
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                  FLAGS_modeldir + "/inference.pdiparams");
  // get ground truth by disabling IR optimization
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &my_input_data_map,
                         &truth_output_data, 1);
  // get inference results
  paddle_infer::services::PredictorPool pred_pool(config, 1);
  SingleThreadPrediction(pred_pool.Retrive(0), &my_input_data_map,
                         &infer_output_data);
  // check outputs
  CompareRecord(&truth_output_data, &infer_output_data);
  std::cout << "finish test" << std::endl;
}

TEST(test_LeViT, trt_fp32_bz2) {
  // init input data
  std::map<std::string, paddle::test::Record> my_input_data_map;
  my_input_data_map["x"] = PrepareInput(2);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare ground-truth config (IR optimization disabled)
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                        FLAGS_modeldir + "/inference.pdiparams");
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                  FLAGS_modeldir + "/inference.pdiparams");
  config.EnableUseGpu(100, 0);
  // args: workspace size, max batch size, min subgraph size, precision,
  // use_static, use_calib_mode
  config.EnableTensorRtEngine(
      1 << 20, 2, 6, paddle_infer::PrecisionType::kFloat32, false, false);
  // get ground truth by disabling IR optimization
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &my_input_data_map,
                         &truth_output_data, 1);
  // get inference results
  paddle_infer::services::PredictorPool pred_pool(config, 1);
  SingleThreadPrediction(pred_pool.Retrive(0), &my_input_data_map,
                         &infer_output_data);
  // check outputs
  CompareRecord(&truth_output_data, &infer_output_data);
  std::cout << "finish test" << std::endl;
}

TEST(test_LeViT, serial_diff_batch_trt_fp32) {
  int max_batch_size = 5;
  // prepare ground-truth config (IR optimization disabled)
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                        FLAGS_modeldir + "/inference.pdiparams");
  config_no_ir.SwitchIrOptim(false);
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                  FLAGS_modeldir + "/inference.pdiparams");
  config.EnableUseGpu(100, 0);
  config.EnableTensorRtEngine(1 << 20, max_batch_size, 6,
                              paddle_infer::PrecisionType::kFloat32, false,
                              false);
  paddle_infer::services::PredictorPool pred_pool(config, 1);

  for (int i = 1; i < max_batch_size; i++) {
    // init input data
    std::map<std::string, paddle::test::Record> my_input_data_map;
    my_input_data_map["x"] = PrepareInput(i);
    // init output data
    std::map<std::string, paddle::test::Record> infer_output_data,
        truth_output_data;
    // get ground truth by disabling IR optimization
    SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &my_input_data_map,
                           &truth_output_data, 1);
    // get inference results
    SingleThreadPrediction(pred_pool.Retrive(0), &my_input_data_map,
                           &infer_output_data);
    // check outputs
    CompareRecord(&truth_output_data, &infer_output_data);
  }
  std::cout << "finish test" << std::endl;
}

TEST(test_LeViT, multi_thread4_trt_fp32_bz2) {
  int thread_num = 4;
  // init input data
  std::map<std::string, paddle::test::Record> my_input_data_map;
  my_input_data_map["x"] = PrepareInput(2);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare ground-truth config (IR optimization disabled)
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                        FLAGS_modeldir + "/inference.pdiparams");
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                  FLAGS_modeldir + "/inference.pdiparams");
  config.EnableUseGpu(100, 0);
  config.EnableTensorRtEngine(
      1 << 20, 2, 6, paddle_infer::PrecisionType::kFloat32, false, false);
  // get ground truth by disabling IR optimization
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &my_input_data_map,
                         &truth_output_data, 1);

  // get inference results from multiple threads
  std::vector<std::thread> threads;
  services::PredictorPool pred_pool(config, thread_num);
  for (int i = 0; i < thread_num; ++i) {
    threads.emplace_back(paddle::test::SingleThreadPrediction,
                         pred_pool.Retrive(i), &my_input_data_map,
                         &infer_output_data, 2);
  }

  // thread join & check outputs
  for (int i = 0; i < thread_num; ++i) {
    LOG(INFO) << "join tid : " << i;
    threads[i].join();
    CompareRecord(&truth_output_data, &infer_output_data);
  }

  std::cout << "finish multi-thread test" << std::endl;
}

}  // namespace paddle_infer

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::google::ParseCommandLineFlags(&argc, &argv, true);
  return RUN_ALL_TESTS();
}
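Since the binary is a standard gtest target, a single case can be run locally with gtest's filter flag (assuming the model was downloaded to $DATA_DIR as in run.sh):

./test_LeViT --modeldir=$DATA_DIR/LeViT/LeViT --gtest_filter=test_LeViT.trt_fp32_bz2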
