Skip to content

Commit 1185a1b

Browse files
Xreki authored and kexinzhao committed
Add C++ inference unittest of recommender system (#8227)
* Save the inference model in Python example of recommender_system.
* Add infer() in Python unittest recommender_system.
* Add C++ inference unittest of recommender_system.
1 parent b257ca9 commit 1185a1b

10 files changed

+266
-50
lines changed

paddle/inference/tests/book/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,4 @@ inference_test(recognize_digits ARGS mlp)
2828
inference_test(image_classification ARGS vgg resnet)
2929
inference_test(label_semantic_roles)
3030
inference_test(rnn_encoder_decoder)
31+
inference_test(recommender_system)

paddle/inference/tests/book/test_helper.h

Lines changed: 28 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -30,14 +30,34 @@ void SetupTensor(paddle::framework::LoDTensor& input,
3030
}
3131
}
3232

33+
template <typename T>
34+
void SetupTensor(paddle::framework::LoDTensor& input,
35+
paddle::framework::DDim dims,
36+
std::vector<T>& data) {
37+
CHECK_EQ(paddle::framework::product(dims), data.size());
38+
T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
39+
memcpy(input_ptr, data.data(), input.numel() * sizeof(T));
40+
}
41+
3342
template <typename T>
3443
void SetupLoDTensor(paddle::framework::LoDTensor& input,
3544
paddle::framework::LoD& lod,
3645
T lower,
3746
T upper) {
3847
input.set_lod(lod);
3948
int dim = lod[0][lod[0].size() - 1];
40-
SetupTensor(input, {dim, 1}, lower, upper);
49+
SetupTensor<T>(input, {dim, 1}, lower, upper);
50+
}
51+
52+
template <typename T>
53+
void SetupLoDTensor(paddle::framework::LoDTensor& input,
54+
paddle::framework::DDim dims,
55+
paddle::framework::LoD lod,
56+
std::vector<T>& data) {
57+
const size_t level = lod.size() - 1;
58+
CHECK_EQ(dims[0], (lod[level]).back());
59+
input.set_lod(lod);
60+
SetupTensor<T>(input, dims, data);
4161
}
4262

4363
template <typename T>
@@ -67,26 +87,29 @@ void CheckError(paddle::framework::LoDTensor& output1,
6787
EXPECT_EQ(count, 0) << "There are " << count << " different elements.";
6888
}
6989

70-
template <typename Place, typename T, bool IsCombined = false>
90+
template <typename Place, bool IsCombined = false>
7191
void TestInference(const std::string& dirname,
7292
const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
7393
std::vector<paddle::framework::LoDTensor*>& cpu_fetchs) {
7494
// 1. Define place, executor, scope and inference_program
7595
auto place = Place();
7696
auto executor = paddle::framework::Executor(place);
7797
auto* scope = new paddle::framework::Scope();
78-
std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
7998

80-
// 2. Initialize the inference_program and load all parameters from file
99+
// 2. Initialize the inference_program and load parameters
100+
std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
81101
if (IsCombined) {
82-
// Hard-coding the names for combined params case
102+
// All parameters are saved in a single file.
103+
// Hard-coding the file names of program and parameters in unittest.
104+
// Users are free to specify different filename.
83105
std::string prog_filename = "__model_combined__";
84106
std::string param_filename = "__params_combined__";
85107
inference_program = paddle::inference::Load(executor,
86108
*scope,
87109
dirname + "/" + prog_filename,
88110
dirname + "/" + param_filename);
89111
} else {
112+
// Parameters are saved in separate files sited in the specified `dirname`.
90113
inference_program = paddle::inference::Load(executor, *scope, dirname);
91114
}
92115

paddle/inference/tests/book/test_inference_image_classification.cc

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -29,11 +29,15 @@ TEST(inference, image_classification) {
2929
// 0. Call `paddle::framework::InitDevices()` initialize all the devices
3030
// In unittests, this is done in paddle/testing/paddle_gtest_main.cc
3131

32+
int64_t batch_size = 1;
33+
3234
paddle::framework::LoDTensor input;
3335
// Use normalized image pixels as input data,
3436
// which should be in the range [0.0, 1.0].
35-
SetupTensor<float>(
36-
input, {1, 3, 32, 32}, static_cast<float>(0), static_cast<float>(1));
37+
SetupTensor<float>(input,
38+
{batch_size, 3, 32, 32},
39+
static_cast<float>(0),
40+
static_cast<float>(1));
3741
std::vector<paddle::framework::LoDTensor*> cpu_feeds;
3842
cpu_feeds.push_back(&input);
3943

@@ -42,8 +46,7 @@ TEST(inference, image_classification) {
4246
cpu_fetchs1.push_back(&output1);
4347

4448
// Run inference on CPU
45-
TestInference<paddle::platform::CPUPlace, float>(
46-
dirname, cpu_feeds, cpu_fetchs1);
49+
TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1);
4750
LOG(INFO) << output1.dims();
4851

4952
#ifdef PADDLE_WITH_CUDA
@@ -52,8 +55,7 @@ TEST(inference, image_classification) {
5255
cpu_fetchs2.push_back(&output2);
5356

5457
// Run inference on CUDA GPU
55-
TestInference<paddle::platform::CUDAPlace, float>(
56-
dirname, cpu_feeds, cpu_fetchs2);
58+
TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2);
5759
LOG(INFO) << output2.dims();
5860

5961
CheckError<float>(output1, output2);

paddle/inference/tests/book/test_inference_label_semantic_roles.cc

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -58,8 +58,7 @@ TEST(inference, label_semantic_roles) {
5858
cpu_fetchs1.push_back(&output1);
5959

6060
// Run inference on CPU
61-
TestInference<paddle::platform::CPUPlace, float>(
62-
dirname, cpu_feeds, cpu_fetchs1);
61+
TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1);
6362
LOG(INFO) << output1.lod();
6463
LOG(INFO) << output1.dims();
6564

@@ -69,8 +68,7 @@ TEST(inference, label_semantic_roles) {
6968
cpu_fetchs2.push_back(&output2);
7069

7170
// Run inference on CUDA GPU
72-
TestInference<paddle::platform::CUDAPlace, float>(
73-
dirname, cpu_feeds, cpu_fetchs2);
71+
TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2);
7472
LOG(INFO) << output2.lod();
7573
LOG(INFO) << output2.dims();
7674

paddle/inference/tests/book/test_inference_recognize_digits.cc

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -29,11 +29,15 @@ TEST(inference, recognize_digits) {
2929
// 0. Call `paddle::framework::InitDevices()` initialize all the devices
3030
// In unittests, this is done in paddle/testing/paddle_gtest_main.cc
3131

32+
int64_t batch_size = 1;
33+
3234
paddle::framework::LoDTensor input;
3335
// Use normalized image pixels as input data,
3436
// which should be in the range [-1.0, 1.0].
35-
SetupTensor<float>(
36-
input, {1, 28, 28}, static_cast<float>(-1), static_cast<float>(1));
37+
SetupTensor<float>(input,
38+
{batch_size, 1, 28, 28},
39+
static_cast<float>(-1),
40+
static_cast<float>(1));
3741
std::vector<paddle::framework::LoDTensor*> cpu_feeds;
3842
cpu_feeds.push_back(&input);
3943

@@ -42,8 +46,7 @@ TEST(inference, recognize_digits) {
4246
cpu_fetchs1.push_back(&output1);
4347

4448
// Run inference on CPU
45-
TestInference<paddle::platform::CPUPlace, float>(
46-
dirname, cpu_feeds, cpu_fetchs1);
49+
TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1);
4750
LOG(INFO) << output1.dims();
4851

4952
#ifdef PADDLE_WITH_CUDA
@@ -52,8 +55,7 @@ TEST(inference, recognize_digits) {
5255
cpu_fetchs2.push_back(&output2);
5356

5457
// Run inference on CUDA GPU
55-
TestInference<paddle::platform::CUDAPlace, float>(
56-
dirname, cpu_feeds, cpu_fetchs2);
58+
TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2);
5759
LOG(INFO) << output2.dims();
5860

5961
CheckError<float>(output1, output2);
@@ -84,7 +86,7 @@ TEST(inference, recognize_digits_combine) {
8486
cpu_fetchs1.push_back(&output1);
8587

8688
// Run inference on CPU
87-
TestInference<paddle::platform::CPUPlace, float, true>(
89+
TestInference<paddle::platform::CPUPlace, true>(
8890
dirname, cpu_feeds, cpu_fetchs1);
8991
LOG(INFO) << output1.dims();
9092

@@ -94,7 +96,7 @@ TEST(inference, recognize_digits_combine) {
9496
cpu_fetchs2.push_back(&output2);
9597

9698
// Run inference on CUDA GPU
97-
TestInference<paddle::platform::CUDAPlace, float, true>(
99+
TestInference<paddle::platform::CUDAPlace, true>(
98100
dirname, cpu_feeds, cpu_fetchs2);
99101
LOG(INFO) << output2.dims();
100102

Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License. */
14+
15+
#include <gtest/gtest.h>
16+
#include "gflags/gflags.h"
17+
#include "test_helper.h"
18+
19+
DEFINE_string(dirname, "", "Directory of the inference model.");
20+
21+
// End-to-end inference test for the recommender_system book chapter.
// Loads the model saved by the Python unittest from --dirname, feeds one
// hand-written sample, and (with CUDA) checks CPU/GPU outputs agree.
TEST(inference, recommender_system) {
  if (FLAGS_dirname.empty()) {
    LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model";
  }

  // glog terminates every message with a newline; appending std::endl would
  // only add a redundant blank line and force a flush.
  LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname;
  std::string dirname = FLAGS_dirname;

  // 0. Call `paddle::framework::InitDevices()` initialize all the devices
  // In unittests, this is done in paddle/testing/paddle_gtest_main.cc

  int64_t batch_size = 1;

  paddle::framework::LoDTensor user_id, gender_id, age_id, job_id, movie_id,
      category_id, movie_title;

  // Use the first data from paddle.dataset.movielens.test() as input.
  // Each id input is a {batch_size, 1} int64 tensor.
  std::vector<int64_t> user_id_data = {1};
  SetupTensor<int64_t>(user_id, {batch_size, 1}, user_id_data);

  std::vector<int64_t> gender_id_data = {1};
  SetupTensor<int64_t>(gender_id, {batch_size, 1}, gender_id_data);

  std::vector<int64_t> age_id_data = {0};
  SetupTensor<int64_t>(age_id, {batch_size, 1}, age_id_data);

  std::vector<int64_t> job_id_data = {10};
  SetupTensor<int64_t>(job_id, {batch_size, 1}, job_id_data);

  std::vector<int64_t> movie_id_data = {783};
  SetupTensor<int64_t>(movie_id, {batch_size, 1}, movie_id_data);

  // category and title are variable-length sequences, so they carry a LoD
  // describing one sequence covering all rows.
  std::vector<int64_t> category_id_data = {10, 8, 9};
  SetupLoDTensor<int64_t>(category_id, {3, 1}, {{0, 3}}, category_id_data);

  std::vector<int64_t> movie_title_data = {1069, 4140, 2923, 710, 988};
  SetupLoDTensor<int64_t>(movie_title, {5, 1}, {{0, 5}}, movie_title_data);

  // Feed order must match the feed_target_names recorded in the saved
  // inference program.
  std::vector<paddle::framework::LoDTensor*> cpu_feeds;
  cpu_feeds.push_back(&user_id);
  cpu_feeds.push_back(&gender_id);
  cpu_feeds.push_back(&age_id);
  cpu_feeds.push_back(&job_id);
  cpu_feeds.push_back(&movie_id);
  cpu_feeds.push_back(&category_id);
  cpu_feeds.push_back(&movie_title);

  paddle::framework::LoDTensor output1;
  std::vector<paddle::framework::LoDTensor*> cpu_fetchs1;
  cpu_fetchs1.push_back(&output1);

  // Run inference on CPU
  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1);
  LOG(INFO) << output1.dims();

#ifdef PADDLE_WITH_CUDA
  paddle::framework::LoDTensor output2;
  std::vector<paddle::framework::LoDTensor*> cpu_fetchs2;
  cpu_fetchs2.push_back(&output2);

  // Run inference on CUDA GPU
  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2);
  LOG(INFO) << output2.dims();

  // CPU and GPU results should match element-wise within tolerance.
  CheckError<float>(output1, output2);
#endif
}

paddle/inference/tests/book/test_inference_rnn_encoder_decoder.cc

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -46,8 +46,7 @@ TEST(inference, rnn_encoder_decoder) {
4646
cpu_fetchs1.push_back(&output1);
4747

4848
// Run inference on CPU
49-
TestInference<paddle::platform::CPUPlace, float>(
50-
dirname, cpu_feeds, cpu_fetchs1);
49+
TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1);
5150
LOG(INFO) << output1.lod();
5251
LOG(INFO) << output1.dims();
5352

@@ -57,8 +56,7 @@ TEST(inference, rnn_encoder_decoder) {
5756
cpu_fetchs2.push_back(&output2);
5857

5958
// Run inference on CUDA GPU
60-
TestInference<paddle::platform::CUDAPlace, float>(
61-
dirname, cpu_feeds, cpu_fetchs2);
59+
TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2);
6260
LOG(INFO) << output2.lod();
6361
LOG(INFO) << output2.dims();
6462

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
recognize_digits_*.inference.model
1+
*.inference.model

python/paddle/v2/fluid/tests/book/test_recognize_digits.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -174,8 +174,9 @@ def infer(use_cuda, save_dirname=None, param_filename=None):
174174

175175
# The input's dimension of conv should be 4-D or 5-D.
176176
# Use normalized image pixels as input data, which should be in the range [-1.0, 1.0].
177+
batch_size = 1
177178
tensor_img = numpy.random.uniform(-1.0, 1.0,
178-
[1, 1, 28, 28]).astype("float32")
179+
[batch_size, 1, 28, 28]).astype("float32")
179180

180181
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
181182
# and results will contain a list of data corresponding to fetch_targets.

0 commit comments

Comments
 (0)