|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
| 2 | +
|
| 3 | +Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | +you may not use this file except in compliance with the License. |
| 5 | +You may obtain a copy of the License at |
| 6 | +
|
| 7 | + http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | +
|
| 9 | +Unless required by applicable law or agreed to in writing, software |
| 10 | +distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | +See the License for the specific language governing permissions and |
| 13 | +limitations under the License. */ |
| 14 | + |
#include <time.h>

#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <vector>

#include "gflags/gflags.h"
#include "paddle/inference/inference.h"
| 19 | + |
// Command-line flags (gflags). All three are required: main() prints a
// usage message and exits with status 1 if any of them is left empty.
DEFINE_string(dirname, "", "Directory of the inference model.");
DEFINE_string(feed_var_names, "", "Names of feeding variables");
DEFINE_string(fetch_var_names, "", "Names of fetching variables");
| 23 | + |
| 24 | +int main(int argc, char** argv) { |
| 25 | + google::ParseCommandLineFlags(&argc, &argv, true); |
| 26 | + if (FLAGS_dirname.empty() || FLAGS_feed_var_names.empty() || |
| 27 | + FLAGS_fetch_var_names.empty()) { |
| 28 | + // Example: |
| 29 | + // ./example --dirname=recognize_digits_mlp.inference.model |
| 30 | + // --feed_var_names="x" |
| 31 | + // --fetch_var_names="fc_2.tmp_2" |
| 32 | + std::cout << "Usage: ./example --dirname=path/to/your/model " |
| 33 | + "--feed_var_names=x --fetch_var_names=y" |
| 34 | + << std::endl; |
| 35 | + exit(1); |
| 36 | + } |
| 37 | + |
| 38 | + std::cout << "FLAGS_dirname: " << FLAGS_dirname << std::endl; |
| 39 | + std::cout << "FLAGS_feed_var_names: " << FLAGS_feed_var_names << std::endl; |
| 40 | + std::cout << "FLAGS_fetch_var_names: " << FLAGS_fetch_var_names << std::endl; |
| 41 | + |
| 42 | + std::string dirname = FLAGS_dirname; |
| 43 | + std::vector<std::string> feed_var_names = {FLAGS_feed_var_names}; |
| 44 | + std::vector<std::string> fetch_var_names = {FLAGS_fetch_var_names}; |
| 45 | + |
| 46 | + paddle::InferenceEngine* engine = new paddle::InferenceEngine(); |
| 47 | + engine->LoadInferenceModel(dirname, feed_var_names, fetch_var_names); |
| 48 | + |
| 49 | + paddle::framework::LoDTensor input; |
| 50 | + srand(time(0)); |
| 51 | + float* input_ptr = |
| 52 | + input.mutable_data<float>({1, 784}, paddle::platform::CPUPlace()); |
| 53 | + for (int i = 0; i < 784; ++i) { |
| 54 | + input_ptr[i] = rand() / (static_cast<float>(RAND_MAX)); |
| 55 | + } |
| 56 | + |
| 57 | + std::vector<paddle::framework::LoDTensor> feeds; |
| 58 | + feeds.push_back(input); |
| 59 | + std::vector<paddle::framework::LoDTensor> fetchs; |
| 60 | + engine->Execute(feeds, fetchs); |
| 61 | + |
| 62 | + for (size_t i = 0; i < fetchs.size(); ++i) { |
| 63 | + auto dims_i = fetchs[i].dims(); |
| 64 | + std::cout << "dims_i:"; |
| 65 | + for (int j = 0; j < dims_i.size(); ++j) { |
| 66 | + std::cout << " " << dims_i[j]; |
| 67 | + } |
| 68 | + std::cout << std::endl; |
| 69 | + std::cout << "result:"; |
| 70 | + float* output_ptr = fetchs[i].data<float>(); |
| 71 | + for (int j = 0; j < paddle::framework::product(dims_i); ++j) { |
| 72 | + std::cout << " " << output_ptr[j]; |
| 73 | + } |
| 74 | + std::cout << std::endl; |
| 75 | + } |
| 76 | + |
| 77 | + delete engine; |
| 78 | + return 0; |
| 79 | +} |
0 commit comments