From fe240d96db123521bd7ad6a74c5555f8bf0bd18d Mon Sep 17 00:00:00 2001 From: Wojciech Uss Date: Tue, 2 Apr 2019 15:11:57 +0200 Subject: [PATCH 01/27] fix dataset reading and add support for full dataset (#16559) --- paddle/fluid/inference/api/helper.h | 21 +-- .../fluid/inference/tests/api/CMakeLists.txt | 14 +- .../tests/api/analyzer_bert_tester.cc | 2 +- .../tests/api/analyzer_dam_tester.cc | 8 +- ...alyzer_int8_image_classification_tester.cc | 53 ++++--- .../tests/api/analyzer_lac_tester.cc | 10 +- .../tests/api/analyzer_mm_dnn_tester.cc | 7 +- .../tests/api/analyzer_ner_tester.cc | 10 +- .../tests/api/analyzer_pyramid_dnn_tester.cc | 10 +- .../tests/api/analyzer_resnet50_tester.cc | 2 +- .../tests/api/analyzer_rnn1_tester.cc | 4 +- .../tests/api/analyzer_rnn2_tester.cc | 8 +- .../tests/api/analyzer_seq_conv1_tester.cc | 10 +- .../tests/api/analyzer_seq_pool1_tester.cc | 2 +- .../analyzer_text_classification_tester.cc | 7 +- .../tests/api/analyzer_transformer_tester.cc | 2 +- .../tests/api/analyzer_vis_tester.cc | 5 +- .../fluid/inference/tests/api/tester_helper.h | 147 +++++++++++------- .../inference/tests/api/trt_models_tester.cc | 2 +- 19 files changed, 193 insertions(+), 131 deletions(-) diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h index 258a79fa4e8841..c89dd41e0a6283 100644 --- a/paddle/fluid/inference/api/helper.h +++ b/paddle/fluid/inference/api/helper.h @@ -27,6 +27,7 @@ #include #include #include "paddle/fluid/inference/api/paddle_inference_api.h" +#include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/port.h" #include "paddle/fluid/string/printf.h" @@ -266,17 +267,17 @@ static std::string DescribeZeroCopyTensor(const ZeroCopyTensor &tensor) { } static void PrintTime(int batch_size, int repeat, int num_threads, int tid, - double latency, int epoch = 1) { - LOG(INFO) << "====== batch_size: " << batch_size << ", repeat: " << repeat - << ", threads: " << num_threads << ", thread id: " << tid - << ", latency: " << latency << "ms, fps: " << 1 / (latency / 1000.f) + double batch_latency, int epoch = 1) { + PADDLE_ENFORCE(batch_size > 0, "Non-positive batch size."); + double sample_latency = batch_latency / batch_size; + LOG(INFO) << "====== threads: " << num_threads << ", thread id: " << tid << " ======"; - if (epoch > 1) { - int samples = batch_size * epoch; - LOG(INFO) << "====== sample number: " << samples - << ", average latency of each sample: " << latency / samples - << "ms ======"; - } + LOG(INFO) << "====== batch_size: " << batch_size << ", iterations: " << epoch + << ", repetitions: " << repeat << " ======"; + LOG(INFO) << "====== batch latency: " << batch_latency + << "ms, number of samples: " << batch_size * epoch + << ", sample latency: " << sample_latency + << "ms, fps: " << 1000.f / sample_latency << " ======"; } static bool IsFileExists(const std::string &path) { diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt index 6a31185b097bc0..d3d278822bacee 100644 --- a/paddle/fluid/inference/tests/api/CMakeLists.txt +++ b/paddle/fluid/inference/tests/api/CMakeLists.txt @@ -26,7 +26,11 @@ endfunction() function(inference_analysis_api_int8_test target model_dir data_dir filename) inference_analysis_test(${target} SRCS ${filename} EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} benchmark - ARGS --infer_model=${model_dir}/model --infer_data=${data_dir}/data.bin --batch_size=100) + ARGS --infer_model=${model_dir}/model + --infer_data=${data_dir}/data.bin + 
--warmup_batch_size=100 + --batch_size=50 + --iterations=2) endfunction() function(inference_analysis_api_test_with_fake_data target install_dir filename model_name) @@ -146,22 +150,22 @@ inference_analysis_api_test_with_fake_data(test_analyzer_mobilenet_depthwise_con # int8 image classification tests if(WITH_MKLDNN) - set(INT8_DATA_DIR "${INFERENCE_DEMO_INSTALL_DIR}/int8") + set(INT8_DATA_DIR "${INFERENCE_DEMO_INSTALL_DIR}/int8v2") if (NOT EXISTS ${INT8_DATA_DIR}) - inference_download_and_uncompress(${INT8_DATA_DIR} "https://paddle-inference-dist.bj.bcebos.com/int8" "imagenet_val_100.tar.gz") + inference_download_and_uncompress(${INT8_DATA_DIR} "${INFERENCE_URL}/int8" "imagenet_val_100_tail.tar.gz") endif() #resnet50 int8 set(INT8_RESNET50_MODEL_DIR "${INT8_DATA_DIR}/resnet50") if (NOT EXISTS ${INT8_RESNET50_MODEL_DIR}) - inference_download_and_uncompress(${INT8_RESNET50_MODEL_DIR} "https://paddle-inference-dist.bj.bcebos.com/int8" "resnet50_int8_model.tar.gz" ) + inference_download_and_uncompress(${INT8_RESNET50_MODEL_DIR} "${INFERENCE_URL}/int8" "resnet50_int8_model.tar.gz" ) endif() inference_analysis_api_int8_test(test_analyzer_int8_resnet50 ${INT8_RESNET50_MODEL_DIR} ${INT8_DATA_DIR} analyzer_int8_image_classification_tester.cc SERIAL) #mobilenet int8 set(INT8_MOBILENET_MODEL_DIR "${INT8_DATA_DIR}/mobilenet") if (NOT EXISTS ${INT8_MOBILENET_MODEL_DIR}) - inference_download_and_uncompress(${INT8_MOBILENET_MODEL_DIR} "https://paddle-inference-dist.bj.bcebos.com/int8" "mobilenetv1_int8_model.tar.gz" ) + inference_download_and_uncompress(${INT8_MOBILENET_MODEL_DIR} "${INFERENCE_URL}/int8" "mobilenetv1_int8_model.tar.gz" ) endif() inference_analysis_api_int8_test(test_analyzer_int8_mobilenet ${INT8_MOBILENET_MODEL_DIR} ${INT8_DATA_DIR} analyzer_int8_image_classification_tester.cc SERIAL) endif() diff --git a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc index e73358d8827a40..9b2e74ec16eb3b 100644 --- a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc @@ -154,7 +154,7 @@ void profile(bool use_mkldnn = false) { config.EnableMKLDNN(); } - std::vector outputs; + std::vector> outputs; std::vector> inputs; LoadInputData(&inputs); TestPrediction(reinterpret_cast(&config), diff --git a/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc index 735e4fb5637884..e10d239a5d1b30 100644 --- a/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc @@ -197,7 +197,7 @@ void profile(bool use_mkldnn = false) { cfg.SetMKLDNNOp(op_list); } - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); @@ -206,9 +206,11 @@ void profile(bool use_mkldnn = false) { if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) { PADDLE_ENFORCE_GT(outputs.size(), 0); - size_t size = GetSize(outputs[0]); + auto output = outputs.back(); + PADDLE_ENFORCE_GT(output.size(), 0); + size_t size = GetSize(output[0]); PADDLE_ENFORCE_GT(size, 0); - float *result = static_cast(outputs[0].data.data()); + float *result = static_cast(output[0].data.data()); for (size_t i = 0; i < size; i++) { EXPECT_NEAR(result[i], result_data[i], 1e-3); } diff --git a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc index 
5a4f9a31a164a8..ece094717b8076 100644 --- a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc @@ -17,8 +17,6 @@ limitations under the License. */ #include "paddle/fluid/inference/api/paddle_analysis_config.h" #include "paddle/fluid/inference/tests/api/tester_helper.h" -DEFINE_int32(iterations, 0, "Number of iterations"); - namespace paddle { namespace inference { namespace analysis { @@ -30,8 +28,13 @@ void SetConfig(AnalysisConfig *cfg) { cfg->SwitchIrOptim(); cfg->SwitchSpecifyInputNames(false); cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads); - cfg->EnableMKLDNN(); + cfg->pass_builder()->SetPasses( + {"infer_clean_graph_pass", "mkldnn_placement_pass", + "depthwise_conv_mkldnn_pass", "conv_bn_fuse_pass", + "conv_eltwiseadd_bn_fuse_pass", "conv_bias_mkldnn_fuse_pass", + "conv_elementwise_add_mkldnn_fuse_pass", "conv_relu_mkldnn_fuse_pass", + "fc_fuse_pass", "is_test_pass"}); } template @@ -40,8 +43,8 @@ class TensorReader { TensorReader(std::ifstream &file, size_t beginning_offset, std::vector shape, std::string name) : file_(file), position(beginning_offset), shape_(shape), name_(name) { - numel = - std::accumulate(shape_.begin(), shape_.end(), 1, std::multiplies()); + numel = std::accumulate(shape_.begin(), shape_.end(), size_t{1}, + std::multiplies()); } PaddleTensor NextBatch() { @@ -71,10 +74,14 @@ class TensorReader { }; std::shared_ptr> GetWarmupData( - const std::vector> &test_data, int num_images) { + const std::vector> &test_data, + int num_images = FLAGS_warmup_batch_size) { int test_data_batch_size = test_data[0][0].shape[0]; - CHECK_LE(static_cast(num_images), - test_data.size() * test_data_batch_size); + auto iterations_max = test_data.size(); + PADDLE_ENFORCE( + static_cast(num_images) <= iterations_max * test_data_batch_size, + "The requested quantization warmup data size " + + std::to_string(num_images) + " is bigger than all test data size."); PaddleTensor images; images.name = "input"; @@ -120,20 +127,17 @@ void SetInput(std::vector> *inputs, std::vector image_batch_shape{batch_size, 3, 224, 224}; std::vector label_batch_shape{batch_size, 1}; + auto images_offset_in_file = static_cast(file.tellg()); auto labels_offset_in_file = - static_cast(file.tellg()) + - sizeof(float) * total_images * - std::accumulate(image_batch_shape.begin() + 1, - image_batch_shape.end(), 1, std::multiplies()); + images_offset_in_file + sizeof(float) * total_images * 3 * 224 * 224; - TensorReader image_reader(file, 0, image_batch_shape, "input"); + TensorReader image_reader(file, images_offset_in_file, + image_batch_shape, "input"); TensorReader label_reader(file, labels_offset_in_file, label_batch_shape, "label"); - auto iterations = total_images / batch_size; - if (FLAGS_iterations > 0 && FLAGS_iterations < iterations) - iterations = FLAGS_iterations; - for (auto i = 0; i < iterations; i++) { + auto iterations_max = total_images / batch_size; + for (auto i = 0; i < iterations_max; i++) { auto images = image_reader.NextBatch(); auto labels = label_reader.NextBatch(); inputs->emplace_back( @@ -148,20 +152,21 @@ TEST(Analyzer_int8_resnet50, quantization) { AnalysisConfig q_cfg; SetConfig(&q_cfg); + // read data from file and prepare batches with test data std::vector> input_slots_all; - SetInput(&input_slots_all, 100); + SetInput(&input_slots_all); + // prepare warmup batch from input data read earlier + // warmup batch size can be different than batch size std::shared_ptr> 
warmup_data = - GetWarmupData(input_slots_all, 100); + GetWarmupData(input_slots_all); + // configure quantizer q_cfg.EnableMkldnnQuantizer(); q_cfg.mkldnn_quantizer_config()->SetWarmupData(warmup_data); - q_cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(100); + q_cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(FLAGS_warmup_batch_size); - CompareQuantizedAndAnalysis( - reinterpret_cast(&cfg), - reinterpret_cast(&q_cfg), - input_slots_all); + CompareQuantizedAndAnalysis(&cfg, &q_cfg, input_slots_all); } } // namespace analysis diff --git a/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc b/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc index 347672eaae314a..142905dcd8d996 100644 --- a/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc @@ -124,7 +124,7 @@ void SetInput(std::vector> *inputs) { TEST(Analyzer_LAC, profile) { AnalysisConfig cfg; SetConfig(&cfg); - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); @@ -137,11 +137,13 @@ TEST(Analyzer_LAC, profile) { 24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25, 25, 25, 25, 25, 44, 24, 25, 25, 25, 36, 42, 43, 44, 14, 15, 44, 14, 15, 44, 14, 15, 44, 38, 39, 14, 15, 44, 22, 23, 23, 23, 23, 23, 23, 23}; - PADDLE_ENFORCE_EQ(outputs.size(), 1UL); - size_t size = GetSize(outputs[0]); + PADDLE_ENFORCE_GT(outputs.size(), 0); + auto output = outputs.back(); + PADDLE_ENFORCE_EQ(output.size(), 1UL); + size_t size = GetSize(output[0]); size_t batch1_size = sizeof(lac_ref_data) / sizeof(int64_t); PADDLE_ENFORCE_GE(size, batch1_size); - int64_t *pdata = static_cast(outputs[0].data.data()); + int64_t *pdata = static_cast(output[0].data.data()); for (size_t i = 0; i < batch1_size; ++i) { EXPECT_EQ(pdata[i], lac_ref_data[i]); } diff --git a/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc b/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc index 089f655c180d78..2eb347a44b394a 100644 --- a/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc @@ -96,7 +96,7 @@ void SetInput(std::vector> *inputs) { void profile(bool use_mkldnn = false) { AnalysisConfig cfg; SetConfig(&cfg); - std::vector outputs; + std::vector> outputs; if (use_mkldnn) { cfg.EnableMKLDNN(); @@ -108,8 +108,9 @@ void profile(bool use_mkldnn = false) { input_slots_all, &outputs, FLAGS_num_threads); if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) { - PADDLE_ENFORCE_EQ(outputs.size(), 2UL); - for (auto &output : outputs) { + PADDLE_ENFORCE_GT(outputs.size(), 0); + PADDLE_ENFORCE_EQ(outputs.back().size(), 2UL); + for (auto &output : outputs.back()) { size_t size = GetSize(output); PADDLE_ENFORCE_GT(size, 0); float *result = static_cast(output.data.data()); diff --git a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc index a70aa7a6ac4112..36e07d5f55600d 100644 --- a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc @@ -106,7 +106,7 @@ void SetInput(std::vector> *inputs) { void profile(bool memory_load = false) { AnalysisConfig cfg; SetConfig(&cfg, memory_load); - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); @@ -117,10 +117,12 @@ void profile(bool memory_load = false) { // the first inference result const int chinese_ner_result_data[] = {30, 45, 41, 48, 17, 26, 48, 39, 38, 16, 25}; - 
PADDLE_ENFORCE_EQ(outputs.size(), 1UL); - size_t size = GetSize(outputs[0]); + PADDLE_ENFORCE_GT(outputs.size(), 0); + auto output = outputs.back(); + PADDLE_ENFORCE_EQ(output.size(), 1UL); + size_t size = GetSize(output[0]); PADDLE_ENFORCE_GT(size, 0); - int64_t *result = static_cast(outputs[0].data.data()); + int64_t *result = static_cast(output[0].data.data()); for (size_t i = 0; i < std::min(11UL, size); i++) { EXPECT_EQ(result[i], chinese_ner_result_data[i]); } diff --git a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc index 5157bd280d0f3e..9443b08063b8f6 100644 --- a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc @@ -127,7 +127,7 @@ void SetInput(std::vector> *inputs) { TEST(Analyzer_Pyramid_DNN, profile) { AnalysisConfig cfg; SetConfig(&cfg); - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); @@ -135,10 +135,12 @@ TEST(Analyzer_Pyramid_DNN, profile) { input_slots_all, &outputs, FLAGS_num_threads); if (FLAGS_num_threads == 1 && !FLAGS_test_all_data && !FLAGS_zero_copy) { - PADDLE_ENFORCE_EQ(outputs.size(), 1UL); - size_t size = GetSize(outputs[0]); + PADDLE_ENFORCE_GT(outputs.size(), 0); + auto output = outputs.back(); + PADDLE_ENFORCE_EQ(output.size(), 1UL); + size_t size = GetSize(output[0]); PADDLE_ENFORCE_GT(size, 0); - float *result = static_cast(outputs[0].data.data()); + float *result = static_cast(output[0].data.data()); // output is probability, which is in (0, 1). for (size_t i = 0; i < size; i++) { EXPECT_GT(result[i], 0); diff --git a/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc b/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc index 629981d565f1b6..d4330e6cddf881 100644 --- a/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc @@ -40,7 +40,7 @@ void profile(bool use_mkldnn = false) { if (use_mkldnn) { cfg.EnableMKLDNN(); } - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); diff --git a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc index dcf4b38ce8a923..54fd3a4a4caba5 100644 --- a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc @@ -229,7 +229,7 @@ TEST(Analyzer_rnn1, profile) { SetConfig(&cfg); cfg.DisableGpu(); cfg.SwitchIrDebug(); - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); @@ -280,7 +280,7 @@ TEST(Analyzer_rnn1, compare_determine) { TEST(Analyzer_rnn1, multi_thread) { AnalysisConfig cfg; SetConfig(&cfg); - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); diff --git a/paddle/fluid/inference/tests/api/analyzer_rnn2_tester.cc b/paddle/fluid/inference/tests/api/analyzer_rnn2_tester.cc index 007f9f0b66a7b2..9ccbf58cbd2bba 100644 --- a/paddle/fluid/inference/tests/api/analyzer_rnn2_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_rnn2_tester.cc @@ -126,7 +126,7 @@ void SetInput(std::vector> *inputs) { TEST(Analyzer_rnn2, profile) { AnalysisConfig cfg; SetConfig(&cfg); - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); @@ -136,9 +136,11 @@ TEST(Analyzer_rnn2, profile) { if 
(FLAGS_num_threads == 1 && !FLAGS_test_all_data) { // the first inference result PADDLE_ENFORCE_GT(outputs.size(), 0); - size_t size = GetSize(outputs[0]); + auto output = outputs.back(); + PADDLE_ENFORCE_GT(output.size(), 0); + size_t size = GetSize(output[0]); PADDLE_ENFORCE_GT(size, 0); - float *result = static_cast(outputs[0].data.data()); + float *result = static_cast(output[0].data.data()); for (size_t i = 0; i < size; i++) { EXPECT_NEAR(result[i], result_data[i], 1e-3); } diff --git a/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc index 47c1d7375843e4..9f23b9f037bcae 100644 --- a/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc @@ -110,7 +110,7 @@ void SetInput(std::vector> *inputs) { TEST(Analyzer_seq_conv1, profile) { AnalysisConfig cfg; SetConfig(&cfg); - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); @@ -119,10 +119,12 @@ TEST(Analyzer_seq_conv1, profile) { if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) { // the first inference result - PADDLE_ENFORCE_EQ(outputs.size(), 1UL); - size_t size = GetSize(outputs[0]); + PADDLE_ENFORCE_GT(outputs.size(), 0); + auto output = outputs.back(); + PADDLE_ENFORCE_EQ(output.size(), 1UL); + size_t size = GetSize(output[0]); PADDLE_ENFORCE_GT(size, 0); - float *result = static_cast(outputs[0].data.data()); + float *result = static_cast(output[0].data.data()); // output is probability, which is in (0, 1). for (size_t i = 0; i < size; i++) { EXPECT_GT(result[i], 0); diff --git a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc index 19fa5528da4d11..d6f7f468a6c83b 100644 --- a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc @@ -156,7 +156,7 @@ void profile(bool use_mkldnn = false) { AnalysisConfig cfg; SetConfig(&cfg, use_mkldnn); - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); TestPrediction(reinterpret_cast(&cfg), diff --git a/paddle/fluid/inference/tests/api/analyzer_text_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_text_classification_tester.cc index 2003be82019333..54492dbc238bba 100644 --- a/paddle/fluid/inference/tests/api/analyzer_text_classification_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_text_classification_tester.cc @@ -70,7 +70,7 @@ TEST(Analyzer_Text_Classification, profile) { AnalysisConfig cfg; SetConfig(&cfg); cfg.SwitchIrDebug(); - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); @@ -79,8 +79,9 @@ TEST(Analyzer_Text_Classification, profile) { if (FLAGS_num_threads == 1) { // Get output - LOG(INFO) << "get outputs " << outputs.size(); - for (auto &output : outputs) { + PADDLE_ENFORCE_GT(outputs.size(), 0); + LOG(INFO) << "get outputs " << outputs.back().size(); + for (auto &output : outputs.back()) { LOG(INFO) << "output.shape: " << to_string(output.shape); // no lod ? 
CHECK_EQ(output.lod.size(), 0UL); diff --git a/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc b/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc index a925da312cde30..bd4f1b61973fb0 100644 --- a/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc @@ -186,7 +186,7 @@ void SetInput(std::vector> *inputs) { void profile(bool use_mkldnn = false) { AnalysisConfig cfg; SetConfig(&cfg); - std::vector outputs; + std::vector> outputs; if (use_mkldnn) { cfg.EnableMKLDNN(); } diff --git a/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc b/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc index ca04c1365cbbff..fb47048cd0ccc8 100644 --- a/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc @@ -87,7 +87,7 @@ void profile(bool use_mkldnn = false) { cfg.EnableMKLDNN(); } // cfg.pass_builder()->TurnOnDebug(); - std::vector outputs; + std::vector> outputs; std::vector> input_slots_all; SetInput(&input_slots_all); @@ -100,7 +100,8 @@ void profile(bool use_mkldnn = false) { auto refer = ProcessALine(line); file.close(); - auto &output = outputs.front(); + PADDLE_ENFORCE_GT(outputs.size(), 0); + auto &output = outputs.back().front(); size_t numel = output.data.length() / PaddleDtypeSize(output.dtype); CHECK_EQ(numel, refer.data.size()); for (size_t i = 0; i < numel; ++i) { diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h index 33f1d025485881..9a0dcc722cf009 100644 --- a/paddle/fluid/inference/tests/api/tester_helper.h +++ b/paddle/fluid/inference/tests/api/tester_helper.h @@ -41,7 +41,10 @@ DEFINE_string(model_name, "", "model name"); DEFINE_string(infer_model, "", "model path"); DEFINE_string(infer_data, "", "data file"); DEFINE_string(refer_result, "", "reference result for comparison"); -DEFINE_int32(batch_size, 1, "batch size."); +DEFINE_int32(batch_size, 1, "batch size"); +DEFINE_int32(warmup_batch_size, 100, "batch size for quantization warmup"); +// setting iterations to 0 means processing the whole dataset +DEFINE_int32(iterations, 0, "number of batches to process"); DEFINE_int32(repeat, 1, "Running the inference program repeat times."); DEFINE_bool(test_all_data, false, "Test the all dataset in data file."); DEFINE_int32(num_threads, 1, "Running the inference program in multi-threads."); @@ -239,7 +242,7 @@ void SetFakeImageInput(std::vector> *inputs, } input.shape = shape; input.dtype = PaddleDType::FLOAT32; - size_t len = std::accumulate(shape.begin(), shape.end(), 1, + size_t len = std::accumulate(shape.begin(), shape.end(), size_t{1}, [](int a, int b) { return a * b; }); input.data.Resize(len * sizeof(float)); input.lod.assign({{0, static_cast(FLAGS_batch_size)}}); @@ -286,17 +289,18 @@ void ConvertPaddleTensorToZeroCopyTensor( void PredictionWarmUp(PaddlePredictor *predictor, const std::vector> &inputs, - std::vector *outputs, int num_threads, - int tid) { + std::vector> *outputs, + int num_threads, int tid) { int batch_size = FLAGS_batch_size; LOG(INFO) << "Running thread " << tid << ", warm up run..."; if (FLAGS_zero_copy) { ConvertPaddleTensorToZeroCopyTensor(predictor, inputs[0]); } + outputs->resize(1); Timer warmup_timer; warmup_timer.tic(); if (!FLAGS_zero_copy) { - predictor->Run(inputs[0], outputs, batch_size); + predictor->Run(inputs[0], &(*outputs)[0], batch_size); } else { predictor->ZeroCopyRun(); } @@ -308,11 +312,16 @@ void 
PredictionWarmUp(PaddlePredictor *predictor, void PredictionRun(PaddlePredictor *predictor, const std::vector> &inputs, - std::vector *outputs, int num_threads, - int tid) { - int batch_size = FLAGS_batch_size; + std::vector> *outputs, + int num_threads, int tid) { int num_times = FLAGS_repeat; - LOG(INFO) << "Thread " << tid << " run " << num_times << " times..."; + int iterations = inputs.size(); // process the whole dataset ... + if (FLAGS_iterations > 0 && FLAGS_iterations < inputs.size()) + iterations = + FLAGS_iterations; // ... unless the number of iterations is set + outputs->resize(iterations); + LOG(INFO) << "Thread " << tid << ", number of threads " << num_threads + << ", run " << num_times << " times..."; Timer run_timer; double elapsed_time = 0; #ifdef WITH_GPERFTOOLS @@ -320,14 +329,14 @@ void PredictionRun(PaddlePredictor *predictor, #endif if (!FLAGS_zero_copy) { run_timer.tic(); - for (size_t i = 0; i < inputs.size(); i++) { + for (size_t i = 0; i < iterations; i++) { for (int j = 0; j < num_times; j++) { - predictor->Run(inputs[i], outputs, batch_size); + predictor->Run(inputs[i], &(*outputs)[i], FLAGS_batch_size); } } elapsed_time = run_timer.toc(); } else { - for (size_t i = 0; i < inputs.size(); i++) { + for (size_t i = 0; i < iterations; i++) { ConvertPaddleTensorToZeroCopyTensor(predictor, inputs[i]); run_timer.tic(); for (int j = 0; j < num_times; j++) { @@ -340,13 +349,14 @@ void PredictionRun(PaddlePredictor *predictor, ProfilerStop(); #endif - PrintTime(batch_size, num_times, num_threads, tid, elapsed_time / num_times, - inputs.size()); + auto batch_latency = elapsed_time / (iterations * num_times); + PrintTime(FLAGS_batch_size, num_times, num_threads, tid, batch_latency, + iterations); if (FLAGS_record_benchmark) { Benchmark benchmark; benchmark.SetName(FLAGS_model_name); - benchmark.SetBatchSize(batch_size); - benchmark.SetLatency(elapsed_time / num_times); + benchmark.SetBatchSize(FLAGS_batch_size); + benchmark.SetLatency(batch_latency); benchmark.PersistToFile("benchmark_record.txt"); } } @@ -354,16 +364,17 @@ void PredictionRun(PaddlePredictor *predictor, void TestOneThreadPrediction( const PaddlePredictor::Config *config, const std::vector> &inputs, - std::vector *outputs, bool use_analysis = true) { + std::vector> *outputs, bool use_analysis = true) { auto predictor = CreateTestPredictor(config, use_analysis); - PredictionWarmUp(predictor.get(), inputs, outputs, 1, 0); - PredictionRun(predictor.get(), inputs, outputs, 1, 0); + PredictionWarmUp(predictor.get(), inputs, outputs, FLAGS_paddle_num_threads, + 0); + PredictionRun(predictor.get(), inputs, outputs, FLAGS_paddle_num_threads, 0); } void TestMultiThreadPrediction( const PaddlePredictor::Config *config, const std::vector> &inputs, - std::vector *outputs, int num_threads, + std::vector> *outputs, int num_threads, bool use_analysis = true) { std::vector threads; std::vector> predictors; @@ -376,7 +387,7 @@ void TestMultiThreadPrediction( threads.emplace_back([&, tid]() { // Each thread should have local inputs and outputs. // The inputs of each thread are all the same. 
- std::vector outputs_tid; + std::vector> outputs_tid; auto &predictor = predictors[tid]; #ifdef PADDLE_WITH_MKLDNN if (use_analysis) { @@ -384,8 +395,8 @@ void TestMultiThreadPrediction( ->SetMkldnnThreadID(static_cast(tid) + 1); } #endif - PredictionWarmUp(predictor.get(), inputs, outputs, num_threads, tid); - PredictionRun(predictor.get(), inputs, outputs, num_threads, tid); + PredictionWarmUp(predictor.get(), inputs, &outputs_tid, num_threads, tid); + PredictionRun(predictor.get(), inputs, &outputs_tid, num_threads, tid); }); } for (int i = 0; i < num_threads; ++i) { @@ -395,8 +406,8 @@ void TestMultiThreadPrediction( void TestPrediction(const PaddlePredictor::Config *config, const std::vector> &inputs, - std::vector *outputs, int num_threads, - bool use_analysis = FLAGS_use_analysis) { + std::vector> *outputs, + int num_threads, bool use_analysis = FLAGS_use_analysis) { PrintConfig(config, use_analysis); if (num_threads == 1) { TestOneThreadPrediction(config, inputs, outputs, use_analysis); @@ -406,30 +417,41 @@ void TestPrediction(const PaddlePredictor::Config *config, } } -void CompareTopAccuracy(const std::vector &output_slots1, - const std::vector &output_slots2) { - // first output: avg_cost - if (output_slots1.size() == 0 || output_slots2.size() == 0) +void CompareTopAccuracy( + const std::vector> &output_slots_quant, + const std::vector> &output_slots_ref) { + if (output_slots_quant.size() == 0 || output_slots_ref.size() == 0) throw std::invalid_argument( "CompareTopAccuracy: output_slots vector is empty."); - PADDLE_ENFORCE(output_slots1.size() >= 2UL); - PADDLE_ENFORCE(output_slots2.size() >= 2UL); - // second output: acc_top1 - if (output_slots1[1].lod.size() > 0 || output_slots2[1].lod.size() > 0) - throw std::invalid_argument( - "CompareTopAccuracy: top1 accuracy output has nonempty LoD."); - if (output_slots1[1].dtype != paddle::PaddleDType::FLOAT32 || - output_slots2[1].dtype != paddle::PaddleDType::FLOAT32) - throw std::invalid_argument( - "CompareTopAccuracy: top1 accuracy output is of a wrong type."); - float *top1_quantized = static_cast(output_slots1[1].data.data()); - float *top1_reference = static_cast(output_slots2[1].data.data()); - LOG(INFO) << "top1 INT8 accuracy: " << *top1_quantized; - LOG(INFO) << "top1 FP32 accuracy: " << *top1_reference; + float total_accs1_quant{0}; + float total_accs1_ref{0}; + for (size_t i = 0; i < output_slots_quant.size(); ++i) { + PADDLE_ENFORCE(output_slots_quant[i].size() >= 2UL); + PADDLE_ENFORCE(output_slots_ref[i].size() >= 2UL); + // second output: acc_top1 + if (output_slots_quant[i][1].lod.size() > 0 || + output_slots_ref[i][1].lod.size() > 0) + throw std::invalid_argument( + "CompareTopAccuracy: top1 accuracy output has nonempty LoD."); + if (output_slots_quant[i][1].dtype != paddle::PaddleDType::FLOAT32 || + output_slots_ref[i][1].dtype != paddle::PaddleDType::FLOAT32) + throw std::invalid_argument( + "CompareTopAccuracy: top1 accuracy output is of a wrong type."); + total_accs1_quant += + *static_cast(output_slots_quant[i][1].data.data()); + total_accs1_ref += + *static_cast(output_slots_ref[i][1].data.data()); + } + float avg_acc1_quant = total_accs1_quant / output_slots_quant.size(); + float avg_acc1_ref = total_accs1_ref / output_slots_ref.size(); + + LOG(INFO) << "Avg top1 INT8 accuracy: " << std::fixed << std::setw(6) + << std::setprecision(4) << avg_acc1_quant; + LOG(INFO) << "Avg top1 FP32 accuracy: " << std::fixed << std::setw(6) + << std::setprecision(4) << avg_acc1_ref; LOG(INFO) << "Accepted accuracy drop 
threshold: " << FLAGS_quantized_accuracy; - CHECK_LE(std::abs(*top1_quantized - *top1_reference), - FLAGS_quantized_accuracy); + CHECK_LE(std::abs(avg_acc1_quant - avg_acc1_ref), FLAGS_quantized_accuracy); } void CompareDeterministic( @@ -455,20 +477,35 @@ void CompareNativeAndAnalysis( const PaddlePredictor::Config *config, const std::vector> &inputs) { PrintConfig(config, true); - std::vector native_outputs, analysis_outputs; + std::vector> native_outputs, analysis_outputs; TestOneThreadPrediction(config, inputs, &native_outputs, false); TestOneThreadPrediction(config, inputs, &analysis_outputs, true); - CompareResult(analysis_outputs, native_outputs); + PADDLE_ENFORCE(native_outputs.size() > 0, "Native output is empty."); + PADDLE_ENFORCE(analysis_outputs.size() > 0, "Analysis output is empty."); + CompareResult(analysis_outputs.back(), native_outputs.back()); } void CompareQuantizedAndAnalysis( - const PaddlePredictor::Config *config, - const PaddlePredictor::Config *qconfig, + const AnalysisConfig *config, const AnalysisConfig *qconfig, const std::vector> &inputs) { - PrintConfig(config, true); - std::vector analysis_outputs, quantized_outputs; - TestOneThreadPrediction(config, inputs, &analysis_outputs, true); - TestOneThreadPrediction(qconfig, inputs, &quantized_outputs, true); + PADDLE_ENFORCE_EQ(inputs[0][0].shape[0], FLAGS_batch_size, + "Input data has to be packed batch by batch."); + LOG(INFO) << "FP32 & INT8 prediction run: batch_size " << FLAGS_batch_size + << ", warmup batch size " << FLAGS_warmup_batch_size << "."; + + LOG(INFO) << "--- FP32 prediction start ---"; + auto *cfg = reinterpret_cast(config); + PrintConfig(cfg, true); + std::vector> analysis_outputs; + TestOneThreadPrediction(cfg, inputs, &analysis_outputs, true); + + LOG(INFO) << "--- INT8 prediction start ---"; + auto *qcfg = reinterpret_cast(qconfig); + PrintConfig(qcfg, true); + std::vector> quantized_outputs; + TestOneThreadPrediction(qcfg, inputs, &quantized_outputs, true); + + LOG(INFO) << "--- comparing outputs --- "; CompareTopAccuracy(quantized_outputs, analysis_outputs); } @@ -578,9 +615,9 @@ static bool CompareTensorData(const framework::LoDTensor &a, const framework::LoDTensor &b) { auto a_shape = framework::vectorize(a.dims()); auto b_shape = framework::vectorize(b.dims()); - size_t a_size = std::accumulate(a_shape.begin(), a_shape.end(), 1, + size_t a_size = std::accumulate(a_shape.begin(), a_shape.end(), size_t{1}, [](int a, int b) { return a * b; }); - size_t b_size = std::accumulate(b_shape.begin(), b_shape.end(), 1, + size_t b_size = std::accumulate(b_shape.begin(), b_shape.end(), size_t{1}, [](int a, int b) { return a * b; }); if (a_size != b_size) { LOG(ERROR) << string::Sprintf("tensor data size not match, %d != %d", diff --git a/paddle/fluid/inference/tests/api/trt_models_tester.cc b/paddle/fluid/inference/tests/api/trt_models_tester.cc index cb668a4174134b..98ce225a0476b3 100644 --- a/paddle/fluid/inference/tests/api/trt_models_tester.cc +++ b/paddle/fluid/inference/tests/api/trt_models_tester.cc @@ -74,7 +74,7 @@ void profile(std::string model_dir, bool use_analysis, bool use_tensorrt) { SetFakeImageInput(&inputs_all, model_dir, false, "__model__", ""); } - std::vector outputs; + std::vector> outputs; if (use_analysis || use_tensorrt) { AnalysisConfig config; config.EnableUseGpu(100, 0); From 627ca4a3dd2e5773322cbdc40383dd0d5f5f9a10 Mon Sep 17 00:00:00 2001 From: Wojciech Uss Date: Tue, 2 Apr 2019 16:44:53 +0200 Subject: [PATCH 02/27] fix repeating passes (#16606) --- 
paddle/fluid/inference/api/analysis_config.cc | 13 ++--- .../inference/api/paddle_pass_builder.cc | 50 ++++++++++++++++--- .../fluid/inference/api/paddle_pass_builder.h | 41 +++------------ 3 files changed, 53 insertions(+), 51 deletions(-) diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index aee94e12340597..85910c10e7409a 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -141,7 +141,6 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) { void AnalysisConfig::EnableMKLDNN() { #ifdef PADDLE_WITH_MKLDNN - pass_builder()->EnableMKLDNN(); use_mkldnn_ = true; #else LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN"; @@ -234,16 +233,13 @@ void AnalysisConfig::Update() { } if (use_mkldnn_) { +#ifdef PADDLE_WITH_MKLDNN if (!enable_ir_optim_) { LOG(ERROR) << "EnableMKLDNN() only works when IR optimization is enabled."; + } else { + pass_builder()->EnableMKLDNN(); } -#ifdef PADDLE_WITH_MKLDNN - pass_builder()->EnableMKLDNN(); - use_mkldnn_ = true; -#else - LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN"; - use_mkldnn_ = false; #endif } @@ -255,9 +251,6 @@ void AnalysisConfig::Update() { } #ifdef PADDLE_WITH_MKLDNN pass_builder()->EnableMkldnnQuantizer(); -#else - LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer"; - use_mkldnn_quantizer_ = false; #endif } diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc index 1d1d39e44096b9..87e02a02caebd9 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.cc +++ b/paddle/fluid/inference/api/paddle_pass_builder.cc @@ -64,10 +64,12 @@ void PaddlePassBuilder::DeletePass(size_t idx) { passes_.erase(std::begin(passes_) + idx); } -void GpuPassStrategy::EnableMKLDNN() { - LOG(ERROR) << "GPU not support MKLDNN yet"; +void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) { + analysis_passes_.push_back(pass); } +void PaddlePassBuilder::ClearPasses() { passes_.clear(); } + // The following passes works for Anakin sub-graph engine. const std::vector kAnakinSubgraphPasses({ "infer_clean_graph_pass", // @@ -102,12 +104,12 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) { use_gpu_ = true; } -void GpuPassStrategy::EnableMkldnnQuantizer() { - LOG(ERROR) << "GPU not support MKL-DNN quantization"; +void GpuPassStrategy::EnableMKLDNN() { + LOG(ERROR) << "GPU not support MKLDNN yet"; } -void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) { - analysis_passes_.push_back(pass); +void GpuPassStrategy::EnableMkldnnQuantizer() { + LOG(ERROR) << "GPU not support MKL-DNN quantization"; } CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) { @@ -135,5 +137,39 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) { }); use_gpu_ = false; } -void PaddlePassBuilder::ClearPasses() { passes_.clear(); } + +void CpuPassStrategy::EnableMKLDNN() { +// TODO(Superjomn) Consider the way to mix CPU with GPU. 
+#ifdef PADDLE_WITH_MKLDNN + if (!use_mkldnn_) { + passes_.insert(passes_.begin(), "mkldnn_placement_pass"); + + for (auto &pass : std::vector( + {"depthwise_conv_mkldnn_pass", // + "conv_bn_fuse_pass", // Execute BN passes again to + "conv_eltwiseadd_bn_fuse_pass", // preserve correct pass order + "conv_bias_mkldnn_fuse_pass", // + "conv3d_bias_mkldnn_fuse_pass", // + "conv_elementwise_add_mkldnn_fuse_pass", + "conv_relu_mkldnn_fuse_pass"})) { + passes_.push_back(pass); + } + } + use_mkldnn_ = true; +#else + use_mkldnn_ = false; +#endif +} + +void CpuPassStrategy::EnableMkldnnQuantizer() { +#ifdef PADDLE_WITH_MKLDNN + if (!use_mkldnn_quantizer_) { + passes_.push_back("cpu_quantize_placement_pass"); + } + use_mkldnn_quantizer_ = true; +#else + use_mkldnn_quantizer_ = false; +#endif +} + } // namespace paddle diff --git a/paddle/fluid/inference/api/paddle_pass_builder.h b/paddle/fluid/inference/api/paddle_pass_builder.h index 48da8c156f4264..09ef195d5e66af 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.h +++ b/paddle/fluid/inference/api/paddle_pass_builder.h @@ -109,43 +109,16 @@ class CpuPassStrategy : public PassStrategy { CpuPassStrategy(); explicit CpuPassStrategy(const CpuPassStrategy &other) - : PassStrategy(other.AllPasses()) {} + : PassStrategy(other.AllPasses()) { + use_gpu_ = other.use_gpu_; + use_mkldnn_ = other.use_mkldnn_; + use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_; + } virtual ~CpuPassStrategy() = default; - void EnableMKLDNN() override { -// TODO(Superjomn) Consider the way to mix CPU with GPU. -#ifdef PADDLE_WITH_MKLDNN - if (!use_mkldnn_) { - passes_.insert(passes_.begin(), "mkldnn_placement_pass"); - - for (auto &pass : std::vector( - {"depthwise_conv_mkldnn_pass", // - "conv_bn_fuse_pass", // Execute BN passes again to - "conv_eltwiseadd_bn_fuse_pass", // preserve correct pass order - "conv_bias_mkldnn_fuse_pass", // - "conv3d_bias_mkldnn_fuse_pass", // - "conv_relu_mkldnn_fuse_pass", // - "conv_elementwise_add_mkldnn_fuse_pass"})) { - passes_.push_back(pass); - } - } - use_mkldnn_ = true; -#else - use_mkldnn_ = false; -#endif - } - - void EnableMkldnnQuantizer() override { -#ifdef PADDLE_WITH_MKLDNN - if (!use_mkldnn_quantizer_) { - passes_.push_back("cpu_quantize_placement_pass"); - } - use_mkldnn_quantizer_ = true; -#else - use_mkldnn_quantizer_ = false; -#endif - } + void EnableMKLDNN() override; + void EnableMkldnnQuantizer() override; protected: bool use_mkldnn_quantizer_{false}; From bbb654e2dfb0b087ff8b20db654eda99fd00802c Mon Sep 17 00:00:00 2001 From: lidanqing Date: Tue, 2 Apr 2019 05:44:23 +0200 Subject: [PATCH 03/27] fix preprocess script with processbar, integrity check and logs (#16608) * fix preprocess script with processbar, integrity check and logs * delete unnecessary empty lines, change function name test=release/1.4 --- .../api/full_ILSVRC2012_val_preprocess.py | 204 +++++++++++------- 1 file changed, 132 insertions(+), 72 deletions(-) diff --git a/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py b/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py index 4d968c83d9c9bf..842865933f2b47 100644 --- a/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py +++ b/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py @@ -1,5 +1,4 @@ # copyright (c) 2019 paddlepaddle authors. all rights reserved. -# # licensed under the apache license, version 2.0 (the "license"); # you may not use this file except in compliance with the license. 
# you may obtain a copy of the license at @@ -11,6 +10,7 @@ # without warranties or conditions of any kind, either express or implied. # see the license for the specific language governing permissions and # limitations under the license. +import hashlib import unittest import os import numpy as np @@ -21,16 +21,20 @@ import contextlib from PIL import Image, ImageEnhance import math -from paddle.dataset.common import download +from paddle.dataset.common import download, md5file +import tarfile random.seed(0) np.random.seed(0) DATA_DIM = 224 - SIZE_FLOAT32 = 4 SIZE_INT64 = 8 - +FULL_SIZE_BYTES = 30106000008 +FULL_IMAGES = 50000 +DATA_DIR_NAME = 'ILSVRC2012' +IMG_DIR_NAME = 'var' +TARGET_HASH = '8dc592db6dcc8d521e4d5ba9da5ca7d2' img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1)) img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1)) @@ -70,19 +74,9 @@ def process_image(img_path, mode, color_jitter, rotate): return img -def download_unzip(): - int8_download = 'int8/download' - - target_name = 'data' - - cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' + - int8_download) - - target_folder = os.path.join(cache_folder, target_name) - +def download_concat(cache_folder, zip_path): data_urls = [] data_md5s = [] - data_urls.append( 'https://paddle-inference-dist.bj.bcebos.com/int8/ILSVRC2012_img_val.tar.gz.partaa' ) @@ -91,72 +85,138 @@ def download_unzip(): 'https://paddle-inference-dist.bj.bcebos.com/int8/ILSVRC2012_img_val.tar.gz.partab' ) data_md5s.append('1e9f15f64e015e58d6f9ec3210ed18b5') - file_names = [] - + print("Downloading full ImageNet Validation dataset ...") for i in range(0, len(data_urls)): download(data_urls[i], cache_folder, data_md5s[i]) - file_names.append(data_urls[i].split('/')[-1]) - - zip_path = os.path.join(cache_folder, 'full_imagenet_val.tar.gz') - + file_name = os.path.join(cache_folder, data_urls[i].split('/')[-1]) + file_names.append(file_name) + print("Downloaded part {0}\n".format(file_name)) if not os.path.exists(zip_path): - cat_command = 'cat' - for file_name in file_names: - cat_command += ' ' + os.path.join(cache_folder, file_name) - cat_command += ' > ' + zip_path - os.system(cat_command) - print('Data is downloaded at {0}\n').format(zip_path) - - if not os.path.exists(target_folder): - cmd = 'mkdir {0} && tar xf {1} -C {0}'.format(target_folder, zip_path) - os.system(cmd) - print('Data is unzipped at {0}\n'.format(target_folder)) - - data_dir = os.path.join(target_folder, 'ILSVRC2012') - print('ILSVRC2012 full val set at {0}\n'.format(data_dir)) - return data_dir + with open(zip_path, "w+") as outfile: + for fname in file_names: + with open(fname) as infile: + outfile.write(infile.read()) + + +def extract(zip_path, extract_folder): + data_dir = os.path.join(extract_folder, DATA_DIR_NAME) + img_dir = os.path.join(data_dir, IMG_DIR_NAME) + print("Extracting...\n") + + if not (os.path.exists(img_dir) and + len(os.listdir(img_dir)) == FULL_IMAGES): + tar = tarfile.open(zip_path) + tar.extractall(path=extract_folder) + tar.close() + print('Extracted. Full Imagenet Validation dataset is located at {0}\n'. + format(data_dir)) + + +def print_processbar(done, total): + done_filled = done * '=' + empty_filled = (total - done) * ' ' + percentage_done = done * 100 / total + sys.stdout.write("\r[%s%s]%d%%" % + (done_filled, empty_filled, percentage_done)) + sys.stdout.flush() + + +def check_integrity(filename, target_hash): + print('\nThe binary file exists. 
Checking file integrity...\n') + md = hashlib.md5() + count = 0 + total_parts = 50 + chunk_size = 8192 + onepart = FULL_SIZE_BYTES / chunk_size / total_parts + with open(filename) as ifs: + while True: + buf = ifs.read(8192) + if count % onepart == 0: + done = count / onepart + print_processbar(done, total_parts) + count = count + 1 + if not buf: + break + md.update(buf) + hash1 = md.hexdigest() + if hash1 == target_hash: + return True + else: + return False -def reader(): - data_dir = download_unzip() - file_list = os.path.join(data_dir, 'val_list.txt') - output_file = os.path.join(data_dir, 'int8_full_val.bin') +def convert(file_list, data_dir, output_file): + print('Converting 50000 images to binary file ...\n') with open(file_list) as flist: lines = [line.strip() for line in flist] num_images = len(lines) - if not os.path.exists(output_file): - print( - 'Preprocessing to binary file......\n' - ) - with open(output_file, "w+b") as of: - #save num_images(int64_t) to file - of.seek(0) - num = np.array(int(num_images)).astype('int64') - of.write(num.tobytes()) - for idx, line in enumerate(lines): - img_path, label = line.split() - img_path = os.path.join(data_dir, img_path) - if not os.path.exists(img_path): - continue - - #save image(float32) to file - img = process_image( - img_path, 'val', color_jitter=False, rotate=False) - np_img = np.array(img) - of.seek(SIZE_INT64 + SIZE_FLOAT32 * DATA_DIM * DATA_DIM * 3 - * idx) - of.write(np_img.astype('float32').tobytes()) - - #save label(int64_t) to file - label_int = (int)(label) - np_label = np.array(label_int) - of.seek(SIZE_INT64 + SIZE_FLOAT32 * DATA_DIM * DATA_DIM * 3 - * num_images + idx * SIZE_INT64) - of.write(np_label.astype('int64').tobytes()) - - print('The preprocessed binary file path {}\n'.format(output_file)) + with open(output_file, "w+b") as ofs: + #save num_images(int64_t) to file + ofs.seek(0) + num = np.array(int(num_images)).astype('int64') + ofs.write(num.tobytes()) + per_parts = 1000 + full_parts = FULL_IMAGES / per_parts + print_processbar(0, full_parts) + for idx, line in enumerate(lines): + img_path, label = line.split() + img_path = os.path.join(data_dir, img_path) + if not os.path.exists(img_path): + continue + + #save image(float32) to file + img = process_image( + img_path, 'val', color_jitter=False, rotate=False) + np_img = np.array(img) + ofs.seek(SIZE_INT64 + SIZE_FLOAT32 * DATA_DIM * DATA_DIM * 3 * + idx) + ofs.write(np_img.astype('float32').tobytes()) + ofs.flush() + + #save label(int64_t) to file + label_int = (int)(label) + np_label = np.array(label_int) + ofs.seek(SIZE_INT64 + SIZE_FLOAT32 * DATA_DIM * DATA_DIM * 3 * + num_images + idx * SIZE_INT64) + ofs.write(np_label.astype('int64').tobytes()) + ofs.flush() + if (idx + 1) % per_parts == 0: + done = (idx + 1) / per_parts + print_processbar(done, full_parts) + print("Conversion finished.") + + +def run_convert(): + print('Start to download and convert 50000 images to binary file...') + cache_folder = os.path.expanduser('~/.cache/paddle/dataset/int8/download') + extract_folder = os.path.join(cache_folder, 'full_data') + data_dir = os.path.join(extract_folder, DATA_DIR_NAME) + file_list = os.path.join(data_dir, 'val_list.txt') + zip_path = os.path.join(cache_folder, 'full_imagenet_val.tar.gz') + output_file = os.path.join(cache_folder, 'int8_full_val.bin') + retry = 0 + try_limit = 3 + + while not (os.path.exists(output_file) and + os.path.getsize(output_file) == FULL_SIZE_BYTES and + check_integrity(output_file, TARGET_HASH)): + if 
os.path.exists(output_file): + sys.stderr.write( + "\n\nThe existing binary file is broken. Start to generate new one...\n\n". + format(output_file)) + os.remove(output_file) + if retry < try_limit: + retry = retry + 1 + else: + raise RuntimeError( + "Can not convert the dataset to binary file with try limit {0}". + format(try_limit)) + download_concat(cache_folder, zip_path) + extract(zip_path, extract_folder) + convert(file_list, data_dir, output_file) + print("\nSuccess! The binary file can be found at {0}".format(output_file)) if __name__ == '__main__': - reader() + run_convert() From 46e18cb7000c593e3079339f0d8ba8ab805215b4 Mon Sep 17 00:00:00 2001 From: lujun Date: Wed, 3 Apr 2019 16:09:34 +0800 Subject: [PATCH 04/27] Merge pull request #16634 from junjun315/my-cool-stuff fix load bug about context.place --- paddle/fluid/operators/load_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index 656728c609eb19..435c755df3642a 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -29,7 +29,7 @@ class LoadOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { framework::OpKernelType kt = framework::OpKernelType( - framework::proto::VarType::FP32, platform::CPUPlace()); + framework::proto::VarType::FP32, ctx.GetPlace()); return kt; } }; From 7bcb070a82bd36b975035545a9d0fab5d043804b Mon Sep 17 00:00:00 2001 From: lujun Date: Wed, 3 Apr 2019 13:38:08 +0800 Subject: [PATCH 05/27] merge confict, test=release/1.4 --- python/paddle/fluid/dygraph/nn.py | 548 +++++++++++++++++- .../fluid/tests/unittests/CMakeLists.txt | 3 +- .../fluid/tests/unittests/test_layers.py | 274 +++++++++ 3 files changed, 816 insertions(+), 9 deletions(-) diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py index 8925381119272d..ddf0de9a29b3b9 100644 --- a/python/paddle/fluid/dygraph/nn.py +++ b/python/paddle/fluid/dygraph/nn.py @@ -15,19 +15,20 @@ from __future__ import print_function from six.moves import reduce -import numpy as np from .. import core from ..layers import utils from . import layers -from ..framework import Variable, OpProtoHolder -from ..layers import layer_function_generator +from ..framework import Variable, _in_dygraph_mode, OpProtoHolder, Parameter from ..param_attr import ParamAttr from ..initializer import Normal, Constant, NumpyArrayInitializer +import numpy as np __all__ = [ - 'Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'Embedding', 'GRUUnit', 'LayerNorm', - 'NCE', 'PRelu', 'BilinearTensorProduct', 'Conv2DTranspose', 'SequenceConv' + 'Conv2D', 'Conv3D', 'Pool2D', 'FC', 'BatchNorm', 'Embedding', 'GRUUnit', + 'LayerNorm', 'NCE', 'PRelu', 'BilinearTensorProduct', 'Conv2DTranspose', + 'Conv3DTranspose', 'SequenceConv', 'RowConv', 'GroupNorm', 'SpectralNorm', + 'TreeConv' ] @@ -137,6 +138,303 @@ def forward(self, input): return self._helper.append_activation(pre_act, act=self._act) +class Conv3D(layers.Layer): + """ + **Convlution3D Layer** + + The convolution3D layer calculates the output based on the input, filter + and strides, paddings, dilations, groups parameters. Input(Input) and + Output(Output) are in NCDHW format. Where N is batch size C is the number of + channels, D is the depth of the feature, H is the height of the feature, + and W is the width of the feature. Convlution3D is similar with Convlution2D + but adds one dimension(depth). 
If bias attribution and activation type are + provided, bias is added to the output of the convolution, and the + corresponding activation function is applied to the final result. + + For each input :math:`X`, the equation is: + + .. math:: + + Out = \sigma (W \\ast X + b) + + In the above equation: + + * :math:`X`: Input value, a tensor with NCDHW format. + * :math:`W`: Filter value, a tensor with MCDHW format. + * :math:`\\ast`: Convolution operation. + * :math:`b`: Bias value, a 2-D tensor with shape [M, 1]. + * :math:`\\sigma`: Activation function. + * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. + + Example: + + - Input: + + Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` + + Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)` + + - Output: + Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` + + Where + + .. math:: + + D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\ + H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\ + W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1 + + Args: + input (Variable): The input image with [N, C, D, H, W] format. + num_filters(int): The number of filter. It is as same as the output + image channel. + filter_size (int|tuple|None): The filter size. If filter_size is a tuple, + it must contain three integers, (filter_size_D, filter_size_H, filter_size_W). + Otherwise, the filter will be a square. + stride (int|tuple): The stride size. If stride is a tuple, it must + contain three integers, (stride_D, stride_H, stride_W). Otherwise, the + stride_D = stride_H = stride_W = stride. Default: stride = 1. + padding (int|tuple): The padding size. If padding is a tuple, it must + contain three integers, (padding_D, padding_H, padding_W). Otherwise, the + padding_D = padding_H = padding_W = padding. Default: padding = 0. + dilation (int|tuple): The dilation size. If dilation is a tuple, it must + contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the + dilation_D = dilation_H = dilation_W = dilation. Default: dilation = 1. + groups (int): The groups number of the Conv3d Layer. According to grouped + convolution in Alex Krizhevsky's Deep CNN paper: when group=2, + the first half of the filters is only connected to the first half + of the input channels, while the second half of the filters is only + connected to the second half of the input channels. Default: groups=1 + param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights + of conv3d. If it is set to None or one attribute of ParamAttr, conv3d + will create ParamAttr as param_attr. If it is set to None, the parameter + is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is + :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None. + bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d. + If it is set to False, no bias will be added to the output units. + If it is set to None or one attribute of ParamAttr, conv3d + will create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized zero. Default: None. + use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn + library is installed. Default: True + act (str): Activation type, if it is set to None, activation is not appended. + Default: None. + name (str|None): A name for this layer(optional). 
If set None, the layer + will be named automatically. Default: None. + + Returns: + Variable: The tensor variable storing the convolution and \ + non-linearity activation result. + + Raises: + ValueError: If the shapes of input, filter_size, stride, padding and + groups mismatch. + + Examples: + .. code-block:: python + + data = fluid.layers.data(name='data', shape=[3, 12, 32, 32], dtype='float32') + conv3d = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu") + """ + + def __init__(self, + name_scope, + num_filters, + filter_size, + stride=1, + padding=0, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None): + assert param_attr is not False, "param_attr should not be False here." + super(Conv3D, self).__init__(name_scope) + self._groups = groups + self._stride = utils.convert_to_list(stride, 3, 'stride') + self._padding = utils.convert_to_list(padding, 3, 'padding') + self._dilation = utils.convert_to_list(dilation, 3, 'dilation') + self._act = act + if not isinstance(use_cudnn, bool): + raise ValueError("use_cudnn should be True or False") + self._use_cudnn = use_cudnn + self._filter_size = filter_size + self._num_filters = num_filters + self._param_attr = param_attr + self._bias_attr = bias_attr + + def _build_once(self, input): + num_channels = input.shape[1] + self._dtype = self._helper.input_dtype(input) + + if self._groups is None: + num_filter_channels = num_channels + else: + if num_channels % self._groups != 0: + raise ValueError("num_channels must be divisible by groups.") + num_filter_channels = num_channels // self._groups + + filter_size = utils.convert_to_list(self._filter_size, 3, 'filter_size') + + filter_shape = [self._num_filters, num_filter_channels] + filter_size + + def _get_default_param_initializer(): + filter_elem_num = filter_size[0] * filter_size[1] * filter_size[ + 2] * num_channels + std = (2.0 / filter_elem_num)**0.5 + return Normal(0.0, std, 0) + + self._filter_param = self.create_parameter( + attr=self._param_attr, + shape=filter_shape, + dtype=self._dtype, + default_initializer=_get_default_param_initializer()) + + self._bias_param = self.create_parameter( + attr=self._bias_attr, + shape=[self._num_filters], + dtype=self._dtype, + is_bias=True) + + def forward(self, input): + pre_bias = self._helper.create_variable_for_type_inference( + dtype=self._dtype) + + self._helper.append_op( + type='conv3d', + inputs={ + 'Input': input, + 'Filter': self._filter_param, + }, + outputs={"Output": pre_bias}, + attrs={ + 'strides': self._stride, + 'paddings': self._padding, + 'dilations': self._dilation, + 'groups': self._groups if self._groups else 1, + 'use_cudnn': self._use_cudnn, + 'use_mkldnn': False + }) + + pre_act = self._helper.create_variable_for_type_inference( + dtype=self._dtype) + + self._helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], + 'Y': [self._bias_param]}, + outputs={'Out': [pre_act]}, + attrs={'axis': 1}) + + return self._helper.append_activation(pre_act, act=self._act) + + +class Conv3DTranspose(layers.Layer): + def __init__(self, + name_scope, + num_filters, + output_size=None, + filter_size=None, + padding=0, + stride=1, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None, + name=None): + super(Conv3DTranspose, self).__init__(name_scope) + if not isinstance(use_cudnn, bool): + raise ValueError("use_cudnn should be True or False") + assert param_attr is not False, "param_attr should not be False in conv3d_transpose." 
+ self._padding = utils.convert_to_list(padding, 3, 'padding') + self._stride = utils.convert_to_list(stride, 3, 'stride') + self._dilation = utils.convert_to_list(dilation, 3, 'dilation') + self._param_attr = param_attr + self._filter_size = filter_size + self._output_size = output_size + self._groups = 1 if groups is None else groups + self._num_filters = num_filters + self._use_cudnn = use_cudnn + self._bias_attr = bias_attr + self._act = act + + def _build_once(self, input): + self._dtype = self._helper.input_dtype(input) + self._input_channel = input.shape[1] + + if self._filter_size is None: + if self._output_size is None: + raise ValueError( + "output_size must be set when filter_size is None") + if isinstance(self._output_size, int): + self._output_size = [self._output_size, self._output_size] + + d_in = input.shape[2] + h_in = input.shape[3] + w_in = input.shape[4] + + filter_size_d = (self._output_size[0] - + (d_in - 1) * self._stride[0] + 2 * self._padding[0] + - 1) // self._dilation[0] + 1 + filter_size_h = (self._output_size[1] - + (h_in - 1) * self._stride[1] + 2 * self._padding[1] + - 1) // self._dilation[1] + 1 + filter_size_w = (self._output_size[2] - + (w_in - 1) * self._stride[2] + 2 * self._padding[2] + - 1) // self._dilation[2] + 1 + self._filter_size = [filter_size_d, filter_size_h, filter_size_w] + else: + self._filter_size = utils.convert_to_list( + self._filter_size, 3, 'conv3d_transpose.filter_size') + + filter_shape = [ + self._input_channel, self._num_filters // self._groups + ] + self._filter_size + self._img_filter = self.create_parameter( + dtype=self._dtype, shape=filter_shape, attr=self._param_attr) + if self._bias_attr: + self._bias_param = self.create_parameter( + attr=self._bias_attr, + shape=[self._num_filters], + dtype=self._dtype, + is_bias=True) + + def forward(self, input): + pre_bias = self._helper.create_variable_for_type_inference( + dtype=self._dtype) + self._helper.append_op( + type="conv3d_transpose", + inputs={'Input': [input], + 'Filter': [self._img_filter]}, + outputs={'Output': pre_bias}, + attrs={ + 'strides': self._stride, + 'paddings': self._padding, + 'dilations': self._dilation, + 'groups': self._groups if self._groups else 1, + 'use_cudnn': self._use_cudnn + }) + + if self._bias_attr: + pre_act = self._helper.create_variable_for_type_inference( + dtype=self._dtype) + self._helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], + 'Y': [self._bias_param]}, + outputs={'Out': [pre_act]}, + attrs={'axis': 1}) + else: + pre_act = pre_bias + + # Currently, we don't support inplace in imperative mode + return self._helper.append_activation(pre_act, act=self._act) + + class Pool2D(layers.Layer): def __init__(self, name_scope, @@ -1365,6 +1663,8 @@ def __init__(self, bias_attr=None, param_attr=None, act=None): + assert not _in_dygraph_mode( + ), "SequenceConv is not supported by dynamic graph mode yet!" 
super(SequenceConv, self).__init__(name_scope) self._num_filters = num_filters self._filter_size = filter_size @@ -1374,12 +1674,10 @@ def __init__(self, self._param_attr = param_attr def _build_once(self, input): - self._dtype = self._helper.input_dtype(input) - print(self._filter_size) filter_shape = [self._filter_size * input.shape[1], self._num_filters] self._filter_param = self.create_parameter( - attr=self.param_attr, shape=filter_shape, dtype=self._dtype) + attr=self._param_attr, shape=filter_shape, dtype=self._dtype) def forward(self, input): pre_bias = self._helper.create_variable_for_type_inference(self._dtype) @@ -1397,3 +1695,237 @@ def forward(self, input): }) pre_act = self._helper.append_bias_op(pre_bias) return self._helper.append_activation(pre_act) + + +class RowConv(layers.Layer): + def __init__(self, + name_scope, + future_context_size, + param_attr=None, + act=None): + assert not _in_dygraph_mode( + ), "RowConv is not supported by dynamic graph mode yet!" + super(RowConv, self).__init__(name_scope) + self._act = act + self._param_attr = param_attr + self._future_context_size = future_context_size + + def _build_once(self, input): + self._dtype = self._helper.input_dtype(input) + filter_shape = [self._future_context_size + 1, input.shape[1]] + self._filter_param = self.create_parameter( + attr=self._param_attr, + shape=filter_shape, + dtype=self._dtype, + is_bias=False) + + def forward(self, input): + out = self._helper.create_variable_for_type_inference(self._dtype) + self._helper.append_op( + type='row_conv', + inputs={'X': [input], + 'Filter': [self._filter_param]}, + outputs={'Out': [out]}) + return self._helper.append_activation(out, act=self._act) + + +class GroupNorm(layers.Layer): + """ + **Group Normalization Layer** + + Refer to `Group Normalization `_ . + + Args: + name_scope (str): See base class. + groups(int): The number of groups that divided from channels. + epsilon(float): The small value added to the variance to prevent + division by zero. + param_attr(ParamAttr|None): The parameter attribute for the learnable + scale :math:`g`. If it is set to False, no scale will be added to the output units. + If it is set to None, the bias is initialized one. Default: None. + bias_attr(ParamAttr|None): The parameter attribute for the learnable + bias :math:`b`. If it is set to False, no bias will be added to the output units. + If it is set to None, the bias is initialized zero. Default: None. + act(str): Activation to be applied to the output of group normalizaiton. + data_layout(string|NCHW): Only NCHW is supported. + dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc + + Returns: + Variable: A tensor variable which is the result after applying group normalization on the input. 
+ + + """ + + def __init__(self, + name_scope, + groups, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + act=None, + data_layout='NCHW'): + super(GroupNorm, self).__init__(name_scope) + self._param_attr = param_attr + self._bias_attr = bias_attr + self._epsilon = epsilon + self._groups = groups + self._act = act + if data_layout != 'NCHW': + raise ValueError("unsupported data layout:" + data_layout) + + def _build_once(self, input): + self._dtype = self._helper.input_dtype(input) + param_shape = [input.shape[1]] + if self._bias_attr: + self._bias = self.create_parameter( + attr=self._bias_attr, + shape=param_shape, + dtype=self._dtype, + is_bias=True) + + if self._param_attr: + self._scale = self.create_parameter( + attr=self._param_attr, + shape=param_shape, + dtype=self._dtype, + default_initializer=Constant(1.0)) + + def forward(self, input): + inputs = {'X': input} + if self._bias: + inputs['Bias'] = self._bias + if self._scale: + inputs['Scale'] = self._scale + + # create output + mean_out = self._helper.create_variable_for_type_inference( + dtype=self._dtype, stop_gradient=True) + variance_out = self._helper.create_variable_for_type_inference( + dtype=self._dtype, stop_gradient=True) + group_norm_out = self._helper.create_variable_for_type_inference( + dtype=self._dtype) + + self._helper.append_op( + type="group_norm", + inputs=inputs, + outputs={ + "Y": group_norm_out, + "Mean": mean_out, + "Variance": variance_out, + }, + attrs={"epsilon": self._epsilon, + "groups": self._groups}) + + return self._helper.append_activation(group_norm_out, self._act) + + +class SpectralNorm(layers.Layer): + def __init__(self, name_scope, dim=0, power_iters=1, eps=1e-12, name=None): + super(SpectralNorm, self).__init__(name_scope) + self._power_iters = power_iters + self._eps = eps + self._dim = dim + + def _build_once(self, weight): + self._dtype = self._helper.input_dtype(weight) + input_shape = weight.shape + h = input_shape[self._dim] + w = np.prod(input_shape) // h + + self.u = self.create_parameter( + attr=ParamAttr(), + shape=[h], + dtype=self._dtype, + default_initializer=Normal(0., 1.)) + self.u.stop_gradient = True + + self.v = self.create_parameter( + attr=ParamAttr(), + shape=[w], + dtype=self._dtype, + default_initializer=Normal(0., 1.)) + self.v.stop_gradient = True + + def forward(self, weight): + inputs = {'Weight': weight, 'U': self.u, 'V': self.v} + out = self._helper.create_variable_for_type_inference(self._dtype) + self._helper.append_op( + type="spectral_norm", + inputs=inputs, + outputs={"Out": out, }, + attrs={ + "dim": self._dim, + "power_iters": self._power_iters, + "eps": self._eps, + }) + + return out + + +class TreeConv(layers.Layer): + def __init__(self, + name_scope, + output_size, + num_filters=1, + max_depth=2, + act='tanh', + param_attr=None, + bias_attr=None, + name=None): + super(TreeConv, self).__init__(name_scope) + self._name = name + self._output_size = output_size + self._act = act + self._max_depth = max_depth + self._num_filters = num_filters + self._bias_attr = bias_attr + self._param_attr = param_attr + + def _build_once(self, nodes_vector, edge_set): + assert isinstance(nodes_vector, Variable) + assert isinstance(edge_set, Variable) + self._dtype = self._helper.input_dtype(nodes_vector) + + feature_size = nodes_vector.shape[2] + w_shape = [feature_size, 3, self._output_size, self._num_filters] + if self._bias_attr: + self._bias_param = self.create_parameter( + attr=self._bias_attr, + shape=[self._num_filters], + dtype=self._dtype, + is_bias=True) + 
self.W = self.create_parameter( + attr=self._param_attr, + shape=w_shape, + dtype=self._dtype, + is_bias=False) + + def forward(self, nodes_vector, edge_set): + if self._name: + out = self.create_variable( + name=self._name, dtype=self._dtype, persistable=False) + else: + out = self._helper.create_variable_for_type_inference( + dtype=self._dtype) + + self._helper.append_op( + type='tree_conv', + inputs={ + 'NodesVector': nodes_vector, + 'EdgeSet': edge_set, + 'Filter': self.W + }, + outputs={'Out': out, }, + attrs={'max_depth': self._max_depth}) + if self._bias_attr: + pre_activation = self._helper.create_variable_for_type_inference( + dtype=self._dtype) + self._helper.append_op( + type='elementwise_add', + inputs={'X': [out], + 'Y': [self._bias_param]}, + outputs={'Out': [pre_activation]}, + attrs={'axis': 1}) + else: + pre_activation = out + return self._helper.append_activation(pre_activation, act=self._act) diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index d70154decd999d..f99759cdaaf374 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -80,6 +80,7 @@ list(REMOVE_ITEM TEST_OPS test_nearest_interp_op) list(REMOVE_ITEM TEST_OPS test_imperative_resnet) list(REMOVE_ITEM TEST_OPS test_imperative_mnist) list(REMOVE_ITEM TEST_OPS test_ir_memory_optimize_transformer) +list(REMOVE_ITEM TEST_OPS test_layers) foreach(TEST_OP ${TEST_OPS}) py_test_modules(${TEST_OP} MODULES ${TEST_OP}) endforeach(TEST_OP) @@ -114,7 +115,7 @@ py_test_modules(test_parallel_executor_crf MODULES test_parallel_executor_crf SE py_test_modules(test_parallel_executor_fetch_feed MODULES test_parallel_executor_fetch_feed SERIAL) set_tests_properties(test_parallel_executor_fetch_feed PROPERTIES TIMEOUT 450) py_test_modules(test_parallel_executor_transformer MODULES test_parallel_executor_transformer SERIAL) - +py_test_modules(test_layers MODULES test_layers ENVS FLAGS_cudnn_deterministic=1) if(NOT WIN32) py_test_modules(test_ir_memory_optimize_transformer MODULES test_ir_memory_optimize_transformer SERIAL) endif() diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index e92ece7acb41b5..6cc3c6d90bdae0 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -560,6 +560,280 @@ def test_nce(self): self.assertTrue(np.allclose(static_rlt2, static_rlt)) self.assertTrue(np.allclose(nce_loss3._numpy(), static_rlt)) + def test_conv3d(self): + with self.static_graph(): + images = layers.data( + name='pixel', shape=[3, 6, 6, 6], dtype='float32') + ret = layers.conv3d(input=images, num_filters=3, filter_size=2) + static_ret = self.get_static_graph_result( + feed={'pixel': np.ones( + [2, 3, 6, 6, 6], dtype='float32')}, + fetch_list=[ret])[0] + + with self.static_graph(): + images = layers.data( + name='pixel', shape=[3, 6, 6, 6], dtype='float32') + conv3d = nn.Conv3D('conv3d', num_filters=3, filter_size=2) + ret = conv3d(images) + static_ret2 = self.get_static_graph_result( + feed={'pixel': np.ones( + [2, 3, 6, 6, 6], dtype='float32')}, + fetch_list=[ret])[0] + + with self.dynamic_graph(): + images = np.ones([2, 3, 6, 6, 6], dtype='float32') + conv3d = nn.Conv3D('conv3d', num_filters=3, filter_size=2) + dy_ret = conv3d(base.to_variable(images)) + + self.assertTrue(np.allclose(static_ret, dy_ret._numpy())) + self.assertTrue(np.allclose(static_ret, static_ret2)) + + 
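Editor's note (not part of the patch): the `test_conv3d` case above feeds a `[2, 3, 6, 6, 6]` NCDHW input through both the static-graph `layers.conv3d` and the new dygraph `nn.Conv3D` layer and compares the results. The expected spatial size follows from the output-shape formula quoted in the `Conv3D` docstring earlier in this patch. The sketch below is illustrative only — the helper name `conv3d_out_dim` is not part of Paddle — and assumes the defaults used by the test (stride=1, padding=0, dilation=1).

```python
def conv3d_out_dim(in_dim, filter_dim, padding=0, stride=1, dilation=1):
    # D_out = (D_in + 2*padding - (dilation*(D_f - 1) + 1)) // stride + 1
    return (in_dim + 2 * padding - (dilation * (filter_dim - 1) + 1)) // stride + 1

# filter_size=2 on a 6x6x6 volume with the defaults gives 5x5x5, so
# static_ret, static_ret2 and dy_ret should all have shape [2, 3, 5, 5, 5].
assert [conv3d_out_dim(6, 2)] * 3 == [5, 5, 5]
```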
def test_row_conv(self): + input = np.arange(15).reshape([3, 5]).astype('float32') + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + else: + place = core.CPUPlace() + + with self.static_graph(): + x = layers.data( + name='X', + shape=[3, 5], + dtype='float32', + lod_level=1, + append_batch_size=False) + ret = layers.row_conv(input=x, future_context_size=2) + static_ret = self.get_static_graph_result( + feed={ + 'X': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1, 1]], place=place) + }, + fetch_list=[ret], + with_lod=True)[0] + + with self.static_graph(): + x = layers.data( + name='X', + shape=[3, 5], + dtype='float32', + lod_level=1, + append_batch_size=False) + rowConv = nn.RowConv('RowConv', future_context_size=2) + ret = rowConv(x) + static_ret2 = self.get_static_graph_result( + feed={ + 'X': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1, 1]], place=place) + }, + fetch_list=[ret], + with_lod=True)[0] + + # TODO: dygraph can't support LODTensor + + self.assertTrue(np.allclose(static_ret, static_ret2)) + + def test_group_norm(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + else: + place = core.CPUPlace() + + shape = (2, 4, 3, 3) + + input = np.random.random(shape).astype('float32') + + with self.static_graph(): + X = fluid.layers.data( + name='X', + shape=shape, + dtype='float32', + lod_level=1, + append_batch_size=False) + ret = layers.group_norm(input=X, groups=2) + static_ret = self.get_static_graph_result( + feed={ + 'X': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1]], place=place) + }, + fetch_list=[ret], + with_lod=True)[0] + + with self.static_graph(): + X = fluid.layers.data( + name='X', + shape=shape, + dtype='float32', + lod_level=1, + append_batch_size=False) + groupNorm = nn.GroupNorm('GroupNorm', groups=2) + ret = groupNorm(X) + static_ret2 = self.get_static_graph_result( + feed={ + 'X': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1]], place=place) + }, + fetch_list=[ret], + with_lod=True)[0] + + with self.dynamic_graph(): + groupNorm = nn.GroupNorm('GroupNorm', groups=2) + dy_ret = groupNorm(base.to_variable(input)) + + self.assertTrue(np.allclose(static_ret, dy_ret._numpy())) + self.assertTrue(np.allclose(static_ret, static_ret2)) + + def test_spectral_norm(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + else: + place = core.CPUPlace() + + shape = (2, 4, 3, 3) + + input = np.random.random(shape).astype('float32') + + with self.static_graph(): + Weight = fluid.layers.data( + name='Weight', + shape=shape, + dtype='float32', + lod_level=1, + append_batch_size=False) + ret = layers.spectral_norm(weight=Weight, dim=1, power_iters=2) + static_ret = self.get_static_graph_result( + feed={ + 'Weight': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1]], place=place), + }, + fetch_list=[ret], + with_lod=True)[0] + + with self.static_graph(): + Weight = fluid.layers.data( + name='Weight', + shape=shape, + dtype='float32', + lod_level=1, + append_batch_size=False) + spectralNorm = nn.SpectralNorm('SpectralNorm', dim=1, power_iters=2) + ret = spectralNorm(Weight) + static_ret2 = self.get_static_graph_result( + feed={ + 'Weight': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1]], place=place) + }, + fetch_list=[ret], + with_lod=True)[0] + + with self.dynamic_graph(): + spectralNorm = nn.SpectralNorm('SpectralNorm', dim=1, power_iters=2) + dy_ret = spectralNorm(base.to_variable(input)) + + 
self.assertTrue(np.allclose(static_ret, dy_ret._numpy())) + self.assertTrue(np.allclose(static_ret, static_ret2)) + + def test_tree_conv(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + else: + place = core.CPUPlace() + adj_array = [1, 2, 1, 3, 1, 4, 1, 5, 2, 6, 2, 7, 2, 8, 4, 9, 4, 10] + adj = np.array(adj_array).reshape((1, 9, 2)).astype('int32') + adj = np.tile(adj, (1, 1, 1)) + vectors = np.random.random((1, 10, 5)).astype('float32') + with self.static_graph(): + NodesVector = fluid.layers.data( + name='NodesVector', + shape=(1, 10, 5), + dtype='float32', + lod_level=1, + append_batch_size=False) + EdgeSet = fluid.layers.data( + name='EdgeSet', + shape=(1, 9, 2), + dtype='int32', + lod_level=1, + append_batch_size=False) + ret = layers.tree_conv( + nodes_vector=NodesVector, + edge_set=EdgeSet, + output_size=6, + num_filters=1, + max_depth=2) + static_ret = self.get_static_graph_result( + feed={ + 'NodesVector': fluid.create_lod_tensor( + data=vectors, recursive_seq_lens=[[1]], place=place), + 'EdgeSet': fluid.create_lod_tensor( + data=adj, recursive_seq_lens=[[1]], place=place) + }, + fetch_list=[ret], + with_lod=False)[0] + + with self.static_graph(): + NodesVector = fluid.layers.data( + name='NodesVector', + shape=(1, 10, 5), + dtype='float32', + lod_level=1, + append_batch_size=False) + EdgeSet = fluid.layers.data( + name='EdgeSet', + shape=(1, 9, 2), + dtype='int32', + lod_level=1, + append_batch_size=False) + treeConv = nn.TreeConv( + 'TreeConv', output_size=6, num_filters=1, max_depth=2) + ret = treeConv(NodesVector, EdgeSet) + static_ret2 = self.get_static_graph_result( + feed={ + 'NodesVector': fluid.create_lod_tensor( + data=vectors, recursive_seq_lens=[[1]], place=place), + 'EdgeSet': fluid.create_lod_tensor( + data=adj, recursive_seq_lens=[[1]], place=place) + }, + fetch_list=[ret], + with_lod=False)[0] + + with self.dynamic_graph(): + treeConv = nn.TreeConv( + 'SpectralNorm', output_size=6, num_filters=1, max_depth=2) + dy_ret = treeConv(base.to_variable(vectors), base.to_variable(adj)) + + self.assertTrue(np.allclose(static_ret, static_ret2)) + self.assertTrue(np.allclose(static_ret, dy_ret._numpy())) + + def test_conv3d_transpose(self): + input_array = np.arange(0, 48).reshape( + [2, 3, 2, 2, 2]).astype('float32') + + with self.static_graph(): + img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32') + out = layers.conv3d_transpose( + input=img, num_filters=12, filter_size=12, use_cudnn=False) + static_rlt = self.get_static_graph_result( + feed={'pixel': input_array}, fetch_list=[out])[0] + with self.static_graph(): + img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32') + conv3d_transpose = nn.Conv3DTranspose( + 'Conv3DTranspose', + num_filters=12, + filter_size=12, + use_cudnn=False) + out = conv3d_transpose(img) + static_rlt2 = self.get_static_graph_result( + feed={'pixel': input_array}, fetch_list=[out])[0] + with self.dynamic_graph(): + conv3d_transpose = nn.Conv3DTranspose( + 'Conv3DTranspose', + num_filters=12, + filter_size=12, + use_cudnn=False) + dy_rlt = conv3d_transpose(base.to_variable(input_array)) + self.assertTrue(np.allclose(static_rlt2, static_rlt)) + self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt)) + class TestBook(unittest.TestCase): def test_fit_a_line(self): From 4cc614412c2422f9d3f101e3a7f8c7634b065a65 Mon Sep 17 00:00:00 2001 From: chengduo Date: Wed, 3 Apr 2019 10:24:10 -0500 Subject: [PATCH 06/27] [Cherry-pick]Fix the bug of all_reduce_deps_pass (#16648) * fix the bug of 
all_reduce_deps_pass test=release/1.4 --- .../framework/details/all_reduce_deps_pass.cc | 241 +++++++++++------- .../framework/details/all_reduce_deps_pass.h | 32 --- .../framework/details/all_reduce_op_handle.cc | 2 +- .../fluid/framework/details/build_strategy.cc | 17 +- .../fluid/framework/details/op_handle_base.cc | 2 +- .../framework/ir/multi_batch_merge_pass.cc | 3 +- paddle/fluid/framework/parallel_executor.cc | 7 +- .../fluid/tests/unittests/test_dist_base.py | 3 +- 8 files changed, 166 insertions(+), 141 deletions(-) delete mode 100644 paddle/fluid/framework/details/all_reduce_deps_pass.h diff --git a/paddle/fluid/framework/details/all_reduce_deps_pass.cc b/paddle/fluid/framework/details/all_reduce_deps_pass.cc index 878b950858a71b..c44793cd11d22b 100644 --- a/paddle/fluid/framework/details/all_reduce_deps_pass.cc +++ b/paddle/fluid/framework/details/all_reduce_deps_pass.cc @@ -13,125 +13,186 @@ // limitations under the License. #include -#include +#include #include #include #include +#include #include -#include "paddle/fluid/framework/details/all_reduce_deps_pass.h" #include "paddle/fluid/framework/details/all_reduce_op_handle.h" +#include "paddle/fluid/framework/details/container_cast.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" #include "paddle/fluid/framework/details/op_graph_view.h" -#include "paddle/fluid/framework/details/var_handle.h" +#include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_helper.h" +#include "paddle/fluid/framework/ir/pass.h" #include "paddle/fluid/framework/op_proto_maker.h" namespace paddle { namespace framework { namespace details { -VarHandle* GetValidInput(const OpHandleBase* a) { - for (auto p : a->Inputs()) { - VarHandle* b = dynamic_cast(p); - if (b) { - return b; +class AllReduceDepsPass : public ir::Pass { + protected: + void ApplyImpl(ir::Graph* graph) const override { + std::vector all_reduce_op_handles = + GetSortedAllReduceOps(*graph); + + for (size_t i = 1; i < all_reduce_op_handles.size(); ++i) { + auto* dep_var = new DummyVarHandle(graph->CreateControlDepVar()); + graph->Get(kGraphDepVars).emplace(dep_var); + all_reduce_op_handles[i - 1]->AddOutput(dep_var); + all_reduce_op_handles[i]->AddInput(dep_var); } - } - return nullptr; -} - -void AllReduceDepsPass::ApplyImpl(ir::Graph* graph) const { - auto graph_ops = ir::FilterByNodeWrapper(*graph); - - // get vars order - int order = 0; - std::unordered_map vars; - // TODO(gongwb): use graph topology sort to find the order of operators. - // Note that must assert topology sort is stable - auto& ops = graph->Get>(kStaleProgramOpDescs); - for (auto* op_desc : ops) { - try { - bool is_bk_op = - static_cast(boost::get(op_desc->GetAttr( - OpProtoAndCheckerMaker::OpRoleAttrName())) & - static_cast(OpRole::kBackward)); - if (!is_bk_op) continue; - - auto backward_vars = - boost::get>(op_desc->GetNullableAttr( - OpProtoAndCheckerMaker::OpRoleVarAttrName())); - PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0); - - auto outputs = op_desc->Outputs(); - for (auto& o_it : outputs) { - for (auto& v : o_it.second) { // values - vars[v] = order; - VLOG(10) << "in all_reduce_deps_pass:" << v; - } - } - order++; - } catch (boost::bad_get e) { + if (VLOG_IS_ON(10)) { + DebugString(*graph, all_reduce_op_handles); } } - std::vector dist_ops; - // get allreduce ops. - for (auto& op : graph_ops) { - // FIXME(gongwb):add broad cast. 
- if (op->Name() == "all_reduce" || op->Name() == "reduce") { - dist_ops.push_back(op); + std::vector GetSortedAllReduceOps( + const ir::Graph& graph) const { + std::vector all_reduce_op_handles; + std::unordered_map pending_ops; + std::unordered_set ready_ops; + std::unordered_set next_ready_ops; + + auto op_handles = ir::FilterByNodeWrapper(graph); + size_t num_of_ops = op_handles.size(); + for (OpHandleBase* op : op_handles) { + size_t not_ready_vars = op->NotReadyInputSize(); + if (not_ready_vars) { + pending_ops.insert({op, not_ready_vars}); + } else { + ready_ops.insert(op); + } } - } - - VLOG(10) << "dist_ops size:" << dist_ops.size() - << ", outputs size:" << vars.size() << ", ops size:" << ops.size(); - - std::sort(dist_ops.begin(), dist_ops.end(), [&](OpHandleBase* op1, - OpHandleBase* op2) { - VarHandle* i0 = dynamic_cast(GetValidInput(op1)); - VarHandle* i1 = dynamic_cast(GetValidInput(op2)); - - PADDLE_ENFORCE(i0 != nullptr && i1 != nullptr, "%s convert to %s error", - op1->DebugString(), op2->DebugString()); - auto l_it = vars.find(i0->name()); - auto r_it = vars.find(i1->name()); - - PADDLE_ENFORCE(l_it != vars.end() && r_it != vars.end(), - "can't find var's name %s and %s in opdesc", i0->name(), - i1->name()); - - if (l_it->second < r_it->second) return true; + GetSortedAllReduceOps(ready_ops, &all_reduce_op_handles); + + size_t has_run_ops = ready_ops.size(); + while (has_run_ops != num_of_ops) { + for (auto* op : ready_ops) { + for (auto& ready_var : op->Outputs()) { + for (auto* pend_op : ready_var->PendingOps()) { + auto& deps = --pending_ops[pend_op]; + if (deps == 0) { + next_ready_ops.insert(pend_op); + } + } + } + } - if (l_it->second == r_it->second) { - return i0->name() < i1->name(); + PADDLE_ENFORCE_NE(next_ready_ops.size(), 0, "There maybe have a cycle."); + ready_ops.clear(); + std::swap(ready_ops, next_ready_ops); + GetSortedAllReduceOps(ready_ops, &all_reduce_op_handles); + has_run_ops += ready_ops.size(); } + return all_reduce_op_handles; + } - return false; - }); - - // add dependency. - auto& sorted_ops = dist_ops; - for (size_t i = 1; i < sorted_ops.size(); ++i) { - auto* dep_var = new DummyVarHandle(graph->CreateControlDepVar()); - - auto* pre_op = sorted_ops[i - 1]; - auto* op = sorted_ops[i]; - - pre_op->AddOutput(dep_var); - op->AddInput(dep_var); - graph->Get(kGraphDepVars).emplace(dep_var); + void GetSortedAllReduceOps( + const std::unordered_set& ready_ops, + std::vector* all_reduce_op_handles) const { + std::vector current_all_reduce_op_handles; + for (auto& op_handle : ready_ops) { + auto all_reduce_op_handle = dynamic_cast(op_handle); + if (all_reduce_op_handle) { + current_all_reduce_op_handles.emplace_back(all_reduce_op_handle); + } + } - VLOG(10) << "add all_reduce sequential dependencies between " << pre_op - << " and " << op; + // NOTE(zcd): For distributed training, it is important to keep the order of + // allReduce on each node consistent. Otherwise, hang may occur. + // Sort the current_all_reduce_op_handles according to the name of input. 
+ sort(current_all_reduce_op_handles.begin(), + current_all_reduce_op_handles.end(), + [](const AllReduceOpHandle* left, + const AllReduceOpHandle* right) -> bool { + auto left_in_vars = DynamicCast(left->Inputs()); + auto right_in_vars = DynamicCast(right->Inputs()); + PADDLE_ENFORCE_GT(left_in_vars.size(), 0); + PADDLE_ENFORCE_EQ(left_in_vars.size(), right_in_vars.size()); + return left_in_vars[0]->Name() > right_in_vars[0]->Name(); + }); + + all_reduce_op_handles->insert(all_reduce_op_handles->end(), + current_all_reduce_op_handles.begin(), + current_all_reduce_op_handles.end()); + } - VLOG(10) << "pre_op:" << pre_op->DebugString() - << ", op:" << op->DebugString(); + void DebugString( + const ir::Graph& graph, + const std::vector& all_reduce_op_handles) const { + // get vars order + std::map> vars = + GetSoredGradientsFromStaleProgram(graph); + std::stringstream out; + size_t grads_of_stale_program = 0; + out << "Get Order From kStaleProgramOpDescs: "; + for (auto& var : vars) { + out << "Order " << var.first << " ["; + for (auto& var_name : var.second) { + out << var_name << ", "; + ++grads_of_stale_program; + } + out << "], "; + } + VLOG(10) << out.str(); + + std::stringstream out2; + out2 << "Get Order From Topological order: "; + for (auto& op : all_reduce_op_handles) { + bool find_valid_input = false; + for (auto& in_var : op->Inputs()) { + if (dynamic_cast(in_var)) { + out2 << in_var->Name() << ", "; + find_valid_input = true; + break; + } + } + PADDLE_ENFORCE(find_valid_input, "Doesn't find valid input."); + } + VLOG(10) << out2.str(); + if (grads_of_stale_program != all_reduce_op_handles.size()) { + VLOG(10) + << "The gradients number of stale program and graph is not equal."; + } } -} + std::map> GetSoredGradientsFromStaleProgram( + const ir::Graph& graph) const { + std::map> vars; + auto ops = graph.Get>(kStaleProgramOpDescs); + int order = 0; + for (auto* op_desc : ops) { + try { + bool is_bk_op = + static_cast(boost::get(op_desc->GetAttr( + OpProtoAndCheckerMaker::OpRoleAttrName())) & + static_cast(OpRole::kBackward)); + if (!is_bk_op) continue; + + auto backward_vars = + boost::get>(op_desc->GetNullableAttr( + OpProtoAndCheckerMaker::OpRoleVarAttrName())); + if (backward_vars.empty()) continue; + + PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0); + for (size_t i = 1; i < backward_vars.size(); i += 2) { + vars[order].emplace_back(backward_vars[i]); + VLOG(1) << "get parameter and gradient: " << backward_vars[i - 1] + << ", " << backward_vars[i]; + } + order++; + } catch (boost::bad_get e) { + } + } + return vars; + } +}; } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/all_reduce_deps_pass.h b/paddle/fluid/framework/details/all_reduce_deps_pass.h deleted file mode 100644 index 4ed3736587aa3d..00000000000000 --- a/paddle/fluid/framework/details/all_reduce_deps_pass.h +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include "paddle/fluid/framework/ir/graph.h" -#include "paddle/fluid/framework/ir/pass.h" - -namespace paddle { -namespace framework { -namespace details { - -// TODO(gongwb): overlap allreduce with backward computation. -class AllReduceDepsPass : public ir::Pass { - protected: - void ApplyImpl(ir::Graph* graph) const override; -}; - -} // namespace details -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc index 6e477cd2977561..ed75b48090b27a 100644 --- a/paddle/fluid/framework/details/all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc @@ -28,7 +28,7 @@ // asynchronous nccl allreduce or synchronous issue: // https://github.com/PaddlePaddle/Paddle/issues/15049 DEFINE_bool( - sync_nccl_allreduce, false, + sync_nccl_allreduce, true, "If set true, will call `cudaStreamSynchronize(nccl_stream)`" "after allreduce, this mode can get better performance in some scenarios."); diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc index df69b11ec6ae3b..36720b8ad97c6c 100644 --- a/paddle/fluid/framework/details/build_strategy.cc +++ b/paddle/fluid/framework/details/build_strategy.cc @@ -163,15 +163,11 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder { "graph_printer", new details::GraphvizSSAGraphPrinter); } - // Verify that the graph is correct for multi-device executor. - AppendPass("multi_devices_check_pass"); - - if (VLOG_IS_ON(2)) { - AppendPass("all_reduce_deps_pass"); - } - - if (SeqOnlyAllReduceOps(strategy_)) { - VLOG(10) << "Add all_reduce_deps_pass"; + // experimental shows that the program will be faster if append + // all_reduce_deps_pass here. + if (!strategy_.enable_parallel_graph_ && + (SeqOnlyAllReduceOps(strategy_) || + strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce)) { AppendPass("all_reduce_deps_pass"); } @@ -179,6 +175,9 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder { VLOG(10) << "Add modify_op_lock_and_record_event_pass"; AppendPass("modify_op_lock_and_record_event_pass"); } + + // Verify that the graph is correct for multi-device executor. + AppendPass("multi_devices_check_pass"); } // Convert graph to run on multi-devices. diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc index 413b14961631b3..69cd84ebf2d678 100644 --- a/paddle/fluid/framework/details/op_handle_base.cc +++ b/paddle/fluid/framework/details/op_handle_base.cc @@ -68,7 +68,7 @@ void OpHandleBase::Run(bool use_cuda) { if (out_var_handle) { PADDLE_ENFORCE( platform::is_same_place(place, out_var_handle->place()), - "The place of input(%s) is not consistent with the " + "The place of output(%s) is not consistent with the " "place of current op(%s).", out_var_handle->Name(), Name()); out_var_handle->SetGenerateEvent(events_.at(dev_id)); diff --git a/paddle/fluid/framework/ir/multi_batch_merge_pass.cc b/paddle/fluid/framework/ir/multi_batch_merge_pass.cc index dcc48fb934e7a0..a8720ff4bfb5c7 100644 --- a/paddle/fluid/framework/ir/multi_batch_merge_pass.cc +++ b/paddle/fluid/framework/ir/multi_batch_merge_pass.cc @@ -84,7 +84,8 @@ void BatchMergePass::ApplyImpl(ir::Graph* graph) const { // 1. 
record op nodes of different roles for (auto node : nodes) { - if (node->IsVar()) continue; + if (!node->IsOp()) continue; + PADDLE_ENFORCE(node->Op(), "must find opdesc"); int op_role = boost::get(node->Op()->GetAttr( framework::OpProtoAndCheckerMaker::OpRoleAttrName())); if ((op_role == static_cast(framework::OpRole::kForward)) || diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index ab0947c631fe9a..3f10daf56ebe0d 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -19,17 +19,14 @@ limitations under the License. */ #include #include #include -#include "paddle/fluid/framework/ir/graph_helper.h" - -#include "paddle/fluid/framework/ir/graph.h" - -#include "paddle/fluid/framework/details/all_reduce_deps_pass.h" #include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" #include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h" #include "paddle/fluid/framework/details/reference_count_pass_helper.h" #include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h" #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/graph_helper.h" #include "paddle/fluid/platform/profiler.h" #ifdef WITH_GPERFTOOLS diff --git a/python/paddle/fluid/tests/unittests/test_dist_base.py b/python/paddle/fluid/tests/unittests/test_dist_base.py index 9c0efe6d905929..e75c4cb30a1b08 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_base.py +++ b/python/paddle/fluid/tests/unittests/test_dist_base.py @@ -139,8 +139,7 @@ def run_trainer(self, args): pass_builder = None if args.batch_merge_repeat > 1: pass_builder = build_stra._finalize_strategy_and_create_passes() - mypass = pass_builder.insert_pass( - len(pass_builder.all_passes()) - 3, "multi_batch_merge_pass") + mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass") mypass.set("num_repeats", args.batch_merge_repeat) if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer": From ab6600e18c85aab255fb067a03ccbc4ec374d9c3 Mon Sep 17 00:00:00 2001 From: chengduo Date: Wed, 3 Apr 2019 20:30:22 -0500 Subject: [PATCH 07/27] Fix bug of FastThreadedExecutor (#16666) test=release/1.4 --- .../fast_threaded_ssa_graph_executor.cc | 16 +++++++--- .../framework/details/fetch_op_handle.cc | 6 ++-- .../details/threaded_ssa_graph_executor.cc | 4 +-- .../test_parallel_executor_fetch_feed.py | 31 ++++++++++++++++--- 4 files changed, 44 insertions(+), 13 deletions(-) diff --git a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc index 297ee92fc3c84c..3e805bd5b48024 100644 --- a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc @@ -56,6 +56,7 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run( fetches.resize(fetch_tensors.size()); std::unordered_map> fetched_vars; std::vector fetch_ops; + std::vector ready_fetch_ops; for (auto &fetch_var_name : fetch_tensors) { for (auto &var_map : graph_->Get(details::kGraphVars)) { @@ -70,8 +71,9 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run( auto &var_name = fetch_tensors[i]; auto fetched_var_it = fetched_vars.find(var_name); PADDLE_ENFORCE(fetched_var_it != fetched_vars.end(), - "Cannot find fetched variable.(Perhaps the 
main_program " - "is not set to ParallelExecutor)"); + "Cannot find fetched variable(%s).(Perhaps the main_program " + "is not set to ParallelExecutor)", + var_name); auto &vars = fetched_var_it->second; @@ -88,7 +90,11 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run( op->AddInput(var); } - (*op_deps)[op] = static_cast(op->NotReadyInputSize()); + int dep = static_cast(op->NotReadyInputSize()); + (*op_deps)[op] = dep; + if (dep == 0) { + ready_fetch_ops.emplace_back(op); + } } size_t num_complete = 0; @@ -97,7 +103,9 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run( for (auto op : bootstrap_ops_) { RunOpAsync(op_deps.get(), op, complete_q); } - + for (auto op : ready_fetch_ops) { + RunOpAsync(op_deps.get(), op, complete_q); + } while (num_complete != op_deps->size()) { size_t num_comp = complete_q->Pop(); if (num_comp == -1UL) { diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index 232d82a5da596a..6c8b8937ebe646 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -13,9 +13,9 @@ // limitations under the License. #include "paddle/fluid/framework/details/fetch_op_handle.h" - #include #include +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace framework { @@ -44,6 +44,7 @@ void FetchOpHandle::WaitAndMergeCPUTensors() const { } void FetchOpHandle::RunImpl() { + platform::RecordEvent record_event(Name()); WaitInputVarGenerated(platform::CPUPlace()); tensors_.resize(inputs_.size()); @@ -62,7 +63,8 @@ void FetchOpHandle::RunImpl() { auto &t = var->Get(); if (platform::is_gpu_place(t.place())) { #ifdef PADDLE_WITH_CUDA - TensorCopySync(t, cpu, &tensors_[i]); + TensorCopy(t, cpu, *dev_ctxes_.at(t.place()), &tensors_[i]); + dev_ctxes_.at(t.place())->Wait(); #endif } else { tensors_[i].ShareDataWith(t); diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index c00932a7bdb170..356ec373b780d9 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -68,7 +68,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( } set.clear(); }; - auto run_all_op = [&](OpHandleBase *op) { RunOp(ready_vars, op); }; + // Clean run context run_op_futures_.clear(); exception_holder_.Clear(); @@ -102,7 +102,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( auto &deps = pending_ops[op]; --deps; if (deps == 0) { - run_all_op(op); + ready_ops.insert(op); } } } diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py index bda8b666dcde22..645b0188d5f459 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py @@ -38,7 +38,15 @@ def Lenet(data, class_dim): class TestFetchAndFeed(unittest.TestCase): - def parallel_exe(self, use_cuda, run_parallel_exe, seed=1): + @classmethod + def setUpClass(cls): + os.environ['CPU_NUM'] = str(4) + + def parallel_exe(self, + use_cuda, + run_parallel_exe, + use_experimental_executor=False, + seed=1): main_program = fluid.Program() startup = fluid.Program() startup.random_seed = seed @@ -63,8 +71,12 @@ def parallel_exe(self, use_cuda, run_parallel_exe, seed=1): build_strategy = fluid.BuildStrategy() build_strategy.enable_inplace = False 
build_strategy.memory_optimize = False + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.use_experimental_executor = use_experimental_executor train_cp = compiler.CompiledProgram(main_program).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) + loss_name=loss.name, + build_strategy=build_strategy, + exec_strategy=exec_strategy) run_parallel_exe(train_cp, exe, use_cuda, data, label, loss) @@ -131,8 +143,7 @@ def get_data(batch_size=8): if batch_id == 2: break - def test_fetch(self): - os.environ['CPU_NUM'] = str(4) + def test_fetch_with_threaded_executor(self): if core.is_compiled_with_cuda(): self.parallel_exe( use_cuda=True, @@ -140,8 +151,18 @@ def test_fetch(self): self.parallel_exe( use_cuda=False, run_parallel_exe=self.run_parallel_exe_with_fetch) + def test_fetch_with_fast_threaded_executor(self): + if core.is_compiled_with_cuda(): + self.parallel_exe( + use_cuda=True, + run_parallel_exe=self.run_parallel_exe_with_fetch, + use_experimental_executor=True) + self.parallel_exe( + use_cuda=False, + run_parallel_exe=self.run_parallel_exe_with_fetch, + use_experimental_executor=True) + def test_feed(self): - os.environ['CPU_NUM'] = str(4) if core.is_compiled_with_cuda(): self.parallel_exe( use_cuda=True, run_parallel_exe=self.run_parallel_exe_with_feed) From 53c6890a3a92f773c8b6cfb067c17e4a292f0ef2 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Wed, 3 Apr 2019 23:32:00 +0800 Subject: [PATCH 08/27] test_analyzer_int8 tests use default pass order test=release/1.4 --- paddle/fluid/inference/api/paddle_pass_builder.cc | 12 +++++++----- .../analyzer_int8_image_classification_tester.cc | 15 ++++----------- paddle/fluid/inference/tests/api/tester_helper.h | 12 ++++++------ 3 files changed, 17 insertions(+), 22 deletions(-) diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc index 87e02a02caebd9..3d72295be4b779 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.cc +++ b/paddle/fluid/inference/api/paddle_pass_builder.cc @@ -86,7 +86,8 @@ const std::vector kAnakinSubgraphPasses({ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) { passes_.assign({ - "infer_clean_graph_pass", // + "infer_clean_graph_pass", // + "runtime_context_cache_pass", // // "identity_scale_op_clean_pass", // "conv_affine_channel_fuse_pass", // "conv_eltwiseadd_affine_channel_fuse_pass", // @@ -96,7 +97,6 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) { "conv_elementwise_add_act_fuse_pass", // "conv_elementwise_add2_act_fuse_pass", // "conv_elementwise_add_fuse_pass", // - "runtime_context_cache_pass", // #endif // "transpose_flatten_concat_fuse_pass", }); @@ -116,7 +116,11 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) { // NOTE the large fusions should be located in the front, so that they will // not be damaged by smaller ones. passes_.assign({ - "infer_clean_graph_pass", // + "infer_clean_graph_pass", // + // TODO(luotao): runtime_context_cache_pass should be located in the + // front, see https://github.com/PaddlePaddle/Paddle/issues/16609, + // will enhance this pass later. 
+ "runtime_context_cache_pass", // "attention_lstm_fuse_pass", // "seqpool_concat_fuse_pass", // "seqconv_eltadd_relu_fuse_pass", // @@ -132,8 +136,6 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) { "conv_bn_fuse_pass", // "conv_eltwiseadd_bn_fuse_pass", // "is_test_pass", // - "identity_scale_op_clean_pass", // - "runtime_context_cache_pass", // }); use_gpu_ = false; } diff --git a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc index ece094717b8076..fbf67d933786e3 100644 --- a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc @@ -23,18 +23,11 @@ namespace analysis { void SetConfig(AnalysisConfig *cfg) { cfg->SetModel(FLAGS_infer_model); - cfg->SetProgFile("__model__"); cfg->DisableGpu(); cfg->SwitchIrOptim(); - cfg->SwitchSpecifyInputNames(false); + cfg->SwitchSpecifyInputNames(); cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads); cfg->EnableMKLDNN(); - cfg->pass_builder()->SetPasses( - {"infer_clean_graph_pass", "mkldnn_placement_pass", - "depthwise_conv_mkldnn_pass", "conv_bn_fuse_pass", - "conv_eltwiseadd_bn_fuse_pass", "conv_bias_mkldnn_fuse_pass", - "conv_elementwise_add_mkldnn_fuse_pass", "conv_relu_mkldnn_fuse_pass", - "fc_fuse_pass", "is_test_pass"}); } template @@ -84,13 +77,13 @@ std::shared_ptr> GetWarmupData( std::to_string(num_images) + " is bigger than all test data size."); PaddleTensor images; - images.name = "input"; + images.name = "image"; images.shape = {num_images, 3, 224, 224}; images.dtype = PaddleDType::FLOAT32; images.data.Resize(sizeof(float) * num_images * 3 * 224 * 224); PaddleTensor labels; - labels.name = "labels"; + labels.name = "label"; labels.shape = {num_images, 1}; labels.dtype = PaddleDType::INT64; labels.data.Resize(sizeof(int64_t) * num_images); @@ -132,7 +125,7 @@ void SetInput(std::vector> *inputs, images_offset_in_file + sizeof(float) * total_images * 3 * 224 * 224; TensorReader image_reader(file, images_offset_in_file, - image_batch_shape, "input"); + image_batch_shape, "image"); TensorReader label_reader(file, labels_offset_in_file, label_batch_shape, "label"); diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h index 9a0dcc722cf009..5cc54ed299c50b 100644 --- a/paddle/fluid/inference/tests/api/tester_helper.h +++ b/paddle/fluid/inference/tests/api/tester_helper.h @@ -316,7 +316,8 @@ void PredictionRun(PaddlePredictor *predictor, int num_threads, int tid) { int num_times = FLAGS_repeat; int iterations = inputs.size(); // process the whole dataset ... - if (FLAGS_iterations > 0 && FLAGS_iterations < inputs.size()) + if (FLAGS_iterations > 0 && + FLAGS_iterations < static_cast(inputs.size())) iterations = FLAGS_iterations; // ... 
unless the number of iterations is set outputs->resize(iterations); @@ -329,14 +330,14 @@ void PredictionRun(PaddlePredictor *predictor, #endif if (!FLAGS_zero_copy) { run_timer.tic(); - for (size_t i = 0; i < iterations; i++) { + for (int i = 0; i < iterations; i++) { for (int j = 0; j < num_times; j++) { predictor->Run(inputs[i], &(*outputs)[i], FLAGS_batch_size); } } elapsed_time = run_timer.toc(); } else { - for (size_t i = 0; i < iterations; i++) { + for (int i = 0; i < iterations; i++) { ConvertPaddleTensorToZeroCopyTensor(predictor, inputs[i]); run_timer.tic(); for (int j = 0; j < num_times; j++) { @@ -366,9 +367,8 @@ void TestOneThreadPrediction( const std::vector> &inputs, std::vector> *outputs, bool use_analysis = true) { auto predictor = CreateTestPredictor(config, use_analysis); - PredictionWarmUp(predictor.get(), inputs, outputs, FLAGS_paddle_num_threads, - 0); - PredictionRun(predictor.get(), inputs, outputs, FLAGS_paddle_num_threads, 0); + PredictionWarmUp(predictor.get(), inputs, outputs, 1, 0); + PredictionRun(predictor.get(), inputs, outputs, 1, 0); } void TestMultiThreadPrediction( From 2b800923f6e50f21ce9a6bd04b40f9e32e058039 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Thu, 4 Apr 2019 21:17:57 +0800 Subject: [PATCH 09/27] fix avx option (#16684) test=release/1.4 --- paddle/fluid/operators/jit/test.cc | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/operators/jit/test.cc b/paddle/fluid/operators/jit/test.cc index d30fa014ed5fba..875d4f864353c1 100644 --- a/paddle/fluid/operators/jit/test.cc +++ b/paddle/fluid/operators/jit/test.cc @@ -991,15 +991,17 @@ TEST(JITKernel_pool, jitpool) { TEST(JITKernel_pool, more) { const auto& kers = jit::KernelPool::Instance().AllKernels(); -#if defined(__APPLE__) || defined(__OSX__) - EXPECT_EQ(kers.size(), 10UL); -#else -#ifdef PADDLE_WITH_MKLML - EXPECT_EQ(kers.size(), 22UL); -#else - EXPECT_EQ(kers.size(), 8UL); + size_t target_num = 8; + +#ifdef __AVX__ + target_num += 2; #endif + +#ifdef PADDLE_WITH_MKLML + target_num += 12; #endif + + EXPECT_EQ(kers.size(), target_num); } TEST(JITKernel_pool, refer) { From 2ebc20ff5da58f2fd6de3fbe5233c0631fd679af Mon Sep 17 00:00:00 2001 From: bingyanghuang <33643817+bingyanghuang@users.noreply.github.com> Date: Fri, 5 Apr 2019 14:42:50 +0800 Subject: [PATCH 10/27] Cherry-pick #16515 INT8v2 readme to Release 1.4 (#16686) --- .../tests/api/int8_mkldnn_quantization.md | 70 +++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 paddle/fluid/inference/tests/api/int8_mkldnn_quantization.md diff --git a/paddle/fluid/inference/tests/api/int8_mkldnn_quantization.md b/paddle/fluid/inference/tests/api/int8_mkldnn_quantization.md new file mode 100644 index 00000000000000..46ca08992f00bf --- /dev/null +++ b/paddle/fluid/inference/tests/api/int8_mkldnn_quantization.md @@ -0,0 +1,70 @@ +# INT8 MKL-DNN quantization + +This document describes how to use Paddle inference Engine to convert the FP32 model to INT8 model on ResNet-50 and MobileNet-V1. We provide the instructions on enabling INT8 MKL-DNN quantization in Paddle inference and show the ResNet-50 and MobileNet-V1 results in accuracy and performance. + +## 0. Install PaddlePaddle +Follow PaddlePaddle [installation instruction](https://github.com/PaddlePaddle/models/tree/develop/fluid/PaddleCV/image_classification#installation) to install PaddlePaddle. If you build PaddlePaddle yourself, please use the following cmake arguments. +``` +cmake .. 
-DWITH_TESTING=ON -WITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_MKL=ON -WITH_SWIG_PY=OFF -DWITH_INFERENCE_API_TEST=ON -DON_INFER=ON + +``` +Note: MKL-DNN and MKL are required. + +## 1. Enable INT8 MKL-DNN quantization +For reference, please examine the code of unit test enclosed in [analyzer_int8_image_classification_tester.cc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc). + +* ### Create Analysis config +INT8 quantization is one of the optimizations in analysis config. More information about analysis config can be found [here](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/advanced_usage/deploy/inference/native_infer_en.md#upgrade-performance-based-on-contribanalysisconfig-prerelease) + +* ### Create quantize config by analysis config +We enable the MKL-DNN quantization procedure by calling an appropriate method from analysis config. Afterwards, all the required quantization parameters (quantization op names, quantization strategies etc.) can be set through quantizer config which is present in the analysis config. It is also necessary to specify a pre-processed warmup dataset and desired batch size. + +```cpp +//Enable MKL-DNN quantization +cfg.EnableMkldnnQuantizer(); + +//use analysis config to call the MKL-DNN quantization config +cfg.mkldnn_quantizer_config()->SetWarmupData(warmup_data); +cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(100); +``` + +## 2. Accuracy and Performance benchmark + +We provide the results of accuracy and performance measured on Intel(R) Xeon(R) Gold 6271 on single core. + + >**I. Top-1 Accuracy on Intel(R) Xeon(R) Gold 6271** + +| Model | Dataset | FP32 Accuracy | INT8 Accuracy | Accuracy Diff | +| :------------: | :------------: | :------------: | :------------: | :------------: | +| ResNet-50 | Full ImageNet Val | 76.63% | 76.48% | 0.15% | +| MobileNet-V1 | Full ImageNet Val | 70.78% | 70.36% | 0.42% | + + >**II. Throughput on Intel(R) Xeon(R) Gold 6271 (batch size 1 on single core)** + +| Model | Dataset | FP32 Throughput | INT8 Throughput | Ratio(INT8/FP32) | +| :------------: | :------------: | :------------: | :------------: | :------------: | +| ResNet-50 | Full ImageNet Val | 13.17 images/s | 49.84 images/s | 3.78 | +| MobileNet-V1 | Full ImageNet Val | 75.49 images/s | 232.38 images/s | 3.07 | + +Notes: +* Measurement of accuracy requires a model which accepts two inputs: data and labels. +* Different sampling batch data may cause slight difference on INT8 top accuracy. +* C-API performance data is better than Python API performance data because of the python overhead. Especially for the small computational model, python overhead will be more obvious. + + +## 3. 
Commands to reproduce the above accuracy and performance benchmark +* #### Full dataset (Single core) + * ##### Download full ImageNet Validation Dataset +```bash +cd /PATH/TO/PADDLE/build +python ../paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py +``` +The converted data binary file is saved by default in ~/.cache/paddle/dataset/int8/download/int8_full_val.bin + * ##### ResNet50 Full dataset benchmark +```bash +./paddle/fluid/inference/tests/api/test_analyzer_int8_resnet50 --infer_model=third_party/inference_demo/int8v2/resnet50/model --infer_data=/path/to/converted/int8_full_val.bin --batch_size=1 --paddle_num_threads=1 +``` + * ##### Mobilenet-v1 Full dataset benchmark +```bash +./paddle/fluid/inference/tests/api/test_analyzer_int8_mobilenet --infer_model=third_party/inference_demo/int8v2/mobilenet/model --infer_data=/path/to/converted/int8_full_val.bin --batch_size=1 --paddle_num_threads=1 +``` From 79643663150e08a4b2b2ca7649b1c821709832e4 Mon Sep 17 00:00:00 2001 From: gongweibao Date: Sun, 7 Apr 2019 15:31:29 +0800 Subject: [PATCH 11/27] fix batchmerge (#16626) From 065ffcce6fbb63fe90581f8d27781f4eb10ab786 Mon Sep 17 00:00:00 2001 From: gongweibao Date: Sun, 7 Apr 2019 15:31:50 +0800 Subject: [PATCH 12/27] fix dgcclipnorm bug test=develop (#16629) --- paddle/fluid/operators/dgc_clip_by_norm_op.h | 27 +++++++++++--------- python/paddle/fluid/optimizer.py | 4 +-- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/operators/dgc_clip_by_norm_op.h b/paddle/fluid/operators/dgc_clip_by_norm_op.h index bd22d16f7a2187..197bf59b2a470e 100644 --- a/paddle/fluid/operators/dgc_clip_by_norm_op.h +++ b/paddle/fluid/operators/dgc_clip_by_norm_op.h @@ -24,18 +24,21 @@ class DGCClipByNormKernel : public ClipByNormKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto rampup_begin_step = context.Attr("rampup_begin_step"); - if (static_cast(rampup_begin_step) >= 0) { - auto current_step_tensor = - context.Input("current_step"); - auto* current_step = current_step_tensor->data(); - - if (static_cast(*current_step) < - static_cast(rampup_begin_step)) { - VLOG(10) << "current_step:" << *current_step - << " < rampup_begin_step:" << rampup_begin_step - << " so does't use dgc_clip_by_norm"; - return; - } + if (static_cast(rampup_begin_step) < 0) { + return; + } + + auto current_step_tensor = context.Input("current_step"); + auto* current_step = current_step_tensor->data(); + + VLOG(10) << "current_step:" << *current_step + << ", rampup_begin_step:" << rampup_begin_step; + + if (static_cast(*current_step) < static_cast(rampup_begin_step)) { + VLOG(10) << "current_step:" << *current_step + << " < rampup_begin_step:" << rampup_begin_step + << " so does't use dgc_clip_by_norm"; + return; } return ClipByNormKernel::Compute(context); diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 79accabe87869c..7e6e37116fe23f 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -832,7 +832,7 @@ def _clip_by_norm(self, x, max_norm, name=None): type=x.type, name=name, dtype=x.dtype, persistable=False) helper.append_op( - type="clip_by_norm", + type="dgc_clip_by_norm", inputs={"X": x, "current_step": self._global_step_var}, attrs={ @@ -845,7 +845,7 @@ def _clip_by_norm(self, x, max_norm, name=None): def _append_clip_norm(self, grad_var, clip_norm): with grad_var.block.program._backward_role_guard(): return self._clip_by_norm( - x=grad_var, max_norm=clip_norm, 
name=grad_var.name + "@DGC") + x=grad_var, max_norm=clip_norm, name=grad_var.name) def _dgc_op(self, param_var, clip_var, grad_var, u_var, v_var, k_var, encoded_var): From 89d09b831900bfb84e3f376da2b61fd64389ca35 Mon Sep 17 00:00:00 2001 From: Jiabin Yang Date: Mon, 8 Apr 2019 14:00:33 +0800 Subject: [PATCH 13/27] Cherry pick 1.4/ptb fix (#16607) * test=develop, ptb_rnn fix op * test=release/1.4, refine code * test=release/1.4, fix ci failed error --- .../fluid/dygraph/layer_object_helper.py | 4 + python/paddle/fluid/dygraph/nn.py | 97 ++++++++++++------- .../unittests/test_imperative_ptb_rnn.py | 14 +-- 3 files changed, 69 insertions(+), 46 deletions(-) diff --git a/python/paddle/fluid/dygraph/layer_object_helper.py b/python/paddle/fluid/dygraph/layer_object_helper.py index c56652e103ce93..f8e607aab8491a 100644 --- a/python/paddle/fluid/dygraph/layer_object_helper.py +++ b/python/paddle/fluid/dygraph/layer_object_helper.py @@ -91,6 +91,10 @@ def iter_inputs_and_params(self, inputs_in, param_attr_in=None): Returns input, param_attr """ + param_attr_in = ParamAttr._to_attr(param_attr_in) + if isinstance(param_attr_in, bool): + raise ValueError('Param_attr should not be False in {}'.format( + self.name)) inputs = inputs_in if (inputs_in is not None) else [] inputs = self._multiple_input(inputs) param_attrs = self._multiple_param_attr(len(inputs), param_attr_in) diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py index 8925381119272d..04da8561a37005 100644 --- a/python/paddle/fluid/dygraph/nn.py +++ b/python/paddle/fluid/dygraph/nn.py @@ -20,7 +20,7 @@ from .. import core from ..layers import utils from . import layers -from ..framework import Variable, OpProtoHolder +from ..framework import Variable, OpProtoHolder, Parameter from ..layers import layer_function_generator from ..param_attr import ParamAttr from ..initializer import Normal, Constant, NumpyArrayInitializer @@ -213,46 +213,69 @@ def __init__(self, self._param_attr = param_attr self._bias_attr = bias_attr self._act = act + self.__w = list() - def _build_once(self, input): - input_shape = input.shape - param_shape = [ - reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:], 1) - ] + [self._size] - self._w = self.create_parameter( - attr=self._param_attr, - shape=param_shape, - dtype=self._dtype, - is_bias=False) + @property + def _w(self, i=0): + return self.__w[i] - if self._bias_attr: - size = list([self._size]) - self._b = self.create_parameter( - attr=self._bias_attr, - shape=size, - dtype=self._dtype, - is_bias=True) - else: - self._b = None + @_w.setter + def _w(self, value, i=0): + assert isinstance(value, Parameter) + self.__w[i] = value - def forward(self, input): - tmp = self._helper.create_variable_for_type_inference(self._dtype) - self._helper.append_op( - type="mul", - inputs={"X": input, - "Y": self._w}, - outputs={"Out": tmp}, - attrs={ - "x_num_col_dims": self._num_flatten_dims, - "y_num_col_dims": 1 - }) + def _build_once(self, input): + i = 0 + for inp, param in self._helper.iter_inputs_and_params(input, + self._param_attr): + input_shape = inp.shape + + param_shape = [ + reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:], + 1) + ] + [self._size] + self.__w.append( + self.add_parameter( + '_w%d' % i, + self.create_parameter( + attr=param, + shape=param_shape, + dtype=self._dtype, + is_bias=False))) + i += 1 + + size = list([self._size]) + self._b = self.create_parameter( + attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True) - pre_bias = 
self._helper.create_variable_for_type_inference(self._dtype) - self._helper.append_op( - type="sum", - inputs={"X": [tmp]}, - outputs={"Out": pre_bias}, - attrs={"use_mkldnn": False}) + def forward(self, input): + mul_results = list() + i = 0 + for inp, param in self._helper.iter_inputs_and_params(input, + self._param_attr): + tmp = self._helper.create_variable_for_type_inference(self._dtype) + self._helper.append_op( + type="mul", + inputs={"X": inp, + "Y": self.__w[i]}, + outputs={"Out": tmp}, + attrs={ + "x_num_col_dims": self._num_flatten_dims, + "y_num_col_dims": 1 + }) + i += 1 + mul_results.append(tmp) + + if len(mul_results) == 1: + pre_bias = mul_results[0] + else: + pre_bias = self._helper.create_variable_for_type_inference( + self._dtype) + self._helper.append_op( + type="sum", + inputs={"X": mul_results}, + outputs={"Out": pre_bias}, + attrs={"use_mkldnn": False}) if self._b: pre_activation = self._helper.create_variable_for_type_inference( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index 998c675815ece9..552eb019500b1e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -200,8 +200,6 @@ def forward(self, input, label, init_hidden, init_cell): rnn_out, shape=[-1, self.num_steps, self.hidden_size]) projection = fluid.layers.matmul(rnn_out, self.softmax_weight) projection = fluid.layers.elementwise_add(projection, self.softmax_bias) - projection = fluid.layers.reshape( - projection, shape=[-1, self.vocab_size]) projection = fluid.layers.reshape( projection, shape=[-1, self.vocab_size]) loss = fluid.layers.softmax_with_cross_entropy( @@ -223,6 +221,7 @@ def test_ptb_rnn_cpu_float32(self): num_steps = 3 init_scale = 0.1 batch_size = 4 + batch_num = 200 with fluid.dygraph.guard(): fluid.default_startup_program().random_seed = seed @@ -242,7 +241,6 @@ def test_ptb_rnn_cpu_float32(self): dy_loss = None last_hidden = None last_cell = None - batch_num = 200 for i in range(batch_num): x_data = np.arange(12).reshape(4, 3).astype('int64') @@ -282,7 +280,8 @@ def test_ptb_rnn_cpu_float32(self): exe = fluid.Executor(fluid.CPUPlace()) sgd = SGDOptimizer(learning_rate=1e-3) - x = fluid.layers.data(name="x", shape=[-1, 3, 1], dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps, 1], dtype='int64') y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') init_hidden = fluid.layers.data( name="init_hidden", shape=[1], dtype='float32') @@ -332,7 +331,6 @@ def test_ptb_rnn_cpu_float32(self): for k in range(3, len(out)): static_param_updated[static_param_name_list[k - 3]] = out[k] - self.assertTrue(np.allclose(static_loss_value, dy_loss._numpy())) self.assertTrue(np.allclose(static_last_cell_value, last_cell._numpy())) self.assertTrue( @@ -340,13 +338,11 @@ def test_ptb_rnn_cpu_float32(self): for key, value in six.iteritems(static_param_init): # print("static_init name: {}, value {}".format(key, value)) # print("dy_init name: {}, value {}".format(key, dy_param_init[key])) - self.assertTrue(np.allclose(value, dy_param_init[key], atol=1e-5)) + self.assertTrue(np.allclose(value, dy_param_init[key])) for key, value in six.iteritems(static_param_updated): # print("static name: {}, value {}".format(key, value)) # print("dy name: {}, value {}".format(key, dy_param_updated[key])) - self.assertTrue( - np.allclose( - value, dy_param_updated[key], atol=1e-5)) + 
self.assertTrue(np.allclose(value, dy_param_updated[key])) if __name__ == '__main__': From 29f3441615872eb724cb708ffe5fd42b1befe0c0 Mon Sep 17 00:00:00 2001 From: Jiabin Yang Date: Mon, 8 Apr 2019 14:39:27 +0800 Subject: [PATCH 14/27] Cherry pick/fix transformer (#16620) * Imperative deep-first backward process (#16605) * Fix bug of gradient interface * shrink transformer * Right transformer * Change from width-first backward to deep-first backward process test=develop * Reverse iterator op's input test=develop * Polish code * Change the iteration direction in ingrads' map slots test=develop * Polish code test=develop * test=develop, cherry-pick fix for transformer in dygraph * test=develop, fix transformer in dygraph / --- paddle/fluid/imperative/layer.cc | 8 ++-- python/paddle/fluid/framework.py | 3 +- .../unittests/test_imperative_transformer.py | 42 ++++++------------- 3 files changed, 18 insertions(+), 35 deletions(-) diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc index 036d2a50a4a7ea..bc03285a4c5fe6 100644 --- a/paddle/fluid/imperative/layer.cc +++ b/paddle/fluid/imperative/layer.cc @@ -122,14 +122,14 @@ class Autograd { std::map> input_grads = ready_op->ApplyGrad(); - for (auto it : input_grads) { - const std::vector& ingrads = it.second; + for (auto it = input_grads.rbegin(); it != input_grads.rend(); ++it) { + const std::vector& ingrads = it->second; for (size_t i = 0; i < ingrads.size(); ++i) { if (!ingrads[i]) continue; - if (ready_op->input_vars_[it.first][i]->IsStopGradient()) { + if (ready_op->input_vars_[it->first][i]->IsStopGradient()) { continue; } - OpBase* pre_op = ready_op->pre_ops_[it.first][i]; + OpBase* pre_op = ready_op->pre_ops_[it->first][i]; if (!pre_op) continue; dep_counts[pre_op] -= 1; diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 0f5a8f51463a63..7953d98bcbb826 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -493,7 +493,8 @@ def _backward(self): self._ivar._run_backward() def _gradient(self): - return np.array(self._ivar._grad_value()) + new_ivar = self._ivar._grad_ivar()._copy_to(core.CPUPlace(), True) + return np.array(new_ivar.value().get_tensor()) def _clear_gradient(self): self._ivar._clear_gradient() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py index 3bdf3349730b0c..df097360c63ca6 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py @@ -302,8 +302,11 @@ def make_all_inputs(input_fields): # if we run sync mode sync = False -# how many batches we use -batch_num = 2 +if not core.is_compiled_with_cuda(): + # how many batches we use + batch_num = 50 +else: + batch_num = 5 np.random.seed = 1 src_word_np = np.random.randint( @@ -335,24 +338,6 @@ def make_all_inputs(input_fields): dtype='int64') lbl_weight_np = np.random.randn(batch_size * seq_len, 1).astype('float32') -# np.random.seed = 1 -# src_word_np = np.arange(0, 10).reshape([batch_size, seq_len, 1]).astype('int64') -# src_pos_np = np.random.randint( -# 1, seq_len, size=(batch_size, seq_len, 1), dtype='int64') -# src_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, -# seq_len, seq_len).astype('float32') -# -# trg_word_np = np.arange(0, 10).reshape([batch_size, seq_len, 1]).astype('int64') -# trg_pos_np = np.random.randint( -# 1, seq_len, size=(batch_size, seq_len, 1), 
dtype='int64') -# trg_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, -# seq_len, seq_len).astype('float32') -# trg_src_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, -# seq_len, seq_len).astype('float32') -# -# lbl_word_np = np.arange(0, 10).reshape([batch_size * seq_len, 1]).astype('int64') -# lbl_weight_np = np.random.randn(batch_size * seq_len, 1).astype('float32') -# pos_inp1 = position_encoding_init(ModelHyperParams.max_length, ModelHyperParams.d_model) pos_inp2 = position_encoding_init(ModelHyperParams.max_length, @@ -739,7 +724,7 @@ def forward(self, dec_input, enc_output, slf_attn_bias, dec_enc_attn_bias): enc_attn_output_pp = self._multihead_attention_layer2( pre_process_rlt2, enc_output, enc_output, dec_enc_attn_bias) enc_attn_output = self._post_process_layer2( - slf_attn_output, enc_attn_output_pp, self._postprocess_cmd, + slf_attn_output_pp, enc_attn_output_pp, self._postprocess_cmd, self._prepostprcess_dropout) pre_process_rlt3 = self._pre_process_layer3(None, enc_attn_output, self._preprocess_cmd, @@ -1076,20 +1061,17 @@ def test_transformer_float32(self): 4]] = out[k] self.assertTrue( - np.allclose(static_avg_cost_value, dy_avg_cost._numpy())) + np.array_equal(static_avg_cost_value, dy_avg_cost._numpy())) self.assertTrue( - np.allclose(static_sum_cost_value, dy_sum_cost._numpy())) + np.array_equal(static_sum_cost_value, dy_sum_cost._numpy())) self.assertTrue( - np.allclose( - static_predict_value, dy_predict._numpy(), atol=1e-5)) + np.array_equal(static_predict_value, dy_predict._numpy())) self.assertTrue( - np.allclose(static_token_num_value, dy_token_num._numpy())) + np.array_equal(static_token_num_value, dy_token_num._numpy())) for key, value in six.iteritems(static_param_init): - self.assertTrue(np.allclose(value, dy_param_init[key])) + self.assertTrue(np.array_equal(value, dy_param_init[key])) for key, value in six.iteritems(static_param_updated): - self.assertTrue( - np.allclose( - value, dy_param_updated[key], atol=1e-4)) + self.assertTrue(np.array_equal(value, dy_param_updated[key])) if __name__ == '__main__': From 7e560558c2707d7f0381cc64d96db0f9200f35f3 Mon Sep 17 00:00:00 2001 From: Qiyang Min Date: Mon, 8 Apr 2019 21:11:07 +0800 Subject: [PATCH 15/27] Imperative fix bugs (cherry pick to 1.4) (#16680) * Fix imperative bugs test=release/1.4 * Polish code test=release/1.4 --- .../paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index 552eb019500b1e..eb8a82430f0620 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -16,6 +16,7 @@ import unittest import paddle.fluid as fluid +from paddle.fluid import core from paddle.fluid.dygraph.nn import Embedding import paddle.fluid.framework as framework from paddle.fluid.optimizer import SGDOptimizer @@ -278,7 +279,8 @@ def test_ptb_rnn_cpu_float32(self): num_steps=num_steps, init_scale=init_scale) - exe = fluid.Executor(fluid.CPUPlace()) + exe = fluid.Executor(fluid.CPUPlace( + ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) sgd = SGDOptimizer(learning_rate=1e-3) x = fluid.layers.data( name="x", shape=[-1, num_steps, 1], dtype='int64') From 266cdf7d016a9ec7581a4174d74f0b35d849e180 Mon Sep 17 00:00:00 2001 From: gongweibao Date: Tue, 9 Apr 2019 11:07:35 +0800 Subject: 
[PATCH 16/27] Fix dgc bug. (#16709) --- paddle/fluid/framework/details/all_reduce_op_handle.cc | 6 +++++- python/paddle/fluid/optimizer.py | 2 +- python/paddle/fluid/parallel_executor.py | 10 ++++++++-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc index ed75b48090b27a..61276efedeeca7 100644 --- a/paddle/fluid/framework/details/all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc @@ -53,6 +53,10 @@ AllReduceOpHandle::AllReduceOpHandle(ir::Node *node, this->SetDeviceContext(p, nccl_ctxs_->DevCtx(p)); } } + // TODO(gongwb) :polish them! + if (is_encoded) { + VLOG(1) << "Use dgc allreduce mode"; + } } #else AllReduceOpHandle::AllReduceOpHandle(ir::Node *node, @@ -86,7 +90,7 @@ void AllReduceOpHandle::RunImplEncoded() { paddle::framework::GradOriginalVarName(in_var_handles[i]->name()); auto encode_var_name = original_name + g_dgc_encoded; auto *in_var = local_scope->FindVar(encode_var_name); - PADDLE_ENFORCE_NOT_NULL(in_var); + PADDLE_ENFORCE_NOT_NULL(in_var, "%s should not be null", encode_var_name); auto &in = in_var->Get(); ins.emplace_back(&in); diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 7e6e37116fe23f..94bc3d0854d5b1 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -752,7 +752,7 @@ def _append_dgc_ops(self, param_and_grads): force_cpu=True) for param_var, grad_var in param_and_grads: - var_numel = reduce(lambda x, y: x * y, param_var.shape) + var_numel = abs(reduce(lambda x, y: x * y, param_var.shape)) if var_numel < 16384 or \ param_var.type == core.VarDesc.VarType.SELECTED_ROWS or \ grad_var.type == core.VarDesc.VarType.SELECTED_ROWS or \ diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index 6b88e7a99fd78f..1a91dafb8a5048 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -104,10 +104,11 @@ def __init__(self, self._scope = scope if scope is not None else executor.global_scope() if main_program is not None and main_program._enable_dgc: + assert num_trainers > 1, "dgc is not useful for single trainer training." assert build_strategy.reduce_strategy == BuildStrategy.ReduceStrategy.AllReduce assert num_trainers * len( - self._places) > 1, "dgc is not useful for single card training" - assert use_cuda + self._places) > 1, "dgc is not useful for single card training." + assert use_cuda, "dgc only used when cuda is used." main_program = main_program if main_program is not None \ else framework.default_main_program() @@ -123,6 +124,11 @@ def __init__(self, exec_strategy=exec_strategy, share_vars_from=share_vars_from._compiled_program if share_vars_from else None) + + # FIXME(gongwb): I will move dgc from dist mode to allreduce mode in next pr. 
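For reference, a minimal sketch of a configuration that satisfies the assertions added above — CUDA enabled, the AllReduce reduce strategy, and more than one trainer/card in total. This is an illustration only, not part of the patch: the loss variable, learning rate, ramp-up step and sparsity schedule are placeholder values, and the optimizer parameters follow the `DGCMomentumOptimizer` docstring quoted later in this series.

```python
import paddle.fluid as fluid

def build_dgc_executor(loss):
    # DGC optimizer: gradients are sent sparsely after rampup_begin_step;
    # the numeric values below are placeholders, not recommendations.
    optimizer = fluid.optimizer.DGCMomentumOptimizer(
        learning_rate=0.001,
        momentum=0.9,
        rampup_begin_step=1252,
        sparsity=[0.75, 0.9375, 0.984375, 0.996, 0.999])
    optimizer.minimize(loss)

    # DGC is only accepted with the AllReduce reduce strategy ...
    build_strategy = fluid.BuildStrategy()
    build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

    # ... on CUDA, and with more than one trainer/card overall.
    return fluid.ParallelExecutor(
        use_cuda=True,
        loss_name=loss.name,
        build_strategy=build_strategy,
        num_trainers=2,
        trainer_id=0)
```

When DGC is enabled on the program, the hunk below additionally marks the compiled program's build strategy as distributed.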
+ if main_program._enable_dgc: + self._compiled_program._build_strategy.is_distribution = True + self._place = core.CUDAPlace(0) if use_cuda else core.CPUPlace() self._exe = executor.Executor(self._place) self._compiled_program._compile(place=self._place, scope=self._scope) From a0af374f0185c194ae6ad847857a5bf3f783ac73 Mon Sep 17 00:00:00 2001 From: baojun <32073718+baojun-nervana@users.noreply.github.com> Date: Mon, 8 Apr 2019 21:15:23 -0700 Subject: [PATCH 17/27] fix training validation test=release/1.4 (#16716) --- paddle/fluid/operators/ngraph/ngraph_engine.cc | 8 ++++++-- paddle/fluid/operators/ngraph/ngraph_engine.h | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/ngraph/ngraph_engine.cc b/paddle/fluid/operators/ngraph/ngraph_engine.cc index 9f73bbc1fdc727..5ef385d2fcbaf0 100644 --- a/paddle/fluid/operators/ngraph/ngraph_engine.cc +++ b/paddle/fluid/operators/ngraph/ngraph_engine.cc @@ -75,6 +75,7 @@ std::vector NgraphEngine::feed_vars = {}; std::vector NgraphEngine::fetch_vars = {}; framework::Variable* NgraphEngine::pre_var_ptr = nullptr; const framework::BlockDesc* NgraphEngine::p_bdesc = nullptr; +bool NgraphEngine::is_training = false; std::unordered_map NgraphEngine::engine_cache = {}; std::unordered_map> NgraphOpIntervals( int size = ops->size(); int left = 0; while (left < size && ops->at(left)->Type() != framework::kFeedOpType && + ops->at(left)->Type() != "read" && ops->at(left)->Type() != framework::kFetchOpType) { ++left; } - while (left < size && ops->at(left)->Type() == framework::kFeedOpType) { + while (left < size && (ops->at(left)->Type() == framework::kFeedOpType || + ops->at(left)->Type() == "read")) { for (auto& var_name_item : ops->at(left)->Outputs()) { for (auto& var_name : var_name_item.second) { NgraphEngine::feed_vars.emplace_back(var_name); @@ -270,6 +273,7 @@ void NgraphEngine::Prepare(const std::vector& interval) { for (auto op_desc : ops_desc) { if (op_desc->Type().find("_grad") != std::string::npos) { + is_training = true; this->is_test_ = false; break; } @@ -590,7 +594,7 @@ void NgraphEngine::Run(const framework::Scope& scope, } bool is_persistable = (p_persistables->find(vi) != p_persistables->end()) ? 
true : false; - if (is_test && is_persistable) { + if (!is_training && is_test && is_persistable) { ti->set_stale(false); } (*p_t_in).emplace_back(ti); diff --git a/paddle/fluid/operators/ngraph/ngraph_engine.h b/paddle/fluid/operators/ngraph/ngraph_engine.h index b6532519e947bc..19400ac5b0ecd9 100644 --- a/paddle/fluid/operators/ngraph/ngraph_engine.h +++ b/paddle/fluid/operators/ngraph/ngraph_engine.h @@ -57,6 +57,7 @@ class NgraphEngine { void Run(const framework::Scope& scope, const platform::Place& place) const; + static bool is_training; static const framework::BlockDesc* p_bdesc; static std::vector feed_vars, fetch_vars; From 44f50cf4e56d5d2afbb3c99e34f6d4b2929346eb Mon Sep 17 00:00:00 2001 From: Jiabin Yang Date: Tue, 9 Apr 2019 12:16:59 +0800 Subject: [PATCH 18/27] test=release/1.4, fix test_impertative_transformer (#16708) --- .../fluid/tests/unittests/test_imperative_transformer.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py index df097360c63ca6..ef9d3ffca2dfce 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py @@ -302,11 +302,7 @@ def make_all_inputs(input_fields): # if we run sync mode sync = False -if not core.is_compiled_with_cuda(): - # how many batches we use - batch_num = 50 -else: - batch_num = 5 +batch_num = 5 np.random.seed = 1 src_word_np = np.random.randint( From c7cca0a6b08f8471c2c9c1847722f541e62c5208 Mon Sep 17 00:00:00 2001 From: minqiyang Date: Tue, 9 Apr 2019 20:07:51 +0800 Subject: [PATCH 19/27] Fix auto growth bug of optimizer in dygraph mode test=release/1.4 --- python/paddle/fluid/optimizer.py | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 79accabe87869c..f3e35fdcab5072 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -275,15 +275,26 @@ def _create_optimization_pass(self, parameters_and_grads): self._create_global_learning_rate() optimize_ops = [] - for param_and_grad in parameters_and_grads: - if param_and_grad[1] is None: - continue - with param_and_grad[0].block.program._optimized_guard( - param_and_grad), name_scope("optimizer"): - if param_and_grad[0].trainable is True: - optimize_op = self._append_optimize_op(global_block, - param_and_grad) - optimize_ops.append(optimize_op) + if framework._in_dygraph_mode(): + for param_and_grad in parameters_and_grads: + if param_and_grad[1] is None: + continue + with param_and_grad[0].block.program._optimized_guard( + param_and_grad): + if param_and_grad[0].trainable is True: + optimize_op = self._append_optimize_op(global_block, + param_and_grad) + optimize_ops.append(optimize_op) + else: + for param_and_grad in parameters_and_grads: + if param_and_grad[1] is None: + continue + with param_and_grad[0].block.program._optimized_guard( + param_and_grad), name_scope("optimizer"): + if param_and_grad[0].trainable is True: + optimize_op = self._append_optimize_op(global_block, + param_and_grad) + optimize_ops.append(optimize_op) # Get custom finish ops for subclasses # FIXME: Need to fix this once we figure out how to handle dependencies From cb9c59bdebd2dbdf7f7cf22781548734deb5e383 Mon Sep 17 00:00:00 2001 From: liuwei1031 <46661762+liuwei1031@users.noreply.github.com> Date: Wed, 10 Apr 2019 10:33:34 
+0800 Subject: [PATCH 20/27] cherry-pick PR 16547,16736,16739 test=release/1.4 (#16748) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix the bug of reusing different types of variables in memory_optimiz… (#16547) * fix the bug of reusing different types of variables in memory_optimize_pass, test=develop * disable SELECTED_ROWS AND LOD_TENSOR_ARRAY reusage, test=develop * only use the latest version variable for inplace strategy (#16736) * bug-fix, test=develop * tweak code, test=develop * cherry-pick PR 16547,16736,16739 test=release/1.4 --- .../framework/details/inplace_op_pass.cc | 9 ++++ .../details/memory_optimize_helper.cc | 43 ++++++++----------- .../details/memory_optimize_helper.h | 7 +-- 3 files changed, 30 insertions(+), 29 deletions(-) diff --git a/paddle/fluid/framework/details/inplace_op_pass.cc b/paddle/fluid/framework/details/inplace_op_pass.cc index 79150f719e379c..84c9e4a379a5e0 100644 --- a/paddle/fluid/framework/details/inplace_op_pass.cc +++ b/paddle/fluid/framework/details/inplace_op_pass.cc @@ -305,6 +305,12 @@ void InplacePass::TryInplaceOpInputOutput(ir::Node* op, VLOG(4) << "Try to inplace " << in_var_name << " with " << out_var_name; + if (var_nodes_[in_var_name].back() != in_node) { + VLOG(4) << "SKIP since " << in_var_name + << " is also used as output by other ops"; + continue; + } + bool can_replace = true; if (in_var_name == out_var_name) { can_replace = false; @@ -527,6 +533,9 @@ void GraphView::Build(ir::Graph* g) { }; for (auto& node : g->Nodes()) { if (!node->IsOp()) continue; + // avoid optimize the variable used in sub-blocks + if (OpHasSubBlock(node->Op())) update_skip_set(node); + if (node->Name() == "send") update_skip_set(node); if (node->Name() == "recv") update_skip_set(node); if (node->Name() == "prefetch") update_skip_set(node); diff --git a/paddle/fluid/framework/details/memory_optimize_helper.cc b/paddle/fluid/framework/details/memory_optimize_helper.cc index 894d7dad2e6236..1af57dc4087d2f 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper.cc @@ -131,16 +131,7 @@ size_t NodeSize(const VarDesc& node) { return type_size * std::abs(size); } -size_t NodeSize(ir::Node* n) { - VarDesc* desc = nullptr; - // some op do not have block pointer - if (n->inputs[0]->Op() != nullptr) { - desc = FindVarDescInBlock(n); - } else { - desc = n->Var(); - } - return NodeSize(*desc); -} +size_t NodeSize(ir::Node* n) { return NodeSize(*(n->Var())); } std::string DebugStringImpl(VarDesc* var) { std::stringstream ss; @@ -163,24 +154,22 @@ std::string DebugStringImpl(VarDesc* var) { } std::string DebugString(ir::Node* var) { - return DebugStringImpl(FindVarDescInBlock(var)); + return DebugStringImpl(GetVarDesc(var)); } // NOTE(dzh): based ir node, if a large node has been reused // by a small size node, then next time it appear in pool, it will // have the small size. Find the original node shap from blockdesc. 
-VarDesc* FindVarDescInBlock(ir::Node* n) { +VarDesc* GetVarDesc(ir::Node* n) { PADDLE_ENFORCE(n->IsVar() && !n->IsCtrlVar() && n->inputs.size() == 1); - BlockDesc* block = n->inputs[0]->Op()->Block(); - PADDLE_ENFORCE(block->HasVar(n->Name()), - string::Sprintf("Block do not has var %s", n->Name())); - return block->FindVar(n->Name()); + return n->Var(); } struct NodeComparator { bool operator()(ir::Node* lhs, ir::Node* rhs) const { - auto* lhs_desc = FindVarDescInBlock(lhs); - auto* rhs_desc = FindVarDescInBlock(rhs); + if (lhs->Var()->GetType() != rhs->Var()->GetType()) return false; + auto* lhs_desc = GetVarDesc(lhs); + auto* rhs_desc = GetVarDesc(rhs); // match data type if (lhs_desc->GetDataType() != rhs_desc->GetDataType()) { return false; @@ -204,7 +193,7 @@ void OrderedSet::Insert(ir::Node* var) { return; } - auto* var_desc = FindVarDescInBlock(var); + auto* var_desc = var->Var(); auto var_shape = var_desc->GetShape(); int batch_size = static_cast(var_shape[0]); @@ -212,7 +201,7 @@ void OrderedSet::Insert(ir::Node* var) { Iter it = nodes_.begin(); while (it != nodes_.end()) { auto& prev = it->front(); - auto* cache_desc = FindVarDescInBlock(prev); + auto* cache_desc = GetVarDesc(prev); int cache_batch_size = cache_desc->GetShape()[0]; if ((cache_batch_size == -1 && batch_size == -1) || (cache_batch_size != -1 && batch_size != -1)) { @@ -336,10 +325,16 @@ int MinChunkSize() { bool NodeCanReused(const VarDesc& node) { auto type = node.GetType(); // only these types holds bulk of gpu memory - if (!(type == proto::VarType::LOD_TENSOR || - type == proto::VarType::LOD_TENSOR_ARRAY)) { - return false; - } + // FIXME(liuwei1031) did not find good ways to test SELECTED_ROWS and + // LOD_TENSOR_ARRAY re-use logic, + // disable them in version 1.4 + // if (!(type == proto::VarType::LOD_TENSOR || + // type == proto::VarType::SELECTED_ROWS || + // type == proto::VarType::LOD_TENSOR_ARRAY)) { + // return false; + // } + if (type != proto::VarType::LOD_TENSOR) return false; + // persistable variable is parameter if (node.Persistable()) { return false; diff --git a/paddle/fluid/framework/details/memory_optimize_helper.h b/paddle/fluid/framework/details/memory_optimize_helper.h index b5348cc66eaa44..65c7017d2d4629 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.h +++ b/paddle/fluid/framework/details/memory_optimize_helper.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include "paddle/fluid/framework/data_type.h" @@ -140,11 +141,7 @@ size_t NodeSize(const VarDesc&); std::string DebugString(ir::Node* var); -// NOTE(dzhwinter) -// after node reuse, the replaced node shape is -// different with its VarDesc. So need to find the -// correct VarDesc in Block. 
-VarDesc* FindVarDescInBlock(ir::Node* n); +VarDesc* GetVarDesc(ir::Node* n); static inline bool IsSameDesc(OpDesc* op1, OpDesc* op2) { return op1->Type() == op2->Type() && op1->Inputs() == op2->Inputs() && From b07584dcf3f5bdaf47b408738978397a27ac511d Mon Sep 17 00:00:00 2001 From: Jiabin Yang Date: Wed, 10 Apr 2019 13:34:38 +0800 Subject: [PATCH 21/27] test=release/1.4, refine test_imperative_transformer (#16737) --- .../unittests/test_imperative_transformer.py | 60 +++++++++++-------- 1 file changed, 36 insertions(+), 24 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py index ef9d3ffca2dfce..6f87051dc4f1e5 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py @@ -116,7 +116,7 @@ class ModelHyperParams(object): # to process after each sub-layer postprocess_cmd = "da" # dropout + residual connection # random seed used in dropout for CE. - dropout_seed = 1 + dropout_seed = None # the flag indicating whether to share embedding and softmax weights. # vocabularies in source and target should be same for weight sharing. weight_sharing = True @@ -166,15 +166,21 @@ def create_data(is_static=False): ] else: enc_inputs = [ - to_variable(src_word_np), to_variable(src_pos_np), - to_variable(src_slf_attn_bias_np) + to_variable( + src_word_np, name='src_word'), to_variable( + src_pos_np, name='src_pos'), to_variable( + src_slf_attn_bias_np, name='src_slf_attn_bias') ] dec_inputs = [ - to_variable(trg_word_np), to_variable(trg_pos_np), - to_variable(trg_slf_attn_bias_np), to_variable(trg_src_attn_bias_np) + to_variable( + trg_word_np, name='trg_word'), to_variable( + trg_pos_np, name='trg_pos'), to_variable( + trg_slf_attn_bias_np, name='trg_slf_attn_bias'), + to_variable( + trg_src_attn_bias_np, name='trg_src_attn_bias') ] - label = to_variable(lbl_word_np) - weight = to_variable(lbl_weight_np) + label = to_variable(lbl_word_np, name='lbl_word') + weight = to_variable(lbl_weight_np, name='lbl_weight') return enc_inputs, dec_inputs, label, weight @@ -211,7 +217,7 @@ def make_all_inputs(input_fields): # The placeholder for batch_size in compile time. Must be -1 currently to be # consistent with some ops' infer-shape output in compile time, such as the # sequence_expand op used in beamsearch decoder. -batch_size = 32 +batch_size = -1 # The placeholder for squence length in compile time. seq_len = ModelHyperParams.max_length # Here list the data shapes and data types of all inputs. 
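The next hunk switches the test's compile-time batch size to -1 and bases the generated data on `TrainTaskConfig.batch_size`. As a small illustration of that convention (a sketch only — the layer name, vocabulary bound and sizes here are made up, not taken from the test):

```python
import numpy as np
import paddle.fluid as fluid

seq_len = 4
concrete_batch_size = 32  # e.g. TrainTaskConfig.batch_size in the test

# Compile time: the batch dimension is declared as -1 (unknown), so the same
# graph accepts any batch size at run time.
src_word = fluid.layers.data(
    name='src_word', shape=[-1, seq_len, 1], dtype='int64',
    append_batch_size=False)

# Run time: the fed numpy array carries a concrete batch size, which matches
# the -1 dimension declared above.
src_word_np = np.random.randint(
    1, 100, size=(concrete_batch_size, seq_len, 1), dtype='int64')
```

Declaring -1 keeps the declared shape consistent with ops whose compile-time infer-shape cannot know the batch size, which is the reason given in the comment above.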
@@ -304,35 +310,40 @@ def make_all_inputs(input_fields): batch_num = 5 -np.random.seed = 1 +np.random.seed = 90 src_word_np = np.random.randint( 1, ModelHyperParams.src_vocab_size - 1, - size=(batch_size, seq_len, 1), + size=(TrainTaskConfig.batch_size, seq_len, 1), dtype='int64') src_pos_np = np.random.randint( - 1, seq_len, size=(batch_size, seq_len, 1), dtype='int64') -src_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, - seq_len, seq_len).astype('float32') + 1, seq_len, size=(TrainTaskConfig.batch_size, seq_len, 1), dtype='int64') +src_slf_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size, + ModelHyperParams.n_head, seq_len, + seq_len).astype('float32') trg_word_np = np.random.randint( 1, ModelHyperParams.src_vocab_size - 1, - size=(batch_size, seq_len, 1), + size=(TrainTaskConfig.batch_size, seq_len, 1), dtype='int64') trg_pos_np = np.random.randint( - 1, seq_len, size=(batch_size, seq_len, 1), dtype='int64') -trg_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, - seq_len, seq_len).astype('float32') -trg_src_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, - seq_len, seq_len).astype('float32') + 1, seq_len, size=(TrainTaskConfig.batch_size, seq_len, 1), dtype='int64') +trg_slf_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size, + ModelHyperParams.n_head, seq_len, + seq_len).astype('float32') +trg_src_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size, + ModelHyperParams.n_head, seq_len, + seq_len).astype('float32') lbl_word_np = np.random.randint( 1, ModelHyperParams.src_vocab_size - 1, - size=(batch_size * seq_len, 1), + size=(TrainTaskConfig.batch_size * seq_len, 1), dtype='int64') -lbl_weight_np = np.random.randn(batch_size * seq_len, 1).astype('float32') + +lbl_weight_np = np.random.randn(TrainTaskConfig.batch_size * seq_len, + 1).astype('float32') pos_inp1 = position_encoding_init(ModelHyperParams.max_length, ModelHyperParams.d_model) @@ -447,7 +458,7 @@ def forward(self, queries, keys, values, attn_bias): x=v, shape=[0, 0, self._n_head, self._d_value], inplace=False) transpose_v = fluid.layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3]) - #scale dot product attention + # scale dot product attention product = fluid.layers.matmul( x=transpose_q, y=transpose_k, @@ -971,6 +982,7 @@ def test_transformer_float32(self): enc_inputs, dec_inputs, label, weights = create_data() dy_sum_cost, dy_avg_cost, dy_predict, dy_token_num = transformer( enc_inputs, dec_inputs, label, weights) + if i == 0: for param in transformer.parameters(): dy_param_init[param.name] = param._numpy() @@ -978,6 +990,7 @@ def test_transformer_float32(self): dy_avg_cost._backward() optimizer.minimize(dy_avg_cost) transformer.clear_gradients() + if i == batch_num - 1: for param in transformer.parameters(): dy_param_updated[param.name] = param._numpy() @@ -1024,7 +1037,6 @@ def test_transformer_float32(self): static_param_name_list = list() static_sum_cost, static_avg_cost, static_predict, static_token_num = transformer( enc_inputs, dec_inputs, label, weights) - optimizer.minimize(static_avg_cost) for param in transformer.parameters(): static_param_name_list.append(param.name) @@ -1042,8 +1054,8 @@ def test_transformer_float32(self): static_sum_cost, static_avg_cost, static_predict, static_token_num ] - fetch_list.extend(static_param_name_list) + fetch_list.extend(static_param_name_list) out = exe.run(fluid.default_main_program(), feed=feed_dict, fetch_list=fetch_list) From df339c086d07b76750645540e2abbc2055f25b11 Mon Sep 17 00:00:00 
2001 From: gongweibao Date: Wed, 10 Apr 2019 14:09:54 +0800 Subject: [PATCH 22/27] Fix mistakes in dgc document. (#16731) --- python/paddle/fluid/optimizer.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 94bc3d0854d5b1..3a4204cbb65768 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -628,16 +628,16 @@ class DGCMomentumOptimizer(MomentumOptimizer): Original paper is https://arxiv.org/abs/1712.01887 - DGC reduce the communication bandwidth by sending only the important gradients (sparse update):\ + DGC reduces the communication bandwidth by sending only the important gradients (sparse update):\ only gradients larger than a threshold are transmitted. - To avoid losing information, DGC accumulate the rest of the gradients locally. + To avoid losing information, DGC accumulates the rest of the gradients locally. Eventually, these gradients become large enough to be transmitted. - Thus, DGC send the large gradients immediately but eventually send all of the gradients over time. + Thus, DGC sends the large gradients immediately but eventually send all of the gradients over time. - To ensure no loss of accuracy, DGC employs momentum correc-tionandlocal gradient clipping on top of the gradient sparsification to maintain model performance. + To ensure no loss of accuracy, DGC employs momentum correction and local gradient clipping on top of the gradient sparsification to maintain model performance. DGC also uses momentum factor masking and warmup training to overcome the staleness problem caused by reduced communication. @@ -652,7 +652,7 @@ class DGCMomentumOptimizer(MomentumOptimizer): learning_rate (float|Variable): the learning rate used to update parameters. \ Can be a float value or a Variable with one float value as data element. momentum (float): Momentum factor. - rampup_begin_step (int): The begining step from which gradient compression is implemented. + rampup_begin_step (int): The beginning step from which gradient compression is implemented. rampup_step (int): How long it use the sparsity periods. Default is 1. for example: If the sparsity is [0.75, 0.9375, 0.984375, 0.996, 0.999], and the rampup_step is 5, \ it will use 0.75 at 0 step, and 0.9375 at 1 step, and so on. And when reach sparsity array ends, \ @@ -660,9 +660,9 @@ class DGCMomentumOptimizer(MomentumOptimizer): sparsity (list[float]): Get top important element from gradient tensor, the ratio is (1 - current sparsity). use_nesterov (bool): Enables Nesterov momentum. True means use nesterov. local_grad_clip_norm (float): Clip norm value if needed. - num_trainers: The number of training node. + num_trainers: The number of training nodes. regularization: A Regularizer, such as fluid.regularizer.L2DecayRegularizer. - name: A optional name prefix. + name: An optional name prefix. Examples: .. 
code-block:: python From ed0f1ae4b97e6c9b52efbe5f8c8eaf837390ee07 Mon Sep 17 00:00:00 2001 From: liuwei1031 <46661762+liuwei1031@users.noreply.github.com> Date: Thu, 11 Apr 2019 01:19:19 +0800 Subject: [PATCH 23/27] cherry-pick (#16760), test=release/1.4 (#16775) --- paddle/fluid/framework/details/build_strategy.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/framework/details/build_strategy.h b/paddle/fluid/framework/details/build_strategy.h index 85f328b7c40568..beb305ef5e4191 100644 --- a/paddle/fluid/framework/details/build_strategy.h +++ b/paddle/fluid/framework/details/build_strategy.h @@ -83,11 +83,11 @@ struct BuildStrategy { bool sync_batch_norm_{false}; - bool memory_optimize_{true}; - // TODO(dzhwinter): - // make enable_inplace, memory_optimize_ - // memory_early_delete_ true by default - bool enable_inplace_{true}; + // FIXME(liuwei1031) disable memory_optimzie and enable_inplace in 1.4 + // to open them by default, we need to solve the fetch variable issue + bool memory_optimize_{false}; + + bool enable_inplace_{false}; bool enable_sequential_execution_{false}; From 8643dbc233f12f829b64cc0ee6926e41fb891ddf Mon Sep 17 00:00:00 2001 From: nhzlx Date: Thu, 11 Apr 2019 09:02:52 +0000 Subject: [PATCH 24/27] cherry-pick from 16691:Anakin subgraph support yolo_v3 and faster-rcnn --- .../inference/anakin/convert/CMakeLists.txt | 4 +- .../anakin/convert/affine_channel.cc | 100 ++++++++++++++++++ .../inference/anakin/convert/affine_channel.h | 39 +++++++ .../inference/anakin/convert/op_converter.h | 16 +-- paddle/fluid/inference/anakin/convert/relu.cc | 18 ++++ paddle/fluid/inference/anakin/convert/relu.h | 11 ++ .../inference/anakin/convert/roi_align.cc | 59 +++++++++++ .../inference/anakin/convert/roi_align.h | 38 +++++++ .../anakin/convert/test_affine_channel_op.cc | 55 ++++++++++ .../inference/anakin/convert/test_relu_op.cc | 11 +- .../inference/anakin/convert/ut_helper.h | 4 +- paddle/fluid/inference/anakin/engine.cc | 26 +++-- paddle/fluid/inference/anakin/engine.h | 21 ++-- paddle/fluid/inference/anakin/op_teller.cc | 2 + .../inference/anakin/test_anakin_engine.cc | 2 +- .../ir_passes/anakin_subgraph_pass.cc | 3 +- .../analysis/ir_passes/subgraph_util.cc | 2 - paddle/fluid/inference/api/CMakeLists.txt | 1 + .../fluid/inference/api/analysis_predictor.cc | 2 + paddle/fluid/pybind/inference_api.cc | 3 + 20 files changed, 382 insertions(+), 35 deletions(-) create mode 100644 paddle/fluid/inference/anakin/convert/affine_channel.cc create mode 100644 paddle/fluid/inference/anakin/convert/affine_channel.h create mode 100644 paddle/fluid/inference/anakin/convert/roi_align.cc create mode 100644 paddle/fluid/inference/anakin/convert/roi_align.h create mode 100644 paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc diff --git a/paddle/fluid/inference/anakin/convert/CMakeLists.txt b/paddle/fluid/inference/anakin/convert/CMakeLists.txt index d3d1522dccf0d8..7cc75de8ee651e 100644 --- a/paddle/fluid/inference/anakin/convert/CMakeLists.txt +++ b/paddle/fluid/inference/anakin/convert/CMakeLists.txt @@ -1,4 +1,4 @@ -cc_library(anakin_op_converter SRCS fc.cc conv2d.cc conv2d_fusion.cc elementwise.cc activation.cc pool2d.cc concat.cc split.cc relu.cc softmax.cc batch_norm.cc reshape.cc flatten.cc transpose.cc density_prior_box.cc detection_out.cc scale.cc dropout.cc im2sequence.cc sum.cc DEPS anakin_engine framework_proto scope op_registry) +cc_library(anakin_op_converter SRCS fc.cc conv2d.cc conv2d_fusion.cc elementwise.cc activation.cc 
pool2d.cc concat.cc split.cc relu.cc softmax.cc batch_norm.cc reshape.cc flatten.cc transpose.cc density_prior_box.cc detection_out.cc scale.cc dropout.cc im2sequence.cc sum.cc affine_channel.cc roi_align.cc DEPS anakin_engine framework_proto scope op_registry) cc_test(test_anakin_fc SRCS test_fc_op.cc DEPS anakin_op_converter mul_op SERIAL) cc_test(test_anakin_conv2d SRCS test_conv2d_op.cc DEPS anakin_op_converter conv_op im2col vol2col depthwise_conv SERIAL) @@ -14,5 +14,5 @@ cc_test(test_anakin_flatten SRCS test_flatten_op.cc DEPS anakin_op_converter fla cc_test(test_anakin_transpose SRCS test_transpose_op.cc DEPS anakin_op_converter transpose_op SERIAL) cc_test(test_anakin_batch_norm SRCS test_batch_norm_op.cc DEPS anakin_op_converter batch_norm_op SERIAL) cc_test(test_anakin_dropout SRCS test_dropout_op.cc DEPS anakin_op_converter dropout_op SERIAL) -#cc_test(test_anakin_im2sequence SRCS test_im2sequence_op.cc DEPS anakin_op_converter im2sequence_op im2col) cc_test(test_anakin_sum SRCS test_sum_op.cc DEPS anakin_op_converter sum_op selected_rows_functor SERIAL) +cc_test(test_anakin_affine_channel SRCS test_affine_channel_op.cc DEPS anakin_op_converter affine_channel_op SERIAL) diff --git a/paddle/fluid/inference/anakin/convert/affine_channel.cc b/paddle/fluid/inference/anakin/convert/affine_channel.cc new file mode 100644 index 00000000000000..7c886df082d121 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/affine_channel.cc @@ -0,0 +1,100 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/affine_channel.h" +#include +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::Precision; +using anakin::saber::NV; +using anakin::saber::X86; +using anakin::saber::Shape; +using anakin::PBlock; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void AffineChannelOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + + auto input_name = op_desc.Input("X").front(); + auto output_name = op_desc.Output("Out").front(); + + // Copy the Scale to CPUPlace and get the pointer. + auto *scale_v = scope.FindVar(op_desc.Input("Scale").front()); + PADDLE_ENFORCE_NOT_NULL(scale_v); + auto *scale_t = scale_v->GetMutable(); + std::unique_ptr scale_tensor( + new framework::LoDTensor()); + scale_tensor->Resize(scale_t->dims()); + TensorCopySync((*scale_t), platform::CPUPlace(), scale_tensor.get()); + + // Copy the Bias to CPUPlace and get the pointer. 
+ auto *bias_v = scope.FindVar(op_desc.Input("Bias").front()); + PADDLE_ENFORCE_NOT_NULL(bias_v); + auto *bias_t = bias_v->GetMutable(); + std::unique_ptr bias_tensor(new framework::LoDTensor()); + bias_tensor->Resize(bias_t->dims()); + TensorCopySync((*bias_t), platform::CPUPlace(), bias_tensor.get()); + + engine_->AddOp(op_name, "AffineChannel", {input_name}, {output_name}); + + // Generate the Scale parameter of Anakin. + auto scale_shape = framework::vectorize2int(scale_t->dims()); + while (scale_shape.size() < 4) { + scale_shape.insert(scale_shape.begin(), 1); + } + Shape anakin_scale_shape(scale_shape); + auto *weight1 = GraphGlobalMem::Global().template new_block( + anakin_scale_shape); + float *scale_cpu_data = + static_cast(weight1->h_tensor().mutable_data()); + std::copy_n(scale_tensor->data(), scale_tensor->numel(), + scale_cpu_data); + weight1->d_tensor().set_shape(anakin_scale_shape); + weight1->d_tensor().copy_from(weight1->h_tensor()); + engine_->AddOpAttr(op_name, "weight_1", *weight1); + + // Generate the Bias parameter of Anakin. + auto bias_shape = framework::vectorize2int(bias_t->dims()); + while (bias_shape.size() < 4) { + bias_shape.insert(bias_shape.begin(), 1); + } + Shape anakin_bias_shape(bias_shape); + auto *weight2 = GraphGlobalMem::Global().template new_block( + anakin_bias_shape); + float *bias_cpu_data = + static_cast(weight2->h_tensor().mutable_data()); + std::copy_n(bias_tensor->data(), bias_tensor->numel(), bias_cpu_data); + weight2->d_tensor().set_shape(anakin_bias_shape); + weight2->d_tensor().copy_from(weight2->h_tensor()); + engine_->AddOpAttr(op_name, "weight_2", *weight2); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(affine_channel, AffineChannelOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/affine_channel.h b/paddle/fluid/inference/anakin/convert/affine_channel.h new file mode 100644 index 00000000000000..ea0043670c61b2 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/affine_channel.h @@ -0,0 +1,39 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class AffineChannelOpConverter : public AnakinOpConverter { + public: + AffineChannelOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::BlockDesc &block_desc, + const framework::Scope &scope, + bool test_mode) override; + virtual ~AffineChannelOpConverter() {} + + private: +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/op_converter.h b/paddle/fluid/inference/anakin/convert/op_converter.h index 1ca62658ef26ff..bffab229ede775 100644 --- a/paddle/fluid/inference/anakin/convert/op_converter.h +++ b/paddle/fluid/inference/anakin/convert/op_converter.h @@ -81,7 +81,6 @@ class AnakinOpConverter { const std::unordered_set ¶meters, const std::vector &outputs, AnakinNvEngine *engine) { ConvertBlock(block_desc, parameters, *scope, engine); - engine->Freeze(); // if the max_batch size int max_batch_size = engine->GetMaxBatchSize(); PADDLE_ENFORCE(max_batch_size > 0, @@ -91,7 +90,12 @@ class AnakinOpConverter { // the block_desc. auto max_input_shape = engine->GetMaxInputShape(); std::map> temp_max_input_shape; - + // Register outputs with anakin using the RegistVar interface before Freeze. + // Note that RegistVar's parameters can only be outputs, not inputs. + for (auto &output : outputs) { + engine->Graph()->RegistVar(output); + } + engine->Freeze(); for (auto &input : inputs) { if (parameters.count(input)) continue; std::vector input_shape; @@ -99,7 +103,7 @@ class AnakinOpConverter { input_shape[0] = max_batch_size; if (max_input_shape.count(input)) { PADDLE_ENFORCE(max_input_shape[input].size() == 4, - "the dimensions of max_input_shape setted from " + "the dimensions of max_input_shape setted from " "config->EnableAnakinEngine must be 4"); for (int i = 1; i < 4; i++) { input_shape[i] = max_input_shape[input][i]; @@ -118,14 +122,10 @@ class AnakinOpConverter { } temp_max_input_shape[input] = input_shape; engine->SetInputShape(input, input_shape); - engine->Graph()->RegistVar(input); // For share from data. } engine->SetMaxInputShape(temp_max_input_shape); engine->Optimize(); - - // For anakin share with fluid tensor. 
- engine->AllocTmpMem(); - engine->InitGraph(); + engine->InitNet(); } void SetEngine(AnakinNvEngine *engine) { engine_ = engine; } diff --git a/paddle/fluid/inference/anakin/convert/relu.cc b/paddle/fluid/inference/anakin/convert/relu.cc index 993437d014b1f9..744066e88afc61 100644 --- a/paddle/fluid/inference/anakin/convert/relu.cc +++ b/paddle/fluid/inference/anakin/convert/relu.cc @@ -41,8 +41,26 @@ void ReluOpConverter::operator()(const framework::proto::OpDesc &op, engine_->AddOpAttr(op_name, "alpha", 0); } +void LeakyReluOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::BlockDesc &block_desc, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + auto input_name = op_desc.Input("X").front(); + auto output_name = op_desc.Output("Out").front(); + + float alpha = boost::get(op_desc.GetAttr("alpha")); + engine_->AddOp(op_name, "ReLU", {input_name}, {output_name}); + engine_->AddOpAttr(op_name, "alpha", alpha); +} + } // namespace anakin } // namespace inference } // namespace paddle REGISTER_ANAKIN_OP_CONVERTER(relu, ReluOpConverter); +REGISTER_ANAKIN_OP_CONVERTER(leaky_relu, LeakyReluOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/relu.h b/paddle/fluid/inference/anakin/convert/relu.h index 6ede506511917c..d7b6b6934d6f74 100644 --- a/paddle/fluid/inference/anakin/convert/relu.h +++ b/paddle/fluid/inference/anakin/convert/relu.h @@ -33,6 +33,17 @@ class ReluOpConverter : public AnakinOpConverter { virtual ~ReluOpConverter() {} }; +class LeakyReluOpConverter : public AnakinOpConverter { + public: + LeakyReluOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::BlockDesc &block_desc, + const framework::Scope &scope, + bool test_mode) override; + virtual ~LeakyReluOpConverter() {} +}; + } // namespace anakin } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/roi_align.cc b/paddle/fluid/inference/anakin/convert/roi_align.cc new file mode 100644 index 00000000000000..0f2b08df08a9ad --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/roi_align.cc @@ -0,0 +1,59 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/inference/anakin/convert/roi_align.h" +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; + +namespace paddle { +namespace inference { +namespace anakin { + +void RoiAlignOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::BlockDesc &block_desc, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Input("ROIs").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + auto input_x_name = op_desc.Input("X").front(); + auto input_rois_name = op_desc.Input("ROIs").front(); + auto output_name = op_desc.Output("Out").front(); + + auto spatial_scale = boost::get(op_desc.GetAttr("spatial_scale")); + auto pooled_height = boost::get(op_desc.GetAttr("pooled_height")); + auto pooled_width = boost::get(op_desc.GetAttr("pooled_width")); + auto sampling_ratio = boost::get(op_desc.GetAttr("sampling_ratio")); + + engine_->AddOp(op_name, "RoiAlign", {input_x_name, input_rois_name}, + {output_name}); + engine_->AddOpAttr(op_name, "spatial_scale", spatial_scale); + engine_->AddOpAttr(op_name, "pooled_height", pooled_height); + engine_->AddOpAttr(op_name, "pooled_width", pooled_width); + engine_->AddOpAttr(op_name, "sampling_ratio", sampling_ratio); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(roi_align, RoiAlignOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/roi_align.h b/paddle/fluid/inference/anakin/convert/roi_align.h new file mode 100644 index 00000000000000..c6df4754ba9b5e --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/roi_align.h @@ -0,0 +1,38 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class RoiAlignOpConverter : public AnakinOpConverter { + public: + RoiAlignOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::BlockDesc &block_desc, + const framework::Scope &scope, + bool test_mode) override; + virtual ~RoiAlignOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc b/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc new file mode 100644 index 00000000000000..eb4f4e12eec29d --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc @@ -0,0 +1,55 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/affine_channel.h" +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +TEST(affine_channel, native) { + // Declare the difference between the inputs. + std::unordered_set parameters({"scale", "bias"}); + + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("x", {1, 3, 5, 2}); + validator.DeclOutputVar("out", {1, 3, 5, 2}); + validator.DeclParamVar("scale", {1, 3, 1, 1}); + validator.DeclParamVar("bias", {1, 3, 1, 1}); + + // Prepare Op descriptions. + framework::OpDesc desc; + desc.SetType("affine_channel"); + desc.SetInput("X", {"x"}); + desc.SetInput("Bias", {"bias"}); + desc.SetInput("Scale", {"scale"}); + desc.SetOutput("Out", {"out"}); + + // Layout must be explicitly specified here as NCHW. + desc.SetAttr("data_layout", std::string("NCHW")); + + validator.SetOp(*desc.Proto()); + validator.Execute(1); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(affine_channel); +USE_ANAKIN_CONVERTER(affine_channel); diff --git a/paddle/fluid/inference/anakin/convert/test_relu_op.cc b/paddle/fluid/inference/anakin/convert/test_relu_op.cc index 04e624518a5a44..cba19a55857542 100644 --- a/paddle/fluid/inference/anakin/convert/test_relu_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_relu_op.cc @@ -21,7 +21,7 @@ namespace paddle { namespace inference { namespace anakin { -static void test_activation_op(const std::string &op_type) { +static void test_relu_op(const std::string &op_type) { auto *converter = Registry::Global().Lookup(op_type); PADDLE_ENFORCE(converter != nullptr); std::unordered_set parameters; @@ -33,6 +33,9 @@ static void test_activation_op(const std::string &op_type) { desc.SetType(op_type); desc.SetInput("X", {"act-X"}); desc.SetOutput("Out", {"act-Out"}); + if (op_type == "leaky_relu") { + desc.SetAttr("alpha", 0.1f); + } LOG(INFO) << "set OP"; validator.SetOp(*desc.Proto()); @@ -41,10 +44,14 @@ static void test_activation_op(const std::string &op_type) { validator.Execute(5); } -TEST(sigm_op, test) { test_activation_op("relu"); } +TEST(activation, relu) { test_relu_op("relu"); } +TEST(activation, leaky_relu) { test_relu_op("leaky_relu"); } + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(relu); USE_ANAKIN_CONVERTER(relu); +USE_OP(leaky_relu); +USE_ANAKIN_CONVERTER(leaky_relu); diff --git a/paddle/fluid/inference/anakin/convert/ut_helper.h b/paddle/fluid/inference/anakin/convert/ut_helper.h index 029aff6704ff10..a931efbcf4adf6 100644 --- a/paddle/fluid/inference/anakin/convert/ut_helper.h +++ b/paddle/fluid/inference/anakin/convert/ut_helper.h @@ -67,7 +67,7 @@ void RandomizeTensor(framework::LoDTensor* tensor, const platform::Place& place, auto* temp_data = temp_tensor.mutable_data(cpu_place); for (size_t i = 0; i < num_elements; 
i++) { - *(temp_data + i) = random(0., 1.); + *(temp_data + i) = random(-128., 128.); } TensorCopySync(temp_tensor, place, tensor); @@ -151,7 +151,7 @@ class AnakinConvertValidation { } engine_->SetMaxInputShape(temp_max_input_shape); engine_->Optimize(); - engine_->InitGraph(); + engine_->InitNet(); } // We use the set 'neglected_output' here, because some Ops like batch norm, diff --git a/paddle/fluid/inference/anakin/engine.cc b/paddle/fluid/inference/anakin/engine.cc index ba044c9401a5f0..2b85d266cf0d51 100644 --- a/paddle/fluid/inference/anakin/engine.cc +++ b/paddle/fluid/inference/anakin/engine.cc @@ -35,12 +35,14 @@ namespace anakin { template AnakinEngine::AnakinEngine( bool need_summary, int device, int max_batch_size, - std::map> max_input_shape) + std::map> max_input_shape, + std::vector program_inputs) : graph_(new AnakinGraphT()), net_(new AnakinNetT(need_summary)) { device_ = device; max_batch_size_ = max_batch_size; max_input_shape_ = max_input_shape; + program_inputs_ = program_inputs; } template @@ -54,7 +56,7 @@ void AnakinEngine::SetInputShape( } template -void AnakinEngine::InitGraph() { +void AnakinEngine::InitNet() { net_->init(*graph_); } @@ -85,11 +87,19 @@ void AnakinEngine::Execute( int max_shape_sum = std::accumulate(max_input_shape.begin(), max_input_shape.end(), 1, std::multiplies()); - - PADDLE_ENFORCE(max_shape_sum >= tensor->numel(), - "The anakin input max shape should be greater than" - " or equal to the real input shape, Please set the max " - "input shape using EnableAnakinEngine"); + if (tensor->numel() > max_shape_sum) { + PADDLE_ENFORCE(std::find(program_inputs_.begin(), program_inputs_.end(), + input.first) == program_inputs_.end(), + "The anakin input max shape should be greater than" + " or equal to the real input shape, Please set the max " + "input shape using EnableAnakinEngine"); + VLOG(3) << "Anakin Net will be reset because of the inputs out of range: " + << input.first; + graph_->Reshape(input.first, fluid_input_shape); + net_.reset(new AnakinNetT(true)); + net_->init(*graph_); + anakin_input = net_->get_in(input.first); + } anakin_input->reshape(fluid_input_shape); ::anakin::saber::Tensor tmp_anakin_tensor(data, TargetT(), 0, fluid_input_shape); @@ -114,7 +124,7 @@ void AnakinEngine::Execute( template void AnakinEngine::Freeze() { - PADDLE_ENFORCE(graph_->Freeze_v3(), "Freeze anakin subgraph."); + PADDLE_ENFORCE(graph_->Freeze(), "Freeze anakin subgraph."); } template diff --git a/paddle/fluid/inference/anakin/engine.h b/paddle/fluid/inference/anakin/engine.h index 4845ffdf5b9dcf..1325306557f2e7 100644 --- a/paddle/fluid/inference/anakin/engine.h +++ b/paddle/fluid/inference/anakin/engine.h @@ -58,9 +58,10 @@ class AnakinEngine { public: explicit AnakinEngine( bool need_summary = false, int device = 0, int max_batch_size = 1, - std::map> max_input_shape = {}); + std::map> max_input_shape = {}, + std::vector program_inputs = {}); ~AnakinEngine(); - void InitGraph(); + void InitNet(); void SetInputShape(const std::string &name, std::vector shape); void AddOp(const std::string &name, const std::string &type, const std::vector &inputs, @@ -81,15 +82,16 @@ class AnakinEngine { void SetMaxInputShape(std::map> shape) { max_input_shape_ = shape; } + const std::vector &GetScalableInputs() { + return program_inputs_; + } + void SetScalableInputs(std::vector program_inputs) { + program_inputs_ = program_inputs; + } int GetMaxBatchSize() { return max_batch_size_; } void Freeze(); void Optimize(); - void AllocTmpMem() { - 
PADDLE_ENFORCE(net_->alloc_memory_first(*graph_), - "anakin alloc temp memory first failed"); - } void Save(std::string path) { graph_->save(path); } - bool IsInit() { return initialized_; } int GetDevice() { return device_; } void Execute(const std::map &inputs, @@ -103,6 +105,7 @@ class AnakinEngine { int device_; std::unique_ptr graph_; std::unique_ptr net_; + std::vector program_inputs_; }; class AnakinEngineManager { @@ -120,10 +123,10 @@ class AnakinEngineManager { AnakinNvEngineT *Create( bool need_summary, int device, int max_batch_size, std::map> max_input_shape, - std::string engine_name) { + std::vector program_inputs, std::string engine_name) { std::unique_lock lk(mut_); auto *p = new AnakinEngine( - need_summary, device, max_batch_size, max_input_shape); + need_summary, device, max_batch_size, max_input_shape, program_inputs); engines_[engine_name].reset(p); return p; } diff --git a/paddle/fluid/inference/anakin/op_teller.cc b/paddle/fluid/inference/anakin/op_teller.cc index 2042fb18ea41f8..72064c1790da13 100644 --- a/paddle/fluid/inference/anakin/op_teller.cc +++ b/paddle/fluid/inference/anakin/op_teller.cc @@ -44,6 +44,8 @@ struct SimpleOpTypeSetTeller : public Teller { teller_set.insert("sum"); teller_set.insert("depthwise_conv2d"); teller_set.insert("prior_box"); + teller_set.insert("leaky_relu"); + teller_set.insert("affine_channel"); } bool operator()(const std::string& op_type, diff --git a/paddle/fluid/inference/anakin/test_anakin_engine.cc b/paddle/fluid/inference/anakin/test_anakin_engine.cc index 8fd6b8bec9ada6..613481a55514f8 100644 --- a/paddle/fluid/inference/anakin/test_anakin_engine.cc +++ b/paddle/fluid/inference/anakin/test_anakin_engine.cc @@ -68,7 +68,7 @@ TEST_F(TestAnakinEngine, Execute) { // engine_->AddOpAttr("x", "input_shape", input_shape); engine_->SetInputShape("x", {1, 1, 1, 1}); engine_->Optimize(); - engine_->InitGraph(); + engine_->InitNet(); framework::LoDTensor x; framework::LoDTensor y; x.Resize({1, 1, 1, 1}); diff --git a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc index b8d8b6fed8ca23..cbf883a8a5ff1f 100644 --- a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc @@ -192,11 +192,12 @@ void AnakinSubgraphPass::CreateAnakinOp( auto max_input_shape = Get>>("max_input_shape"); auto max_batch_size = Get("max_batch_size"); + auto program_inputs = program_desc->GetFeedTargetNames(); auto *anakin_engine = inference::Singleton::Global().Create( true, Get("gpu_device_id"), max_batch_size, max_input_shape, - engine_key); + program_inputs, engine_key); auto *scope = param_scope(); std::unordered_set param_set(params.begin(), params.end()); diff --git a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc index 7c4aab06a1d2b3..8f7c6ac7553676 100644 --- a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc +++ b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc @@ -100,7 +100,6 @@ void RenameAndGetOutputs( const std::string arg_value = in_var->arguments(k); const std::string arg_value_with_id = arg_value + std::to_string(var2id[arg_value]); - if (input_names_with_id.count(arg_value_with_id)) { replaced_names.push_back(arg_value); if (graph_var_map.count(arg_value)) { @@ -149,7 +148,6 @@ void RenameAndGetOutputs( const std::string arg_value = out_var->arguments(k); const std::string arg_value_with_id 
= arg_value + std::to_string(var2id[arg_value]); - if (graph_var_map.count(arg_value)) { add_block_var(arg_value, arg_value_with_id); } diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt index 882bb3468388e7..9c80b7a839a6bf 100644 --- a/paddle/fluid/inference/api/CMakeLists.txt +++ b/paddle/fluid/inference/api/CMakeLists.txt @@ -70,3 +70,4 @@ if (WITH_ANAKIN AND WITH_MKL) # only needed in CI anakin_target(inference_anakin_api) anakin_target(inference_anakin_api_shared) endif() +inference_analysis_test(faster_rcnn_test SRCS faster_rcnn_test.cc EXTRA_DEPS paddle_fluid) diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 6942604b0723f8..e5991af4f7bfe5 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -888,4 +888,6 @@ USE_ANAKIN_CONVERTER(density_prior_box); USE_ANAKIN_CONVERTER(dropout); USE_ANAKIN_CONVERTER(sum); USE_ANAKIN_CONVERTER(prior_box); +USE_ANAKIN_CONVERTER(leaky_relu); +USE_ANAKIN_CONVERTER(affine_channel); #endif diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc index 236afc77f708c3..ace385ec60fec0 100644 --- a/paddle/fluid/pybind/inference_api.cc +++ b/paddle/fluid/pybind/inference_api.cc @@ -229,6 +229,9 @@ void BindAnalysisConfig(py::module *m) { py::arg("min_subgraph_size") = 3, py::arg("precision_mode") = AnalysisConfig::Precision::kFloat32, py::arg("use_static") = true) + .def("enable_anakin_engine", &AnalysisConfig::EnableAnakinEngine, + py::arg("max_batch_size") = 1, py::arg("max_input_shape") = {}, + py::arg("min_subgraph_size") = 6) .def("tensorrt_engine_enabled", &AnalysisConfig::tensorrt_engine_enabled) .def("switch_ir_debug", &AnalysisConfig::SwitchIrDebug, py::arg("x") = true) From 7ad182e16cbd099523dd274d3b4051b3734c9adf Mon Sep 17 00:00:00 2001 From: nhzlx Date: Thu, 11 Apr 2019 12:42:39 +0000 Subject: [PATCH 25/27] Cherry-Pick from 16662 : Anakin subgraph cpu support --- cmake/anakin_subgraph.cmake | 3 +- .../inference/anakin/convert/activation.cc | 32 ++++--- .../inference/anakin/convert/activation.h | 13 ++- .../anakin/convert/affine_channel.cc | 32 ++++--- .../inference/anakin/convert/affine_channel.h | 3 +- .../inference/anakin/convert/batch_norm.cc | 55 ++++++----- .../inference/anakin/convert/batch_norm.h | 3 +- .../fluid/inference/anakin/convert/concat.cc | 30 +++--- .../fluid/inference/anakin/convert/concat.h | 3 +- .../fluid/inference/anakin/convert/conv2d.cc | 47 +++++---- .../fluid/inference/anakin/convert/conv2d.h | 3 +- .../inference/anakin/convert/conv2d_fusion.cc | 51 ++++++---- .../inference/anakin/convert/conv2d_fusion.h | 3 +- .../anakin/convert/density_prior_box.cc | 60 +++++++----- .../anakin/convert/density_prior_box.h | 3 +- .../inference/anakin/convert/detection_out.cc | 45 ++++----- .../inference/anakin/convert/detection_out.h | 3 +- .../fluid/inference/anakin/convert/dropout.cc | 32 +++---- .../fluid/inference/anakin/convert/dropout.h | 3 +- .../inference/anakin/convert/elementwise.cc | 50 +++++----- .../inference/anakin/convert/elementwise.h | 6 +- paddle/fluid/inference/anakin/convert/fc.cc | 38 ++++---- paddle/fluid/inference/anakin/convert/fc.h | 9 +- .../fluid/inference/anakin/convert/flatten.cc | 23 ++--- .../fluid/inference/anakin/convert/flatten.h | 3 +- .../inference/anakin/convert/im2sequence.cc | 30 +++--- .../inference/anakin/convert/im2sequence.h | 3 +- .../inference/anakin/convert/op_converter.h | 74 
++++++++------ .../fluid/inference/anakin/convert/pool2d.cc | 36 ++++--- .../fluid/inference/anakin/convert/pool2d.h | 3 +- paddle/fluid/inference/anakin/convert/relu.cc | 39 ++++---- paddle/fluid/inference/anakin/convert/relu.h | 6 +- .../fluid/inference/anakin/convert/reshape.cc | 24 ++--- .../fluid/inference/anakin/convert/reshape.h | 3 +- .../inference/anakin/convert/roi_align.cc | 27 +++--- .../inference/anakin/convert/roi_align.h | 3 +- .../fluid/inference/anakin/convert/scale.cc | 23 ++--- paddle/fluid/inference/anakin/convert/scale.h | 3 +- .../fluid/inference/anakin/convert/softmax.cc | 25 ++--- .../fluid/inference/anakin/convert/softmax.h | 3 +- .../fluid/inference/anakin/convert/split.cc | 30 +++--- paddle/fluid/inference/anakin/convert/split.h | 3 +- paddle/fluid/inference/anakin/convert/sum.cc | 27 +++--- paddle/fluid/inference/anakin/convert/sum.h | 3 +- .../anakin/convert/test_activation_op.cc | 43 +++++++-- .../anakin/convert/test_affine_channel_op.cc | 28 +++++- .../anakin/convert/test_batch_norm_op.cc | 24 ++++- .../anakin/convert/test_concat_op.cc | 41 ++++---- .../anakin/convert/test_conv2d_op.cc | 27 +++++- .../anakin/convert/test_dropout_op.cc | 23 ++++- .../anakin/convert/test_elementwise_op.cc | 41 +++++++- .../inference/anakin/convert/test_fc_op.cc | 27 +++++- .../anakin/convert/test_flatten_op.cc | 26 ++++- .../anakin/convert/test_pool2d_op.cc | 96 ++++++++++--------- .../inference/anakin/convert/test_relu_op.cc | 46 +++++++-- .../anakin/convert/test_reshape_op.cc | 44 +++++++-- .../anakin/convert/test_softmax_op.cc | 26 ++++- .../inference/anakin/convert/test_split_op.cc | 74 ++++++++------ .../inference/anakin/convert/test_sum_op.cc | 23 ++++- .../anakin/convert/test_transpose_op.cc | 44 +++++++-- .../inference/anakin/convert/transpose.cc | 24 ++--- .../inference/anakin/convert/transpose.h | 3 +- .../inference/anakin/convert/ut_helper.h | 51 +++++----- paddle/fluid/inference/anakin/engine.cc | 46 ++++++++- paddle/fluid/inference/anakin/engine.h | 25 +++-- paddle/fluid/inference/analysis/argument.h | 28 +++--- .../inference/analysis/ir_pass_manager.cc | 1 + .../ir_passes/anakin_subgraph_pass.cc | 49 ++++++++-- paddle/fluid/inference/api/CMakeLists.txt | 1 - paddle/fluid/inference/api/analysis_config.cc | 8 +- .../fluid/inference/api/analysis_predictor.cc | 2 +- .../fluid/operators/anakin/anakin_engine_op.h | 40 +++----- 72 files changed, 1149 insertions(+), 680 deletions(-) diff --git a/cmake/anakin_subgraph.cmake b/cmake/anakin_subgraph.cmake index 4a7d32a63553df..b5437e776d31e4 100644 --- a/cmake/anakin_subgraph.cmake +++ b/cmake/anakin_subgraph.cmake @@ -25,8 +25,9 @@ endif() if(ANAKIN_FOUND) message(STATUS "Current ANAKIN header is ${ANAKIN_INCLUDE_DIR}/anakin_config.h. 
") + include_directories(${ANAKIN_ROOT}) include_directories(${ANAKIN_ROOT}/include) - include_directories(${ANAKIN_ROOT}/include/saber) + include_directories(${ANAKIN_ROOT}/saber) link_directories(${ANAKIN_ROOT}) add_definitions(-DPADDLE_WITH_ANAKIN) endif() diff --git a/paddle/fluid/inference/anakin/convert/activation.cc b/paddle/fluid/inference/anakin/convert/activation.cc index a9aeb19ffd5f04..11f92c95217b37 100644 --- a/paddle/fluid/inference/anakin/convert/activation.cc +++ b/paddle/fluid/inference/anakin/convert/activation.cc @@ -16,16 +16,13 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; - namespace paddle { namespace inference { namespace anakin { -ActivationOpConverter::ActivationOpConverter(const std::string &op_type) +template +ActivationOpConverter::ActivationOpConverter( + const std::string &op_type) : op_type_(op_type) { auto it = anakin_op_types_.find(op_type_); PADDLE_ENFORCE(it != anakin_op_types_.end(), @@ -33,10 +30,10 @@ ActivationOpConverter::ActivationOpConverter(const std::string &op_type) anakin_op_type_ = it->second; } -void ActivationOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void ActivationOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -44,13 +41,20 @@ void ActivationOpConverter::operator()(const framework::proto::OpDesc &op, auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); auto input_name = op_desc.Input("X").front(); auto output_name = op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Activation", {input_name}, {output_name}); - engine_->AddOpAttr(op_name, "type", anakin_op_type_); + this->engine_->AddOp(op_name, "Activation", {input_name}, {output_name}); + this->engine_->AddOpAttr(op_name, "type", anakin_op_type_); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(sigmoid, SigmoidOpConverter); -REGISTER_ANAKIN_OP_CONVERTER(tanh, TanhOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(sigmoid, + SigmoidOpConverter<::anakin::saber::NV>); +REGISTER_CUDA_ANAKIN_OP_CONVERTER(tanh, TanhOpConverter<::anakin::saber::NV>); +#endif + +REGISTER_CPU_ANAKIN_OP_CONVERTER(sigmoid, + SigmoidOpConverter<::anakin::saber::X86>); +REGISTER_CPU_ANAKIN_OP_CONVERTER(tanh, TanhOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/activation.h b/paddle/fluid/inference/anakin/convert/activation.h index 592a3d5bd9d127..b3fe4748641cf0 100644 --- a/paddle/fluid/inference/anakin/convert/activation.h +++ b/paddle/fluid/inference/anakin/convert/activation.h @@ -22,7 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -class ActivationOpConverter : public AnakinOpConverter { +template +class ActivationOpConverter : public AnakinOpConverter { public: explicit ActivationOpConverter(const std::string &op_type); @@ -39,14 +40,16 @@ class ActivationOpConverter : public AnakinOpConverter { {"sigmoid", "Sigmoid"}}; }; -class TanhOpConverter : public ActivationOpConverter { +template +class TanhOpConverter : public ActivationOpConverter { public: - TanhOpConverter() : 
ActivationOpConverter("tanh") {} + TanhOpConverter() : ActivationOpConverter("tanh") {} }; -class SigmoidOpConverter : public ActivationOpConverter { +template +class SigmoidOpConverter : public ActivationOpConverter { public: - SigmoidOpConverter() : ActivationOpConverter("sigmoid") {} + SigmoidOpConverter() : ActivationOpConverter("sigmoid") {} }; } // namespace anakin } // namespace inference diff --git a/paddle/fluid/inference/anakin/convert/affine_channel.cc b/paddle/fluid/inference/anakin/convert/affine_channel.cc index 7c886df082d121..6bf913e7ffbc02 100644 --- a/paddle/fluid/inference/anakin/convert/affine_channel.cc +++ b/paddle/fluid/inference/anakin/convert/affine_channel.cc @@ -18,19 +18,16 @@ #include using anakin::graph::GraphGlobalMem; +using anakin::PTuple; using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; using anakin::saber::Shape; -using anakin::PBlock; -using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void AffineChannelOpConverter::operator()( +template +void AffineChannelOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -59,7 +56,7 @@ void AffineChannelOpConverter::operator()( bias_tensor->Resize(bias_t->dims()); TensorCopySync((*bias_t), platform::CPUPlace(), bias_tensor.get()); - engine_->AddOp(op_name, "AffineChannel", {input_name}, {output_name}); + this->engine_->AddOp(op_name, "AffineChannel", {input_name}, {output_name}); // Generate the Scale parameter of Anakin. auto scale_shape = framework::vectorize2int(scale_t->dims()); @@ -67,15 +64,16 @@ void AffineChannelOpConverter::operator()( scale_shape.insert(scale_shape.begin(), 1); } Shape anakin_scale_shape(scale_shape); - auto *weight1 = GraphGlobalMem::Global().template new_block( - anakin_scale_shape); + auto *weight1 = + GraphGlobalMem::Global().template new_block( + anakin_scale_shape); float *scale_cpu_data = static_cast(weight1->h_tensor().mutable_data()); std::copy_n(scale_tensor->data(), scale_tensor->numel(), scale_cpu_data); weight1->d_tensor().set_shape(anakin_scale_shape); weight1->d_tensor().copy_from(weight1->h_tensor()); - engine_->AddOpAttr(op_name, "weight_1", *weight1); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); // Generate the Bias parameter of Anakin. 
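
Aside: affine_channel applies a per-channel scale and shift in NCHW layout, which is what the converter above wires into Anakin's AffineChannel op (scale as weight_1, bias as weight_2, both padded to a 4-D {1, C, 1, 1} shape). A plain reference implementation of the op's semantics, independent of Anakin:

    // Reference semantics of affine_channel in NCHW layout:
    // out[n][c][h][w] = scale[c] * x[n][c][h][w] + bias[c].
    #include <cstddef>
    #include <vector>

    std::vector<float> AffineChannelRef(const std::vector<float>& x,
                                        const std::vector<float>& scale,
                                        const std::vector<float>& bias,
                                        int n, int c, int h, int w) {
      std::vector<float> out(x.size());
      for (int in = 0; in < n; ++in) {
        for (int ic = 0; ic < c; ++ic) {
          for (int s = 0; s < h * w; ++s) {
            std::size_t idx = static_cast<std::size_t>(((in * c) + ic) * h * w + s);
            out[idx] = scale[ic] * x[idx] + bias[ic];
          }
        }
      }
      return out;
    }
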
auto bias_shape = framework::vectorize2int(bias_t->dims()); @@ -83,18 +81,24 @@ void AffineChannelOpConverter::operator()( bias_shape.insert(bias_shape.begin(), 1); } Shape anakin_bias_shape(bias_shape); - auto *weight2 = GraphGlobalMem::Global().template new_block( - anakin_bias_shape); + auto *weight2 = + GraphGlobalMem::Global().template new_block( + anakin_bias_shape); float *bias_cpu_data = static_cast(weight2->h_tensor().mutable_data()); std::copy_n(bias_tensor->data(), bias_tensor->numel(), bias_cpu_data); weight2->d_tensor().set_shape(anakin_bias_shape); weight2->d_tensor().copy_from(weight2->h_tensor()); - engine_->AddOpAttr(op_name, "weight_2", *weight2); + this->engine_->AddOpAttr(op_name, "weight_2", *weight2); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(affine_channel, AffineChannelOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER( + affine_channel, AffineChannelOpConverter<::anakin::saber::NV>); +#endif +REGISTER_CPU_ANAKIN_OP_CONVERTER( + affine_channel, AffineChannelOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/affine_channel.h b/paddle/fluid/inference/anakin/convert/affine_channel.h index ea0043670c61b2..5da4a736e8d7e0 100644 --- a/paddle/fluid/inference/anakin/convert/affine_channel.h +++ b/paddle/fluid/inference/anakin/convert/affine_channel.h @@ -21,7 +21,8 @@ namespace paddle { namespace inference { namespace anakin { -class AffineChannelOpConverter : public AnakinOpConverter { +template +class AffineChannelOpConverter : public AnakinOpConverter { public: AffineChannelOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/batch_norm.cc b/paddle/fluid/inference/anakin/convert/batch_norm.cc index 38cf6172027b3b..1c837e9c3dfd4e 100644 --- a/paddle/fluid/inference/anakin/convert/batch_norm.cc +++ b/paddle/fluid/inference/anakin/convert/batch_norm.cc @@ -21,17 +21,16 @@ using anakin::graph::GraphGlobalMem; using anakin::AK_FLOAT; -using anakin::saber::NV; using anakin::saber::Shape; namespace paddle { namespace inference { namespace anakin { -void BatchNormOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void BatchNormOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Output("Y").size(), 1); std::map inputs; @@ -48,9 +47,9 @@ void BatchNormOpConverter::operator()(const framework::proto::OpDesc &op, auto bn_op_name = op_name + ":bn"; auto bn_output = bn_op_name + "_output"; - engine_->AddOp(bn_op_name, "BatchNorm", {inputs["X"]}, {bn_output}); - engine_->AddOpAttr(bn_op_name, "epsilon", epsilon); - engine_->AddOpAttr(bn_op_name, "momentum", static_cast(1.0)); + this->engine_->AddOp(bn_op_name, "BatchNorm", {inputs["X"]}, {bn_output}); + this->engine_->AddOpAttr(bn_op_name, "epsilon", epsilon); + this->engine_->AddOpAttr(bn_op_name, "momentum", static_cast(1.0)); auto scale_op_name = op_name + ":scale"; auto get_lod_tensor = [this, &scope, &op_name](const std::string &var_name, @@ -81,48 +80,54 @@ void BatchNormOpConverter::operator()(const framework::proto::OpDesc &op, Shape shape1(fill_shape(4, framework::vectorize2int(mean_t.dims()))); Shape shape2(fill_shape(4, framework::vectorize2int(variance_t.dims()))); auto *weight1 = - 
GraphGlobalMem::Global().template new_block(shape1); + GraphGlobalMem::Global().template new_block(shape1); auto *mean_data = static_cast(weight1->h_tensor().mutable_data()); std::copy_n(mean_t.data(), mean_t.numel(), mean_data); - engine_->AddOpAttr(bn_op_name, "weight_1", *weight1); + this->engine_->AddOpAttr(bn_op_name, "weight_1", *weight1); auto *weight2 = - GraphGlobalMem::Global().template new_block(shape2); + GraphGlobalMem::Global().template new_block(shape2); auto *variance_data = static_cast(weight2->h_tensor().mutable_data()); std::copy_n(variance_t.data(), variance_t.numel(), variance_data); - engine_->AddOpAttr(bn_op_name, "weight_2", *weight2); + this->engine_->AddOpAttr(bn_op_name, "weight_2", *weight2); Shape shape3(std::vector({1, 1, 1, 1})); auto *weight3 = - GraphGlobalMem::Global().template new_block(shape3); + GraphGlobalMem::Global().template new_block(shape3); auto *alpha_data = static_cast(weight3->h_tensor().mutable_data()); float weight3_data[] = {1}; std::copy(std::begin(weight3_data), std::end(weight3_data), alpha_data); - engine_->AddOpAttr(bn_op_name, "weight_3", *weight3); + this->engine_->AddOpAttr(bn_op_name, "weight_3", *weight3); Shape scale_shape(fill_shape(4, framework::vectorize2int(scale_t.dims()))); - auto *scale = - GraphGlobalMem::Global().template new_block(scale_shape); + auto *scale = GraphGlobalMem::Global().template new_block( + scale_shape); auto *scale_data = static_cast(scale->h_tensor().mutable_data()); std::copy_n(scale_t.data(), scale_t.numel(), scale_data); Shape bias_shape(fill_shape(4, framework::vectorize2int(bias_t.dims()))); - auto *bias = - GraphGlobalMem::Global().template new_block(bias_shape); + auto *bias = GraphGlobalMem::Global().template new_block( + bias_shape); auto *bias_data = static_cast(bias->h_tensor().mutable_data()); std::copy_n(bias_t.data(), bias_t.numel(), bias_data); - engine_->AddOp(scale_op_name, "Scale", {bn_output}, {output}); - engine_->AddOpAttr(scale_op_name, "axis", 1); - engine_->AddOpAttr(scale_op_name, "num_axes", 1); - engine_->AddOpAttr(scale_op_name, "bias_term", true); - engine_->AddOpAttr(scale_op_name, "weight_1", *scale); - engine_->AddOpAttr(scale_op_name, "weight_2", *bias); + this->engine_->AddOp(scale_op_name, "Scale", {bn_output}, {output}); + this->engine_->AddOpAttr(scale_op_name, "axis", 1); + this->engine_->AddOpAttr(scale_op_name, "num_axes", 1); + this->engine_->AddOpAttr(scale_op_name, "bias_term", true); + this->engine_->AddOpAttr(scale_op_name, "weight_1", *scale); + this->engine_->AddOpAttr(scale_op_name, "weight_2", *bias); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(batch_norm, BatchNormOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(batch_norm, + BatchNormOpConverter<::anakin::saber::NV>); +#endif + +REGISTER_CPU_ANAKIN_OP_CONVERTER(batch_norm, + BatchNormOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/batch_norm.h b/paddle/fluid/inference/anakin/convert/batch_norm.h index c56735f15b435b..dc94b6ff64d13b 100644 --- a/paddle/fluid/inference/anakin/convert/batch_norm.h +++ b/paddle/fluid/inference/anakin/convert/batch_norm.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class BatchNormOpConverter : public AnakinOpConverter { +template +class BatchNormOpConverter : public AnakinOpConverter { public: BatchNormOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/concat.cc 
b/paddle/fluid/inference/anakin/convert/concat.cc index ae90c083690da6..cfd9540acf60ab 100644 --- a/paddle/fluid/inference/anakin/convert/concat.cc +++ b/paddle/fluid/inference/anakin/convert/concat.cc @@ -15,38 +15,32 @@ #include "paddle/fluid/inference/anakin/convert/concat.h" #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; -using anakin::PTuple; - namespace paddle { namespace inference { namespace anakin { -void ConcatOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void ConcatOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); int axis = boost::get(op_desc.GetAttr("axis")); auto input_names = op_desc.Input("X"); - // PADDLE_ENFORCE(axis > 0, - // "The axis attr of Concat op should be large than 0 for trt"); auto y_name = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Concat", input_names, {y_name}); - engine_->AddOpAttr(op_name, "axis", axis); + this->engine_->AddOp(op_name, "Concat", input_names, {y_name}); + this->engine_->AddOpAttr(op_name, "axis", axis); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(concat, ConcatOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(concat, + ConcatOpConverter<::anakin::saber::NV>); +#endif +REGISTER_CPU_ANAKIN_OP_CONVERTER(concat, + ConcatOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/concat.h b/paddle/fluid/inference/anakin/convert/concat.h index 974ff689bfef68..a32f8a4612921f 100644 --- a/paddle/fluid/inference/anakin/convert/concat.h +++ b/paddle/fluid/inference/anakin/convert/concat.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class ConcatOpConverter : public AnakinOpConverter { +template +class ConcatOpConverter : public AnakinOpConverter { public: ConcatOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/conv2d.cc b/paddle/fluid/inference/anakin/convert/conv2d.cc index 308f14604b9c83..f9ab9874751300 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d.cc +++ b/paddle/fluid/inference/anakin/convert/conv2d.cc @@ -18,19 +18,18 @@ #include using anakin::graph::GraphGlobalMem; +using anakin::PTuple; using anakin::AK_FLOAT; -using anakin::saber::NV; using anakin::saber::Shape; -using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void Conv2dOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void Conv2dOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), 1UL); PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), 1UL); @@ -39,7 +38,7 @@ void Conv2dOpConverter::operator()(const framework::proto::OpDesc &op, auto input_name = op_desc.Input("Input").front(); auto output_name = op_desc.Output("Output").front(); auto op_name = op_desc.Type() + ":" + 
op_desc.Output("Output").front(); - engine_->AddOp(op_name, "Convolution", {input_name}, {output_name}); + this->engine_->AddOp(op_name, "Convolution", {input_name}, {output_name}); auto *filter_v = scope.FindVar(op_desc.Input("Filter").front()); PADDLE_ENFORCE_NOT_NULL(filter_v); @@ -51,38 +50,44 @@ void Conv2dOpConverter::operator()(const framework::proto::OpDesc &op, PADDLE_ENFORCE_EQ(weight_tensor->dims().size(), 4UL); - // const int n_output = weight_tensor->dims()[0]; - // const int n_input = weight_tensor->dims()[1]; const int filter_h = weight_tensor->dims()[2]; const int filter_w = weight_tensor->dims()[3]; - // auto filter_num = n_input * filter_h * filter_w ; + auto filter_num = weight_tensor->dims()[0]; - engine_->AddOpAttr(op_name, "filter_num", filter_num); - engine_->AddOpAttr>(op_name, "kernel_size", {filter_h, filter_w}); + this->engine_->template AddOpAttr(op_name, "filter_num", filter_num); + this->engine_->template AddOpAttr>(op_name, "kernel_size", + {filter_h, filter_w}); auto strides = boost::get>(op_desc.GetAttr("strides")); - engine_->AddOpAttr>(op_name, "strides", strides); + this->engine_->template AddOpAttr>(op_name, "strides", strides); auto paddings = boost::get>(op_desc.GetAttr("paddings")); - engine_->AddOpAttr>(op_name, "padding", paddings); + this->engine_->template AddOpAttr>(op_name, "padding", paddings); auto dilations = boost::get>(op_desc.GetAttr("dilations")); - engine_->AddOpAttr>(op_name, "dilation_rate", dilations); + this->engine_->template AddOpAttr>(op_name, "dilation_rate", + dilations); const int groups = boost::get(op_desc.GetAttr("groups")); - engine_->AddOpAttr(op_name, "group", groups); - engine_->AddOpAttr(op_name, "axis", 1); - engine_->AddOpAttr(op_name, "bias_term", false); + this->engine_->AddOpAttr(op_name, "group", groups); + this->engine_->AddOpAttr(op_name, "axis", 1); + this->engine_->AddOpAttr(op_name, "bias_term", false); auto weight_shape = framework::vectorize2int(filter_t->dims()); Shape anakin_shape(weight_shape); auto *weight1 = - GraphGlobalMem::Global().template new_block(anakin_shape); + GraphGlobalMem::Global().template new_block( + anakin_shape); float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); std::copy_n(weight_tensor->data(), weight_tensor->numel(), cpu_data); weight1->d_tensor().set_shape(anakin_shape); weight1->d_tensor().copy_from(weight1->h_tensor()); - engine_->AddOpAttr(op_name, "weight_1", *weight1); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(conv2d, Conv2dOpConverter); +REGISTER_CPU_ANAKIN_OP_CONVERTER(conv2d, + Conv2dOpConverter<::anakin::saber::X86>); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(conv2d, + Conv2dOpConverter<::anakin::saber::NV>); +#endif diff --git a/paddle/fluid/inference/anakin/convert/conv2d.h b/paddle/fluid/inference/anakin/convert/conv2d.h index dca5d19f468ac6..6ecb32840519e0 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d.h +++ b/paddle/fluid/inference/anakin/convert/conv2d.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class Conv2dOpConverter : public AnakinOpConverter { +template +class Conv2dOpConverter : public AnakinOpConverter { public: Conv2dOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc b/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc index fa1ab0efeeb5ca..ff60771f87b33e 100644 --- 
a/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc +++ b/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc @@ -18,19 +18,18 @@ #include using anakin::graph::GraphGlobalMem; +using anakin::PTuple; using anakin::AK_FLOAT; -using anakin::saber::NV; using anakin::saber::Shape; -using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void Conv2dFusionOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void Conv2dFusionOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), 1UL); PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), 1UL); @@ -40,7 +39,7 @@ void Conv2dFusionOpConverter::operator()(const framework::proto::OpDesc &op, auto input_name = op_desc.Input("Input").front(); auto output_name = op_desc.Output("Output").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Output").front(); - engine_->AddOp(op_name, "Convolution", {input_name}, {output_name}); + this->engine_->AddOp(op_name, "Convolution", {input_name}, {output_name}); auto *filter_v = scope.FindVar(op_desc.Input("Filter").front()); PADDLE_ENFORCE_NOT_NULL(filter_v); @@ -63,28 +62,31 @@ void Conv2dFusionOpConverter::operator()(const framework::proto::OpDesc &op, const int filter_w = weight_tensor->dims()[3]; // auto filter_num = n_input * filter_h * filter_w ; auto filter_num = weight_tensor->dims()[0]; - engine_->AddOpAttr(op_name, "filter_num", filter_num); - engine_->AddOpAttr>(op_name, "kernel_size", {filter_h, filter_w}); + this->engine_->template AddOpAttr(op_name, "filter_num", filter_num); + this->engine_->template AddOpAttr>(op_name, "kernel_size", + {filter_h, filter_w}); auto strides = boost::get>(op_desc.GetAttr("strides")); - engine_->AddOpAttr>(op_name, "strides", strides); + this->engine_->template AddOpAttr>(op_name, "strides", strides); auto paddings = boost::get>(op_desc.GetAttr("paddings")); - engine_->AddOpAttr>(op_name, "padding", paddings); + this->engine_->template AddOpAttr>(op_name, "padding", paddings); auto dilations = boost::get>(op_desc.GetAttr("dilations")); - engine_->AddOpAttr>(op_name, "dilation_rate", dilations); + this->engine_->template AddOpAttr>(op_name, "dilation_rate", + dilations); const int groups = boost::get(op_desc.GetAttr("groups")); - engine_->AddOpAttr(op_name, "group", groups); - engine_->AddOpAttr(op_name, "axis", 1); - engine_->AddOpAttr(op_name, "bias_term", true); + this->engine_->AddOpAttr(op_name, "group", groups); + this->engine_->AddOpAttr(op_name, "axis", 1); + this->engine_->AddOpAttr(op_name, "bias_term", true); auto weight_shape = framework::vectorize2int(filter_t->dims()); Shape anakin_shape(weight_shape); auto *weight1 = - GraphGlobalMem::Global().template new_block(anakin_shape); + GraphGlobalMem::Global().template new_block( + anakin_shape); float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); std::copy_n(weight_tensor->data(), weight_tensor->numel(), cpu_data); weight1->d_tensor().set_shape(anakin_shape); weight1->d_tensor().copy_from(weight1->h_tensor()); - engine_->AddOpAttr(op_name, "weight_1", *weight1); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); auto bias_shape = framework::vectorize2int(b_t->dims()); framework::LoDTensor bias_tensor; @@ -98,17 +100,24 @@ void 
Conv2dFusionOpConverter::operator()(const framework::proto::OpDesc &op, // bias_shape.push_back(1); Shape anakin_bias_shape(bias_shape); - auto *weight2 = GraphGlobalMem::Global().template new_block( - anakin_bias_shape); + auto *weight2 = + GraphGlobalMem::Global().template new_block( + anakin_bias_shape); float *cpu_data2 = static_cast(weight2->h_tensor().mutable_data()); std::copy_n(bias_data, bias_tensor.numel(), cpu_data2); weight2->d_tensor().set_shape(anakin_bias_shape); weight2->d_tensor().copy_from(weight2->h_tensor()); - engine_->AddOpAttr(op_name, "weight_2", *weight2); + this->engine_->AddOpAttr(op_name, "weight_2", *weight2); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(conv2d_fusion, Conv2dFusionOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(conv2d_fusion, + Conv2dFusionOpConverter<::anakin::saber::NV>); +#endif + +REGISTER_CPU_ANAKIN_OP_CONVERTER(conv2d_fusion, + Conv2dFusionOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/conv2d_fusion.h b/paddle/fluid/inference/anakin/convert/conv2d_fusion.h index 0d9ef28183b309..abcf61a75e0fda 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d_fusion.h +++ b/paddle/fluid/inference/anakin/convert/conv2d_fusion.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class Conv2dFusionOpConverter : public AnakinOpConverter { +template +class Conv2dFusionOpConverter : public AnakinOpConverter { public: Conv2dFusionOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/density_prior_box.cc b/paddle/fluid/inference/anakin/convert/density_prior_box.cc index 30796f75924271..f552e41c85fb11 100644 --- a/paddle/fluid/inference/anakin/convert/density_prior_box.cc +++ b/paddle/fluid/inference/anakin/convert/density_prior_box.cc @@ -17,17 +17,14 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void DensityPriorBoxOpConverter::operator()( +template +void DensityPriorBoxOpConverter::operator()( const framework::proto::OpDesc& op, const framework::BlockDesc& block_desc, const framework::Scope& scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -81,27 +78,44 @@ void DensityPriorBoxOpConverter::operator()( std::vector temp_v = {}; - engine_->AddOp(op_name, "PriorBox", {input_name, image_name}, {output_name}); - engine_->AddOpAttr>(op_name, "min_size", min_sizes); - engine_->AddOpAttr>(op_name, "max_size", max_sizes); - engine_->AddOpAttr>(op_name, "aspect_ratio", aspect_ratios); - engine_->AddOpAttr>(op_name, "fixed_size", fixed_sizes); - engine_->AddOpAttr>(op_name, "fixed_ratio", fixed_ratios); - engine_->AddOpAttr>(op_name, "density", dens); - engine_->AddOpAttr(op_name, "is_flip", is_flip); - engine_->AddOpAttr(op_name, "is_clip", is_clip); - engine_->AddOpAttr>(op_name, "variance", variances); - engine_->AddOpAttr(op_name, "img_h", static_cast(0)); - engine_->AddOpAttr(op_name, "img_w", static_cast(0)); - engine_->AddOpAttr(op_name, "step_h", step_h); - engine_->AddOpAttr(op_name, "step_w", step_w); - engine_->AddOpAttr(op_name, "offset", offset); - engine_->AddOpAttr>(op_name, "order", t_order); + this->engine_->AddOp(op_name, "PriorBox", {input_name, image_name}, + {output_name}); + this->engine_->template AddOpAttr>(op_name, "min_size", + min_sizes); + this->engine_->template 
AddOpAttr>(op_name, "max_size", + max_sizes); + this->engine_->template AddOpAttr>(op_name, "aspect_ratio", + aspect_ratios); + this->engine_->template AddOpAttr>(op_name, "fixed_size", + fixed_sizes); + this->engine_->template AddOpAttr>(op_name, "fixed_ratio", + fixed_ratios); + this->engine_->template AddOpAttr>(op_name, "density", dens); + this->engine_->AddOpAttr(op_name, "is_flip", is_flip); + this->engine_->AddOpAttr(op_name, "is_clip", is_clip); + this->engine_->template AddOpAttr>(op_name, "variance", + variances); + this->engine_->AddOpAttr(op_name, "img_h", static_cast(0)); + this->engine_->AddOpAttr(op_name, "img_w", static_cast(0)); + this->engine_->AddOpAttr(op_name, "step_h", step_h); + this->engine_->AddOpAttr(op_name, "step_w", step_w); + this->engine_->AddOpAttr(op_name, "offset", offset); + this->engine_->template AddOpAttr>(op_name, "order", + t_order); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(density_prior_box, DensityPriorBoxOpConverter); -REGISTER_ANAKIN_OP_CONVERTER(prior_box, DensityPriorBoxOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER( + density_prior_box, DensityPriorBoxOpConverter<::anakin::saber::NV>); +REGISTER_CUDA_ANAKIN_OP_CONVERTER( + prior_box, DensityPriorBoxOpConverter<::anakin::saber::NV>); +#endif + +REGISTER_CPU_ANAKIN_OP_CONVERTER( + density_prior_box, DensityPriorBoxOpConverter<::anakin::saber::X86>); +REGISTER_CPU_ANAKIN_OP_CONVERTER( + prior_box, DensityPriorBoxOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/density_prior_box.h b/paddle/fluid/inference/anakin/convert/density_prior_box.h index bf9210711a0f69..29f4f6f7f9db50 100644 --- a/paddle/fluid/inference/anakin/convert/density_prior_box.h +++ b/paddle/fluid/inference/anakin/convert/density_prior_box.h @@ -22,7 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -class DensityPriorBoxOpConverter : public AnakinOpConverter { +template +class DensityPriorBoxOpConverter : public AnakinOpConverter { public: DensityPriorBoxOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/detection_out.cc b/paddle/fluid/inference/anakin/convert/detection_out.cc index 262ad28a654609..4a28c604f5853a 100644 --- a/paddle/fluid/inference/anakin/convert/detection_out.cc +++ b/paddle/fluid/inference/anakin/convert/detection_out.cc @@ -16,19 +16,14 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; - namespace paddle { namespace inference { namespace anakin { -void DetectionOutOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void DetectionOutOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); auto target_name = op_desc.Input("TargetBox").front(); auto prior_box_name = op_desc.Input("PriorBox").front(); @@ -52,22 +47,28 @@ void DetectionOutOpConverter::operator()(const framework::proto::OpDesc &op, "Not support encode_center_size code_type in DetectionOut of anakin"); } - engine_->AddOp(op_name, "DetectionOutput", - {target_name, scores_name, prior_box_name}, {output_name}); - engine_->AddOpAttr(op_name, "share_location", true); - engine_->AddOpAttr(op_name, "variance_encode_in_target", false); - 
engine_->AddOpAttr(op_name, "class_num", static_cast(0)); - engine_->AddOpAttr(op_name, "background_id", background_label); - engine_->AddOpAttr(op_name, "keep_top_k", keep_top_k); - engine_->AddOpAttr(op_name, "code_type", anakin_code_type); - engine_->AddOpAttr(op_name, "conf_thresh", score_threshold); - engine_->AddOpAttr(op_name, "nms_top_k", nms_top_k); - engine_->AddOpAttr(op_name, "nms_thresh", nms_threshold); - engine_->AddOpAttr(op_name, "nms_eta", nms_eta); + this->engine_->AddOp(op_name, "DetectionOutput", + {target_name, scores_name, prior_box_name}, + {output_name}); + this->engine_->AddOpAttr(op_name, "share_location", true); + this->engine_->AddOpAttr(op_name, "variance_encode_in_target", false); + this->engine_->AddOpAttr(op_name, "class_num", static_cast(0)); + this->engine_->AddOpAttr(op_name, "background_id", background_label); + this->engine_->AddOpAttr(op_name, "keep_top_k", keep_top_k); + this->engine_->AddOpAttr(op_name, "code_type", anakin_code_type); + this->engine_->AddOpAttr(op_name, "conf_thresh", score_threshold); + this->engine_->AddOpAttr(op_name, "nms_top_k", nms_top_k); + this->engine_->AddOpAttr(op_name, "nms_thresh", nms_threshold); + this->engine_->AddOpAttr(op_name, "nms_eta", nms_eta); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(detection_out, DetectionOutOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(detection_out, + DetectionOutOpConverter<::anakin::saber::NV>); +#endif +REGISTER_CPU_ANAKIN_OP_CONVERTER(detection_out, + DetectionOutOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/detection_out.h b/paddle/fluid/inference/anakin/convert/detection_out.h index ca78f10fdc2a7c..396d5c9554fda7 100644 --- a/paddle/fluid/inference/anakin/convert/detection_out.h +++ b/paddle/fluid/inference/anakin/convert/detection_out.h @@ -22,7 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -class DetectionOutOpConverter : public AnakinOpConverter { +template +class DetectionOutOpConverter : public AnakinOpConverter { public: DetectionOutOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/dropout.cc b/paddle/fluid/inference/anakin/convert/dropout.cc index bc9b26dcf27333..989eafcd91ef46 100644 --- a/paddle/fluid/inference/anakin/convert/dropout.cc +++ b/paddle/fluid/inference/anakin/convert/dropout.cc @@ -19,21 +19,16 @@ using anakin::graph::GraphGlobalMem; using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; using anakin::saber::Shape; -using anakin::PBlock; -using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void DropoutOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void DropoutOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Mask").size(), 1); @@ -43,25 +38,30 @@ void DropoutOpConverter::operator()(const framework::proto::OpDesc &op, auto out_name = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Scale", {x_name}, {out_name}); + this->engine_->AddOp(op_name, "Scale", {x_name}, {out_name}); auto dropout_prob = 
boost::get(op_desc.GetAttr("dropout_prob")); auto factor = 1 - dropout_prob; Shape shape1(std::vector({1, 1, 1, 1})); auto *weight1 = - GraphGlobalMem::Global().template new_block(shape1); + GraphGlobalMem::Global().template new_block(shape1); auto *factor_data = static_cast(weight1->h_tensor().mutable_data()); float weight1_data[] = {factor}; std::copy(std::begin(weight1_data), std::end(weight1_data), factor_data); - engine_->AddOpAttr(op_name, "weight_1", *weight1); - engine_->AddOpAttr(op_name, "axis", 0); - engine_->AddOpAttr(op_name, "num_axes", 0); - engine_->AddOpAttr(op_name, "bias_term", false); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + this->engine_->AddOpAttr(op_name, "axis", 0); + this->engine_->AddOpAttr(op_name, "num_axes", 0); + this->engine_->AddOpAttr(op_name, "bias_term", false); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(dropout, DropoutOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(dropout, + DropoutOpConverter<::anakin::saber::NV>); +#endif +REGISTER_CPU_ANAKIN_OP_CONVERTER(dropout, + DropoutOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/dropout.h b/paddle/fluid/inference/anakin/convert/dropout.h index 11412e217ef5fa..c43c851fc0ee60 100644 --- a/paddle/fluid/inference/anakin/convert/dropout.h +++ b/paddle/fluid/inference/anakin/convert/dropout.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class DropoutOpConverter : public AnakinOpConverter { +template +class DropoutOpConverter : public AnakinOpConverter { public: DropoutOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/elementwise.cc b/paddle/fluid/inference/anakin/convert/elementwise.cc index fe9a896d8266e0..81e1d10d82bd66 100644 --- a/paddle/fluid/inference/anakin/convert/elementwise.cc +++ b/paddle/fluid/inference/anakin/convert/elementwise.cc @@ -19,18 +19,15 @@ using anakin::graph::GraphGlobalMem; using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; using anakin::saber::Shape; -using anakin::PBlock; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void ElementwiseAddOpConverter::operator()( +template +void ElementwiseAddOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -43,14 +40,16 @@ void ElementwiseAddOpConverter::operator()( auto out_name = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Eltwise", {x_name, y_name}, {out_name}); + this->engine_->AddOp(op_name, "Eltwise", {x_name, y_name}, {out_name}); std::string elementwise_type = "Add"; - engine_->AddOpAttr(op_name, "type", elementwise_type); + this->engine_->template AddOpAttr(op_name, "type", + elementwise_type); std::vector coeff = {1.0, 1.0}; - engine_->AddOpAttr>(op_name, "coeff", coeff); + this->engine_->template AddOpAttr>(op_name, "coeff", coeff); } -void ElementwiseMulOpConverter::operator()( +template +void ElementwiseMulOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -63,26 +62,25 @@ void ElementwiseMulOpConverter::operator()( auto out_name = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + 
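
Aside: the dropout conversion above relies on the fact that, with Paddle's default dropout behavior, inference-time dropout reduces to scaling the input by (1 - dropout_prob); that factor becomes the single value in the Scale op's weight_1, with bias_term set to false. For example, dropout_prob = 0.3 yields a scale of 0.7. A plain reference version of the inference-time semantics:

    // Inference-time dropout as encoded above: one multiplication by
    // factor = 1 - dropout_prob (Paddle's default behavior); no mask is used.
    #include <cstddef>
    #include <vector>

    std::vector<float> DropoutInferenceRef(const std::vector<float>& x,
                                           float dropout_prob) {
      float factor = 1.0f - dropout_prob;  // the single value in weight_1
      std::vector<float> out(x.size());
      for (std::size_t i = 0; i < x.size(); ++i) {
        out[i] = factor * x[i];
      }
      return out;
    }
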
":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Scale", {x_name, y_name}, {out_name}); - // Fill a number to weight_1 as a placeholder. - Shape shape1(std::vector({1, 1, 1, 1})); - auto *weight1 = - GraphGlobalMem::Global().template new_block(shape1); - auto *placeholder_data = - static_cast(weight1->h_tensor().mutable_data()); - float weight1_data[] = {1}; - std::copy(std::begin(weight1_data), std::end(weight1_data), placeholder_data); - engine_->AddOpAttr(op_name, "weight_1", *weight1); - - auto axis = boost::get(op_desc.GetAttr("axis")); - engine_->AddOpAttr(op_name, "axis", axis); - engine_->AddOpAttr(op_name, "num_axes", 1); - engine_->AddOpAttr(op_name, "bias_term", false); + this->engine_->AddOp(op_name, "Eltwise", {x_name, y_name}, {out_name}); + std::string elementwise_type = "Prod"; + this->engine_->template AddOpAttr(op_name, "type", + elementwise_type); + std::vector coeff = {1.0, 1.0}; + this->engine_->template AddOpAttr>(op_name, "coeff", coeff); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(elementwise_add, ElementwiseAddOpConverter); -REGISTER_ANAKIN_OP_CONVERTER(elementwise_mul, ElementwiseMulOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER( + elementwise_add, ElementwiseAddOpConverter<::anakin::saber::NV>); +REGISTER_CUDA_ANAKIN_OP_CONVERTER( + elementwise_mul, ElementwiseMulOpConverter<::anakin::saber::NV>); +#endif +REGISTER_CPU_ANAKIN_OP_CONVERTER( + elementwise_add, ElementwiseAddOpConverter<::anakin::saber::X86>); +REGISTER_CPU_ANAKIN_OP_CONVERTER( + elementwise_mul, ElementwiseMulOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/elementwise.h b/paddle/fluid/inference/anakin/convert/elementwise.h index e4664493a9d3ce..f64a8c5f7f3234 100644 --- a/paddle/fluid/inference/anakin/convert/elementwise.h +++ b/paddle/fluid/inference/anakin/convert/elementwise.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class ElementwiseAddOpConverter : public AnakinOpConverter { +template +class ElementwiseAddOpConverter : public AnakinOpConverter { public: ElementwiseAddOpConverter() = default; @@ -33,7 +34,8 @@ class ElementwiseAddOpConverter : public AnakinOpConverter { private: }; -class ElementwiseMulOpConverter : public AnakinOpConverter { +template +class ElementwiseMulOpConverter : public AnakinOpConverter { public: ElementwiseMulOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/fc.cc b/paddle/fluid/inference/anakin/convert/fc.cc index a80a1a47e91aa0..a04035eabace01 100644 --- a/paddle/fluid/inference/anakin/convert/fc.cc +++ b/paddle/fluid/inference/anakin/convert/fc.cc @@ -19,17 +19,16 @@ using anakin::graph::GraphGlobalMem; using anakin::AK_FLOAT; -using anakin::saber::NV; using anakin::saber::Shape; namespace paddle { namespace inference { namespace anakin { -void FcBaseOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void FcBaseOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); auto input_names = op_desc.InputNames(); bool with_bias = input_names.size() == 3; @@ -51,13 +50,13 @@ void FcBaseOpConverter::operator()(const framework::proto::OpDesc &op, auto input_name = op_desc.Input(i_name).front(); auto output_name = 
op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Dense", {input_name}, {output_name}); - engine_->AddOpAttr(op_name, "bias_term", with_bias); - engine_->AddOpAttr(op_name, "axis", 1); + this->engine_->AddOp(op_name, "Dense", {input_name}, {output_name}); + this->engine_->AddOpAttr(op_name, "bias_term", with_bias); + this->engine_->AddOpAttr(op_name, "axis", 1); auto weight_shape = framework::vectorize2int(y_t->dims()); int out_dim = weight_shape[1]; - engine_->AddOpAttr(op_name, "out_dim", out_dim); + this->engine_->AddOpAttr(op_name, "out_dim", out_dim); const int w_m = weight_shape[0]; const int w_k = weight_shape[1]; @@ -79,12 +78,13 @@ void FcBaseOpConverter::operator()(const framework::proto::OpDesc &op, } } auto *weight1 = - GraphGlobalMem::Global().template new_block(anakin_shape); + GraphGlobalMem::Global().template new_block( + anakin_shape); float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); std::copy_n(trans_weight_data.data(), weight_tensor.numel(), cpu_data); weight1->d_tensor().set_shape(anakin_shape); weight1->d_tensor().copy_from(weight1->h_tensor()); - engine_->AddOpAttr(op_name, "weight_1", *weight1); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); // get bias if (with_bias) { @@ -104,13 +104,14 @@ void FcBaseOpConverter::operator()(const framework::proto::OpDesc &op, // bias_shape.push_back(1); Shape anakin_bias_shape(bias_shape); - auto *weight2 = GraphGlobalMem::Global().template new_block( - anakin_bias_shape); + auto *weight2 = + GraphGlobalMem::Global().template new_block( + anakin_bias_shape); float *cpu_data2 = static_cast(weight2->h_tensor().mutable_data()); std::copy_n(bias_data, bias_tensor.numel(), cpu_data2); weight2->d_tensor().set_shape(anakin_bias_shape); weight2->d_tensor().copy_from(weight2->h_tensor()); - engine_->AddOpAttr(op_name, "weight_2", *weight2); + this->engine_->AddOpAttr(op_name, "weight_2", *weight2); } } @@ -118,5 +119,10 @@ void FcBaseOpConverter::operator()(const framework::proto::OpDesc &op, } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(mul, MulOpConverter); -REGISTER_ANAKIN_OP_CONVERTER(fc, FcOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(mul, MulOpConverter<::anakin::saber::NV>); +REGISTER_CUDA_ANAKIN_OP_CONVERTER(fc, FcOpConverter<::anakin::saber::NV>); +#endif + +REGISTER_CPU_ANAKIN_OP_CONVERTER(mul, MulOpConverter<::anakin::saber::X86>); +REGISTER_CPU_ANAKIN_OP_CONVERTER(fc, FcOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/fc.h b/paddle/fluid/inference/anakin/convert/fc.h index fb461908b35e01..10808c315757b7 100644 --- a/paddle/fluid/inference/anakin/convert/fc.h +++ b/paddle/fluid/inference/anakin/convert/fc.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class FcBaseOpConverter : public AnakinOpConverter { +template +class FcBaseOpConverter : public AnakinOpConverter { public: FcBaseOpConverter() = default; @@ -32,13 +33,15 @@ class FcBaseOpConverter : public AnakinOpConverter { }; // with bias -class FcOpConverter : public FcBaseOpConverter { +template +class FcOpConverter : public FcBaseOpConverter { public: FcOpConverter() = default; }; // without bias -class MulOpConverter : public FcBaseOpConverter { +template +class MulOpConverter : public FcBaseOpConverter { public: MulOpConverter() = default; }; diff --git a/paddle/fluid/inference/anakin/convert/flatten.cc b/paddle/fluid/inference/anakin/convert/flatten.cc index 7f5c1510960d10..a38dec25d831c7 100644 --- 
a/paddle/fluid/inference/anakin/convert/flatten.cc +++ b/paddle/fluid/inference/anakin/convert/flatten.cc @@ -15,20 +15,16 @@ #include "paddle/fluid/inference/anakin/convert/flatten.h" #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void FlattenOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void FlattenOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1UL); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1UL); @@ -41,12 +37,17 @@ void FlattenOpConverter::operator()(const framework::proto::OpDesc &op, std::vector out_dims = {0, -1, 1, 1}; auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Reshape", {input}, {output}); - engine_->AddOpAttr>(op_name, "dims", out_dims); + this->engine_->AddOp(op_name, "Reshape", {input}, {output}); + this->engine_->template AddOpAttr>(op_name, "dims", out_dims); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(flatten, FlattenOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(flatten, + FlattenOpConverter<::anakin::saber::NV>); +#endif +REGISTER_CPU_ANAKIN_OP_CONVERTER(flatten, + FlattenOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/flatten.h b/paddle/fluid/inference/anakin/convert/flatten.h index c9cc0006eb2448..cd29b6e7d7384d 100644 --- a/paddle/fluid/inference/anakin/convert/flatten.h +++ b/paddle/fluid/inference/anakin/convert/flatten.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class FlattenOpConverter : public AnakinOpConverter { +template +class FlattenOpConverter : public AnakinOpConverter { public: FlattenOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/im2sequence.cc b/paddle/fluid/inference/anakin/convert/im2sequence.cc index 2cc330c3829f60..bd7e9b4b63c501 100644 --- a/paddle/fluid/inference/anakin/convert/im2sequence.cc +++ b/paddle/fluid/inference/anakin/convert/im2sequence.cc @@ -17,23 +17,16 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void Im2SequenceConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void Im2SequenceConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Y").size(), 0); @@ -43,21 +36,24 @@ void Im2SequenceConverter::operator()(const framework::proto::OpDesc &op, auto out_name = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Im2Sequence", {x_name}, {out_name}); + this->engine_->AddOp(op_name, 
"Im2Sequence", {x_name}, {out_name}); std::vector dilations = {1, 1}; auto paddings = boost::get>(op_desc.GetAttr("paddings")); auto strides = boost::get>(op_desc.GetAttr("strides")); auto kernels = boost::get>(op_desc.GetAttr("kernels")); - engine_->AddOpAttr>(op_name, "paddings", paddings); - engine_->AddOpAttr>(op_name, "strides", strides); - engine_->AddOpAttr>(op_name, "window_size", kernels); - engine_->AddOpAttr>(op_name, "dilations", dilations); + this->engine_->template AddOpAttr>(op_name, "paddings", paddings); + this->engine_->template AddOpAttr>(op_name, "strides", strides); + this->engine_->template AddOpAttr>(op_name, "window_size", + kernels); + this->engine_->template AddOpAttr>(op_name, "dilations", + dilations); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(im2sequence, Im2SequenceConverter); +REGISTER_CUDA_ANAKIN_OP_CONVERTER(im2sequence, + Im2SequenceConverter<::anakin::saber::NV>); diff --git a/paddle/fluid/inference/anakin/convert/im2sequence.h b/paddle/fluid/inference/anakin/convert/im2sequence.h index 714679c1d96011..97d1564b02817d 100644 --- a/paddle/fluid/inference/anakin/convert/im2sequence.h +++ b/paddle/fluid/inference/anakin/convert/im2sequence.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class Im2SequenceConverter : public AnakinOpConverter { +template +class Im2SequenceConverter : public AnakinOpConverter { public: Im2SequenceConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/op_converter.h b/paddle/fluid/inference/anakin/convert/op_converter.h index bffab229ede775..71631a7745c9d2 100644 --- a/paddle/fluid/inference/anakin/convert/op_converter.h +++ b/paddle/fluid/inference/anakin/convert/op_converter.h @@ -32,10 +32,10 @@ namespace paddle { namespace inference { namespace anakin { -using AnakinNvEngine = - AnakinEngine<::anakin::saber::NV, ::anakin::Precision::FP32>; - +template class AnakinOpConverter { + using AnakinEngineT = AnakinEngine; + public: AnakinOpConverter() = default; @@ -45,7 +45,7 @@ class AnakinOpConverter { void ConvertOp(const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const std::unordered_set ¶meters, - const framework::Scope &scope, AnakinNvEngine *engine, + const framework::Scope &scope, AnakinEngineT *engine, bool test_mode = false) { framework::OpDesc op_desc(op, nullptr); std::string op_type = op_desc.Type(); @@ -65,7 +65,7 @@ class AnakinOpConverter { void ConvertBlock(framework::BlockDesc *block_desc, const std::unordered_set ¶meters, - const framework::Scope &scope, AnakinNvEngine *engine) { + const framework::Scope &scope, AnakinEngineT *engine) { std::unique_lock lock(mutex_); framework::proto::BlockDesc *block = block_desc->Proto(); for (auto i = 0; i < block->ops_size(); i++) { @@ -79,7 +79,7 @@ class AnakinOpConverter { framework::BlockDesc *block_desc, framework::Scope *scope, const std::vector &inputs, const std::unordered_set ¶meters, - const std::vector &outputs, AnakinNvEngine *engine) { + const std::vector &outputs, AnakinEngineT *engine) { ConvertBlock(block_desc, parameters, *scope, engine); // if the max_batch size int max_batch_size = engine->GetMaxBatchSize(); @@ -128,40 +128,60 @@ class AnakinOpConverter { engine->InitNet(); } - void SetEngine(AnakinNvEngine *engine) { engine_ = engine; } + void SetEngine(AnakinEngineT *engine) { engine_ = engine; } virtual ~AnakinOpConverter() {} protected: bool test_mode_; - AnakinNvEngine *engine_{nullptr}; + AnakinEngineT 
*engine_{nullptr}; private: - std::unordered_map converters_; + std::unordered_map *> converters_; framework::Scope *scope_{nullptr}; std::mutex mutex_; }; +template class AnakinOpConverter<::anakin::saber::NV>; +template class AnakinOpConverter<::anakin::saber::X86>; } // namespace anakin } // namespace inference } // namespace paddle -#define REGISTER_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ - struct anakin_##op_type__##_converter \ - : public ::paddle::framework::Registrar { \ - anakin_##op_type__##_converter() { \ - LOG(INFO) << "register convert " << #op_type__; \ - ::paddle::inference::Registry< \ - ::paddle::inference::anakin::AnakinOpConverter>::Global() \ - .Register<::paddle::inference::anakin::Converter__>(#op_type__); \ - } \ - }; \ - anakin_##op_type__##_converter anakin_##op_type__##_converter__; \ - int TouchConverterRegister_anakin_##op_type__() { \ - anakin_##op_type__##_converter__.Touch(); \ - return 0; \ +#define REGISTER_ANAKIN_OP_CONVERTER_BASE(op_type__, Converter__, \ + place_type__, place_class__) \ + struct anakin_##op_type__##_##place_type__##_converter \ + : public ::paddle::framework::Registrar { \ + anakin_##op_type__##_##place_type__##_converter() { \ + LOG(INFO) << "register convert " << #op_type__ << " "; \ + ::paddle::inference::Registry< \ + ::paddle::inference::anakin::AnakinOpConverter>:: \ + Global() \ + .Register<::paddle::inference::anakin::Converter__>(#op_type__); \ + } \ + }; \ + anakin_##op_type__##_##place_type__##_converter \ + anakin_##op_type__##_##place_type__##_converter__; \ + int TouchConverterRegister_anakin_##op_type__##_##place_type__() { \ + anakin_##op_type__##_##place_type__##_converter__.Touch(); \ + return 0; \ } -#define USE_ANAKIN_CONVERTER(op_type__) \ - extern int TouchConverterRegister_anakin_##op_type__(); \ - int use_op_converter_anakin_##op_type__ __attribute__((unused)) = \ - TouchConverterRegister_anakin_##op_type__(); +#define REGISTER_CUDA_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ + REGISTER_ANAKIN_OP_CONVERTER_BASE(op_type__, Converter__, CUDA, \ + ::anakin::saber::NV) + +#define REGISTER_CPU_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ + REGISTER_ANAKIN_OP_CONVERTER_BASE(op_type__, Converter__, CPU, \ + ::anakin::saber::X86) + +#define USE_ANAKIN_CONVERTER_BASE(op_type__, place_type__) \ + extern int TouchConverterRegister_anakin_##op_type__##_##place_type__(); \ + int use_op_converter_anakin_##op_type__##_##place_type__ \ + __attribute__((unused)) = \ + TouchConverterRegister_anakin_##op_type__##_##place_type__(); + +#define USE_ANAKIN_CONVERTER(op_type__) \ + USE_ANAKIN_CONVERTER_BASE(op_type__, CUDA) + +#define USE_CPU_ANAKIN_CONVERTER(op_type__) \ + USE_ANAKIN_CONVERTER_BASE(op_type__, CPU) diff --git a/paddle/fluid/inference/anakin/convert/pool2d.cc b/paddle/fluid/inference/anakin/convert/pool2d.cc index 87eefe712a5ad2..d0206a5bf9b4eb 100644 --- a/paddle/fluid/inference/anakin/convert/pool2d.cc +++ b/paddle/fluid/inference/anakin/convert/pool2d.cc @@ -17,23 +17,16 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void Pool2dOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void Pool2dOpConverter::operator()( + const framework::proto::OpDesc &op, 
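// ---------------------------------------------------------------------------
// [Editor's note, not part of the original patch] The op_converter.h changes
// above replace the single CUDA-only AnakinOpConverter/registration path with
// a converter class templated on the Anakin target plus per-target
// registration macros. A minimal usage sketch, assuming a hypothetical
// converter MyOpConverter<TargetT> for an op named "my_op" (these names are
// illustrative, not symbols introduced by this patch):
//
//   #ifdef PADDLE_WITH_CUDA
//   REGISTER_CUDA_ANAKIN_OP_CONVERTER(my_op,
//                                     MyOpConverter<::anakin::saber::NV>);
//   #endif
//   REGISTER_CPU_ANAKIN_OP_CONVERTER(my_op,
//                                    MyOpConverter<::anakin::saber::X86>);
//
//   // In code that must force the converter's object file to be linked in:
//   USE_ANAKIN_CONVERTER(my_op);      // expands to the CUDA/NV registration
//   USE_CPU_ANAKIN_CONVERTER(my_op);  // expands to the CPU/X86 registration
// ---------------------------------------------------------------------------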
const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -65,17 +58,22 @@ void Pool2dOpConverter::operator()(const framework::proto::OpDesc &op, PADDLE_THROW("TensorRT unsupported pooling type!"); } - engine_->AddOp(op_name, "Pooling", {x_name}, {y_name}); - engine_->AddOpAttr>(op_name, "pool_size", ksize); - engine_->AddOpAttr>(op_name, "strides", strides); - engine_->AddOpAttr>(op_name, "padding", paddings); - engine_->AddOpAttr(op_name, "method", anakin_pool_type); - engine_->AddOpAttr(op_name, "global_pooling", global_pooling); - engine_->AddOpAttr(op_name, "cmp_out_shape_floor_as_conv", !ceil_mode); + this->engine_->AddOp(op_name, "Pooling", {x_name}, {y_name}); + this->engine_->template AddOpAttr>(op_name, "pool_size", ksize); + this->engine_->template AddOpAttr>(op_name, "strides", strides); + this->engine_->template AddOpAttr>(op_name, "padding", paddings); + this->engine_->AddOpAttr(op_name, "method", anakin_pool_type); + this->engine_->AddOpAttr(op_name, "global_pooling", global_pooling); + this->engine_->AddOpAttr(op_name, "cmp_out_shape_floor_as_conv", !ceil_mode); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(pool2d, Pool2dOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(pool2d, + Pool2dOpConverter<::anakin::saber::NV>); +#endif +REGISTER_CPU_ANAKIN_OP_CONVERTER(pool2d, + Pool2dOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/pool2d.h b/paddle/fluid/inference/anakin/convert/pool2d.h index ec28e48ac848ef..0f85ec14b33dd6 100644 --- a/paddle/fluid/inference/anakin/convert/pool2d.h +++ b/paddle/fluid/inference/anakin/convert/pool2d.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class Pool2dOpConverter : public AnakinOpConverter { +template +class Pool2dOpConverter : public AnakinOpConverter { public: Pool2dOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/relu.cc b/paddle/fluid/inference/anakin/convert/relu.cc index 744066e88afc61..71de3113cba1da 100644 --- a/paddle/fluid/inference/anakin/convert/relu.cc +++ b/paddle/fluid/inference/anakin/convert/relu.cc @@ -16,19 +16,14 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; - namespace paddle { namespace inference { namespace anakin { -void ReluOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void ReluOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -37,14 +32,14 @@ void ReluOpConverter::operator()(const framework::proto::OpDesc &op, auto input_name = op_desc.Input("X").front(); auto output_name = op_desc.Output("Out").front(); - engine_->AddOp(op_name, "ReLU", {input_name}, {output_name}); - engine_->AddOpAttr(op_name, "alpha", 0); + this->engine_->AddOp(op_name, "ReLU", {input_name}, {output_name}); + this->engine_->AddOpAttr(op_name, "alpha", 0); } -void LeakyReluOpConverter::operator()(const framework::proto::OpDesc &op, - const 
framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void LeakyReluOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -54,13 +49,19 @@ void LeakyReluOpConverter::operator()(const framework::proto::OpDesc &op, auto output_name = op_desc.Output("Out").front(); float alpha = boost::get(op_desc.GetAttr("alpha")); - engine_->AddOp(op_name, "ReLU", {input_name}, {output_name}); - engine_->AddOpAttr(op_name, "alpha", alpha); + this->engine_->AddOp(op_name, "ReLU", {input_name}, {output_name}); + this->engine_->AddOpAttr(op_name, "alpha", alpha); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(relu, ReluOpConverter); -REGISTER_ANAKIN_OP_CONVERTER(leaky_relu, LeakyReluOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(relu, ReluOpConverter<::anakin::saber::NV>); +REGISTER_CUDA_ANAKIN_OP_CONVERTER(leaky_relu, + LeakyReluOpConverter<::anakin::saber::NV>); +#endif +REGISTER_CPU_ANAKIN_OP_CONVERTER(relu, ReluOpConverter<::anakin::saber::X86>); +REGISTER_CPU_ANAKIN_OP_CONVERTER(leaky_relu, + LeakyReluOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/relu.h b/paddle/fluid/inference/anakin/convert/relu.h index d7b6b6934d6f74..74222a7ea1bb93 100644 --- a/paddle/fluid/inference/anakin/convert/relu.h +++ b/paddle/fluid/inference/anakin/convert/relu.h @@ -22,7 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -class ReluOpConverter : public AnakinOpConverter { +template +class ReluOpConverter : public AnakinOpConverter { public: ReluOpConverter() = default; @@ -33,7 +34,8 @@ class ReluOpConverter : public AnakinOpConverter { virtual ~ReluOpConverter() {} }; -class LeakyReluOpConverter : public AnakinOpConverter { +template +class LeakyReluOpConverter : public AnakinOpConverter { public: LeakyReluOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/reshape.cc b/paddle/fluid/inference/anakin/convert/reshape.cc index 17e0a1acb5f4e0..a6696e8e81b72c 100644 --- a/paddle/fluid/inference/anakin/convert/reshape.cc +++ b/paddle/fluid/inference/anakin/convert/reshape.cc @@ -15,20 +15,16 @@ #include "paddle/fluid/inference/anakin/convert/reshape.h" #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void ReshapeOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void ReshapeOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1UL); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1UL); @@ -37,17 +33,23 @@ void ReshapeOpConverter::operator()(const framework::proto::OpDesc &op, auto output = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Reshape", {input}, {output}); + this->engine_->AddOp(op_name, "Reshape", {input}, {output}); auto shape = 
boost::get>(op_desc.GetAttr("shape")); if (shape.size() < 4) { shape.insert(shape.end(), 4 - shape.size(), 1); } - engine_->AddOpAttr>(op_name, "dims", shape); + this->engine_->template AddOpAttr>(op_name, "dims", shape); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(reshape, ReshapeOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(reshape, + ReshapeOpConverter<::anakin::saber::NV>); +#endif + +REGISTER_CPU_ANAKIN_OP_CONVERTER(reshape, + ReshapeOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/reshape.h b/paddle/fluid/inference/anakin/convert/reshape.h index 9ce2ea2a4f3f88..bd0fd08c5cb913 100644 --- a/paddle/fluid/inference/anakin/convert/reshape.h +++ b/paddle/fluid/inference/anakin/convert/reshape.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class ReshapeOpConverter : public AnakinOpConverter { +template +class ReshapeOpConverter : public AnakinOpConverter { public: ReshapeOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/roi_align.cc b/paddle/fluid/inference/anakin/convert/roi_align.cc index 0f2b08df08a9ad..152578b50fec38 100644 --- a/paddle/fluid/inference/anakin/convert/roi_align.cc +++ b/paddle/fluid/inference/anakin/convert/roi_align.cc @@ -25,10 +25,10 @@ namespace paddle { namespace inference { namespace anakin { -void RoiAlignOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void RoiAlignOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Input("ROIs").size(), 1); @@ -44,16 +44,21 @@ void RoiAlignOpConverter::operator()(const framework::proto::OpDesc &op, auto pooled_width = boost::get(op_desc.GetAttr("pooled_width")); auto sampling_ratio = boost::get(op_desc.GetAttr("sampling_ratio")); - engine_->AddOp(op_name, "RoiAlign", {input_x_name, input_rois_name}, - {output_name}); - engine_->AddOpAttr(op_name, "spatial_scale", spatial_scale); - engine_->AddOpAttr(op_name, "pooled_height", pooled_height); - engine_->AddOpAttr(op_name, "pooled_width", pooled_width); - engine_->AddOpAttr(op_name, "sampling_ratio", sampling_ratio); + this->engine_->AddOp(op_name, "RoiAlign", {input_x_name, input_rois_name}, + {output_name}); + this->engine_->AddOpAttr(op_name, "spatial_scale", spatial_scale); + this->engine_->AddOpAttr(op_name, "pooled_height", pooled_height); + this->engine_->AddOpAttr(op_name, "pooled_width", pooled_width); + this->engine_->AddOpAttr(op_name, "sampling_ratio", sampling_ratio); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(roi_align, RoiAlignOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(roi_align, + RoiAlignOpConverter<::anakin::saber::NV>); +#endif +REGISTER_CPU_ANAKIN_OP_CONVERTER(roi_align, + RoiAlignOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/roi_align.h b/paddle/fluid/inference/anakin/convert/roi_align.h index c6df4754ba9b5e..93c28f3e055629 100644 --- a/paddle/fluid/inference/anakin/convert/roi_align.h +++ b/paddle/fluid/inference/anakin/convert/roi_align.h @@ -22,7 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -class 
RoiAlignOpConverter : public AnakinOpConverter { +template +class RoiAlignOpConverter : public AnakinOpConverter { public: RoiAlignOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/scale.cc b/paddle/fluid/inference/anakin/convert/scale.cc index dd68af4f79a6d1..d72f9a5fa0c28d 100644 --- a/paddle/fluid/inference/anakin/convert/scale.cc +++ b/paddle/fluid/inference/anakin/convert/scale.cc @@ -16,19 +16,14 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; - namespace paddle { namespace inference { namespace anakin { -void ScaleOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void ScaleOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -44,14 +39,14 @@ void ScaleOpConverter::operator()(const framework::proto::OpDesc &op, PADDLE_ENFORCE(bias_after_scale, "The anakin scale layer only support bias after scale now."); - engine_->AddOp(op_name, "Power", {input_name}, {output_name}); - engine_->AddOpAttr(op_name, "shift", bias); - engine_->AddOpAttr(op_name, "scale", scale); - engine_->AddOpAttr(op_name, "power", static_cast(1.0)); + this->engine_->AddOp(op_name, "Power", {input_name}, {output_name}); + this->engine_->AddOpAttr(op_name, "shift", bias); + this->engine_->AddOpAttr(op_name, "scale", scale); + this->engine_->AddOpAttr(op_name, "power", static_cast(1.0)); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(scale, ScaleOpConverter); +REGISTER_CUDA_ANAKIN_OP_CONVERTER(scale, ScaleOpConverter<::anakin::saber::NV>); diff --git a/paddle/fluid/inference/anakin/convert/scale.h b/paddle/fluid/inference/anakin/convert/scale.h index ba3bcdd21494a4..92d936b526226a 100644 --- a/paddle/fluid/inference/anakin/convert/scale.h +++ b/paddle/fluid/inference/anakin/convert/scale.h @@ -22,7 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -class ScaleOpConverter : public AnakinOpConverter { +template +class ScaleOpConverter : public AnakinOpConverter { public: ScaleOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/softmax.cc b/paddle/fluid/inference/anakin/convert/softmax.cc index a6c1e971b16fa7..851dafa8bdf63d 100644 --- a/paddle/fluid/inference/anakin/convert/softmax.cc +++ b/paddle/fluid/inference/anakin/convert/softmax.cc @@ -14,19 +14,14 @@ #include "paddle/fluid/inference/anakin/convert/softmax.h" -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; - namespace paddle { namespace inference { namespace anakin { -void SoftMaxOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void SoftMaxOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1UL); @@ -41,12 +36,18 @@ void SoftMaxOpConverter::operator()(const framework::proto::OpDesc &op, auto input_shape_in_fluid = 
input_var_desc->GetShape(); size_t input_dims = input_shape_in_fluid.size(); - engine_->AddOp(op_name, "Softmax", {input}, {output}); - engine_->AddOpAttr(op_name, "axis", static_cast(input_dims - 1)); + this->engine_->AddOp(op_name, "Softmax", {input}, {output}); + this->engine_->AddOpAttr(op_name, "axis", static_cast(input_dims - 1)); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(softmax, SoftMaxOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(softmax, + SoftMaxOpConverter<::anakin::saber::NV>); +#endif + +REGISTER_CPU_ANAKIN_OP_CONVERTER(softmax, + SoftMaxOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/softmax.h b/paddle/fluid/inference/anakin/convert/softmax.h index a16356d5bb61ac..c2421f9eb9d2e5 100644 --- a/paddle/fluid/inference/anakin/convert/softmax.h +++ b/paddle/fluid/inference/anakin/convert/softmax.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class SoftMaxOpConverter : public AnakinOpConverter { +template +class SoftMaxOpConverter : public AnakinOpConverter { public: SoftMaxOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/split.cc b/paddle/fluid/inference/anakin/convert/split.cc index ec582c1812623c..f99233e78b59fc 100644 --- a/paddle/fluid/inference/anakin/convert/split.cc +++ b/paddle/fluid/inference/anakin/convert/split.cc @@ -16,23 +16,16 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void SplitOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void SplitOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); auto input_name = op_desc.Input("X").front(); auto y_names = op_desc.Output("Out"); @@ -51,14 +44,19 @@ void SplitOpConverter::operator()(const framework::proto::OpDesc &op, num_sum += output_lengths[i]; slice_point.push_back(num_sum); } - engine_->AddOp(op_name, "Slice", {input_name}, y_names); - engine_->AddOpAttr(op_name, "axis", axis); - engine_->AddOpAttr>(op_name, "slice_point", slice_point); + this->engine_->AddOp(op_name, "Slice", {input_name}, y_names); + this->engine_->AddOpAttr(op_name, "axis", axis); + this->engine_->template AddOpAttr>(op_name, "slice_point", + slice_point); // slice_dim is useless in anakin - engine_->AddOpAttr(op_name, "slice_dim", 4); + this->engine_->AddOpAttr(op_name, "slice_dim", 4); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(split, SplitOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(split, SplitOpConverter<::anakin::saber::NV>); +#endif + +REGISTER_CPU_ANAKIN_OP_CONVERTER(split, SplitOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/split.h b/paddle/fluid/inference/anakin/convert/split.h index 184112e589e2bb..989d7acd500e9f 100644 --- a/paddle/fluid/inference/anakin/convert/split.h +++ b/paddle/fluid/inference/anakin/convert/split.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class SplitOpConverter : 
public AnakinOpConverter { +template +class SplitOpConverter : public AnakinOpConverter { public: SplitOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/sum.cc b/paddle/fluid/inference/anakin/convert/sum.cc index 2a4178e2371389..7fc9d764078849 100644 --- a/paddle/fluid/inference/anakin/convert/sum.cc +++ b/paddle/fluid/inference/anakin/convert/sum.cc @@ -17,22 +17,17 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void SumOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, bool test_mode) { +template +void SumOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::BlockDesc &block_desc, + const framework::Scope &scope, + bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 2); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -43,13 +38,17 @@ void SumOpConverter::operator()(const framework::proto::OpDesc &op, std::vector coeff = {1, 1}; std::string elementwise_type = "Add"; - engine_->AddOp(op_name, "Eltwise", input_names, {out_name}); - engine_->AddOpAttr>(op_name, "coeff", coeff); - engine_->AddOpAttr(op_name, "type", elementwise_type); + this->engine_->AddOp(op_name, "Eltwise", input_names, {out_name}); + this->engine_->template AddOpAttr>(op_name, "coeff", coeff); + this->engine_->template AddOpAttr(op_name, "type", + elementwise_type); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(sum, SumOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(sum, SumOpConverter<::anakin::saber::NV>); +#endif +REGISTER_CPU_ANAKIN_OP_CONVERTER(sum, SumOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/sum.h b/paddle/fluid/inference/anakin/convert/sum.h index b5d402b77fcf55..27c15a82ebd471 100644 --- a/paddle/fluid/inference/anakin/convert/sum.h +++ b/paddle/fluid/inference/anakin/convert/sum.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class SumOpConverter : public AnakinOpConverter { +template +class SumOpConverter : public AnakinOpConverter { public: SumOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/test_activation_op.cc b/paddle/fluid/inference/anakin/convert/test_activation_op.cc index 8bedd4a749a645..18b8b6f3b63bc6 100644 --- a/paddle/fluid/inference/anakin/convert/test_activation_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_activation_op.cc @@ -21,12 +21,14 @@ namespace paddle { namespace inference { namespace anakin { -static void test_activation_op(const std::string &op_type) { - auto *converter = Registry::Global().Lookup(op_type); - PADDLE_ENFORCE(converter != nullptr); +template +static void test_activation_op(const std::string& op_type, + const platform::DeviceContext& context, + bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("act-X", {10, 6, 1, 1}); validator.DeclOutputVar("act-Out", {10, 6, 1, 1}); framework::OpDesc desc; @@ -41,13 +43,42 @@ static void test_activation_op(const std::string &op_type) { 
validator.Execute(5); } -TEST(sigm_op, test) { test_activation_op("sigmoid"); } -TEST(tanh_op, test) { test_activation_op("tanh"); } +#ifdef PADDLE_WITH_CUDA +TEST(sigm_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_activation_op<::anakin::saber::NV>("sigmoid", ctx, true); +} + +TEST(tanh_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_activation_op<::anakin::saber::NV>("tanh", ctx, true); +} +#endif + +TEST(sigm_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_activation_op<::anakin::saber::X86>("sigmoid", ctx, false); +} + +TEST(tanh_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_activation_op<::anakin::saber::X86>("tanh", ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(sigmoid); USE_OP(tanh); + +USE_CPU_ANAKIN_CONVERTER(sigmoid); +USE_CPU_ANAKIN_CONVERTER(tanh); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(sigmoid); USE_ANAKIN_CONVERTER(tanh); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc b/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc index eb4f4e12eec29d..123f93370b82a9 100644 --- a/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc @@ -21,16 +21,19 @@ namespace paddle { namespace inference { namespace anakin { -TEST(affine_channel, native) { +template +void test_affine_channel_op(const platform::DeviceContext& context, + bool use_gpu) { // Declare the difference between the inputs. std::unordered_set parameters({"scale", "bias"}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("x", {1, 3, 5, 2}); validator.DeclOutputVar("out", {1, 3, 5, 2}); - validator.DeclParamVar("scale", {1, 3, 1, 1}); - validator.DeclParamVar("bias", {1, 3, 1, 1}); + validator.DeclParamVar("scale", {3}); + validator.DeclParamVar("bias", {3}); // Prepare Op descriptions. 
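// ---------------------------------------------------------------------------
// [Editor's note, not part of the original patch] The converter unit tests in
// this patch (test_activation_op.cc above and the test files that follow) are
// reworked with one common pattern: the old TEST body becomes a helper
// templated on the Anakin target that takes a DeviceContext and a use_gpu
// flag, and thin gpu/cpu TESTs instantiate it for ::anakin::saber::NV and
// ::anakin::saber::X86. A minimal sketch of that pattern, with illustrative
// names (my_op / test_my_op) and with the AnakinConvertValidation template
// arguments assumed, since angle-bracketed arguments are elided in this
// rendering of the diff:
//
//   template <typename TargetT>
//   static void test_my_op(const platform::DeviceContext& context,
//                          bool use_gpu) {
//     std::unordered_set<std::string> parameters;
//     framework::Scope scope;
//     AnakinConvertValidation<TargetT> validator(parameters, &scope, context,
//                                                use_gpu);
//     // Declare inputs/outputs, build the framework::OpDesc, then call
//     // validator.SetOp(*desc.Proto()) and validator.Execute(batch_size).
//   }
//
//   #ifdef PADDLE_WITH_CUDA
//   TEST(my_op, gpu) {
//     platform::CUDAPlace gpu_place(0);
//     platform::CUDADeviceContext ctx(gpu_place);
//     test_my_op<::anakin::saber::NV>(ctx, true);
//   }
//   #endif
//
//   TEST(my_op, cpu) {
//     platform::CPUPlace cpu_place;
//     platform::CPUDeviceContext ctx(cpu_place);
//     test_my_op<::anakin::saber::X86>(ctx, false);
//   }
// ---------------------------------------------------------------------------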
framework::OpDesc desc; @@ -47,9 +50,26 @@ TEST(affine_channel, native) { validator.Execute(1); } +#ifdef PADDLE_WITH_CUDA +TEST(affine_channel_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_affine_channel_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(affine_channel_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_affine_channel_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(affine_channel); +USE_CPU_ANAKIN_CONVERTER(affine_channel); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(affine_channel); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc b/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc index 2832e1c8d167c6..6a6675b6abf5d1 100644 --- a/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc @@ -19,12 +19,14 @@ namespace paddle { namespace inference { namespace anakin { -TEST(batch_norm_op, test) { +template +void test_batchnorm_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters( {"batch_norm_scale", "batch_norm_bias", "batch_norm_mean", "batch_norm_variance"}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); std::vector param_shape{2}; validator.DeclInputVar("batch_norm_X", {1, 2, 5, 5}); @@ -64,8 +66,26 @@ TEST(batch_norm_op, test) { validator.Execute(1, neglected_output); } +#ifdef PADDLE_WITH_CUDA +TEST(batch_norm_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_batchnorm_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(batch_norm_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_batchnorm_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(batch_norm); +USE_CPU_ANAKIN_CONVERTER(batch_norm); + +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(batch_norm); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_concat_op.cc b/paddle/fluid/inference/anakin/convert/test_concat_op.cc index ecf44def5a2429..4ea3305e4664f0 100644 --- a/paddle/fluid/inference/anakin/convert/test_concat_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_concat_op.cc @@ -21,10 +21,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(concat_op, test) { +template +void test_concat_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters({""}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("concat_x1", {1, 2, 1, 1}); validator.DeclInputVar("concat_x2", {1, 3, 1, 1}); validator.DeclInputVar("concat_x3", {1, 1, 1, 1}); @@ -44,31 +46,26 @@ TEST(concat_op, test) { validator.Execute(1); } -TEST(concat_op, test2) { - std::unordered_set parameters({""}); - framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); - validator.DeclInputVar("concat_x1", {1, 4}); - validator.DeclInputVar("concat_x2", {3, 4}); - validator.DeclInputVar("concat_x3", {2, 4}); - validator.DeclOutputVar("concat_out", {6, 4}); - - // Prepare Op description - framework::OpDesc desc; - desc.SetType("concat"); - desc.SetInput("X", 
{"concat_x1", "concat_x2", "concat_x3"}); - desc.SetOutput("Out", {"concat_out"}); - - int axis = 0; - desc.SetAttr("axis", axis); - - validator.SetOp(*desc.Proto()); +#ifdef PADDLE_WITH_CUDA +TEST(concat_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_concat_op<::anakin::saber::NV>(ctx, true); +} +#endif - validator.Execute(1); +TEST(concat_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_concat_op<::anakin::saber::X86>(ctx, false); } } // namespace anakin } // namespace inference } // namespace paddle USE_OP(concat); +USE_CPU_ANAKIN_CONVERTER(concat); + +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(concat); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc b/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc index 6d93e50bc96b08..fa1b319bc1c65c 100644 --- a/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc @@ -21,13 +21,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(conv2d_op, test) { - auto* conv2d_converter = - Registry::Global().Lookup("conv2d"); - ASSERT_TRUE(conv2d_converter != nullptr); +template +void test_conv2d_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters({"conv2d-Y"}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("conv2d-X", {1, 3, 3, 3}); validator.DeclParamVar("conv2d-Y", {4, 3, 1, 1}); validator.DeclOutputVar("conv2d-Out", {1, 4, 3, 3}); @@ -54,9 +53,27 @@ TEST(conv2d_op, test) { validator.Execute(3); } +#ifdef PADDLE_WITH_CUDA +TEST(conv2d_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_conv2d_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(conv2d_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_conv2d_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(conv2d); +USE_CPU_ANAKIN_CONVERTER(conv2d); + +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(conv2d); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_dropout_op.cc b/paddle/fluid/inference/anakin/convert/test_dropout_op.cc index b2de5ae0a6e58e..a252dc74c0bf4f 100644 --- a/paddle/fluid/inference/anakin/convert/test_dropout_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_dropout_op.cc @@ -21,10 +21,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(dropout_op, native) { +template +void test_dropout_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("x", {1, 1, 2, 2}); validator.DeclOutputVar("out", {1, 1, 2, 2}); validator.DeclOutputVar("mask", {1, 1, 2, 2}); @@ -45,9 +47,26 @@ TEST(dropout_op, native) { validator.Execute(1, neglected_output); } +#ifdef PADDLE_WITH_CUDA +TEST(dropout_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_dropout_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(dropout_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_dropout_op<::anakin::saber::X86>(ctx, false); +} + } // 
namespace anakin } // namespace inference } // namespace paddle USE_OP(dropout); +USE_CPU_ANAKIN_CONVERTER(dropout); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(dropout); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc b/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc index 3a437f5fdb5656..ee1bedcfb25eba 100644 --- a/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc @@ -21,10 +21,14 @@ namespace paddle { namespace inference { namespace anakin { -static void test_elementwise_op(const std::string &op_type) { +template +static void test_elementwise_op(const std::string& op_type, + const platform::DeviceContext& context, + bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("x", {1, 1, 2, 2}); validator.DeclInputVar("y", {1, 1, 2, 2}); validator.DeclOutputVar("out", {1, 1, 2, 2}); @@ -43,14 +47,41 @@ static void test_elementwise_op(const std::string &op_type) { validator.Execute(1); } -TEST(elementwise_op, native_add) { test_elementwise_op("elementwise_add"); } -TEST(elementwise_op, native_mul) { test_elementwise_op("elementwise_mul"); } +#ifdef PADDLE_WITH_CUDA +TEST(elementwise_op, native_add_gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_elementwise_op<::anakin::saber::NV>("elementwise_add", ctx, true); +} +TEST(elementwise_op, native_mul_gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_elementwise_op<::anakin::saber::NV>("elementwise_mul", ctx, true); +} +#endif + +TEST(elementwise_op, native_add_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_elementwise_op<::anakin::saber::X86>("elementwise_add", ctx, false); +} + +TEST(elementwise_op, native_mul_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_elementwise_op<::anakin::saber::X86>("elementwise_mul", ctx, false); +} } // namespace anakin } // namespace inference } // namespace paddle USE_OP(elementwise_add); -USE_ANAKIN_CONVERTER(elementwise_add); USE_OP(elementwise_mul); +#ifdef PADDLE_WITH_CUDA +USE_ANAKIN_CONVERTER(elementwise_add); USE_ANAKIN_CONVERTER(elementwise_mul); +#endif + +USE_CPU_ANAKIN_CONVERTER(elementwise_add); +USE_CPU_ANAKIN_CONVERTER(elementwise_mul); diff --git a/paddle/fluid/inference/anakin/convert/test_fc_op.cc b/paddle/fluid/inference/anakin/convert/test_fc_op.cc index ee6d1dc291fe37..5510008d3c4f2e 100644 --- a/paddle/fluid/inference/anakin/convert/test_fc_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_fc_op.cc @@ -20,13 +20,13 @@ namespace paddle { namespace inference { namespace anakin { -TEST(fc_op, test) { - auto* fc_converter = Registry::Global().Lookup("fc"); - ASSERT_TRUE(fc_converter); - +template +void test_mul_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters({"mul_y"}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("mul_x", {1, 1, 2, 2}); validator.DeclParamVar("mul_y", {4, 2}); validator.DeclOutputVar("mul_out", {1, 2}); @@ -42,9 +42,26 @@ TEST(fc_op, test) { validator.Execute(10); } +#ifdef PADDLE_WITH_CUDA +TEST(mul_op, gpu) { + 
platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_mul_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(mul_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_mul_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(mul); +USE_CPU_ANAKIN_CONVERTER(fc); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(fc); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_flatten_op.cc b/paddle/fluid/inference/anakin/convert/test_flatten_op.cc index d13281f11f03fd..86bc1d810f8943 100644 --- a/paddle/fluid/inference/anakin/convert/test_flatten_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_flatten_op.cc @@ -20,13 +20,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(flatten_op, test) { - auto *converter = Registry::Global().Lookup("flatten"); - ASSERT_TRUE(converter); - +template +void test_flatten_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("flatten-X", {3, 10, 10, 4}); validator.DeclOutputVar("flatten-Out", {3, 400, 1, 1}); framework::OpDesc desc; @@ -42,10 +41,27 @@ TEST(flatten_op, test) { validator.Execute(5); } +#ifdef PADDLE_WITH_CUDA +TEST(flatten_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_flatten_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(flatten_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_flatten_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(reshape); USE_OP_ITSELF(flatten); +USE_CPU_ANAKIN_CONVERTER(flatten); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(flatten); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc b/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc index 1ac01946772160..b1be7f93c67c36 100644 --- a/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc @@ -19,15 +19,14 @@ namespace paddle { namespace inference { namespace anakin { -void test_pool2d(bool global_pooling, bool ceil_mode, +template +void test_pool2d(const platform::DeviceContext& context, bool use_gpu, + bool global_pooling, bool ceil_mode, std::string pool_type = "max") { - auto* pool2d_converter = - Registry::Global().Lookup("pool2d"); - ASSERT_TRUE(pool2d_converter); - framework::Scope scope; std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); // The ITensor's Dims should not contain the batch size. // So, the ITensor's Dims of input and output should be C * H * W. @@ -64,56 +63,61 @@ void test_pool2d(bool global_pooling, bool ceil_mode, validator.Execute(1); } -void test_pool2d2(bool global_pooling, bool ceil_mode, - std::string pool_type = "max") { - auto* pool2d_converter = - Registry::Global().Lookup("pool2d"); - ASSERT_TRUE(pool2d_converter); - - framework::Scope scope; - std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope); - - // The ITensor's Dims should not contain the batch size. - // So, the ITensor's Dims of input and output should be C * H * W. 
- validator.DeclInputVar("pool2d_x", {1, 1, 17, 17}); - validator.DeclOutputVar("pool2d_out", {1, 1, 17, 17}); - - // Prepare Op description - framework::OpDesc desc; - desc.SetType("pool2d"); - desc.SetInput("X", {"pool2d_x"}); - desc.SetOutput("Out", {"pool2d_out"}); - - std::vector ksize({3, 3}); - std::vector strides({1, 1}); - std::vector paddings({1, 1}); - std::string pooling_t = pool_type; +#ifdef PADDLE_WITH_CUDA +TEST(Pool2dOpConverter, normal) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_pool2d<::anakin::saber::NV>(ctx, true, false, false); +} +TEST(Pool2dOpConverter, test_global_pooling) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_pool2d<::anakin::saber::NV>(ctx, true, true, false); +} - desc.SetAttr("pooling_type", pooling_t); - desc.SetAttr("ksize", ksize); - desc.SetAttr("strides", strides); - desc.SetAttr("paddings", paddings); - desc.SetAttr("global_pooling", global_pooling); - desc.SetAttr("ceil_mode", true); +TEST(Pool2dOpConverter, max_ceil_test) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_pool2d<::anakin::saber::NV>(ctx, true, false, true); +} - LOG(INFO) << "set OP"; - validator.SetOp(*desc.Proto()); - LOG(INFO) << "execute"; +TEST(Pool2dOpConverter, avg_ceil_test) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_pool2d<::anakin::saber::NV>(ctx, true, false, true, "avg"); +} +#endif - validator.Execute(1); +TEST(Pool2dOpConverter, normal_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_pool2d<::anakin::saber::X86>(ctx, false, false, false); +} +TEST(Pool2dOpConverter, test_global_pooling_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_pool2d<::anakin::saber::X86>(ctx, false, true, false); } -TEST(Pool2dOpConverter, normal) { test_pool2d(false, false); } -TEST(Pool2dOpConverter, test_global_pooling) { test_pool2d(true, false); } +TEST(Pool2dOpConverter, max_ceil_test_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_pool2d<::anakin::saber::X86>(ctx, false, false, true); +} -TEST(Pool2dOpConverter, max_ceil_test) { test_pool2d(false, true); } -TEST(Pool2dOpConverter, avg_ceil_test) { test_pool2d(false, true, "avg"); } -TEST(Pool2dOpConverter, avg_ceil_test2) { test_pool2d2(false, true, "avg"); } +TEST(Pool2dOpConverter, avg_ceil_test_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_pool2d<::anakin::saber::X86>(ctx, false, false, true, "avg"); +} } // namespace anakin } // namespace inference } // namespace paddle USE_OP(pool2d); +USE_CPU_ANAKIN_CONVERTER(pool2d); + +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(pool2d); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_relu_op.cc b/paddle/fluid/inference/anakin/convert/test_relu_op.cc index cba19a55857542..369f1920f24943 100644 --- a/paddle/fluid/inference/anakin/convert/test_relu_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_relu_op.cc @@ -21,12 +21,14 @@ namespace paddle { namespace inference { namespace anakin { -static void test_relu_op(const std::string &op_type) { - auto *converter = Registry::Global().Lookup(op_type); - PADDLE_ENFORCE(converter != nullptr); +template +static void test_activation_op(const std::string& op_type, + const platform::DeviceContext& context, + bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - 
AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("act-X", {10, 6, 1, 1}); validator.DeclOutputVar("act-Out", {10, 6, 1, 1}); framework::OpDesc desc; @@ -44,14 +46,44 @@ static void test_relu_op(const std::string &op_type) { validator.Execute(5); } -TEST(activation, relu) { test_relu_op("relu"); } -TEST(activation, leaky_relu) { test_relu_op("leaky_relu"); } +#ifdef PADDLE_WITH_CUDA +TEST(relu_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_activation_op<::anakin::saber::NV>("relu", ctx, true); +} + +TEST(leaky_relu_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_activation_op<::anakin::saber::NV>("leaky_relu", ctx, true); +} +#endif + +/* seems bug here +TEST(relu_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_activation_op<::anakin::saber::X86>("relu", ctx, false); +} + +TEST(leaky_relu_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_activation_op<::anakin::saber::X86>("leaky_relu", ctx, false); +} +*/ } // namespace anakin } // namespace inference } // namespace paddle USE_OP(relu); -USE_ANAKIN_CONVERTER(relu); USE_OP(leaky_relu); +USE_CPU_ANAKIN_CONVERTER(relu); +USE_CPU_ANAKIN_CONVERTER(leaky_relu); + +#ifdef PADDLE_WITH_CUDA +USE_ANAKIN_CONVERTER(relu); USE_ANAKIN_CONVERTER(leaky_relu); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_reshape_op.cc b/paddle/fluid/inference/anakin/convert/test_reshape_op.cc index 306ebf510f29a8..3facdbe9c6944d 100644 --- a/paddle/fluid/inference/anakin/convert/test_reshape_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_reshape_op.cc @@ -20,12 +20,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(reshape, test) { - auto* converter = Registry::Global().Lookup("reshape"); - ASSERT_TRUE(converter); +template +void test_reshape1_op(const platform::DeviceContext& context, bool use_gpu) { framework::Scope scope; std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); // validator.DeclInputVar("reshape-X", {2, 3, 3, 1}); // validator.DeclOutputVar("reshape-Out", {3, 2, 1, 3}); @@ -45,10 +45,12 @@ TEST(reshape, test) { validator.Execute(1); } -TEST(reshape, test2) { +template +void test_reshape2_op(const platform::DeviceContext& context, bool use_gpu) { framework::Scope scope; std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("reshape-X", {1, 2, 4}); validator.DeclOutputVar("reshape-Out", {1, 4, 2}); @@ -66,9 +68,39 @@ TEST(reshape, test2) { validator.Execute(1); } +#ifdef PADDLE_WITH_CUDA +TEST(reshape1_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_reshape1_op<::anakin::saber::NV>(ctx, true); +} + +TEST(reshape2_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_reshape2_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(reshape1_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_reshape2_op<::anakin::saber::X86>(ctx, false); +} + +TEST(reshape2_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext 
ctx(cpu_place); + test_reshape2_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(reshape); +USE_CPU_ANAKIN_CONVERTER(reshape); + +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(reshape); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_softmax_op.cc b/paddle/fluid/inference/anakin/convert/test_softmax_op.cc index 8c14fae0a67b9e..e15d19135b44cf 100644 --- a/paddle/fluid/inference/anakin/convert/test_softmax_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_softmax_op.cc @@ -20,12 +20,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(softmax, test) { - auto* converter = Registry::Global().Lookup("softmax"); - ASSERT_TRUE(converter); +template +void test_softmax_op(const platform::DeviceContext& context, bool use_gpu) { framework::Scope scope; std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("softmax-X", {1, 10, 2}); validator.DeclOutputVar("softmax-Out", {1, 10, 2}); @@ -41,9 +41,27 @@ void SoftMaxOpConverter::operator()(const framework::proto::OpDesc &op, validator.Execute(1); } +#ifdef PADDLE_WITH_CUDA +TEST(softmax_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_softmax_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(softmax_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_softmax_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(softmax); +USE_CPU_ANAKIN_CONVERTER(softmax); + +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(softmax); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_split_op.cc b/paddle/fluid/inference/anakin/convert/test_split_op.cc index aa61c01a511c23..7131b07558d1eb 100644 --- a/paddle/fluid/inference/anakin/convert/test_split_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_split_op.cc @@ -21,12 +21,14 @@ namespace paddle { namespace inference { namespace anakin { -template -void AnakinSliceTest(const std::vector &in_shape, +template +void AnakinSliceTest(const platform::DeviceContext &context, bool use_gpu, + const std::vector &in_shape, const std::vector &sections) { std::unordered_set parameters({""}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("split_input", in_shape); std::vector output_vars; @@ -55,51 +57,58 @@ void AnakinSliceTest(const std::vector &in_shape, // batch = 0, axis = 1, same shape TEST(split_op, test_same_shape_axis1_batch1) { - AnakinSliceTest<1>({1, 4, 2, 2}, {2, 2}); + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + AnakinSliceTest<::anakin::saber::NV, 1>(ctx, true, {1, 4, 2, 2}, {2, 2}); } // batch = 0, axis = 1, different shape TEST(split_op, test_different_shape_axis1_batch1) { - AnakinSliceTest<1>({1, 3, 2, 2}, {2, 1}); -} -// batch = 10, axis = 1, same shape -TEST(split_op, test_same_shape_axis1_batch10) { - AnakinSliceTest<1>({1, 4, 2, 2}, {2, 2}); -} -// batch = 10, axis = 1, different shape -TEST(split_op, test_different_shape_axis1_batch10) { - AnakinSliceTest<1>({1, 3, 2, 2}, {2, 1}); + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + AnakinSliceTest<::anakin::saber::NV, 1>(ctx, true, {1, 3, 2, 2}, {2, 1}); } // batch = 0, axis = 2, same shape
TEST(split_op, test_same_shape_axis2_batch1) { - AnakinSliceTest<2>({1, 3, 4, 2}, {2, 2}); + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + AnakinSliceTest<::anakin::saber::NV, 2>(ctx, true, {1, 3, 4, 2}, {2, 2}); } // batch = 0, axis = 2, different shape TEST(split_op, test_different_shape_axis2_batch1) { - AnakinSliceTest<2>({1, 3, 3, 2}, {2, 1}); -} -// batch = 10, axis = 2, same shape -TEST(split_op, test_same_shape_axis2_batch10) { - AnakinSliceTest<2>({1, 3, 4, 2}, {2, 2}); -} -// batch = 10, axis = 2, different shape -TEST(split_op, test_different_shape_axis2_batch10) { - AnakinSliceTest<2>({1, 3, 3, 2}, {2, 1}); + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + AnakinSliceTest<::anakin::saber::NV, 2>(ctx, true, {1, 3, 3, 2}, {2, 1}); } + // batch = 0, axis = 3, same shape TEST(split_op, test_same_shape_axis3_batch1) { - AnakinSliceTest<3>({1, 3, 2, 4}, {2, 2}); + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + AnakinSliceTest<::anakin::saber::NV, 3>(ctx, true, {1, 3, 2, 4}, {2, 2}); } // batch = 0, axis = 3, different shape TEST(split_op, test_different_shape_axis3_batch1) { - AnakinSliceTest<3>({1, 3, 2, 3}, {2, 1}); + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + AnakinSliceTest<::anakin::saber::NV, 3>(ctx, true, {1, 3, 2, 3}, {2, 1}); } -// batch = 10, axis = 3, same shape -TEST(split_op, test_same_shape_axis3_batch10) { - AnakinSliceTest<3>({1, 3, 2, 4}, {2, 2}); + +TEST(split_op, test_different_shape_axis1_batch1_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + AnakinSliceTest<::anakin::saber::X86, 1>(ctx, false, {1, 3, 2, 3}, {2, 1}); +} + +TEST(split_op, test_different_shape_axis2_batch1_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + AnakinSliceTest<::anakin::saber::X86, 2>(ctx, false, {1, 3, 4, 2}, {2, 2}); } -// batch = 10, axis = 3, different shape -TEST(split_op, test_different_shape_axis3_batch10) { - AnakinSliceTest<3>({1, 3, 2, 3}, {2, 1}); + +TEST(split_op, test_different_shape_axis3_batch1_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + AnakinSliceTest<::anakin::saber::X86, 3>(ctx, false, {1, 3, 2, 4}, {2, 2}); } } // namespace anakin @@ -107,4 +116,7 @@ TEST(split_op, test_different_shape_axis3_batch10) { } // namespace paddle USE_OP(split); +USE_CPU_ANAKIN_CONVERTER(split); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(split); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_sum_op.cc b/paddle/fluid/inference/anakin/convert/test_sum_op.cc index d6a59a0166be92..8714890666c298 100644 --- a/paddle/fluid/inference/anakin/convert/test_sum_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_sum_op.cc @@ -22,10 +22,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(sum, native) { +template +static void test_sum_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("sum_x1", {1, 2, 1, 2}); validator.DeclInputVar("sum_x2", {1, 2, 1, 2}); validator.DeclOutputVar("sum_out", {1, 2, 1, 2}); @@ -40,9 +42,26 @@ TEST(sum, native) { validator.Execute(1); } +#ifdef PADDLE_WITH_CUDA +TEST(sum_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); 
+ test_sum_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(sum_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_sum_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(sum); +USE_CPU_ANAKIN_CONVERTER(sum); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(sum); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_transpose_op.cc b/paddle/fluid/inference/anakin/convert/test_transpose_op.cc index 016ed26f02f782..6b2f1ed1566d5c 100644 --- a/paddle/fluid/inference/anakin/convert/test_transpose_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_transpose_op.cc @@ -20,12 +20,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(transpose_op, test) { - auto* converter = Registry::Global().Lookup("transpose"); - ASSERT_TRUE(converter != nullptr); +template +void test_transpose1_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("transpose-X", {2, 3, 4, 5}); validator.DeclOutputVar("transpose-Out", {4, 2, 5, 3}); @@ -43,11 +43,12 @@ TEST(transpose_op, test) { validator.Execute(3); } -// test input shape's dims < 4 -TEST(transpose_op, test2) { +template +void test_transpose2_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator(parameters, &scope, context, + use_gpu); validator.DeclInputVar("transpose-X", {3, 4, 5}); validator.DeclOutputVar("transpose-Out", {3, 5, 4}); @@ -65,9 +66,38 @@ TEST(transpose_op, test2) { validator.Execute(1); } +#ifdef PADDLE_WITH_CUDA +TEST(transpose1_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_transpose1_op<::anakin::saber::NV>(ctx, true); +} + +TEST(transpose2_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_transpose2_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(transpose1_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_transpose2_op<::anakin::saber::X86>(ctx, false); +} + +TEST(transpose2_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_transpose2_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(transpose); +USE_CPU_ANAKIN_CONVERTER(transpose); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(transpose); +#endif diff --git a/paddle/fluid/inference/anakin/convert/transpose.cc b/paddle/fluid/inference/anakin/convert/transpose.cc index f35372fe5c315e..cffc526065f8c8 100644 --- a/paddle/fluid/inference/anakin/convert/transpose.cc +++ b/paddle/fluid/inference/anakin/convert/transpose.cc @@ -17,20 +17,16 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void TransposeOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void TransposeOpConverter::operator()( + const framework::proto::OpDesc &op, const 
framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -38,7 +34,7 @@ void TransposeOpConverter::operator()(const framework::proto::OpDesc &op, auto input = op_desc.Input("X").front(); auto output = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Permute", {input}, {output}); + this->engine_->AddOp(op_name, "Permute", {input}, {output}); auto axis = boost::get>(op_desc.GetAttr("axis")); size_t axis_size = axis.size(); @@ -46,11 +42,17 @@ void TransposeOpConverter::operator()(const framework::proto::OpDesc &op, axis.push_back(axis_size); axis_size += 1; } - engine_->AddOpAttr>(op_name, "dims", axis); + this->engine_->template AddOpAttr>(op_name, "dims", axis); } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_ANAKIN_OP_CONVERTER(transpose, TransposeOpConverter); +#ifdef PADDLE_WITH_CUDA +REGISTER_CUDA_ANAKIN_OP_CONVERTER(transpose, + TransposeOpConverter<::anakin::saber::NV>); +#endif + +REGISTER_CPU_ANAKIN_OP_CONVERTER(transpose, + TransposeOpConverter<::anakin::saber::X86>); diff --git a/paddle/fluid/inference/anakin/convert/transpose.h b/paddle/fluid/inference/anakin/convert/transpose.h index bacbf152bc1231..54090468ae13c6 100644 --- a/paddle/fluid/inference/anakin/convert/transpose.h +++ b/paddle/fluid/inference/anakin/convert/transpose.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class TransposeOpConverter : public AnakinOpConverter { +template +class TransposeOpConverter : public AnakinOpConverter { public: TransposeOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/ut_helper.h b/paddle/fluid/inference/anakin/convert/ut_helper.h index a931efbcf4adf6..140a33a7cbb6fe 100644 --- a/paddle/fluid/inference/anakin/convert/ut_helper.h +++ b/paddle/fluid/inference/anakin/convert/ut_helper.h @@ -32,14 +32,8 @@ limitations under the License. */ #include "paddle/fluid/inference/utils/singleton.h" #include "paddle/fluid/platform/enforce.h" -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; using anakin::Precision; -using anakin::saber::NV; using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; -using anakin::PTuple; namespace paddle { namespace inference { @@ -55,8 +49,8 @@ float random(float low, float high) { return dist(mt); } -void RandomizeTensor(framework::LoDTensor* tensor, const platform::Place& place, - const platform::DeviceContext& ctx) { +void RandomizeTensor(framework::LoDTensor* tensor, + const platform::Place& place) { auto dims = tensor->dims(); size_t num_elements = analysis::AccuDims(dims, dims.size()); PADDLE_ENFORCE_GT(num_elements, 0); @@ -78,17 +72,19 @@ void RandomizeTensor(framework::LoDTensor* tensor, const platform::Place& place, * anakin * layer. 
*/ +template class AnakinConvertValidation { - using AnakinNvEngineT = AnakinEngine; + using AnakinNvEngineT = AnakinEngine; public: AnakinConvertValidation() = delete; AnakinConvertValidation(const std::unordered_set& parameters, - framework::Scope* scope) - : parameters_(parameters), scope_(scope), place_(0) { - PADDLE_ENFORCE_EQ(cudaStreamCreate(&stream_), 0); - engine_.reset(new AnakinEngine(true)); + framework::Scope* scope, + const platform::DeviceContext& ctx, + bool use_gpu = true) + : parameters_(parameters), scope_(scope), ctx_(ctx), use_gpu_(use_gpu) { + engine_.reset(new AnakinEngine(true)); } // Declare a Variable as input with random initialization. @@ -108,11 +104,10 @@ class AnakinConvertValidation { } void DeclVar(const std::string& name, const std::vector dim_vec) { - platform::CUDADeviceContext ctx(place_); auto* x = scope_->Var(name); auto* x_tensor = x->GetMutable(); x_tensor->Resize(framework::make_ddim(dim_vec)); - RandomizeTensor(x_tensor, place_, ctx); + RandomizeTensor(x_tensor, ctx_.GetPlace()); std::vector dim_vec_int64; for (auto& ele : dim_vec) { @@ -132,7 +127,7 @@ class AnakinConvertValidation { // should init anakin engine here. auto& block_desc = program_desc_.Block(framework::kRootBlockIndex); - Singleton::Global().ConvertOp( + Singleton>::Global().ConvertOp( desc, block_desc, parameters_, *scope_, engine_.get(), true /*test_mode*/); engine_->Freeze(); @@ -160,11 +155,8 @@ class AnakinConvertValidation { void Execute(int batch_size, std::unordered_set neglected_output = {}) { // Execute Fluid Op - platform::CUDADeviceContext ctx(place_); - op_->Run(*scope_, place_); + op_->Run(*scope_, ctx_.GetPlace()); - // std::vector input_vector; - // std::vector output_vector; std::map inputs; for (const auto& input : op_desc_->InputArgumentNames()) { if (parameters_.count(input)) continue; @@ -180,20 +172,27 @@ class AnakinConvertValidation { std::vector fluid_out; auto* var = scope_->FindVar(output); auto tensor = var->GetMutable(); - framework::TensorToVector(*tensor, ctx, &fluid_out); + framework::TensorToVector(*tensor, ctx_, &fluid_out); fluid_outputs.push_back(fluid_out); outputs.insert({output, tensor}); } - engine_->Execute(inputs, outputs, stream_); + if (!use_gpu_) { + engine_->Execute(inputs, outputs); + } else { + cudaStream_t stream; + PADDLE_ENFORCE_EQ(cudaStreamCreate(&stream), 0); + engine_->Execute(inputs, outputs, stream); + } + int i_output = 0; for (const auto& output : op_desc_->OutputArgumentNames()) { if (neglected_output.count(output)) continue; std::vector anakin_out; auto* var = scope_->FindVar(output); auto tensor = var->GetMutable(); - framework::TensorToVector(*tensor, ctx, &anakin_out); + framework::TensorToVector(*tensor, ctx_, &anakin_out); size_t anakin_out_size = anakin_out.size(); auto fluid_out = fluid_outputs[i_output++]; @@ -205,15 +204,17 @@ class AnakinConvertValidation { private: std::unique_ptr engine_{nullptr}; - cudaStream_t stream_; std::unique_ptr op_; std::unique_ptr op_desc_; framework::ProgramDesc program_desc_; const std::unordered_set& parameters_; framework::Scope* scope_; - platform::CUDAPlace place_; + const platform::DeviceContext& ctx_; + bool use_gpu_{true}; }; +template class AnakinConvertValidation<::anakin::saber::NV>; +template class AnakinConvertValidation<::anakin::saber::X86>; } // namespace anakin } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/engine.cc b/paddle/fluid/inference/anakin/engine.cc index 2b85d266cf0d51..17e661222433bb 100644 --- 
a/paddle/fluid/inference/anakin/engine.cc +++ b/paddle/fluid/inference/anakin/engine.cc @@ -69,11 +69,11 @@ void AnakinEngine::AddOp( } template -void AnakinEngine::Execute( - const std::map &inputs, - const std::map &outputs, - cudaStream_t stream) { +void AnakinEngine::BindInput( + const std::map &inputs) { +#ifdef PADDLE_WITH_CUDA cudaDeviceSynchronize(); +#endif for (const auto &input : inputs) { auto *tensor = input.second; auto *data = tensor->data(); @@ -105,6 +105,35 @@ void AnakinEngine::Execute( fluid_input_shape); anakin_input->copy_from(tmp_anakin_tensor); } +} + +template +void AnakinEngine::Execute( + const std::map &inputs, + const std::map &outputs) { + BindInput(inputs); + net_->prediction(); + for (const auto &output : outputs) { + platform::CPUPlace cpu_place; + auto *tensor = output.second; + auto *anakin_output = net_->get_out(output.first); + auto *anakin_data = anakin_output->data(); + auto anakin_output_shape = anakin_output->valid_shape(); + tensor->Resize(framework::make_ddim(anakin_output_shape)); + auto *fluid_data = tensor->mutable_data(cpu_place); + memory::Copy(cpu_place, static_cast(fluid_data), cpu_place, + static_cast(anakin_data), + tensor->numel() * sizeof(float)); + } +} + +#ifdef PADDLE_WITH_CUDA +template +void AnakinEngine::Execute( + const std::map &inputs, + const std::map &outputs, + cudaStream_t stream) { + BindInput(inputs); net_->prediction(); cudaDeviceSynchronize(); for (const auto &output : outputs) { @@ -121,6 +150,7 @@ void AnakinEngine::Execute( } cudaDeviceSynchronize(); } +#endif template void AnakinEngine::Freeze() { @@ -140,7 +170,15 @@ AnakinEngine::Clone() { return std::unique_ptr(engine); } +#ifdef PADDLE_WITH_CUDA template class AnakinEngine<::anakin::saber::NV, ::anakin::Precision::FP32>; +template class AnakinEngineManager<::anakin::saber::NV>; +#endif + +template class AnakinEngine<::anakin::saber::X86, ::anakin::Precision::FP32>; +template class AnakinEngineManager<::anakin::saber::X86>; + +// template class AnakinEngine<::anakin::saber::X86, ::anakin::Precision::FP32>; } // namespace anakin } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/engine.h b/paddle/fluid/inference/anakin/engine.h index 1325306557f2e7..215c8a6c6146a2 100644 --- a/paddle/fluid/inference/anakin/engine.h +++ b/paddle/fluid/inference/anakin/engine.h @@ -32,7 +32,6 @@ #include "saber/saber_types.h" using anakin::Precision; -using anakin::saber::NV; namespace anakin { @@ -94,9 +93,16 @@ class AnakinEngine { void Save(std::string path) { graph_->save(path); } bool IsInit() { return initialized_; } int GetDevice() { return device_; } + void Execute(const std::map &inputs, + const std::map &outputs); +#ifdef PADDLE_WITH_CUDA void Execute(const std::map &inputs, const std::map &outputs, cudaStream_t stream); +#endif + + private: + void BindInput(const std::map &inputs); private: bool initialized_{false}; @@ -108,24 +114,25 @@ class AnakinEngine { std::vector program_inputs_; }; +template class AnakinEngineManager { - using AnakinNvEngineT = AnakinEngine; + using AnakinEngineT = AnakinEngine; public: bool HasEngine(const std::string &name) const { if (engines_.count(name) == 0) return false; return engines_.at(name).get() != nullptr; } - AnakinNvEngineT *Get(const std::string &name) const { + AnakinEngineT *Get(const std::string &name) const { return engines_.at(name).get(); } - AnakinNvEngineT *Create( - bool need_summary, int device, int max_batch_size, - std::map> max_input_shape, - std::vector program_inputs, 
std::string engine_name) { + AnakinEngineT *Create(bool need_summary, int device, int max_batch_size, + std::map> max_input_shape, + std::vector program_inputs, + std::string engine_name) { std::unique_lock lk(mut_); - auto *p = new AnakinEngine( + auto *p = new AnakinEngine( need_summary, device, max_batch_size, max_input_shape, program_inputs); engines_[engine_name].reset(p); return p; @@ -138,7 +145,7 @@ class AnakinEngineManager { } private: - std::unordered_map> engines_; + std::unordered_map> engines_; std::mutex mut_; }; } // namespace anakin diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index a736ca393ccb71..37b7583fde29cc 100644 --- a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -64,20 +64,20 @@ struct Argument { bool Has(const std::string& key) const { return valid_fields_.count(key); } -#define DECL_ARGUMENT_FIELD(field__, Field, type__) \ - public: \ - type__& field__() { \ - PADDLE_ENFORCE(Has(#field__)); \ - return field__##_; \ - } \ - void Set##Field(const type__& x) { \ - field__##_ = x; \ - valid_fields_.insert(#field__); \ - } \ - DECL_ARGUMENT_FIELD_VALID(field__); \ - type__* field__##_ptr() { return &field__##_; } \ - \ - private: \ +#define DECL_ARGUMENT_FIELD(field__, Field, type__) \ + public: \ + type__& field__() { \ + PADDLE_ENFORCE(Has(#field__), "There is no such field"); \ + return field__##_; \ + } \ + void Set##Field(const type__& x) { \ + field__##_ = x; \ + valid_fields_.insert(#field__); \ + } \ + DECL_ARGUMENT_FIELD_VALID(field__); \ + type__* field__##_ptr() { return &field__##_; } \ + \ + private: \ type__ field__##_; #define DECL_ARGUMENT_FIELD_VALID(field__) \ diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc b/paddle/fluid/inference/analysis/ir_pass_manager.cc index 78e502c670f0eb..bbc3938969a6d4 100644 --- a/paddle/fluid/inference/analysis/ir_pass_manager.cc +++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc @@ -114,6 +114,7 @@ void IRPassManager::CreatePasses(Argument *argument, if (pass_name == "anakin_subgraph_pass") { pass->Set("program", new framework::ProgramDesc *(&argument->main_program())); + pass->Set("use_gpu", new bool(argument->use_gpu())); pass->Set("gpu_device_id", new int(argument->gpu_device_id())); pass->Set("model_from_memory", new bool(argument->model_from_memory())); pass->Set("engine_opt_info", new std::map( diff --git a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc index cbf883a8a5ff1f..658006c22cd842 100644 --- a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc @@ -194,20 +194,49 @@ void AnakinSubgraphPass::CreateAnakinOp( auto max_batch_size = Get("max_batch_size"); auto program_inputs = program_desc->GetFeedTargetNames(); - auto *anakin_engine = - inference::Singleton::Global().Create( - true, Get("gpu_device_id"), max_batch_size, max_input_shape, - program_inputs, engine_key); + bool use_gpu = Get("use_gpu"); + SetAttr(op_desc->Proto(), "use_gpu", use_gpu); + + if (use_gpu) { +#ifdef PADDLE_WITH_CUDA + inference::Singleton< + anakin::AnakinEngineManager<::anakin::saber::NV>>::Global() + .Create(true, Get("gpu_device_id"), max_batch_size, + max_input_shape, program_inputs, engine_key); +#endif + } else { + inference::Singleton< + anakin::AnakinEngineManager<::anakin::saber::X86>>::Global() + .Create(true, 
Get("gpu_device_id"), max_batch_size, + max_input_shape, program_inputs, engine_key); + } auto *scope = param_scope(); std::unordered_set param_set(params.begin(), params.end()); framework::BlockDesc block_desc_temp(nullptr, block_desc.Proto()); - - inference::Singleton::Global() - .ConvertBlockToAnakinEngine( - &block_desc_temp, scope, - std::vector(input_names.begin(), input_names.end()), - param_set, output_mapping, anakin_engine); + if (use_gpu) { + auto *anakin_engine = + inference::Singleton>::Global() + .Get(engine_key); + inference::Singleton< + inference::anakin::AnakinOpConverter<::anakin::saber::NV>>::Global() + .ConvertBlockToAnakinEngine( + &block_desc_temp, scope, + std::vector(input_names.begin(), input_names.end()), + param_set, output_mapping, anakin_engine); + } else { + auto *anakin_engine = + inference::Singleton>::Global() + .Get(engine_key); + inference::Singleton< + inference::anakin::AnakinOpConverter<::anakin::saber::X86>>::Global() + .ConvertBlockToAnakinEngine( + &block_desc_temp, scope, + std::vector(input_names.begin(), input_names.end()), + param_set, output_mapping, anakin_engine); + } } } // namespace analysis diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt index 9c80b7a839a6bf..882bb3468388e7 100644 --- a/paddle/fluid/inference/api/CMakeLists.txt +++ b/paddle/fluid/inference/api/CMakeLists.txt @@ -70,4 +70,3 @@ if (WITH_ANAKIN AND WITH_MKL) # only needed in CI anakin_target(inference_anakin_api) anakin_target(inference_anakin_api_shared) endif() -inference_analysis_test(faster_rcnn_test SRCS faster_rcnn_test.cc EXTRA_DEPS paddle_fluid) diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index 0109b4a4fa7617..4f9e0b639564a3 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -268,9 +268,11 @@ void AnalysisConfig::Update() { PADDLE_ENFORCE(!use_tensorrt_, "Anakin sub-graph and TensorRT sub-graph are not allowed to " "run at the same time!"); - PADDLE_ENFORCE( - use_gpu_, - "Anakin sub-graph engine need gpu, please use the EnableGpu API."); + if (use_gpu_) { + LOG(INFO) << "Run Anakin GPU mode"; + } else { + LOG(INFO) << "Run Anakin CPU mode"; + } pass_builder()->ClearPasses(); for (const auto &pass : kAnakinSubgraphPasses) { diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index e5991af4f7bfe5..231beab641a9db 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -382,7 +382,7 @@ void AnalysisPredictor::PrepareArgument() { argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_); } - if (config_.use_gpu() && config_.anakin_engine_enabled()) { + if (config_.anakin_engine_enabled()) { argument_.SetAnakinMaxBatchSize(config_.anakin_max_batchsize_); argument_.SetAnakinMaxInputShape(config_.anakin_max_input_shape_); argument_.SetAnakinMinSubgraphSize(config_.anakin_min_subgraph_size_); diff --git a/paddle/fluid/operators/anakin/anakin_engine_op.h b/paddle/fluid/operators/anakin/anakin_engine_op.h index e4feb14b2271a5..99c5a6dc84a094 100644 --- a/paddle/fluid/operators/anakin/anakin_engine_op.h +++ b/paddle/fluid/operators/anakin/anakin_engine_op.h @@ -34,28 +34,16 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -using FluidDT = framework::proto::VarType_Type; using inference::Singleton; - -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; -using anakin::PTuple; using inference::anakin::AnakinEngine; class AnakinEngineOp : public framework::OperatorBase { - using AnakinNvEngineT = AnakinEngine; - private: std::vector input_names_; std::unordered_set param_names_; - mutable AnakinNvEngineT *anakin_engine_; std::string engine_key_; std::string engine_serialized_data_; + bool use_gpu_; public: AnakinEngineOp(const std::string &type, @@ -66,10 +54,10 @@ class AnakinEngineOp : public framework::OperatorBase { input_names_ = Inputs("Xs"); engine_key_ = Attr("engine_key"); auto params = Attr>("parameters"); + use_gpu_ = Attr("use_gpu"); for (const auto ¶m : params) { param_names_.insert(param); } - anakin_engine_ = nullptr; } protected: @@ -80,7 +68,6 @@ class AnakinEngineOp : public framework::OperatorBase { void RunAnakin(const framework::Scope &scope, const platform::Place &dev_place) const { - auto *engine = GetEngine(scope, dev_place); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(dev_place); auto stream = @@ -92,7 +79,6 @@ class AnakinEngineOp : public framework::OperatorBase { Attr>("output_name_mapping"); std::map inputs; - // Convert input tensor from fluid to engine. for (const auto &x : Inputs("Xs")) { if (param_names_.count(x)) continue; auto &t = @@ -110,17 +96,21 @@ class AnakinEngineOp : public framework::OperatorBase { outputs.insert({output_maps[output_index], fluid_t}); output_index += 1; } - engine->Execute(inputs, outputs, stream); - } - - AnakinNvEngineT *GetEngine(const framework::Scope &scope, - const platform::Place &dev_place) const { - if (anakin_engine_ == nullptr) { - anakin_engine_ = - inference::Singleton::Global() + if (use_gpu_) { +#ifdef PADDLE_WITH_CUDA + auto *engine = + inference::Singleton>::Global() + .Get(engine_key_); + engine->Execute(inputs, outputs, stream); +#endif + } else { + auto *engine = + inference::Singleton>::Global() .Get(engine_key_); + engine->Execute(inputs, outputs); } - return anakin_engine_; } }; From e14ab180fe76b97aa33c0089f98d1cfa771905e9 Mon Sep 17 00:00:00 2001 From: nhzlx Date: Thu, 11 Apr 2019 17:07:32 +0000 Subject: [PATCH 26/27] Cherry-pick from 1662, 16797.. 
: add anakin int8 support --- paddle/fluid/framework/ir/fc_fuse_pass.cc | 3 +- .../framework/ir/graph_pattern_detector.cc | 25 ++-- .../framework/ir/graph_pattern_detector.h | 3 +- .../ir/quant_conv2d_dequant_fuse_pass.cc | 28 ++-- .../inference/anakin/convert/CMakeLists.txt | 7 +- .../inference/anakin/convert/activation.cc | 49 ++++-- .../inference/anakin/convert/activation.h | 17 ++- .../anakin/convert/affine_channel.cc | 79 +++------- .../inference/anakin/convert/affine_channel.h | 4 +- .../inference/anakin/convert/batch_norm.cc | 106 +++++-------- .../inference/anakin/convert/batch_norm.h | 4 +- .../fluid/inference/anakin/convert/concat.cc | 25 +++- .../fluid/inference/anakin/convert/concat.h | 4 +- .../fluid/inference/anakin/convert/conv2d.cc | 79 +++++++--- .../fluid/inference/anakin/convert/conv2d.h | 4 +- .../inference/anakin/convert/conv2d_fusion.cc | 111 +++++++------- .../inference/anakin/convert/conv2d_fusion.h | 4 +- .../anakin/convert/density_prior_box.cc | 31 ++-- .../anakin/convert/density_prior_box.h | 5 +- .../inference/anakin/convert/detection_out.cc | 25 +++- .../inference/anakin/convert/detection_out.h | 4 +- .../fluid/inference/anakin/convert/dropout.cc | 37 +++-- .../fluid/inference/anakin/convert/dropout.h | 4 +- .../inference/anakin/convert/elementwise.cc | 46 ++++-- .../inference/anakin/convert/elementwise.h | 10 +- paddle/fluid/inference/anakin/convert/fc.cc | 140 +++++++++++------- paddle/fluid/inference/anakin/convert/fc.h | 12 +- .../fluid/inference/anakin/convert/flatten.cc | 25 +++- .../fluid/inference/anakin/convert/flatten.h | 4 +- .../fluid/inference/anakin/convert/helper.cc | 32 ++++ .../fluid/inference/anakin/convert/helper.h | 88 +++++++++++ .../inference/anakin/convert/im2sequence.cc | 21 ++- .../inference/anakin/convert/im2sequence.h | 4 +- .../inference/anakin/convert/op_converter.h | 81 +++++++--- .../fluid/inference/anakin/convert/pool2d.cc | 25 +++- .../fluid/inference/anakin/convert/pool2d.h | 4 +- paddle/fluid/inference/anakin/convert/relu.cc | 45 ++++-- paddle/fluid/inference/anakin/convert/relu.h | 8 +- .../fluid/inference/anakin/convert/reshape.cc | 24 ++- .../fluid/inference/anakin/convert/reshape.h | 4 +- .../inference/anakin/convert/roi_align.cc | 30 ++-- .../inference/anakin/convert/roi_align.h | 4 +- .../fluid/inference/anakin/convert/scale.cc | 24 ++- paddle/fluid/inference/anakin/convert/scale.h | 4 +- .../fluid/inference/anakin/convert/softmax.cc | 25 +++- .../fluid/inference/anakin/convert/softmax.h | 4 +- .../fluid/inference/anakin/convert/split.cc | 23 ++- paddle/fluid/inference/anakin/convert/split.h | 4 +- paddle/fluid/inference/anakin/convert/sum.cc | 28 +++- paddle/fluid/inference/anakin/convert/sum.h | 4 +- .../anakin/convert/test_activation_op.cc | 6 +- .../anakin/convert/test_affine_channel_op.cc | 4 +- .../anakin/convert/test_batch_norm_op.cc | 4 +- .../anakin/convert/test_concat_op.cc | 4 +- .../anakin/convert/test_conv2d_op.cc | 4 +- .../anakin/convert/test_dropout_op.cc | 4 +- .../anakin/convert/test_elementwise_op.cc | 4 +- .../inference/anakin/convert/test_fc_op.cc | 4 +- .../anakin/convert/test_flatten_op.cc | 4 +- .../anakin/convert/test_pool2d_op.cc | 4 +- .../inference/anakin/convert/test_relu_op.cc | 18 +-- .../anakin/convert/test_reshape_op.cc | 8 +- .../anakin/convert/test_softmax_op.cc | 4 +- .../inference/anakin/convert/test_split_op.cc | 4 +- .../inference/anakin/convert/test_sum_op.cc | 4 +- .../anakin/convert/test_transpose_op.cc | 8 +- .../inference/anakin/convert/transpose.cc | 20 ++- 
.../inference/anakin/convert/transpose.h | 4 +- .../inference/anakin/convert/ut_helper.h | 21 ++- paddle/fluid/inference/anakin/engine.cc | 13 +- paddle/fluid/inference/anakin/engine.h | 13 +- paddle/fluid/inference/analysis/argument.h | 6 + .../inference/analysis/ir_pass_manager.cc | 5 + .../ir_passes/anakin_subgraph_pass.cc | 54 +++++-- .../analysis/ir_passes/anakin_subgraph_pass.h | 8 + paddle/fluid/inference/api/analysis_config.cc | 15 +- .../fluid/inference/api/analysis_predictor.cc | 3 + .../inference/api/paddle_analysis_config.h | 7 +- .../inference/api/paddle_pass_builder.cc | 16 +- .../fluid/operators/anakin/anakin_engine_op.h | 28 +++- paddle/fluid/pybind/inference_api.cc | 10 +- 81 files changed, 1103 insertions(+), 589 deletions(-) create mode 100644 paddle/fluid/inference/anakin/convert/helper.cc create mode 100644 paddle/fluid/inference/anakin/convert/helper.h diff --git a/paddle/fluid/framework/ir/fc_fuse_pass.cc b/paddle/fluid/framework/ir/fc_fuse_pass.cc index ca008763bff8ff..a5488eaa1b6203 100644 --- a/paddle/fluid/framework/ir/fc_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_fuse_pass.cc @@ -48,8 +48,9 @@ void FCFusePass::ApplyImpl(ir::Graph* graph) const { GET_IR_NODE_FROM_SUBGRAPH(elementwise_add, elementwise_add, fc_pattern); GET_IR_NODE_FROM_SUBGRAPH(mul_out, mul_out, fc_pattern); + auto base_op_desc = *mul->Op()->Proto(); // Create an FC Node. - OpDesc desc; + OpDesc desc(base_op_desc, nullptr); std::string fc_x_in = subgraph.at(x)->Name(); std::string fc_Y_in = w->Name(); std::string fc_bias_in = fc_bias->Name(); diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index 8468f9ccc12a01..77f50e914b668e 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -1640,7 +1640,8 @@ PDNode *patterns::FillConstantElementWiseMulFuse::operator()( void patterns::QuantDequantOpFuse::operator()(PDNode *quant_op_input, const std::string &op_type, const std::string &weight_name, - int times) { + int times, + const std::string &quant_type) { const int kNumFields = 5; const int kQuantizedWeightOffset = 0; const int kQuantizedOpOffset = 1; @@ -1648,24 +1649,22 @@ void patterns::QuantDequantOpFuse::operator()(PDNode *quant_op_input, const int kDequantOpOffset = 3; const int kDequantOpOutOffset = 4; // the quant op always be one. 
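  // Note (illustrative, not part of this patch): kNumFields and the five
  // offsets above index the nodes captured for each of the `times` quantized
  // branches -- the quantized op's weight, the quantized op itself, its
  // output, the fake_dequantize_max_abs op and that op's output. Threading
  // quant_type through this pattern lets it match either
  // fake_quantize_range_abs_max or fake_quantize_moving_average_abs_max in
  // front of those branches instead of hard-coding the former.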
- auto quant_op_in_scale = - pattern->NewNode(GetNodeName("quant_op_in_scale")) - ->assert_is_op_input("fake_quantize_range_abs_max", "InScale") - ->AsInput(); - auto quant_op = pattern->NewNode(GetNodeName("quant_op")) - ->assert_is_op("fake_quantize_range_abs_max"); + auto quant_op_in_scale = pattern->NewNode(GetNodeName("quant_op_in_scale")) + ->assert_is_op_input(quant_type, "InScale") + ->AsInput(); + auto quant_op = + pattern->NewNode(GetNodeName("quant_op"))->assert_is_op(quant_type); auto quant_op_out_scale = pattern->NewNode(GetNodeName("quant_op_out_scale")) - ->assert_is_op_output("fake_quantize_range_abs_max", "OutScale") + ->assert_is_op_output(quant_type, "OutScale") ->assert_is_op_input("fake_dequantize_max_abs", "Scale") ->AsIntermediate(); - auto quant_op_out = - pattern->NewNode(GetNodeName("quant_op_out")) - ->assert_is_op_output("fake_quantize_range_abs_max", "Out") - ->assert_is_op_input(op_type) - ->AsIntermediate(); + auto quant_op_out = pattern->NewNode(GetNodeName("quant_op_out")) + ->assert_is_op_output(quant_type, "Out") + ->assert_is_op_input(op_type) + ->AsIntermediate(); // there are 'times' quantized and dequant op std::vector nodes; diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index a5ac3a0c3733cf..525987e0072cb0 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -880,7 +880,8 @@ struct QuantDequantOpFuse : public PatternBase { : PatternBase(pattern, name_scope, "quant_dequant_fuse") {} void operator()(PDNode* quant_op_input, const std::string& op_name, - const std::string& weight_name, int times = 1); + const std::string& weight_name, int times, + const std::string& quant_type); std::string GetNodeName(const std::string& op_type) { return PDNodeName(name_scope_, repr_, id_, op_type); diff --git a/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc b/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc index 7cab9c353d35cb..017e3ef234c95d 100644 --- a/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc +++ b/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc @@ -25,7 +25,8 @@ namespace framework { namespace ir { void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, - std::string op_type) { + const std::string& op_type, + const std::string& quant_type) { const std::string pattern_name = "quant_dequant_fuse"; // FusePassBase::Init(pattern_name, graph); const int kNumFields = 5; @@ -38,7 +39,7 @@ void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, GraphPatternDetector gpd; auto* x = gpd.mutable_pattern() ->NewNode("x") - ->assert_is_op_input("fake_quantize_range_abs_max", "X") + ->assert_is_op_input(quant_type, "X") ->AsInput(); std::string quantized_op_type = ""; @@ -46,6 +47,9 @@ void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, if (op_type == "conv2d") { quantized_op_type = "conv2d"; weight_name = "Filter"; + } else if (op_type == "depthwise_conv2d") { + quantized_op_type = "depthwise_conv2d"; + weight_name = "Filter"; } else if (op_type == "conv2d_fusion") { quantized_op_type = "conv2d_fusion"; weight_name = "Filter"; @@ -62,7 +66,7 @@ void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, } patterns::QuantDequantOpFuse pattern(gpd.mutable_pattern(), pattern_name); - pattern(x, quantized_op_type, weight_name, times); + pattern(x, quantized_op_type, weight_name, times, quant_type); auto handler = [&](const GraphPatternDetector::subgraph_t& 
subgraph, Graph* g) { @@ -103,7 +107,6 @@ void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, std::unordered_set delete_nodes; for (int i = 0; i < times; i++) { - // max_range = (range * range) / weight_scale float max_range = boost::get( nodes[i * kNumFields + kDequantOpOffset]->Op()->GetAttr("max_range")); float weight_scale = (range * range) / max_range; @@ -118,7 +121,8 @@ void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, new_op_desc.SetType(quantized_op_type); if (quantized_op_type == "conv2d" || - quantized_op_type == "conv2d_fusion") { + quantized_op_type == "conv2d_fusion" || + quantized_op_type == "depthwise_conv2d") { new_op_desc.SetInput("Input", {new_input}); new_op_desc.SetOutput("Output", {new_output}); } else if (quantized_op_type == "fc") { @@ -156,11 +160,17 @@ void QuantDequantFusePass::ApplyImpl(ir::Graph* graph) const { const std::string pattern_name = "quant_dequant_fuse"; FusePassBase::Init(pattern_name, graph); - std::unordered_set quantized_op_types = {"conv2d", "mul"}; + std::unordered_set quant_types = { + "fake_quantize_range_abs_max", "fake_quantize_moving_average_abs_max"}; + + std::unordered_set quantized_op_types = {"conv2d", "mul", + "depthwise_conv2d"}; auto* scope = param_scope(); - for (auto& op_type : quantized_op_types) { - for (int i = 1; i <= 6; i++) { - RunQuantDequant(graph, scope, i, op_type); + for (auto& quant_type : quant_types) { + for (auto& op_type : quantized_op_types) { + for (int i = 6; i >= 1; i--) { + RunQuantDequant(graph, scope, i, op_type, quant_type); + } } } } diff --git a/paddle/fluid/inference/anakin/convert/CMakeLists.txt b/paddle/fluid/inference/anakin/convert/CMakeLists.txt index 7cc75de8ee651e..6546d3b855fbc1 100644 --- a/paddle/fluid/inference/anakin/convert/CMakeLists.txt +++ b/paddle/fluid/inference/anakin/convert/CMakeLists.txt @@ -1,4 +1,9 @@ -cc_library(anakin_op_converter SRCS fc.cc conv2d.cc conv2d_fusion.cc elementwise.cc activation.cc pool2d.cc concat.cc split.cc relu.cc softmax.cc batch_norm.cc reshape.cc flatten.cc transpose.cc density_prior_box.cc detection_out.cc scale.cc dropout.cc im2sequence.cc sum.cc affine_channel.cc roi_align.cc DEPS anakin_engine framework_proto scope op_registry) +cc_library(anakin_op_converter SRCS fc.cc conv2d.cc conv2d_fusion.cc +elementwise.cc activation.cc pool2d.cc concat.cc split.cc relu.cc softmax.cc +batch_norm.cc reshape.cc flatten.cc transpose.cc density_prior_box.cc +detection_out.cc scale.cc dropout.cc im2sequence.cc sum.cc affine_channel.cc +roi_align.cc helper.cc DEPS anakin_engine framework_proto scope op_registry +gtest) cc_test(test_anakin_fc SRCS test_fc_op.cc DEPS anakin_op_converter mul_op SERIAL) cc_test(test_anakin_conv2d SRCS test_conv2d_op.cc DEPS anakin_op_converter conv_op im2col vol2col depthwise_conv SERIAL) diff --git a/paddle/fluid/inference/anakin/convert/activation.cc b/paddle/fluid/inference/anakin/convert/activation.cc index 11f92c95217b37..6e52357483d754 100644 --- a/paddle/fluid/inference/anakin/convert/activation.cc +++ b/paddle/fluid/inference/anakin/convert/activation.cc @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -ActivationOpConverter::ActivationOpConverter( +template +ActivationOpConverter::ActivationOpConverter( const std::string &op_type) : op_type_(op_type) { auto it = anakin_op_types_.find(op_type_); @@ -30,8 +30,8 @@ ActivationOpConverter::ActivationOpConverter( anakin_op_type_ = it->second; } -template -void ActivationOpConverter::operator()( +template +void 
ActivationOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -50,11 +50,40 @@ void ActivationOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(sigmoid, - SigmoidOpConverter<::anakin::saber::NV>); -REGISTER_CUDA_ANAKIN_OP_CONVERTER(tanh, TanhOpConverter<::anakin::saber::NV>); +using sigmoid_nv_fp32 = + ::paddle::inference::anakin::SigmoidOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using sigmoid_nv_int8 = + ::paddle::inference::anakin::SigmoidOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +using tanh_nv_fp32 = + ::paddle::inference::anakin::TanhOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using tanh_nv_int8 = + ::paddle::inference::anakin::TanhOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; + +REGISTER_CUDA_ANAKIN_OP_CONVERTER(sigmoid, sigmoid_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(sigmoid, sigmoid_nv_int8); +REGISTER_CUDA_ANAKIN_OP_CONVERTER(tanh, tanh_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(tanh, tanh_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(sigmoid, - SigmoidOpConverter<::anakin::saber::X86>); -REGISTER_CPU_ANAKIN_OP_CONVERTER(tanh, TanhOpConverter<::anakin::saber::X86>); +using sigmoid_cpu_fp32 = + ::paddle::inference::anakin::SigmoidOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using sigmoid_cpu_int8 = + ::paddle::inference::anakin::SigmoidOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +using tanh_cpu_fp32 = + ::paddle::inference::anakin::TanhOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using tanh_cpu_int8 = + ::paddle::inference::anakin::TanhOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; + +REGISTER_CPU_ANAKIN_OP_CONVERTER(sigmoid, sigmoid_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(sigmoid, sigmoid_cpu_int8); + +REGISTER_CPU_ANAKIN_OP_CONVERTER(tanh, tanh_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(tanh, tanh_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/activation.h b/paddle/fluid/inference/anakin/convert/activation.h index b3fe4748641cf0..021ec4c7fdf3e9 100644 --- a/paddle/fluid/inference/anakin/convert/activation.h +++ b/paddle/fluid/inference/anakin/convert/activation.h @@ -22,8 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class ActivationOpConverter : public AnakinOpConverter { +template +class ActivationOpConverter : public AnakinOpConverter { public: explicit ActivationOpConverter(const std::string &op_type); @@ -40,16 +40,17 @@ class ActivationOpConverter : public AnakinOpConverter { {"sigmoid", "Sigmoid"}}; }; -template -class TanhOpConverter : public ActivationOpConverter { +template +class TanhOpConverter : public ActivationOpConverter { public: - TanhOpConverter() : ActivationOpConverter("tanh") {} + TanhOpConverter() : ActivationOpConverter("tanh") {} }; -template -class SigmoidOpConverter : public ActivationOpConverter { +template +class SigmoidOpConverter : public ActivationOpConverter { public: - SigmoidOpConverter() : ActivationOpConverter("sigmoid") {} + SigmoidOpConverter() + : ActivationOpConverter("sigmoid") {} }; } // namespace anakin } // namespace inference diff --git a/paddle/fluid/inference/anakin/convert/affine_channel.cc b/paddle/fluid/inference/anakin/convert/affine_channel.cc index 6bf913e7ffbc02..074c1b26ba8913 100644 --- 
a/paddle/fluid/inference/anakin/convert/affine_channel.cc +++ b/paddle/fluid/inference/anakin/convert/affine_channel.cc @@ -16,18 +16,14 @@ #include #include #include - -using anakin::graph::GraphGlobalMem; -using anakin::PTuple; -using anakin::AK_FLOAT; -using anakin::saber::Shape; +#include "paddle/fluid/inference/anakin/convert/helper.h" namespace paddle { namespace inference { namespace anakin { -template -void AffineChannelOpConverter::operator()( +template +void AffineChannelOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -35,60 +31,20 @@ void AffineChannelOpConverter::operator()( PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - auto input_name = op_desc.Input("X").front(); auto output_name = op_desc.Output("Out").front(); + this->engine_->AddOp(op_name, "AffineChannel", {input_name}, {output_name}); // Copy the Scale to CPUPlace and get the pointer. auto *scale_v = scope.FindVar(op_desc.Input("Scale").front()); PADDLE_ENFORCE_NOT_NULL(scale_v); - auto *scale_t = scale_v->GetMutable(); - std::unique_ptr scale_tensor( - new framework::LoDTensor()); - scale_tensor->Resize(scale_t->dims()); - TensorCopySync((*scale_t), platform::CPUPlace(), scale_tensor.get()); + auto weight1 = pblock_from_var(*scale_v); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); // Copy the Bias to CPUPlace and get the pointer. auto *bias_v = scope.FindVar(op_desc.Input("Bias").front()); PADDLE_ENFORCE_NOT_NULL(bias_v); - auto *bias_t = bias_v->GetMutable(); - std::unique_ptr bias_tensor(new framework::LoDTensor()); - bias_tensor->Resize(bias_t->dims()); - TensorCopySync((*bias_t), platform::CPUPlace(), bias_tensor.get()); - - this->engine_->AddOp(op_name, "AffineChannel", {input_name}, {output_name}); - - // Generate the Scale parameter of Anakin. - auto scale_shape = framework::vectorize2int(scale_t->dims()); - while (scale_shape.size() < 4) { - scale_shape.insert(scale_shape.begin(), 1); - } - Shape anakin_scale_shape(scale_shape); - auto *weight1 = - GraphGlobalMem::Global().template new_block( - anakin_scale_shape); - float *scale_cpu_data = - static_cast(weight1->h_tensor().mutable_data()); - std::copy_n(scale_tensor->data(), scale_tensor->numel(), - scale_cpu_data); - weight1->d_tensor().set_shape(anakin_scale_shape); - weight1->d_tensor().copy_from(weight1->h_tensor()); - this->engine_->AddOpAttr(op_name, "weight_1", *weight1); - - // Generate the Bias parameter of Anakin. 
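  // Note (illustrative, not part of this patch): convert/helper.{h,cc} are
  // new files whose bodies are not shown in these hunks. Judging from the
  // inline code being deleted here, a pblock_from_var-style helper presumably
  // copies the variable's LoDTensor to CPUPlace, pads its shape to 4-D,
  // allocates an AK_FLOAT block through GraphGlobalMem and mirrors the host
  // tensor into the device tensor; a sketch of such a helper is given at the
  // end of this section.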
- auto bias_shape = framework::vectorize2int(bias_t->dims()); - while (bias_shape.size() < 4) { - bias_shape.insert(bias_shape.begin(), 1); - } - Shape anakin_bias_shape(bias_shape); - auto *weight2 = - GraphGlobalMem::Global().template new_block( - anakin_bias_shape); - float *bias_cpu_data = - static_cast(weight2->h_tensor().mutable_data()); - std::copy_n(bias_tensor->data(), bias_tensor->numel(), bias_cpu_data); - weight2->d_tensor().set_shape(anakin_bias_shape); - weight2->d_tensor().copy_from(weight2->h_tensor()); + auto weight2 = pblock_from_var(*bias_v); this->engine_->AddOpAttr(op_name, "weight_2", *weight2); } @@ -97,8 +53,21 @@ void AffineChannelOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER( - affine_channel, AffineChannelOpConverter<::anakin::saber::NV>); +using affine_channel_nv_fp32 = + ::paddle::inference::anakin::AffineChannelOpConverter< + ::anakin::saber::NV, ::anakin::Precision::FP32>; +using affine_channel_nv_int8 = + ::paddle::inference::anakin::AffineChannelOpConverter< + ::anakin::saber::NV, ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(affine_channel, affine_channel_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(affine_channel, affine_channel_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER( - affine_channel, AffineChannelOpConverter<::anakin::saber::X86>); + +using affine_channel_cpu_fp32 = + ::paddle::inference::anakin::AffineChannelOpConverter< + ::anakin::saber::X86, ::anakin::Precision::FP32>; +using affine_channel_cpu_int8 = + ::paddle::inference::anakin::AffineChannelOpConverter< + ::anakin::saber::X86, ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(affine_channel, affine_channel_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(affine_channel, affine_channel_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/affine_channel.h b/paddle/fluid/inference/anakin/convert/affine_channel.h index 5da4a736e8d7e0..443f6101288af4 100644 --- a/paddle/fluid/inference/anakin/convert/affine_channel.h +++ b/paddle/fluid/inference/anakin/convert/affine_channel.h @@ -21,8 +21,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class AffineChannelOpConverter : public AnakinOpConverter { +template +class AffineChannelOpConverter : public AnakinOpConverter { public: AffineChannelOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/batch_norm.cc b/paddle/fluid/inference/anakin/convert/batch_norm.cc index 1c837e9c3dfd4e..3e1e422aea19bc 100644 --- a/paddle/fluid/inference/anakin/convert/batch_norm.cc +++ b/paddle/fluid/inference/anakin/convert/batch_norm.cc @@ -18,17 +18,14 @@ #include #include #include - -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::Shape; +#include "paddle/fluid/inference/anakin/convert/helper.h" namespace paddle { namespace inference { namespace anakin { -template -void BatchNormOpConverter::operator()( +template +void BatchNormOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -36,87 +33,46 @@ void BatchNormOpConverter::operator()( std::map inputs; for (auto k : {"X", "Scale", "Bias", "Mean", "Variance"}) { PADDLE_ENFORCE_EQ(op_desc.Input(k).size(), 1UL); - auto v = op_desc.Input(k).front(); - inputs.insert({k, v}); } + auto input = op_desc.Input("X").front(); auto output = op_desc.Output("Y").front(); auto op_name = op_desc.Type() + 
":" + op_desc.Output("Y").front(); auto epsilon = boost::get(op_desc.GetAttr("epsilon")); - // auto momentum = boost::get(op_desc.GetAttr("momentum")); auto bn_op_name = op_name + ":bn"; auto bn_output = bn_op_name + "_output"; - this->engine_->AddOp(bn_op_name, "BatchNorm", {inputs["X"]}, {bn_output}); + this->engine_->AddOp(bn_op_name, "BatchNorm", {input}, {bn_output}); this->engine_->AddOpAttr(bn_op_name, "epsilon", epsilon); this->engine_->AddOpAttr(bn_op_name, "momentum", static_cast(1.0)); auto scale_op_name = op_name + ":scale"; - auto get_lod_tensor = [this, &scope, &op_name](const std::string &var_name, - framework::LoDTensor *tensor) { - auto *v = scope.FindVar(var_name); - PADDLE_ENFORCE_NOT_NULL(v); - auto *t = v->GetMutable(); - tensor->Resize(t->dims()); - TensorCopySync(*t, platform::CPUPlace(), tensor); - }; - - framework::LoDTensor bias_t; - framework::LoDTensor mean_t; - framework::LoDTensor scale_t; - framework::LoDTensor variance_t; - get_lod_tensor(inputs["Bias"], &bias_t); - get_lod_tensor(inputs["Mean"], &mean_t); - get_lod_tensor(inputs["Scale"], &scale_t); - get_lod_tensor(inputs["Variance"], &variance_t); + this->engine_->AddOp(scale_op_name, "Scale", {bn_output}, {output}); + this->engine_->AddOpAttr(scale_op_name, "axis", 1); + this->engine_->AddOpAttr(scale_op_name, "num_axes", 1); + this->engine_->AddOpAttr(scale_op_name, "bias_term", true); - auto fill_shape = [](size_t n, std::vector shape) { - shape.insert(shape.begin(), 1); - if (shape.size() < n) { - shape.insert(shape.end(), n - shape.size(), 1); - } - return shape; - }; - Shape shape1(fill_shape(4, framework::vectorize2int(mean_t.dims()))); - Shape shape2(fill_shape(4, framework::vectorize2int(variance_t.dims()))); - auto *weight1 = - GraphGlobalMem::Global().template new_block(shape1); - auto *mean_data = static_cast(weight1->h_tensor().mutable_data()); - std::copy_n(mean_t.data(), mean_t.numel(), mean_data); + auto *mean_v = scope.FindVar(op_desc.Input("Mean").front()); + PADDLE_ENFORCE_NOT_NULL(mean_v); + auto weight1 = pblock_from_var(*mean_v); this->engine_->AddOpAttr(bn_op_name, "weight_1", *weight1); - auto *weight2 = - GraphGlobalMem::Global().template new_block(shape2); - auto *variance_data = - static_cast(weight2->h_tensor().mutable_data()); - std::copy_n(variance_t.data(), variance_t.numel(), variance_data); + auto *variance_v = scope.FindVar(op_desc.Input("Variance").front()); + PADDLE_ENFORCE_NOT_NULL(variance_v); + auto weight2 = pblock_from_var(*variance_v); this->engine_->AddOpAttr(bn_op_name, "weight_2", *weight2); - Shape shape3(std::vector({1, 1, 1, 1})); - auto *weight3 = - GraphGlobalMem::Global().template new_block(shape3); - auto *alpha_data = static_cast(weight3->h_tensor().mutable_data()); - float weight3_data[] = {1}; - std::copy(std::begin(weight3_data), std::end(weight3_data), alpha_data); + auto *weight3 = pblock_from_vector(std::vector({1})); this->engine_->AddOpAttr(bn_op_name, "weight_3", *weight3); - Shape scale_shape(fill_shape(4, framework::vectorize2int(scale_t.dims()))); - auto *scale = GraphGlobalMem::Global().template new_block( - scale_shape); - auto *scale_data = static_cast(scale->h_tensor().mutable_data()); - std::copy_n(scale_t.data(), scale_t.numel(), scale_data); - - Shape bias_shape(fill_shape(4, framework::vectorize2int(bias_t.dims()))); - auto *bias = GraphGlobalMem::Global().template new_block( - bias_shape); - auto *bias_data = static_cast(bias->h_tensor().mutable_data()); - std::copy_n(bias_t.data(), bias_t.numel(), bias_data); - - 
this->engine_->AddOp(scale_op_name, "Scale", {bn_output}, {output}); - this->engine_->AddOpAttr(scale_op_name, "axis", 1); - this->engine_->AddOpAttr(scale_op_name, "num_axes", 1); - this->engine_->AddOpAttr(scale_op_name, "bias_term", true); + auto *scale_v = scope.FindVar(op_desc.Input("Scale").front()); + PADDLE_ENFORCE_NOT_NULL(scale_v); + auto scale = pblock_from_var(*scale_v); this->engine_->AddOpAttr(scale_op_name, "weight_1", *scale); + + auto *bias_v = scope.FindVar(op_desc.Input("Bias").front()); + PADDLE_ENFORCE_NOT_NULL(bias_v); + auto bias = pblock_from_var(*bias_v); this->engine_->AddOpAttr(scale_op_name, "weight_2", *bias); } @@ -125,9 +81,17 @@ void BatchNormOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(batch_norm, - BatchNormOpConverter<::anakin::saber::NV>); +using bn_nv_fp32 = ::paddle::inference::anakin::BatchNormOpConverter< + ::anakin::saber::NV, ::anakin::Precision::FP32>; +using bn_nv_int8 = ::paddle::inference::anakin::BatchNormOpConverter< + ::anakin::saber::NV, ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(batch_norm, bn_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(batch_norm, bn_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(batch_norm, - BatchNormOpConverter<::anakin::saber::X86>); +using bn_cpu_fp32 = ::paddle::inference::anakin::BatchNormOpConverter< + ::anakin::saber::X86, ::anakin::Precision::FP32>; +using bn_cpu_int8 = ::paddle::inference::anakin::BatchNormOpConverter< + ::anakin::saber::X86, ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(batch_norm, bn_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(batch_norm, bn_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/batch_norm.h b/paddle/fluid/inference/anakin/convert/batch_norm.h index dc94b6ff64d13b..52156aeb0283af 100644 --- a/paddle/fluid/inference/anakin/convert/batch_norm.h +++ b/paddle/fluid/inference/anakin/convert/batch_norm.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class BatchNormOpConverter : public AnakinOpConverter { +template +class BatchNormOpConverter : public AnakinOpConverter { public: BatchNormOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/concat.cc b/paddle/fluid/inference/anakin/convert/concat.cc index cfd9540acf60ab..6655c2f047a0da 100644 --- a/paddle/fluid/inference/anakin/convert/concat.cc +++ b/paddle/fluid/inference/anakin/convert/concat.cc @@ -19,8 +19,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void ConcatOpConverter::operator()( +template +void ConcatOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -39,8 +39,21 @@ void ConcatOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(concat, - ConcatOpConverter<::anakin::saber::NV>); +using concat_nv_fp32 = + ::paddle::inference::anakin::ConcatOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using concat_nv_int8 = + ::paddle::inference::anakin::ConcatOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(concat, concat_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(concat, concat_nv_int8); + #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(concat, - ConcatOpConverter<::anakin::saber::X86>); +using concat_cpu_fp32 = + 
::paddle::inference::anakin::ConcatOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using concat_cpu_int8 = + ::paddle::inference::anakin::ConcatOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(concat, concat_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(concat, concat_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/concat.h b/paddle/fluid/inference/anakin/convert/concat.h index a32f8a4612921f..fb5514affa78d2 100644 --- a/paddle/fluid/inference/anakin/convert/concat.h +++ b/paddle/fluid/inference/anakin/convert/concat.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class ConcatOpConverter : public AnakinOpConverter { +template +class ConcatOpConverter : public AnakinOpConverter { public: ConcatOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/conv2d.cc b/paddle/fluid/inference/anakin/convert/conv2d.cc index f9ab9874751300..4bd380e7bb23b3 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d.cc +++ b/paddle/fluid/inference/anakin/convert/conv2d.cc @@ -16,18 +16,16 @@ #include #include #include +#include "paddle/fluid/inference/anakin/convert/helper.h" -using anakin::graph::GraphGlobalMem; using anakin::PTuple; -using anakin::AK_FLOAT; -using anakin::saber::Shape; namespace paddle { namespace inference { namespace anakin { -template -void Conv2dOpConverter::operator()( +template +void Conv2dOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -42,11 +40,8 @@ void Conv2dOpConverter::operator()( auto *filter_v = scope.FindVar(op_desc.Input("Filter").front()); PADDLE_ENFORCE_NOT_NULL(filter_v); - auto *filter_t = filter_v->GetMutable(); - std::unique_ptr weight_tensor( - new framework::LoDTensor()); - weight_tensor->Resize(filter_t->dims()); - TensorCopySync((*filter_t), platform::CPUPlace(), weight_tensor.get()); + auto weight_tensor = tensor_from_var(*filter_v, platform::CPUPlace()); + auto weight_shape = framework::vectorize2int(weight_tensor->dims()); PADDLE_ENFORCE_EQ(weight_tensor->dims().size(), 4UL); @@ -69,25 +64,61 @@ void Conv2dOpConverter::operator()( this->engine_->AddOpAttr(op_name, "axis", 1); this->engine_->AddOpAttr(op_name, "bias_term", false); - auto weight_shape = framework::vectorize2int(filter_t->dims()); - Shape anakin_shape(weight_shape); - auto *weight1 = - GraphGlobalMem::Global().template new_block( - anakin_shape); - float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); - std::copy_n(weight_tensor->data(), weight_tensor->numel(), cpu_data); - weight1->d_tensor().set_shape(anakin_shape); - weight1->d_tensor().copy_from(weight1->h_tensor()); - this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + ::anakin::saber::Shape anakin_shape(weight_shape); + bool enable_int8 = boost::get(op_desc.HasAttr("enable_int8")); + + if (enable_int8) { + const float int8_range = 127.; + float in_scale = boost::get(op_desc.GetAttr("input_scale")); + float weight_scale = boost::get(op_desc.GetAttr("weight_scale")); + auto *weight1 = ::anakin::graph::GraphGlobalMem::Global() + .template new_block<::anakin::AK_INT8>(anakin_shape); + float *weight_data = weight_tensor->data(); + std::vector weight_int8; + int weight_num = weight_tensor->numel(); + for (int i = 0; i < weight_tensor->numel(); i++) { + bool is_valid_int8 = + ((weight_data[i] >= -128) && (weight_data[i] <= 127)); + 
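      // Note (illustrative, not part of this patch): this check assumes the
      // quant/dequant fuse pass left the filter in its quantized form, i.e.
      // each float element already holds an int8 value; the element is merely
      // narrowed to char below, while the real scales are attached separately
      // as weight_scale / int8_range and in_scale / int8_range.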
PADDLE_ENFORCE(is_valid_int8, + "We are in anakin subgraph int8 mode, the weight of conv " + "should be in range [-128, 127]"); + weight_int8.push_back(static_cast(weight_data[i])); + } + memcpy(static_cast(weight1->h_tensor().mutable_data()), + static_cast(weight_int8.data()), sizeof(char) * weight_num); + weight1->d_tensor().set_shape(anakin_shape); + weight1->d_tensor().copy_from(weight1->h_tensor()); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + this->engine_->Graph()->SetOpPrec(op_name, ::anakin::AK_INT8); + this->engine_->Graph()->SetWeightsScale(op_name, + {weight_scale / int8_range}, false); + this->engine_->AddTensorScale(input_name, in_scale / int8_range); + } else { + auto *weight1 = pblock_from_tensor(*weight_tensor, weight_shape); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + } } } // namespace anakin } // namespace inference } // namespace paddle -REGISTER_CPU_ANAKIN_OP_CONVERTER(conv2d, - Conv2dOpConverter<::anakin::saber::X86>); #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(conv2d, - Conv2dOpConverter<::anakin::saber::NV>); +using conv2d_nv_fp32 = + ::paddle::inference::anakin::Conv2dOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using conv2d_nv_int8 = + ::paddle::inference::anakin::Conv2dOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(conv2d, conv2d_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(conv2d, conv2d_nv_int8); #endif + +using conv2d_cpu_fp32 = + ::paddle::inference::anakin::Conv2dOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using conv2d_cpu_int8 = + ::paddle::inference::anakin::Conv2dOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(conv2d, conv2d_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(conv2d, conv2d_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/conv2d.h b/paddle/fluid/inference/anakin/convert/conv2d.h index 6ecb32840519e0..b22cb8ea9318cf 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d.h +++ b/paddle/fluid/inference/anakin/convert/conv2d.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class Conv2dOpConverter : public AnakinOpConverter { +template +class Conv2dOpConverter : public AnakinOpConverter { public: Conv2dOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc b/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc index ff60771f87b33e..a8ef73d50f2a42 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc +++ b/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc @@ -16,18 +16,16 @@ #include #include #include +#include "paddle/fluid/inference/anakin/convert/helper.h" -using anakin::graph::GraphGlobalMem; using anakin::PTuple; -using anakin::AK_FLOAT; -using anakin::saber::Shape; namespace paddle { namespace inference { namespace anakin { -template -void Conv2dFusionOpConverter::operator()( +template +void Conv2dFusionOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -43,24 +41,16 @@ void Conv2dFusionOpConverter::operator()( auto *filter_v = scope.FindVar(op_desc.Input("Filter").front()); PADDLE_ENFORCE_NOT_NULL(filter_v); - auto *filter_t = filter_v->GetMutable(); + + auto weight_tensor = tensor_from_var(*filter_v, platform::CPUPlace()); + auto weight_shape = framework::vectorize2int(weight_tensor->dims()); 
auto *b_v = scope.FindVar(op_desc.Input("Bias").front()); PADDLE_ENFORCE_NOT_NULL(b_v); - auto *b_t = b_v->GetMutable(); - - std::unique_ptr weight_tensor( - new framework::LoDTensor()); - weight_tensor->Resize(filter_t->dims()); - TensorCopySync((*filter_t), platform::CPUPlace(), weight_tensor.get()); PADDLE_ENFORCE_EQ(weight_tensor->dims().size(), 4UL); - - // const int n_output = weight_tensor->dims()[0]; - // const int n_input = weight_tensor->dims()[1]; const int filter_h = weight_tensor->dims()[2]; const int filter_w = weight_tensor->dims()[3]; - // auto filter_num = n_input * filter_h * filter_w ; auto filter_num = weight_tensor->dims()[0]; this->engine_->template AddOpAttr(op_name, "filter_num", filter_num); this->engine_->template AddOpAttr>(op_name, "kernel_size", @@ -77,37 +67,42 @@ void Conv2dFusionOpConverter::operator()( this->engine_->AddOpAttr(op_name, "axis", 1); this->engine_->AddOpAttr(op_name, "bias_term", true); - auto weight_shape = framework::vectorize2int(filter_t->dims()); - Shape anakin_shape(weight_shape); - auto *weight1 = - GraphGlobalMem::Global().template new_block( - anakin_shape); - float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); - std::copy_n(weight_tensor->data(), weight_tensor->numel(), cpu_data); - weight1->d_tensor().set_shape(anakin_shape); - weight1->d_tensor().copy_from(weight1->h_tensor()); - this->engine_->AddOpAttr(op_name, "weight_1", *weight1); - - auto bias_shape = framework::vectorize2int(b_t->dims()); - framework::LoDTensor bias_tensor; - bias_tensor.Resize(b_t->dims()); - TensorCopySync((*b_t), platform::CPUPlace(), &bias_tensor); - auto *bias_data = bias_tensor.data(); - bias_shape.insert(bias_shape.begin(), 1); - bias_shape.insert(bias_shape.begin(), 1); - bias_shape.insert(bias_shape.begin(), 1); - // bias_shape.push_back(1); - // bias_shape.push_back(1); - Shape anakin_bias_shape(bias_shape); - - auto *weight2 = - GraphGlobalMem::Global().template new_block( - anakin_bias_shape); - float *cpu_data2 = static_cast(weight2->h_tensor().mutable_data()); - std::copy_n(bias_data, bias_tensor.numel(), cpu_data2); - weight2->d_tensor().set_shape(anakin_bias_shape); - weight2->d_tensor().copy_from(weight2->h_tensor()); - this->engine_->AddOpAttr(op_name, "weight_2", *weight2); + ::anakin::saber::Shape anakin_shape(weight_shape); + bool enable_int8 = boost::get(op_desc.HasAttr("enable_int8")); + if (enable_int8) { + const float int8_range = 127.; + float in_scale = boost::get(op_desc.GetAttr("input_scale")); + float weight_scale = boost::get(op_desc.GetAttr("weight_scale")); + auto *weight1 = ::anakin::graph::GraphGlobalMem::Global() + .template new_block<::anakin::AK_INT8>(anakin_shape); + float *weight_data = weight_tensor->data(); + std::vector weight_int8; + int weight_num = weight_tensor->numel(); + for (int i = 0; i < weight_tensor->numel(); i++) { + bool is_valid_int8 = + ((weight_data[i] >= -128) && (weight_data[i] <= 127)); + PADDLE_ENFORCE(is_valid_int8, + "We are in anakin subgraph int8 mode, the weight of conv " + "should be in range [-128, 127]"); + weight_int8.push_back(static_cast(weight_data[i])); + } + memcpy(static_cast(weight1->h_tensor().mutable_data()), + static_cast(weight_int8.data()), sizeof(char) * weight_num); + weight1->d_tensor().set_shape(anakin_shape); + weight1->d_tensor().copy_from(weight1->h_tensor()); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + this->engine_->Graph()->SetOpPrec(op_name, ::anakin::AK_INT8); + this->engine_->Graph()->SetWeightsScale(op_name, + {weight_scale / 
int8_range}, false); + this->engine_->AddTensorScale(input_name, in_scale / int8_range); + } else { + auto weight_tensor = tensor_from_var(*filter_v, platform::CPUPlace()); + auto weight_shape = framework::vectorize2int(weight_tensor->dims()); + auto *weight1 = pblock_from_tensor(*weight_tensor, weight_shape); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + auto weight2 = pblock_from_var(*b_v); + this->engine_->AddOpAttr(op_name, "weight_2", *weight2); + } } } // namespace anakin @@ -115,9 +110,21 @@ void Conv2dFusionOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(conv2d_fusion, - Conv2dFusionOpConverter<::anakin::saber::NV>); +using conv2d_fusion_nv_fp32 = + ::paddle::inference::anakin::Conv2dFusionOpConverter< + ::anakin::saber::NV, ::anakin::Precision::FP32>; +using conv2d_fusion_nv_int8 = + ::paddle::inference::anakin::Conv2dFusionOpConverter< + ::anakin::saber::NV, ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(conv2d_fusion, conv2d_fusion_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(conv2d_fusion, conv2d_fusion_nv_int8); #endif - -REGISTER_CPU_ANAKIN_OP_CONVERTER(conv2d_fusion, - Conv2dFusionOpConverter<::anakin::saber::X86>); +using conv2d_fusion_cpu_fp32 = + ::paddle::inference::anakin::Conv2dFusionOpConverter< + ::anakin::saber::X86, ::anakin::Precision::FP32>; +using conv2d_fusion_cpu_int8 = + ::paddle::inference::anakin::Conv2dFusionOpConverter< + ::anakin::saber::X86, ::anakin::Precision::INT8>; + +REGISTER_CPU_ANAKIN_OP_CONVERTER(conv2d_fusion, conv2d_fusion_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(conv2d_fusion, conv2d_fusion_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/conv2d_fusion.h b/paddle/fluid/inference/anakin/convert/conv2d_fusion.h index abcf61a75e0fda..768814d3f996dd 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d_fusion.h +++ b/paddle/fluid/inference/anakin/convert/conv2d_fusion.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class Conv2dFusionOpConverter : public AnakinOpConverter { +template +class Conv2dFusionOpConverter : public AnakinOpConverter { public: Conv2dFusionOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/density_prior_box.cc b/paddle/fluid/inference/anakin/convert/density_prior_box.cc index f552e41c85fb11..92d147708bf647 100644 --- a/paddle/fluid/inference/anakin/convert/density_prior_box.cc +++ b/paddle/fluid/inference/anakin/convert/density_prior_box.cc @@ -23,8 +23,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void DensityPriorBoxOpConverter::operator()( +template +void DensityPriorBoxOpConverter::operator()( const framework::proto::OpDesc& op, const framework::BlockDesc& block_desc, const framework::Scope& scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -109,13 +109,24 @@ void DensityPriorBoxOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER( - density_prior_box, DensityPriorBoxOpConverter<::anakin::saber::NV>); -REGISTER_CUDA_ANAKIN_OP_CONVERTER( - prior_box, DensityPriorBoxOpConverter<::anakin::saber::NV>); +using ds_pr_nv_fp32 = ::paddle::inference::anakin::DensityPriorBoxOpConverter< + ::anakin::saber::NV, ::anakin::Precision::FP32>; +using ds_pr_nv_int8 = ::paddle::inference::anakin::DensityPriorBoxOpConverter< + ::anakin::saber::NV, ::anakin::Precision::INT8>; + +REGISTER_CUDA_ANAKIN_OP_CONVERTER(density_prior_box, ds_pr_nv_fp32); 
+REGISTER_CUDA_ANAKIN_OP_CONVERTER(prior_box, ds_pr_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(density_prior_box, ds_pr_nv_int8); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(prior_box, ds_pr_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER( - density_prior_box, DensityPriorBoxOpConverter<::anakin::saber::X86>); -REGISTER_CPU_ANAKIN_OP_CONVERTER( - prior_box, DensityPriorBoxOpConverter<::anakin::saber::X86>); +using ds_pr_cpu_fp32 = ::paddle::inference::anakin::DensityPriorBoxOpConverter< + ::anakin::saber::X86, ::anakin::Precision::FP32>; +using ds_pr_cpu_int8 = ::paddle::inference::anakin::DensityPriorBoxOpConverter< + ::anakin::saber::X86, ::anakin::Precision::INT8>; + +REGISTER_CPU_ANAKIN_OP_CONVERTER(density_prior_box, ds_pr_cpu_fp32); +REGISTER_CPU_ANAKIN_OP_CONVERTER(prior_box, ds_pr_cpu_fp32); + +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(density_prior_box, ds_pr_cpu_int8); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(prior_box, ds_pr_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/density_prior_box.h b/paddle/fluid/inference/anakin/convert/density_prior_box.h index 29f4f6f7f9db50..5714f57a04b7b3 100644 --- a/paddle/fluid/inference/anakin/convert/density_prior_box.h +++ b/paddle/fluid/inference/anakin/convert/density_prior_box.h @@ -22,8 +22,9 @@ namespace paddle { namespace inference { namespace anakin { -template -class DensityPriorBoxOpConverter : public AnakinOpConverter { +template +class DensityPriorBoxOpConverter + : public AnakinOpConverter { public: DensityPriorBoxOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/detection_out.cc b/paddle/fluid/inference/anakin/convert/detection_out.cc index 4a28c604f5853a..c06a8860e167f6 100644 --- a/paddle/fluid/inference/anakin/convert/detection_out.cc +++ b/paddle/fluid/inference/anakin/convert/detection_out.cc @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void DetectionOutOpConverter::operator()( +template +void DetectionOutOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -67,8 +67,21 @@ void DetectionOutOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(detection_out, - DetectionOutOpConverter<::anakin::saber::NV>); +using detection_out_nv_fp32 = + ::paddle::inference::anakin::DetectionOutOpConverter< + ::anakin::saber::NV, ::anakin::Precision::FP32>; +using detection_out_nv_int8 = + ::paddle::inference::anakin::DetectionOutOpConverter< + ::anakin::saber::NV, ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(detection_out, detection_out_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(detection_out, detection_out_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(detection_out, - DetectionOutOpConverter<::anakin::saber::X86>); + +using detection_out_cpu_fp32 = + ::paddle::inference::anakin::DetectionOutOpConverter< + ::anakin::saber::X86, ::anakin::Precision::FP32>; +using detection_out_cpu_int8 = + ::paddle::inference::anakin::DetectionOutOpConverter< + ::anakin::saber::X86, ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(detection_out, detection_out_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(detection_out, detection_out_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/detection_out.h b/paddle/fluid/inference/anakin/convert/detection_out.h index 396d5c9554fda7..c34342a66c1c6c 100644 --- 
a/paddle/fluid/inference/anakin/convert/detection_out.h +++ b/paddle/fluid/inference/anakin/convert/detection_out.h @@ -22,8 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class DetectionOutOpConverter : public AnakinOpConverter { +template +class DetectionOutOpConverter : public AnakinOpConverter { public: DetectionOutOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/dropout.cc b/paddle/fluid/inference/anakin/convert/dropout.cc index 989eafcd91ef46..e779aca7308397 100644 --- a/paddle/fluid/inference/anakin/convert/dropout.cc +++ b/paddle/fluid/inference/anakin/convert/dropout.cc @@ -16,17 +16,14 @@ #include #include #include - -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::Shape; +#include "paddle/fluid/inference/anakin/convert/helper.h" namespace paddle { namespace inference { namespace anakin { -template -void DropoutOpConverter::operator()( +template +void DropoutOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -42,12 +39,7 @@ void DropoutOpConverter::operator()( auto dropout_prob = boost::get(op_desc.GetAttr("dropout_prob")); auto factor = 1 - dropout_prob; - Shape shape1(std::vector({1, 1, 1, 1})); - auto *weight1 = - GraphGlobalMem::Global().template new_block(shape1); - auto *factor_data = static_cast(weight1->h_tensor().mutable_data()); - float weight1_data[] = {factor}; - std::copy(std::begin(weight1_data), std::end(weight1_data), factor_data); + auto *weight1 = pblock_from_vector(std::vector({factor})); this->engine_->AddOpAttr(op_name, "weight_1", *weight1); this->engine_->AddOpAttr(op_name, "axis", 0); @@ -60,8 +52,21 @@ void DropoutOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(dropout, - DropoutOpConverter<::anakin::saber::NV>); +using dropout_nv_fp32 = + ::paddle::inference::anakin::DropoutOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using dropout_nv_int8 = + ::paddle::inference::anakin::DropoutOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(dropout, dropout_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(dropout, dropout_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(dropout, - DropoutOpConverter<::anakin::saber::X86>); + +using dropout_cpu_fp32 = + ::paddle::inference::anakin::DropoutOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using dropout_cpu_int8 = + ::paddle::inference::anakin::DropoutOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(dropout, dropout_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(dropout, dropout_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/dropout.h b/paddle/fluid/inference/anakin/convert/dropout.h index c43c851fc0ee60..801aa3dd16f850 100644 --- a/paddle/fluid/inference/anakin/convert/dropout.h +++ b/paddle/fluid/inference/anakin/convert/dropout.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class DropoutOpConverter : public AnakinOpConverter { +template +class DropoutOpConverter : public AnakinOpConverter { public: DropoutOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/elementwise.cc b/paddle/fluid/inference/anakin/convert/elementwise.cc index 81e1d10d82bd66..e3ea6b2a97dd6a 100644 --- 
a/paddle/fluid/inference/anakin/convert/elementwise.cc +++ b/paddle/fluid/inference/anakin/convert/elementwise.cc @@ -17,17 +17,14 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::Shape; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -template -void ElementwiseAddOpConverter::operator()( +template +void ElementwiseAddOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -48,8 +45,8 @@ void ElementwiseAddOpConverter::operator()( this->engine_->template AddOpAttr>(op_name, "coeff", coeff); } -template -void ElementwiseMulOpConverter::operator()( +template +void ElementwiseMulOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -75,12 +72,31 @@ void ElementwiseMulOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER( - elementwise_add, ElementwiseAddOpConverter<::anakin::saber::NV>); -REGISTER_CUDA_ANAKIN_OP_CONVERTER( - elementwise_mul, ElementwiseMulOpConverter<::anakin::saber::NV>); +using elet_nv_fp32 = ::paddle::inference::anakin::ElementwiseAddOpConverter< + ::anakin::saber::NV, ::anakin::Precision::FP32>; +using elet_nv_int8 = ::paddle::inference::anakin::ElementwiseAddOpConverter< + ::anakin::saber::NV, ::anakin::Precision::INT8>; +using eletmul_nv_fp32 = ::paddle::inference::anakin::ElementwiseMulOpConverter< + ::anakin::saber::NV, ::anakin::Precision::FP32>; +using eletmul_nv_int8 = ::paddle::inference::anakin::ElementwiseMulOpConverter< + ::anakin::saber::NV, ::anakin::Precision::INT8>; + +REGISTER_CUDA_ANAKIN_OP_CONVERTER(elementwise_add, elet_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(elementwise_add, elet_nv_int8); +REGISTER_CUDA_ANAKIN_OP_CONVERTER(elementwise_mul, eletmul_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(elementwise_mul, eletmul_nv_int8); + #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER( - elementwise_add, ElementwiseAddOpConverter<::anakin::saber::X86>); -REGISTER_CPU_ANAKIN_OP_CONVERTER( - elementwise_mul, ElementwiseMulOpConverter<::anakin::saber::X86>); +using elet_cpu_fp32 = ::paddle::inference::anakin::ElementwiseAddOpConverter< + ::anakin::saber::X86, ::anakin::Precision::FP32>; +using elet_cpu_int8 = ::paddle::inference::anakin::ElementwiseAddOpConverter< + ::anakin::saber::X86, ::anakin::Precision::INT8>; +using eletmul_cpu_fp32 = ::paddle::inference::anakin::ElementwiseMulOpConverter< + ::anakin::saber::X86, ::anakin::Precision::FP32>; +using eletmul_cpu_int8 = ::paddle::inference::anakin::ElementwiseMulOpConverter< + ::anakin::saber::X86, ::anakin::Precision::INT8>; + +REGISTER_CPU_ANAKIN_OP_CONVERTER(elementwise_add, elet_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(elementwise_add, elet_cpu_int8); +REGISTER_CPU_ANAKIN_OP_CONVERTER(elementwise_mul, eletmul_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(elementwise_mul, eletmul_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/elementwise.h b/paddle/fluid/inference/anakin/convert/elementwise.h index f64a8c5f7f3234..190a8b55f0e3c2 100644 --- a/paddle/fluid/inference/anakin/convert/elementwise.h +++ b/paddle/fluid/inference/anakin/convert/elementwise.h @@ -20,8 +20,9 @@ namespace paddle { namespace inference { namespace anakin { -template -class 
ElementwiseAddOpConverter : public AnakinOpConverter { +template +class ElementwiseAddOpConverter + : public AnakinOpConverter { public: ElementwiseAddOpConverter() = default; @@ -34,8 +35,9 @@ class ElementwiseAddOpConverter : public AnakinOpConverter { private: }; -template -class ElementwiseMulOpConverter : public AnakinOpConverter { +template +class ElementwiseMulOpConverter + : public AnakinOpConverter { public: ElementwiseMulOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/fc.cc b/paddle/fluid/inference/anakin/convert/fc.cc index a04035eabace01..10ceb2154b1b7c 100644 --- a/paddle/fluid/inference/anakin/convert/fc.cc +++ b/paddle/fluid/inference/anakin/convert/fc.cc @@ -16,22 +16,19 @@ #include #include #include - -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::Shape; +#include "paddle/fluid/inference/anakin/convert/helper.h" namespace paddle { namespace inference { namespace anakin { -template -void FcBaseOpConverter::operator()( +template +void FcBaseOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); auto input_names = op_desc.InputNames(); - bool with_bias = input_names.size() == 3; + bool with_bias = input_names.size() >= 3; std::string w_name = "Y"; std::string i_name = "X"; @@ -45,7 +42,12 @@ void FcBaseOpConverter::operator()( // get weights auto *y_v = scope.FindVar(op_desc.Input(w_name).front()); PADDLE_ENFORCE_NOT_NULL(y_v); - auto *y_t = y_v->GetMutable(); + auto weight_tensor = tensor_from_var(*y_v, platform::CPUPlace()); + auto weight_shape = framework::vectorize2int(weight_tensor->dims()); + + int out_dim = weight_shape[1]; + const int w_m = weight_shape[0]; + const int w_k = weight_shape[1]; auto input_name = op_desc.Input(i_name).front(); auto output_name = op_desc.Output("Out").front(); @@ -53,64 +55,58 @@ void FcBaseOpConverter::operator()( this->engine_->AddOp(op_name, "Dense", {input_name}, {output_name}); this->engine_->AddOpAttr(op_name, "bias_term", with_bias); this->engine_->AddOpAttr(op_name, "axis", 1); - - auto weight_shape = framework::vectorize2int(y_t->dims()); - int out_dim = weight_shape[1]; this->engine_->AddOpAttr(op_name, "out_dim", out_dim); - const int w_m = weight_shape[0]; - const int w_k = weight_shape[1]; - - if (weight_shape.size() < 4UL) { - weight_shape.insert(weight_shape.begin(), 4UL - weight_shape.size(), 1); - } - Shape anakin_shape(weight_shape); - framework::LoDTensor weight_tensor; - weight_tensor.Resize(y_t->dims()); - TensorCopySync((*y_t), platform::CPUPlace(), &weight_tensor); - auto *weight_data = weight_tensor.data(); - PADDLE_ENFORCE(w_m * w_k == weight_tensor.numel()); + auto *weight_data = weight_tensor->data(); + PADDLE_ENFORCE(w_m * w_k == weight_tensor->numel()); - std::vector trans_weight_data(weight_tensor.numel()); + std::vector trans_weight_data(weight_tensor->numel()); for (int i = 0; i < w_m; i++) { for (int j = 0; j < w_k; j++) { trans_weight_data[i + j * w_m] = weight_data[i * w_k + j]; } } - auto *weight1 = - GraphGlobalMem::Global().template new_block( - anakin_shape); - float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); - std::copy_n(trans_weight_data.data(), weight_tensor.numel(), cpu_data); - weight1->d_tensor().set_shape(anakin_shape); - weight1->d_tensor().copy_from(weight1->h_tensor()); - this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + + int weight_num = weight_tensor->numel(); + 
bool enable_int8 = boost::get(op_desc.HasAttr("enable_int8")); + if (enable_int8) { + if (weight_shape.size() < 4UL) { + weight_shape.insert(weight_shape.begin(), 4UL - weight_shape.size(), 1); + } + ::anakin::saber::Shape anakin_shape(weight_shape); + const float int8_range = 127.; + float in_scale = boost::get(op_desc.GetAttr("input_scale")); + float weight_scale = boost::get(op_desc.GetAttr("weight_scale")); + auto *weight1 = ::anakin::graph::GraphGlobalMem::Global() + .template new_block<::anakin::AK_INT8>(anakin_shape); + std::vector weight_int8; + for (int i = 0; i < weight_num; i++) { + bool is_valid_int8 = + ((trans_weight_data[i] >= -128) && (trans_weight_data[i] <= 127)); + PADDLE_ENFORCE(is_valid_int8, + "We are in anakin subgraph int8 mode, the weight of fc " + "should be in range [-128, 127]"); + weight_int8.push_back(static_cast(trans_weight_data[i])); + } + memcpy(static_cast(weight1->h_tensor().mutable_data()), + static_cast(weight_int8.data()), sizeof(char) * weight_num); + weight1->d_tensor().set_shape(anakin_shape); + weight1->d_tensor().copy_from(weight1->h_tensor()); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + this->engine_->Graph()->SetOpPrec(op_name, ::anakin::AK_INT8); + this->engine_->Graph()->SetWeightsScale(op_name, + {weight_scale / int8_range}, false); + this->engine_->AddTensorScale(input_name, in_scale / int8_range); + } else { + auto *weight1 = pblock_from_vector(trans_weight_data); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + } // get bias if (with_bias) { auto *b_v = scope.FindVar(op_desc.Input("Bias").front()); PADDLE_ENFORCE_NOT_NULL(b_v); - auto *b_t = b_v->GetMutable(); - - auto bias_shape = framework::vectorize2int(b_t->dims()); - framework::LoDTensor bias_tensor; - bias_tensor.Resize(b_t->dims()); - TensorCopySync((*b_t), platform::CPUPlace(), &bias_tensor); - auto *bias_data = bias_tensor.data(); - bias_shape.insert(bias_shape.begin(), 1); - bias_shape.insert(bias_shape.begin(), 1); - bias_shape.insert(bias_shape.begin(), 1); - // bias_shape.push_back(1); - // bias_shape.push_back(1); - Shape anakin_bias_shape(bias_shape); - - auto *weight2 = - GraphGlobalMem::Global().template new_block( - anakin_bias_shape); - float *cpu_data2 = static_cast(weight2->h_tensor().mutable_data()); - std::copy_n(bias_data, bias_tensor.numel(), cpu_data2); - weight2->d_tensor().set_shape(anakin_bias_shape); - weight2->d_tensor().copy_from(weight2->h_tensor()); + auto weight2 = pblock_from_var(*b_v); this->engine_->AddOpAttr(op_name, "weight_2", *weight2); } } @@ -120,9 +116,39 @@ void FcBaseOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(mul, MulOpConverter<::anakin::saber::NV>); -REGISTER_CUDA_ANAKIN_OP_CONVERTER(fc, FcOpConverter<::anakin::saber::NV>); +using mul_nv_fp32 = + ::paddle::inference::anakin::MulOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using fc_nv_fp32 = + ::paddle::inference::anakin::FcOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using mul_nv_int8 = + ::paddle::inference::anakin::MulOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +using fc_nv_int8 = + ::paddle::inference::anakin::FcOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; + +REGISTER_CUDA_ANAKIN_OP_CONVERTER(mul, mul_nv_fp32); +REGISTER_CUDA_ANAKIN_OP_CONVERTER(fc, fc_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(mul, mul_nv_int8); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(fc, fc_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(mul, 
MulOpConverter<::anakin::saber::X86>); -REGISTER_CPU_ANAKIN_OP_CONVERTER(fc, FcOpConverter<::anakin::saber::X86>); +using mul_cpu_fp32 = + ::paddle::inference::anakin::MulOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using fc_cpu_fp32 = + ::paddle::inference::anakin::FcOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using mul_cpu_int8 = + ::paddle::inference::anakin::MulOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +using fc_cpu_int8 = + ::paddle::inference::anakin::FcOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; + +REGISTER_CPU_ANAKIN_OP_CONVERTER(mul, mul_cpu_fp32); +REGISTER_CPU_ANAKIN_OP_CONVERTER(fc, fc_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(mul, mul_cpu_int8); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(fc, fc_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/fc.h b/paddle/fluid/inference/anakin/convert/fc.h index 10808c315757b7..6fe65e3ecd4ec4 100644 --- a/paddle/fluid/inference/anakin/convert/fc.h +++ b/paddle/fluid/inference/anakin/convert/fc.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class FcBaseOpConverter : public AnakinOpConverter { +template +class FcBaseOpConverter : public AnakinOpConverter { public: FcBaseOpConverter() = default; @@ -33,15 +33,15 @@ class FcBaseOpConverter : public AnakinOpConverter { }; // with bias -template -class FcOpConverter : public FcBaseOpConverter { +template +class FcOpConverter : public FcBaseOpConverter { public: FcOpConverter() = default; }; // without bias -template -class MulOpConverter : public FcBaseOpConverter { +template +class MulOpConverter : public FcBaseOpConverter { public: MulOpConverter() = default; }; diff --git a/paddle/fluid/inference/anakin/convert/flatten.cc b/paddle/fluid/inference/anakin/convert/flatten.cc index a38dec25d831c7..7ef9e11b091ffd 100644 --- a/paddle/fluid/inference/anakin/convert/flatten.cc +++ b/paddle/fluid/inference/anakin/convert/flatten.cc @@ -21,8 +21,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void FlattenOpConverter::operator()( +template +void FlattenOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -46,8 +46,21 @@ void FlattenOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(flatten, - FlattenOpConverter<::anakin::saber::NV>); +using flatten_nv_fp32 = + ::paddle::inference::anakin::FlattenOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using flatten_nv_int8 = + ::paddle::inference::anakin::FlattenOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; + +REGISTER_CUDA_ANAKIN_OP_CONVERTER(flatten, flatten_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(flatten, flatten_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(flatten, - FlattenOpConverter<::anakin::saber::X86>); +using flatten_cpu_fp32 = + ::paddle::inference::anakin::FlattenOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using flatten_cpu_int8 = + ::paddle::inference::anakin::FlattenOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(flatten, flatten_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(flatten, flatten_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/flatten.h b/paddle/fluid/inference/anakin/convert/flatten.h index cd29b6e7d7384d..6e5e059927d4d3 100644 --- 
a/paddle/fluid/inference/anakin/convert/flatten.h +++ b/paddle/fluid/inference/anakin/convert/flatten.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class FlattenOpConverter : public AnakinOpConverter { +template +class FlattenOpConverter : public AnakinOpConverter { public: FlattenOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/helper.cc b/paddle/fluid/inference/anakin/convert/helper.cc new file mode 100644 index 00000000000000..7804619bf836d9 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/helper.cc @@ -0,0 +1,32 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +std::unique_ptr tensor_from_var( + const framework::Variable& var, const platform::Place& place) { + auto& src = var.Get(); + std::unique_ptr dst(new framework::LoDTensor()); + dst->Resize(src.dims()); + TensorCopySync((src), place, dst.get()); + return dst; +} + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/helper.h b/paddle/fluid/inference/anakin/convert/helper.h new file mode 100644 index 00000000000000..5581f7dd641c57 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/helper.h @@ -0,0 +1,88 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once +#include +#include +#include +#include + +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/variable.h" + +#include "framework/core/net/net.h" +#include "framework/core/types.h" +#include "framework/graph/graph.h" +#include "framework/graph/graph_global_mem.h" +#include "saber/saber_types.h" + +using anakin::saber::Shape; +using anakin::AK_FLOAT; +using anakin::PBlock; +using anakin::graph::GraphGlobalMem; + +namespace paddle { +namespace inference { +namespace anakin { + +std::unique_ptr tensor_from_var( + const framework::Variable& var, const platform::Place& place); +template +PBlock* pblock_from_tensor(const framework::LoDTensor& tensor, + std::vector shape) { + while (shape.size() < 4) { + shape.insert(shape.begin(), 1); + } + Shape anakin_shape(shape); + auto* weight = + GraphGlobalMem::Global().template new_block(anakin_shape); + float* cpu_data = static_cast(weight->h_tensor().mutable_data()); + std::copy_n(tensor.data(), tensor.numel(), cpu_data); + weight->d_tensor().set_shape(anakin_shape); + weight->d_tensor().copy_from(weight->h_tensor()); + return weight; +} + +template +PBlock* pblock_from_vector(const std::vector& vec, + std::vector shape_vec) { + while (shape_vec.size() < 4) { + shape_vec.insert(shape_vec.begin(), 1); + } + Shape shape(shape_vec); + auto* weight = + GraphGlobalMem::Global().template new_block(shape); + auto* weight_data = static_cast(weight->h_tensor().mutable_data()); + std::copy(std::begin(vec), std::end(vec), weight_data); + weight->d_tensor().set_shape(shape); + weight->d_tensor().copy_from(weight->h_tensor()); + return weight; +} + +template +PBlock* pblock_from_vector(const std::vector& vec) { + int size = vec.size(); + return pblock_from_vector(vec, std::vector({1, 1, 1, size})); +} + +template +PBlock* pblock_from_var(const framework::Variable& var) { + auto tensor = tensor_from_var(var, platform::CPUPlace()); + auto shape = framework::vectorize2int(tensor->dims()); + return pblock_from_tensor(*tensor, shape); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/im2sequence.cc b/paddle/fluid/inference/anakin/convert/im2sequence.cc index bd7e9b4b63c501..37f3f425a4fedd 100644 --- a/paddle/fluid/inference/anakin/convert/im2sequence.cc +++ b/paddle/fluid/inference/anakin/convert/im2sequence.cc @@ -23,8 +23,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void Im2SequenceConverter::operator()( +template +void Im2SequenceConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -55,5 +55,18 @@ void Im2SequenceConverter::operator()( } // namespace inference } // namespace paddle -REGISTER_CUDA_ANAKIN_OP_CONVERTER(im2sequence, - Im2SequenceConverter<::anakin::saber::NV>); +#ifdef PADDLE_WITH_CUDA +using im2sequence_nv_fp32 = ::paddle::inference::anakin::Im2SequenceConverter< + ::anakin::saber::NV, ::anakin::Precision::FP32>; +using im2sequence_nv_int8 = ::paddle::inference::anakin::Im2SequenceConverter< + ::anakin::saber::NV, ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(im2sequence, im2sequence_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(im2sequence, im2sequence_nv_int8); +#endif + +using im2sequence_cpu_fp32 = ::paddle::inference::anakin::Im2SequenceConverter< + ::anakin::saber::X86, ::anakin::Precision::FP32>; +using im2sequence_cpu_int8 = 
::paddle::inference::anakin::Im2SequenceConverter< + ::anakin::saber::X86, ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(im2sequence, im2sequence_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(im2sequence, im2sequence_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/im2sequence.h b/paddle/fluid/inference/anakin/convert/im2sequence.h index 97d1564b02817d..8241d4d6f9ce78 100644 --- a/paddle/fluid/inference/anakin/convert/im2sequence.h +++ b/paddle/fluid/inference/anakin/convert/im2sequence.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class Im2SequenceConverter : public AnakinOpConverter { +template +class Im2SequenceConverter : public AnakinOpConverter { public: Im2SequenceConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/op_converter.h b/paddle/fluid/inference/anakin/convert/op_converter.h index 71631a7745c9d2..6ff49c4a820ccb 100644 --- a/paddle/fluid/inference/anakin/convert/op_converter.h +++ b/paddle/fluid/inference/anakin/convert/op_converter.h @@ -32,9 +32,9 @@ namespace paddle { namespace inference { namespace anakin { -template +template class AnakinOpConverter { - using AnakinEngineT = AnakinEngine; + using AnakinEngineT = AnakinEngine; public: AnakinOpConverter() = default; @@ -96,6 +96,13 @@ class AnakinOpConverter { engine->Graph()->RegistVar(output); } engine->Freeze(); + // Add scale for tensor in int8 mode. + auto tensor_scales = engine->GetTensorScales(); + + for (auto &item : tensor_scales) { + engine->Graph()->SetVarScale(item.first, item.second); + } + for (auto &input : inputs) { if (parameters.count(input)) continue; std::vector input_shape; @@ -136,52 +143,78 @@ class AnakinOpConverter { AnakinEngineT *engine_{nullptr}; private: - std::unordered_map *> converters_; + std::unordered_map *> + converters_; framework::Scope *scope_{nullptr}; std::mutex mutex_; }; -template class AnakinOpConverter<::anakin::saber::NV>; -template class AnakinOpConverter<::anakin::saber::X86>; +template class AnakinOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +template class AnakinOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; + +template class AnakinOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +template class AnakinOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; } // namespace anakin } // namespace inference } // namespace paddle #define REGISTER_ANAKIN_OP_CONVERTER_BASE(op_type__, Converter__, \ - place_type__, place_class__) \ - struct anakin_##op_type__##_##place_type__##_converter \ + place_type__, place_class__, \ + precision_type__, precision_class__) \ + struct anakin_##op_type__##_##place_type__##_##precision_type__##_converter \ : public ::paddle::framework::Registrar { \ - anakin_##op_type__##_##place_type__##_converter() { \ + anakin_##op_type__##_##place_type__##_##precision_type__##_converter() { \ LOG(INFO) << "register convert " << #op_type__ << " "; \ ::paddle::inference::Registry< \ - ::paddle::inference::anakin::AnakinOpConverter>:: \ - Global() \ - .Register<::paddle::inference::anakin::Converter__>(#op_type__); \ + ::paddle::inference::anakin::AnakinOpConverter< \ + place_class__, precision_class__>>::Global() \ + .Register(#op_type__); \ } \ }; \ - anakin_##op_type__##_##place_type__##_converter \ - anakin_##op_type__##_##place_type__##_converter__; \ - int TouchConverterRegister_anakin_##op_type__##_##place_type__() { \ - anakin_##op_type__##_##place_type__##_converter__.Touch(); \ + 
anakin_##op_type__##_##place_type__##_##precision_type__##_converter \ + anakin_##op_type__##_##place_type__##_##precision_type__##_converter__; \ + int Touch_anakin_##op_type__##_##place_type__##_##precision_type__() { \ + anakin_##op_type__##_##place_type__##_##precision_type__##_converter__ \ + .Touch(); \ return 0; \ } #define REGISTER_CUDA_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ REGISTER_ANAKIN_OP_CONVERTER_BASE(op_type__, Converter__, CUDA, \ - ::anakin::saber::NV) + ::anakin::saber::NV, FP32, \ + ::anakin::Precision::FP32) + +#define REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ + REGISTER_ANAKIN_OP_CONVERTER_BASE(op_type__, Converter__, CUDA, \ + ::anakin::saber::NV, INT8, \ + ::anakin::Precision::INT8) #define REGISTER_CPU_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ REGISTER_ANAKIN_OP_CONVERTER_BASE(op_type__, Converter__, CPU, \ - ::anakin::saber::X86) + ::anakin::saber::X86, FP32, \ + ::anakin::Precision::FP32) + +#define REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ + REGISTER_ANAKIN_OP_CONVERTER_BASE(op_type__, Converter__, CPU, \ + ::anakin::saber::X86, INT8, \ + ::anakin::Precision::INT8) -#define USE_ANAKIN_CONVERTER_BASE(op_type__, place_type__) \ - extern int TouchConverterRegister_anakin_##op_type__##_##place_type__(); \ - int use_op_converter_anakin_##op_type__##_##place_type__ \ - __attribute__((unused)) = \ - TouchConverterRegister_anakin_##op_type__##_##place_type__(); +#define USE_ANAKIN_CONVERTER_BASE(op_type__, place_type__, precision_type__) \ + extern int Touch_anakin_##op_type__##_##place_type__##_##precision_type__(); \ + int use_converter_anakin_##op_type__##_##place_type__##_##precision_type__ \ + __attribute__((unused)) = \ + Touch_anakin_##op_type__##_##place_type__##_##precision_type__(); #define USE_ANAKIN_CONVERTER(op_type__) \ - USE_ANAKIN_CONVERTER_BASE(op_type__, CUDA) + USE_ANAKIN_CONVERTER_BASE(op_type__, CUDA, FP32) +#define USE_INT8_ANAKIN_CONVERTER(op_type__) \ + USE_ANAKIN_CONVERTER_BASE(op_type__, CUDA, INT8) #define USE_CPU_ANAKIN_CONVERTER(op_type__) \ - USE_ANAKIN_CONVERTER_BASE(op_type__, CPU) + USE_ANAKIN_CONVERTER_BASE(op_type__, CPU, FP32) +#define USE_CPU_INT8_ANAKIN_CONVERTER(op_type__) \ + USE_ANAKIN_CONVERTER_BASE(op_type__, CPU, INT8) diff --git a/paddle/fluid/inference/anakin/convert/pool2d.cc b/paddle/fluid/inference/anakin/convert/pool2d.cc index d0206a5bf9b4eb..436741b43b7058 100644 --- a/paddle/fluid/inference/anakin/convert/pool2d.cc +++ b/paddle/fluid/inference/anakin/convert/pool2d.cc @@ -23,8 +23,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void Pool2dOpConverter::operator()( +template +void Pool2dOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -72,8 +72,21 @@ void Pool2dOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(pool2d, - Pool2dOpConverter<::anakin::saber::NV>); +using pool2d_nv_float32 = + ::paddle::inference::anakin::Pool2dOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using pool2d_nv_int8 = + ::paddle::inference::anakin::Pool2dOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(pool2d, pool2d_nv_float32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(pool2d, pool2d_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(pool2d, - Pool2dOpConverter<::anakin::saber::X86>); + +using 
pool2d_cpu_float32 = + ::paddle::inference::anakin::Pool2dOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using pool2d_cpu_int8 = + ::paddle::inference::anakin::Pool2dOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(pool2d, pool2d_cpu_float32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(pool2d, pool2d_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/pool2d.h b/paddle/fluid/inference/anakin/convert/pool2d.h index 0f85ec14b33dd6..7a06ff1b660a4c 100644 --- a/paddle/fluid/inference/anakin/convert/pool2d.h +++ b/paddle/fluid/inference/anakin/convert/pool2d.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class Pool2dOpConverter : public AnakinOpConverter { +template +class Pool2dOpConverter : public AnakinOpConverter { public: Pool2dOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/relu.cc b/paddle/fluid/inference/anakin/convert/relu.cc index 71de3113cba1da..6d456ccfdcd1a1 100644 --- a/paddle/fluid/inference/anakin/convert/relu.cc +++ b/paddle/fluid/inference/anakin/convert/relu.cc @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void ReluOpConverter::operator()( +template +void ReluOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -36,8 +36,8 @@ void ReluOpConverter::operator()( this->engine_->AddOpAttr(op_name, "alpha", 0); } -template -void LeakyReluOpConverter::operator()( +template +void LeakyReluOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -58,10 +58,35 @@ void LeakyReluOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(relu, ReluOpConverter<::anakin::saber::NV>); -REGISTER_CUDA_ANAKIN_OP_CONVERTER(leaky_relu, - LeakyReluOpConverter<::anakin::saber::NV>); +using relu_nv_fp32 = + ::paddle::inference::anakin::ReluOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using leaky_nv_fp32 = ::paddle::inference::anakin::LeakyReluOpConverter< + ::anakin::saber::NV, ::anakin::Precision::FP32>; +using relu_nv_int8 = + ::paddle::inference::anakin::ReluOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +using leaky_nv_int8 = ::paddle::inference::anakin::LeakyReluOpConverter< + ::anakin::saber::NV, ::anakin::Precision::INT8>; + +REGISTER_CUDA_ANAKIN_OP_CONVERTER(relu, relu_nv_fp32); +REGISTER_CUDA_ANAKIN_OP_CONVERTER(leaky_relu, leaky_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(relu, relu_nv_int8); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(leaky_relu, leaky_nv_int8); + #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(relu, ReluOpConverter<::anakin::saber::X86>); -REGISTER_CPU_ANAKIN_OP_CONVERTER(leaky_relu, - LeakyReluOpConverter<::anakin::saber::X86>); + +using relu_cpu_fp32 = + ::paddle::inference::anakin::ReluOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using leaky_cpu_fp32 = ::paddle::inference::anakin::LeakyReluOpConverter< + ::anakin::saber::X86, ::anakin::Precision::FP32>; +using relu_cpu_int8 = + ::paddle::inference::anakin::ReluOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +using leaky_cpu_int8 = ::paddle::inference::anakin::LeakyReluOpConverter< + ::anakin::saber::X86, ::anakin::Precision::INT8>; 
+REGISTER_CPU_ANAKIN_OP_CONVERTER(relu, relu_cpu_fp32); +REGISTER_CPU_ANAKIN_OP_CONVERTER(leaky_relu, leaky_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(relu, relu_cpu_int8); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(leaky_relu, leaky_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/relu.h b/paddle/fluid/inference/anakin/convert/relu.h index 74222a7ea1bb93..f366f05a94ae93 100644 --- a/paddle/fluid/inference/anakin/convert/relu.h +++ b/paddle/fluid/inference/anakin/convert/relu.h @@ -22,8 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class ReluOpConverter : public AnakinOpConverter { +template +class ReluOpConverter : public AnakinOpConverter { public: ReluOpConverter() = default; @@ -34,8 +34,8 @@ class ReluOpConverter : public AnakinOpConverter { virtual ~ReluOpConverter() {} }; -template -class LeakyReluOpConverter : public AnakinOpConverter { +template +class LeakyReluOpConverter : public AnakinOpConverter { public: LeakyReluOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/reshape.cc b/paddle/fluid/inference/anakin/convert/reshape.cc index a6696e8e81b72c..b7b47e30b1c814 100644 --- a/paddle/fluid/inference/anakin/convert/reshape.cc +++ b/paddle/fluid/inference/anakin/convert/reshape.cc @@ -21,8 +21,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void ReshapeOpConverter::operator()( +template +void ReshapeOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -47,9 +47,21 @@ void ReshapeOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(reshape, - ReshapeOpConverter<::anakin::saber::NV>); +using reshape_nv_fp32 = + ::paddle::inference::anakin::ReshapeOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using reshape_nv_int8 = + ::paddle::inference::anakin::ReshapeOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(reshape, reshape_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(reshape, reshape_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(reshape, - ReshapeOpConverter<::anakin::saber::X86>); +using reshape_cpu_fp32 = + ::paddle::inference::anakin::ReshapeOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using reshape_cpu_int8 = + ::paddle::inference::anakin::ReshapeOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(reshape, reshape_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(reshape, reshape_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/reshape.h b/paddle/fluid/inference/anakin/convert/reshape.h index bd0fd08c5cb913..88de2641e60f1a 100644 --- a/paddle/fluid/inference/anakin/convert/reshape.h +++ b/paddle/fluid/inference/anakin/convert/reshape.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class ReshapeOpConverter : public AnakinOpConverter { +template +class ReshapeOpConverter : public AnakinOpConverter { public: ReshapeOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/roi_align.cc b/paddle/fluid/inference/anakin/convert/roi_align.cc index 152578b50fec38..68d3bffd89d433 100644 --- a/paddle/fluid/inference/anakin/convert/roi_align.cc +++ b/paddle/fluid/inference/anakin/convert/roi_align.cc @@ -16,17 +16,12 @@ #include #include -using anakin::graph::GraphGlobalMem; -using 
anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; - namespace paddle { namespace inference { namespace anakin { -template -void RoiAlignOpConverter::operator()( +template +void RoiAlignOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -57,8 +52,21 @@ void RoiAlignOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(roi_align, - RoiAlignOpConverter<::anakin::saber::NV>); +using roi_align_nv_fp32 = + ::paddle::inference::anakin::RoiAlignOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using roi_align_nv_int8 = + ::paddle::inference::anakin::RoiAlignOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(roi_align, roi_align_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(roi_align, roi_align_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(roi_align, - RoiAlignOpConverter<::anakin::saber::X86>); + +using roi_align_cpu_fp32 = + ::paddle::inference::anakin::RoiAlignOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using roi_align_cpu_int8 = + ::paddle::inference::anakin::RoiAlignOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(roi_align, roi_align_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(roi_align, roi_align_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/roi_align.h b/paddle/fluid/inference/anakin/convert/roi_align.h index 93c28f3e055629..8b5d23a01676f0 100644 --- a/paddle/fluid/inference/anakin/convert/roi_align.h +++ b/paddle/fluid/inference/anakin/convert/roi_align.h @@ -22,8 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class RoiAlignOpConverter : public AnakinOpConverter { +template +class RoiAlignOpConverter : public AnakinOpConverter { public: RoiAlignOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/scale.cc b/paddle/fluid/inference/anakin/convert/scale.cc index d72f9a5fa0c28d..cdfdf86a9747e1 100644 --- a/paddle/fluid/inference/anakin/convert/scale.cc +++ b/paddle/fluid/inference/anakin/convert/scale.cc @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void ScaleOpConverter::operator()( +template +void ScaleOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -49,4 +49,22 @@ void ScaleOpConverter::operator()( } // namespace inference } // namespace paddle -REGISTER_CUDA_ANAKIN_OP_CONVERTER(scale, ScaleOpConverter<::anakin::saber::NV>); +#ifdef PADDLE_WITH_CUDA +using scale_nv_fp32 = + ::paddle::inference::anakin::ScaleOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using scale_nv_int8 = + ::paddle::inference::anakin::ScaleOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(scale, scale_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(scale, scale_nv_int8); +#endif + +using scale_cpu_fp32 = + ::paddle::inference::anakin::ScaleOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using scale_cpu_int8 = + ::paddle::inference::anakin::ScaleOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(scale, scale_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(scale, scale_cpu_int8); 
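Note: the int8 branches added to the conv2d, conv2d_fusion and fc converters above all follow the same convention: the fake-quantized weights are expected to already lie in [-128, 127], they are copied into an ::anakin::AK_INT8 block, and the calibration scales are divided by the int8 range (127) before being handed to the engine via SetWeightsScale()/AddTensorScale(). The following stand-alone sketch is illustrative only and is not part of this patch; the helper name quantize_to_int8 and the example scale values are assumptions made purely for demonstration.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative only: mirrors the range check and cast performed in the
// converters' int8 branches. The real converters copy the result into an
// ::anakin::AK_INT8 PBlock and register weight/input scales divided by 127.
static std::vector<int8_t> quantize_to_int8(const std::vector<float> &w) {
  std::vector<int8_t> out;
  out.reserve(w.size());
  for (float v : w) {
    // The converters enforce that fake-quantized weights already lie in
    // [-128, 127]; an out-of-range value is a hard error (PADDLE_ENFORCE).
    assert(v >= -128.f && v <= 127.f);
    out.push_back(static_cast<int8_t>(v));
  }
  return out;
}

int main() {
  const float int8_range = 127.f;
  // input_scale / weight_scale would come from the op attributes written by
  // the quantization passes; the values below are made up for illustration.
  float input_scale = 0.5f, weight_scale = 2.0f;
  std::vector<float> fake_quant_weights = {-128.f, -3.f, 0.f, 5.f, 127.f};
  std::vector<int8_t> int8_weights = quantize_to_int8(fake_quant_weights);
  // Scales handed to the engine are normalized by the int8 range, matching
  // SetWeightsScale(op, {weight_scale / int8_range}, false) and
  // AddTensorScale(input, input_scale / int8_range) in the converters above.
  float engine_weight_scale = weight_scale / int8_range;
  float engine_input_scale = input_scale / int8_range;
  (void)engine_weight_scale;
  (void)engine_input_scale;
  return int8_weights.size() == fake_quant_weights.size() ? 0 : 1;
}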
diff --git a/paddle/fluid/inference/anakin/convert/scale.h b/paddle/fluid/inference/anakin/convert/scale.h index 92d936b526226a..f19a9201934971 100644 --- a/paddle/fluid/inference/anakin/convert/scale.h +++ b/paddle/fluid/inference/anakin/convert/scale.h @@ -22,8 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class ScaleOpConverter : public AnakinOpConverter { +template +class ScaleOpConverter : public AnakinOpConverter { public: ScaleOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/softmax.cc b/paddle/fluid/inference/anakin/convert/softmax.cc index 851dafa8bdf63d..eb50e17e55f117 100644 --- a/paddle/fluid/inference/anakin/convert/softmax.cc +++ b/paddle/fluid/inference/anakin/convert/softmax.cc @@ -18,8 +18,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void SoftMaxOpConverter::operator()( +template +void SoftMaxOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -45,9 +45,22 @@ void SoftMaxOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(softmax, - SoftMaxOpConverter<::anakin::saber::NV>); +using sm_nv_fp32 = + ::paddle::inference::anakin::SoftMaxOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using sm_nv_int8 = + ::paddle::inference::anakin::SoftMaxOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; + +REGISTER_CUDA_ANAKIN_OP_CONVERTER(softmax, sm_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(softmax, sm_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(softmax, - SoftMaxOpConverter<::anakin::saber::X86>); +using sm_cpu_fp32 = + ::paddle::inference::anakin::SoftMaxOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using sm_cpu_int8 = + ::paddle::inference::anakin::SoftMaxOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(softmax, sm_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(softmax, sm_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/softmax.h b/paddle/fluid/inference/anakin/convert/softmax.h index c2421f9eb9d2e5..dc431b5b867a26 100644 --- a/paddle/fluid/inference/anakin/convert/softmax.h +++ b/paddle/fluid/inference/anakin/convert/softmax.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class SoftMaxOpConverter : public AnakinOpConverter { +template +class SoftMaxOpConverter : public AnakinOpConverter { public: SoftMaxOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/split.cc b/paddle/fluid/inference/anakin/convert/split.cc index f99233e78b59fc..b84860220fbe03 100644 --- a/paddle/fluid/inference/anakin/convert/split.cc +++ b/paddle/fluid/inference/anakin/convert/split.cc @@ -22,8 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void SplitOpConverter::operator()( +template +void SplitOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -56,7 +56,22 @@ void SplitOpConverter::operator()( } // namespace inference } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(split, SplitOpConverter<::anakin::saber::NV>); +using split_nv_fp32 = + ::paddle::inference::anakin::SplitOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; 
+using split_nv_int8 = + ::paddle::inference::anakin::SplitOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(split, split_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(split, split_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(split, SplitOpConverter<::anakin::saber::X86>); +using split_cpu_fp32 = + ::paddle::inference::anakin::SplitOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using split_cpu_int8 = + ::paddle::inference::anakin::SplitOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; + +REGISTER_CPU_ANAKIN_OP_CONVERTER(split, split_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(split, split_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/split.h b/paddle/fluid/inference/anakin/convert/split.h index 989d7acd500e9f..819915315d90a5 100644 --- a/paddle/fluid/inference/anakin/convert/split.h +++ b/paddle/fluid/inference/anakin/convert/split.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class SplitOpConverter : public AnakinOpConverter { +template +class SplitOpConverter : public AnakinOpConverter { public: SplitOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/sum.cc b/paddle/fluid/inference/anakin/convert/sum.cc index 7fc9d764078849..2bc4d124c905e5 100644 --- a/paddle/fluid/inference/anakin/convert/sum.cc +++ b/paddle/fluid/inference/anakin/convert/sum.cc @@ -23,11 +23,10 @@ namespace paddle { namespace inference { namespace anakin { -template -void SumOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void SumOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 2); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -49,6 +48,21 @@ void SumOpConverter::operator()(const framework::proto::OpDesc &op, } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(sum, SumOpConverter<::anakin::saber::NV>); +using sum_nv_fp32 = + ::paddle::inference::anakin::SumOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +using sum_nv_int8 = + ::paddle::inference::anakin::SumOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(sum, sum_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(sum, sum_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(sum, SumOpConverter<::anakin::saber::X86>); + +using sum_cpu_fp32 = + ::paddle::inference::anakin::SumOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +using sum_cpu_int8 = + ::paddle::inference::anakin::SumOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(sum, sum_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(sum, sum_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/sum.h b/paddle/fluid/inference/anakin/convert/sum.h index 27c15a82ebd471..aefc64c623e916 100644 --- a/paddle/fluid/inference/anakin/convert/sum.h +++ b/paddle/fluid/inference/anakin/convert/sum.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class SumOpConverter : public AnakinOpConverter { +template +class SumOpConverter : public AnakinOpConverter { public: SumOpConverter() = default; diff --git 
a/paddle/fluid/inference/anakin/convert/test_activation_op.cc b/paddle/fluid/inference/anakin/convert/test_activation_op.cc index 18b8b6f3b63bc6..67d3222d985b5d 100644 --- a/paddle/fluid/inference/anakin/convert/test_activation_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_activation_op.cc @@ -27,8 +27,8 @@ static void test_activation_op(const std::string& op_type, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("act-X", {10, 6, 1, 1}); validator.DeclOutputVar("act-Out", {10, 6, 1, 1}); framework::OpDesc desc; @@ -57,6 +57,7 @@ TEST(tanh_op, gpu) { } #endif +/* TEST(sigm_op, cpu) { platform::CPUPlace cpu_place; platform::CPUDeviceContext ctx(cpu_place); @@ -68,6 +69,7 @@ TEST(tanh_op, cpu) { platform::CPUDeviceContext ctx(cpu_place); test_activation_op<::anakin::saber::X86>("tanh", ctx, false); } +*/ } // namespace anakin } // namespace inference diff --git a/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc b/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc index 123f93370b82a9..f6399387aa264d 100644 --- a/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc @@ -28,8 +28,8 @@ void test_affine_channel_op(const platform::DeviceContext& context, std::unordered_set parameters({"scale", "bias"}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("x", {1, 3, 5, 2}); validator.DeclOutputVar("out", {1, 3, 5, 2}); validator.DeclParamVar("scale", {3}); diff --git a/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc b/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc index 6a6675b6abf5d1..c008ef1bd5ee25 100644 --- a/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc @@ -25,8 +25,8 @@ void test_batchnorm_op(const platform::DeviceContext& context, bool use_gpu) { {"batch_norm_scale", "batch_norm_bias", "batch_norm_mean", "batch_norm_variance"}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); std::vector param_shape{2}; validator.DeclInputVar("batch_norm_X", {1, 2, 5, 5}); diff --git a/paddle/fluid/inference/anakin/convert/test_concat_op.cc b/paddle/fluid/inference/anakin/convert/test_concat_op.cc index 4ea3305e4664f0..42dfbeb5cdc406 100644 --- a/paddle/fluid/inference/anakin/convert/test_concat_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_concat_op.cc @@ -25,8 +25,8 @@ template void test_concat_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters({""}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("concat_x1", {1, 2, 1, 1}); validator.DeclInputVar("concat_x2", {1, 3, 1, 1}); validator.DeclInputVar("concat_x3", {1, 1, 1, 1}); diff --git a/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc b/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc index fa1b319bc1c65c..e95e11c4f96881 100644 --- 
a/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc @@ -25,8 +25,8 @@ template void test_conv2d_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters({"conv2d-Y"}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("conv2d-X", {1, 3, 3, 3}); validator.DeclParamVar("conv2d-Y", {4, 3, 1, 1}); validator.DeclOutputVar("conv2d-Out", {1, 4, 3, 3}); diff --git a/paddle/fluid/inference/anakin/convert/test_dropout_op.cc b/paddle/fluid/inference/anakin/convert/test_dropout_op.cc index a252dc74c0bf4f..ae27e27ded5d92 100644 --- a/paddle/fluid/inference/anakin/convert/test_dropout_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_dropout_op.cc @@ -25,8 +25,8 @@ template void test_dropout_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("x", {1, 1, 2, 2}); validator.DeclOutputVar("out", {1, 1, 2, 2}); validator.DeclOutputVar("mask", {1, 1, 2, 2}); diff --git a/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc b/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc index ee1bedcfb25eba..bff75294908aab 100644 --- a/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc @@ -27,8 +27,8 @@ static void test_elementwise_op(const std::string& op_type, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("x", {1, 1, 2, 2}); validator.DeclInputVar("y", {1, 1, 2, 2}); validator.DeclOutputVar("out", {1, 1, 2, 2}); diff --git a/paddle/fluid/inference/anakin/convert/test_fc_op.cc b/paddle/fluid/inference/anakin/convert/test_fc_op.cc index 5510008d3c4f2e..a24c809c022132 100644 --- a/paddle/fluid/inference/anakin/convert/test_fc_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_fc_op.cc @@ -25,8 +25,8 @@ void test_mul_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters({"mul_y"}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("mul_x", {1, 1, 2, 2}); validator.DeclParamVar("mul_y", {4, 2}); validator.DeclOutputVar("mul_out", {1, 2}); diff --git a/paddle/fluid/inference/anakin/convert/test_flatten_op.cc b/paddle/fluid/inference/anakin/convert/test_flatten_op.cc index 86bc1d810f8943..5765f5ebd1f2a0 100644 --- a/paddle/fluid/inference/anakin/convert/test_flatten_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_flatten_op.cc @@ -24,8 +24,8 @@ template void test_flatten_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("flatten-X", {3, 10, 10, 4}); validator.DeclOutputVar("flatten-Out", {3, 400, 1, 1}); 
framework::OpDesc desc; diff --git a/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc b/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc index b1be7f93c67c36..90503b1fbba81e 100644 --- a/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc @@ -25,8 +25,8 @@ void test_pool2d(const platform::DeviceContext& context, bool use_gpu, std::string pool_type = "max") { framework::Scope scope; std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); // The ITensor's Dims should not contain the batch size. // So, the ITensor's Dims of input and output should be C * H * W. diff --git a/paddle/fluid/inference/anakin/convert/test_relu_op.cc b/paddle/fluid/inference/anakin/convert/test_relu_op.cc index 369f1920f24943..3f224796519650 100644 --- a/paddle/fluid/inference/anakin/convert/test_relu_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_relu_op.cc @@ -27,8 +27,8 @@ static void test_activation_op(const std::string& op_type, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("act-X", {10, 6, 1, 1}); validator.DeclOutputVar("act-Out", {10, 6, 1, 1}); framework::OpDesc desc; @@ -60,20 +60,6 @@ TEST(leaky_relu_op, gpu) { } #endif -/* seems bug here -TEST(relu_op, cpu) { - platform::CPUPlace cpu_place; - platform::CPUDeviceContext ctx(cpu_place); - test_activation_op<::anakin::saber::X86>("relu", ctx, false); -} - -TEST(leaky_relu_op, cpu) { - platform::CPUPlace cpu_place; - platform::CPUDeviceContext ctx(cpu_place); - test_activation_op<::anakin::saber::X86>("leaky_relu", ctx, false); -} -*/ - } // namespace anakin } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/test_reshape_op.cc b/paddle/fluid/inference/anakin/convert/test_reshape_op.cc index 3facdbe9c6944d..e102bd3ac3ea0d 100644 --- a/paddle/fluid/inference/anakin/convert/test_reshape_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_reshape_op.cc @@ -24,8 +24,8 @@ template void test_reshape1_op(const platform::DeviceContext& context, bool use_gpu) { framework::Scope scope; std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); // validator.DeclInputVar("reshape-X", {2, 3, 3, 1}); // validator.DeclOutputVar("reshape-Out", {3, 2, 1, 3}); @@ -49,8 +49,8 @@ template void test_reshape2_op(const platform::DeviceContext& context, bool use_gpu) { framework::Scope scope; std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("reshape-X", {1, 2, 4}); validator.DeclOutputVar("reshape-Out", {1, 4, 2}); diff --git a/paddle/fluid/inference/anakin/convert/test_softmax_op.cc b/paddle/fluid/inference/anakin/convert/test_softmax_op.cc index e15d19135b44cf..de0b18fdbfd5f7 100644 --- a/paddle/fluid/inference/anakin/convert/test_softmax_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_softmax_op.cc @@ -24,8 +24,8 @@ template void test_softmax_op(const platform::DeviceContext& context, bool use_gpu) { framework::Scope scope; 
std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("softmax-X", {1, 10, 2}); validator.DeclOutputVar("softmax-Out", {1, 10, 2}); diff --git a/paddle/fluid/inference/anakin/convert/test_split_op.cc b/paddle/fluid/inference/anakin/convert/test_split_op.cc index 7131b07558d1eb..9a42ffd853bb07 100644 --- a/paddle/fluid/inference/anakin/convert/test_split_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_split_op.cc @@ -27,8 +27,8 @@ void AnakinSliceTest(const platform::DeviceContext &context, bool use_gpu, const std::vector §ions) { std::unordered_set parameters({""}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("split_input", in_shape); std::vector output_vars; diff --git a/paddle/fluid/inference/anakin/convert/test_sum_op.cc b/paddle/fluid/inference/anakin/convert/test_sum_op.cc index 8714890666c298..65f67ebd129893 100644 --- a/paddle/fluid/inference/anakin/convert/test_sum_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_sum_op.cc @@ -26,8 +26,8 @@ template static void test_sum_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("sum_x1", {1, 2, 1, 2}); validator.DeclInputVar("sum_x2", {1, 2, 1, 2}); validator.DeclOutputVar("sum_out", {1, 2, 1, 2}); diff --git a/paddle/fluid/inference/anakin/convert/test_transpose_op.cc b/paddle/fluid/inference/anakin/convert/test_transpose_op.cc index 6b2f1ed1566d5c..51b69dfbb08b73 100644 --- a/paddle/fluid/inference/anakin/convert/test_transpose_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_transpose_op.cc @@ -24,8 +24,8 @@ template void test_transpose1_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("transpose-X", {2, 3, 4, 5}); validator.DeclOutputVar("transpose-Out", {4, 2, 5, 3}); @@ -47,8 +47,8 @@ template void test_transpose2_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope, context, - use_gpu); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("transpose-X", {3, 4, 5}); validator.DeclOutputVar("transpose-Out", {3, 5, 4}); diff --git a/paddle/fluid/inference/anakin/convert/transpose.cc b/paddle/fluid/inference/anakin/convert/transpose.cc index cffc526065f8c8..849bfc9ea3e490 100644 --- a/paddle/fluid/inference/anakin/convert/transpose.cc +++ b/paddle/fluid/inference/anakin/convert/transpose.cc @@ -23,8 +23,8 @@ namespace paddle { namespace inference { namespace anakin { -template -void TransposeOpConverter::operator()( +template +void TransposeOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -50,9 +50,17 @@ void 
TransposeOpConverter::operator()( } // namespace paddle #ifdef PADDLE_WITH_CUDA -REGISTER_CUDA_ANAKIN_OP_CONVERTER(transpose, - TransposeOpConverter<::anakin::saber::NV>); +using transpose_nv_fp32 = ::paddle::inference::anakin::TransposeOpConverter< + ::anakin::saber::NV, ::anakin::Precision::FP32>; +using transpose_nv_int8 = ::paddle::inference::anakin::TransposeOpConverter< + ::anakin::saber::NV, ::anakin::Precision::INT8>; +REGISTER_CUDA_ANAKIN_OP_CONVERTER(transpose, transpose_nv_fp32); +REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(transpose, transpose_nv_int8); #endif -REGISTER_CPU_ANAKIN_OP_CONVERTER(transpose, - TransposeOpConverter<::anakin::saber::X86>); +using transpose_cpu_fp32 = ::paddle::inference::anakin::TransposeOpConverter< + ::anakin::saber::X86, ::anakin::Precision::FP32>; +using transpose_cpu_int8 = ::paddle::inference::anakin::TransposeOpConverter< + ::anakin::saber::X86, ::anakin::Precision::INT8>; +REGISTER_CPU_ANAKIN_OP_CONVERTER(transpose, transpose_cpu_fp32); +REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(transpose, transpose_cpu_int8); diff --git a/paddle/fluid/inference/anakin/convert/transpose.h b/paddle/fluid/inference/anakin/convert/transpose.h index 54090468ae13c6..b7b0a0f209e7d6 100644 --- a/paddle/fluid/inference/anakin/convert/transpose.h +++ b/paddle/fluid/inference/anakin/convert/transpose.h @@ -20,8 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -template -class TransposeOpConverter : public AnakinOpConverter { +template +class TransposeOpConverter : public AnakinOpConverter { public: TransposeOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/ut_helper.h b/paddle/fluid/inference/anakin/convert/ut_helper.h index 140a33a7cbb6fe..2f8f953892c390 100644 --- a/paddle/fluid/inference/anakin/convert/ut_helper.h +++ b/paddle/fluid/inference/anakin/convert/ut_helper.h @@ -61,7 +61,7 @@ void RandomizeTensor(framework::LoDTensor* tensor, auto* temp_data = temp_tensor.mutable_data(cpu_place); for (size_t i = 0; i < num_elements; i++) { - *(temp_data + i) = random(-128., 128.); + *(temp_data + i) = random(0., 1.); } TensorCopySync(temp_tensor, place, tensor); @@ -72,9 +72,9 @@ void RandomizeTensor(framework::LoDTensor* tensor, * anakin * layer. */ -template +template class AnakinConvertValidation { - using AnakinNvEngineT = AnakinEngine; + using AnakinNvEngineT = AnakinEngine; public: AnakinConvertValidation() = delete; @@ -84,7 +84,7 @@ class AnakinConvertValidation { const platform::DeviceContext& ctx, bool use_gpu = true) : parameters_(parameters), scope_(scope), ctx_(ctx), use_gpu_(use_gpu) { - engine_.reset(new AnakinEngine(true)); + engine_.reset(new AnakinEngine(true)); } // Declare a Variable as input with random initialization. @@ -127,7 +127,7 @@ class AnakinConvertValidation { // should init anakin engine here. 
auto& block_desc = program_desc_.Block(framework::kRootBlockIndex); - Singleton>::Global().ConvertOp( + Singleton>::Global().ConvertOp( desc, block_desc, parameters_, *scope_, engine_.get(), true /*test_mode*/); engine_->Freeze(); @@ -213,8 +213,15 @@ class AnakinConvertValidation { bool use_gpu_{true}; }; -template class AnakinConvertValidation<::anakin::saber::NV>; -template class AnakinConvertValidation<::anakin::saber::X86>; +template class AnakinConvertValidation<::anakin::saber::NV, + ::anakin::Precision::FP32>; +template class AnakinConvertValidation<::anakin::saber::X86, + ::anakin::Precision::FP32>; + +template class AnakinConvertValidation<::anakin::saber::NV, + ::anakin::Precision::INT8>; +template class AnakinConvertValidation<::anakin::saber::X86, + ::anakin::Precision::INT8>; } // namespace anakin } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/engine.cc b/paddle/fluid/inference/anakin/engine.cc index 17e661222433bb..90bc9c2514c3c7 100644 --- a/paddle/fluid/inference/anakin/engine.cc +++ b/paddle/fluid/inference/anakin/engine.cc @@ -172,11 +172,20 @@ AnakinEngine::Clone() { #ifdef PADDLE_WITH_CUDA template class AnakinEngine<::anakin::saber::NV, ::anakin::Precision::FP32>; -template class AnakinEngineManager<::anakin::saber::NV>; +template class AnakinEngineManager<::anakin::saber::NV, + ::anakin::Precision::FP32>; + +template class AnakinEngine<::anakin::saber::NV, ::anakin::Precision::INT8>; +template class AnakinEngineManager<::anakin::saber::NV, + ::anakin::Precision::INT8>; #endif template class AnakinEngine<::anakin::saber::X86, ::anakin::Precision::FP32>; -template class AnakinEngineManager<::anakin::saber::X86>; +template class AnakinEngineManager<::anakin::saber::X86, + ::anakin::Precision::FP32>; +template class AnakinEngine<::anakin::saber::X86, ::anakin::Precision::INT8>; +template class AnakinEngineManager<::anakin::saber::X86, + ::anakin::Precision::INT8>; // template class AnakinEngine<::anakin::saber::X86, ::anakin::Precision::FP32>; } // namespace anakin diff --git a/paddle/fluid/inference/anakin/engine.h b/paddle/fluid/inference/anakin/engine.h index 215c8a6c6146a2..ade15537db838f 100644 --- a/paddle/fluid/inference/anakin/engine.h +++ b/paddle/fluid/inference/anakin/engine.h @@ -93,6 +93,12 @@ class AnakinEngine { void Save(std::string path) { graph_->save(path); } bool IsInit() { return initialized_; } int GetDevice() { return device_; } + void AddTensorScale(const std::string &tensor_name, float scale) { + tensor_scales_[tensor_name] = scale; + } + std::unordered_map GetTensorScales() { + return tensor_scales_; + } void Execute(const std::map &inputs, const std::map &outputs); #ifdef PADDLE_WITH_CUDA @@ -112,11 +118,12 @@ class AnakinEngine { std::unique_ptr graph_; std::unique_ptr net_; std::vector program_inputs_; + std::unordered_map tensor_scales_; }; -template +template class AnakinEngineManager { - using AnakinEngineT = AnakinEngine; + using AnakinEngineT = AnakinEngine; public: bool HasEngine(const std::string &name) const { @@ -132,7 +139,7 @@ class AnakinEngineManager { std::vector program_inputs, std::string engine_name) { std::unique_lock lk(mut_); - auto *p = new AnakinEngine( + auto *p = new AnakinEngine( need_summary, device, max_batch_size, max_input_shape, program_inputs); engines_[engine_name].reset(p); return p; diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index 37b7583fde29cc..0e6374201f4623 100644 --- 
a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -169,7 +169,13 @@ struct Argument { anakin_max_shape_t); DECL_ARGUMENT_FIELD(anakin_max_batch_size, AnakinMaxBatchSize, int); DECL_ARGUMENT_FIELD(anakin_min_subgraph_size, AnakinMinSubgraphSize, int); + DECL_ARGUMENT_FIELD(anakin_precision_mode, AnakinPrecisionMode, + AnalysisConfig::Precision); DECL_ARGUMENT_FIELD(use_anakin, UseAnakin, bool); + DECL_ARGUMENT_FIELD(anakin_passes_filter, AnakinPassesFilter, + std::vector); + DECL_ARGUMENT_FIELD(anakin_ops_filter, AnakinOpsFilter, + std::vector); // Memory optimized related. DECL_ARGUMENT_FIELD(enable_memory_optim, EnableMemoryOptim, bool); diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc b/paddle/fluid/inference/analysis/ir_pass_manager.cc index bbc3938969a6d4..25db3346cfffd6 100644 --- a/paddle/fluid/inference/analysis/ir_pass_manager.cc +++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc @@ -123,6 +123,11 @@ void IRPassManager::CreatePasses(Argument *argument, pass->Set("max_input_shape", new std::map>( argument->anakin_max_input_shape())); pass->Set("max_batch_size", new int(argument->anakin_max_batch_size())); + bool enable_int8 = + argument->anakin_precision_mode() == AnalysisConfig::Precision::kInt8; + pass->Set("enable_int8", new bool(enable_int8)); + pass->Set("anakin_ops_filter", + new std::vector(argument->anakin_ops_filter())); } pre_pass = pass_name; diff --git a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc index 658006c22cd842..5f74121dc3a679 100644 --- a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc @@ -39,8 +39,14 @@ void analysis::AnakinSubgraphPass::ApplyImpl( framework::ir::Graph *graph) const { framework::ir::FusePassBase::Init("anakin_subgraph_pass", graph); - auto teller = [](const framework::ir::Node *node) { - if (!node->IsOp() || !node->Op()) return false; + auto &anakin_ops_filter = Get>("anakin_ops_filter"); + + auto teller = [&anakin_ops_filter](const framework::ir::Node *node) { + if (!node->IsOp() || !node->Op()) + return false; + else if (std::find(anakin_ops_filter.begin(), anakin_ops_filter.end(), + node->Op()->Type()) != anakin_ops_filter.end()) + return false; return anakin::OpTeller::Global().Tell(node->Op()->Type(), *node->Op()); }; @@ -191,47 +197,71 @@ void AnakinSubgraphPass::CreateAnakinOp( SetAttr(op_desc->Proto(), "engine_key", engine_key); auto max_input_shape = Get>>("max_input_shape"); - auto max_batch_size = Get("max_batch_size"); auto program_inputs = program_desc->GetFeedTargetNames(); bool use_gpu = Get("use_gpu"); SetAttr(op_desc->Proto(), "use_gpu", use_gpu); + bool enable_int8 = Get("enable_int8"); + SetAttr(op_desc->Proto(), "enable_int8", enable_int8); + if (enable_int8) { + CreateAnakinEngine<::anakin::Precision::INT8>(&block_desc, params, + input_names, output_mapping, + program_inputs, engine_key); + } else { + CreateAnakinEngine<::anakin::Precision::FP32>(&block_desc, params, + input_names, output_mapping, + program_inputs, engine_key); + } +} +template <::anakin::Precision PrecisionT> +void AnakinSubgraphPass::CreateAnakinEngine( + framework::BlockDesc *block_desc, const std::vector ¶ms, + const std::set &input_names, + const std::vector &output_mapping, + const std::vector &program_inputs, + const std::string &engine_key) const { + framework::BlockDesc block_desc_temp(nullptr, 
block_desc->Proto()); + bool use_gpu = Get("use_gpu"); + auto max_batch_size = Get("max_batch_size"); + auto max_input_shape = + Get>>("max_input_shape"); if (use_gpu) { #ifdef PADDLE_WITH_CUDA inference::Singleton< - anakin::AnakinEngineManager<::anakin::saber::NV>>::Global() + anakin::AnakinEngineManager<::anakin::saber::NV, PrecisionT>>::Global() .Create(true, Get("gpu_device_id"), max_batch_size, max_input_shape, program_inputs, engine_key); #endif } else { inference::Singleton< - anakin::AnakinEngineManager<::anakin::saber::X86>>::Global() + anakin::AnakinEngineManager<::anakin::saber::X86, PrecisionT>>::Global() .Create(true, Get("gpu_device_id"), max_batch_size, max_input_shape, program_inputs, engine_key); } auto *scope = param_scope(); std::unordered_set param_set(params.begin(), params.end()); - framework::BlockDesc block_desc_temp(nullptr, block_desc.Proto()); if (use_gpu) { +#ifdef PADDLE_WITH_CUDA auto *anakin_engine = inference::Singleton>::Global() + ::anakin::saber::NV, PrecisionT>>::Global() .Get(engine_key); - inference::Singleton< - inference::anakin::AnakinOpConverter<::anakin::saber::NV>>::Global() + inference::Singleton>::Global() .ConvertBlockToAnakinEngine( &block_desc_temp, scope, std::vector(input_names.begin(), input_names.end()), param_set, output_mapping, anakin_engine); +#endif } else { auto *anakin_engine = inference::Singleton>::Global() + ::anakin::saber::X86, PrecisionT>>::Global() .Get(engine_key); - inference::Singleton< - inference::anakin::AnakinOpConverter<::anakin::saber::X86>>::Global() + inference::Singleton>::Global() .ConvertBlockToAnakinEngine( &block_desc_temp, scope, std::vector(input_names.begin(), input_names.end()), diff --git a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h index e80b8bb612096a..4ab2297b2d4887 100644 --- a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h +++ b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h @@ -15,6 +15,7 @@ #pragma once #include #include +#include #include #include #include "paddle/fluid/framework/ir/pass.h" @@ -36,6 +37,13 @@ class AnakinSubgraphPass : public framework::ir::FusePassBase { const std::vector &graph_params, std::vector *repetitive_params) const; void CleanIntermediateOutputs(framework::ir::Node *node); + template <::anakin::Precision PrecisionT> + void CreateAnakinEngine(framework::BlockDesc *block_desc, + const std::vector ¶ms, + const std::set &input_names, + const std::vector &output_mapping, + const std::vector &program_inputs, + const std::string &engine_key) const; }; } // namespace analysis diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index 4f9e0b639564a3..228d80bf9f7596 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -116,6 +116,9 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) { CP_MEMBER(anakin_max_batchsize_); CP_MEMBER(anakin_max_input_shape_); CP_MEMBER(anakin_min_subgraph_size_); + CP_MEMBER(anakin_precision_mode_); + CP_MEMBER(anakin_passes_filter_); + CP_MEMBER(anakin_ops_filter_); // Ir related. 
CP_MEMBER(enable_ir_optim_); @@ -276,7 +279,10 @@ void AnalysisConfig::Update() { pass_builder()->ClearPasses(); for (const auto &pass : kAnakinSubgraphPasses) { - pass_builder()->AppendPass(pass); + if (std::find(anakin_passes_filter_.begin(), anakin_passes_filter_.end(), + pass) == anakin_passes_filter_.end()) { + pass_builder()->AppendPass(pass); + } } } @@ -391,11 +397,16 @@ void AnalysisConfig::SwitchIrDebug(int x) { } void AnalysisConfig::EnableAnakinEngine( int max_batch_size, std::map> max_input_shape, - int min_subgraph_size) { + int min_subgraph_size, AnalysisConfig::Precision precision_mode, + std::vector passes_filter, + std::vector ops_filter) { anakin_max_batchsize_ = max_batch_size; anakin_max_input_shape_ = max_input_shape; anakin_min_subgraph_size_ = min_subgraph_size; + anakin_passes_filter_ = passes_filter; + anakin_ops_filter_ = ops_filter; use_anakin_ = true; + anakin_precision_mode_ = precision_mode; Update(); } } // namespace paddle diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 231beab641a9db..e1709fe2e67491 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -386,6 +386,9 @@ void AnalysisPredictor::PrepareArgument() { argument_.SetAnakinMaxBatchSize(config_.anakin_max_batchsize_); argument_.SetAnakinMaxInputShape(config_.anakin_max_input_shape_); argument_.SetAnakinMinSubgraphSize(config_.anakin_min_subgraph_size_); + argument_.SetAnakinPrecisionMode(config_.anakin_precision_mode_); + argument_.SetAnakinPassesFilter(config_.anakin_passes_filter_); + argument_.SetAnakinOpsFilter(config_.anakin_ops_filter_); LOG(INFO) << "Anakin subgraph engine is enabled"; } diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h index c67c4b5bd0bfee..0f1c42c3602cbf 100644 --- a/paddle/fluid/inference/api/paddle_analysis_config.h +++ b/paddle/fluid/inference/api/paddle_analysis_config.h @@ -152,7 +152,9 @@ struct AnalysisConfig { void EnableAnakinEngine( int max_batch_size = 1, std::map> max_input_shape = {}, - int min_subgraph_size = 6); + int min_subgraph_size = 6, Precision precision = Precision::kFloat32, + std::vector passes_filter = {}, + std::vector ops_filter = {}); /** A boolean state indicating whether the Anakin sub-graph engine is used. */ @@ -291,6 +293,9 @@ struct AnalysisConfig { int anakin_max_batchsize_; int anakin_min_subgraph_size_{6}; std::map> anakin_max_input_shape_; + Precision anakin_precision_mode_; + std::vector anakin_passes_filter_; + std::vector anakin_ops_filter_; std::map engine_opt_info_; bool use_mkldnn_quantizer_{false}; diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc index 3d72295be4b779..a3259f5321f80c 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.cc +++ b/paddle/fluid/inference/api/paddle_pass_builder.cc @@ -73,15 +73,21 @@ void PaddlePassBuilder::ClearPasses() { passes_.clear(); } // The following passes works for Anakin sub-graph engine. 
const std::vector kAnakinSubgraphPasses({ "infer_clean_graph_pass", // + "graph_viz_pass", // + "quant_conv2d_dequant_fuse_pass", // + "graph_viz_pass", // "simplify_anakin_priorbox_detection_out_pass", // "fillconstant_elementwisemul_fuse", // "fc_fuse_pass", // "conv_elementwise_add_fuse_pass", // - "conv_bn_fuse_pass", // - "conv_elementwise_add_fuse_pass", // - "fc_gru_fuse_pass", // - "quant_conv2d_dequant_fuse_pass", // - "anakin_subgraph_pass", + // "conv_bn_fuse_pass", // + // "conv_elementwise_add_fuse_pass", // + "fc_gru_fuse_pass", // + "graph_viz_pass", // + "anakin_subgraph_pass", // + "graph_viz_pass", // + "fc_gru_fuse_pass", // + "graph_viz_pass", // }); GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) { diff --git a/paddle/fluid/operators/anakin/anakin_engine_op.h b/paddle/fluid/operators/anakin/anakin_engine_op.h index 99c5a6dc84a094..11c394c76cd982 100644 --- a/paddle/fluid/operators/anakin/anakin_engine_op.h +++ b/paddle/fluid/operators/anakin/anakin_engine_op.h @@ -44,6 +44,7 @@ class AnakinEngineOp : public framework::OperatorBase { std::string engine_key_; std::string engine_serialized_data_; bool use_gpu_; + bool enable_int8_; public: AnakinEngineOp(const std::string &type, @@ -55,6 +56,7 @@ class AnakinEngineOp : public framework::OperatorBase { engine_key_ = Attr("engine_key"); auto params = Attr>("parameters"); use_gpu_ = Attr("use_gpu"); + enable_int8_ = Attr("enable_int8"); for (const auto ¶m : params) { param_names_.insert(param); } @@ -68,11 +70,6 @@ class AnakinEngineOp : public framework::OperatorBase { void RunAnakin(const framework::Scope &scope, const platform::Place &dev_place) const { - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto &dev_ctx = *pool.Get(dev_place); - auto stream = - reinterpret_cast(dev_ctx).stream(); - PADDLE_ENFORCE(!input_names_.empty(), "should pass more than one inputs"); std::vector output_maps = @@ -96,18 +93,35 @@ class AnakinEngineOp : public framework::OperatorBase { outputs.insert({output_maps[output_index], fluid_t}); output_index += 1; } + if (enable_int8_) { + Execute<::anakin::Precision::INT8>(inputs, outputs, dev_place); + } else { + Execute<::anakin::Precision::FP32>(inputs, outputs, dev_place); + } + } + + template <::anakin::Precision PrecisionT> + void Execute(const std::map &inputs, + const std::map &outputs, + const platform::Place &dev_place) const { if (use_gpu_) { #ifdef PADDLE_WITH_CUDA + platform::DeviceContextPool &pool = + platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(dev_place); + auto stream = + reinterpret_cast(dev_ctx) + .stream(); auto *engine = inference::Singleton>::Global() + ::anakin::saber::NV, PrecisionT>>::Global() .Get(engine_key_); engine->Execute(inputs, outputs, stream); #endif } else { auto *engine = inference::Singleton>::Global() + ::anakin::saber::X86, PrecisionT>>::Global() .Get(engine_key_); engine->Execute(inputs, outputs); } diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc index ace385ec60fec0..8385e6331d757b 100644 --- a/paddle/fluid/pybind/inference_api.cc +++ b/paddle/fluid/pybind/inference_api.cc @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include "paddle/fluid/inference/api/analysis_predictor.h" @@ -230,8 +231,13 @@ void BindAnalysisConfig(py::module *m) { py::arg("precision_mode") = AnalysisConfig::Precision::kFloat32, py::arg("use_static") = true) .def("enable_anakin_engine", &AnalysisConfig::EnableAnakinEngine, - py::arg("max_batch_size") = 1, 
py::arg("max_input_shape") = {}, - py::arg("min_subgraph_size") = 6) + py::arg("max_batch_size") = 1, + py::arg("max_input_shape") = + std::map>(), + py::arg("min_subgraph_size") = 6, + py::arg("precision_mode") = AnalysisConfig::Precision::kFloat32, + py::arg("passes_filter") = std::vector(), + py::arg("ops_filter") = std::vector()) .def("tensorrt_engine_enabled", &AnalysisConfig::tensorrt_engine_enabled) .def("switch_ir_debug", &AnalysisConfig::SwitchIrDebug, py::arg("x") = true) From 480302d404717f8247036089b035a7f2acf2b59f Mon Sep 17 00:00:00 2001 From: nhzlx Date: Thu, 11 Apr 2019 19:31:35 +0000 Subject: [PATCH 27/27] Cherry-pick from 16813 : change singleton to graph RegistBlock test=release/1.4 --- .../anakin/convert/affine_channel.cc | 4 +- .../inference/anakin/convert/batch_norm.cc | 12 +++-- .../fluid/inference/anakin/convert/conv2d.cc | 8 +-- .../inference/anakin/convert/conv2d_fusion.cc | 10 ++-- .../fluid/inference/anakin/convert/dropout.cc | 3 +- paddle/fluid/inference/anakin/convert/fc.cc | 10 ++-- .../fluid/inference/anakin/convert/helper.h | 49 +++++++++++-------- paddle/fluid/inference/anakin/engine.cc | 6 +++ paddle/fluid/inference/anakin/engine.h | 1 + .../inference/anakin/test_anakin_engine.cc | 7 +-- 10 files changed, 65 insertions(+), 45 deletions(-) diff --git a/paddle/fluid/inference/anakin/convert/affine_channel.cc b/paddle/fluid/inference/anakin/convert/affine_channel.cc index 074c1b26ba8913..a3abca0a84f66f 100644 --- a/paddle/fluid/inference/anakin/convert/affine_channel.cc +++ b/paddle/fluid/inference/anakin/convert/affine_channel.cc @@ -38,13 +38,13 @@ void AffineChannelOpConverter::operator()( // Copy the Scale to CPUPlace and get the pointer. auto *scale_v = scope.FindVar(op_desc.Input("Scale").front()); PADDLE_ENFORCE_NOT_NULL(scale_v); - auto weight1 = pblock_from_var(*scale_v); + auto weight1 = pblock_from_var(*scale_v, this->engine_); this->engine_->AddOpAttr(op_name, "weight_1", *weight1); // Copy the Bias to CPUPlace and get the pointer. 
auto *bias_v = scope.FindVar(op_desc.Input("Bias").front()); PADDLE_ENFORCE_NOT_NULL(bias_v); - auto weight2 = pblock_from_var(*bias_v); + auto weight2 = pblock_from_var(*bias_v, this->engine_); this->engine_->AddOpAttr(op_name, "weight_2", *weight2); } diff --git a/paddle/fluid/inference/anakin/convert/batch_norm.cc b/paddle/fluid/inference/anakin/convert/batch_norm.cc index 3e1e422aea19bc..fa7f3bd79f2807 100644 --- a/paddle/fluid/inference/anakin/convert/batch_norm.cc +++ b/paddle/fluid/inference/anakin/convert/batch_norm.cc @@ -54,25 +54,27 @@ void BatchNormOpConverter::operator()( auto *mean_v = scope.FindVar(op_desc.Input("Mean").front()); PADDLE_ENFORCE_NOT_NULL(mean_v); - auto weight1 = pblock_from_var(*mean_v); + auto weight1 = pblock_from_var(*mean_v, this->engine_); this->engine_->AddOpAttr(bn_op_name, "weight_1", *weight1); auto *variance_v = scope.FindVar(op_desc.Input("Variance").front()); PADDLE_ENFORCE_NOT_NULL(variance_v); - auto weight2 = pblock_from_var(*variance_v); + auto weight2 = + pblock_from_var(*variance_v, this->engine_); this->engine_->AddOpAttr(bn_op_name, "weight_2", *weight2); - auto *weight3 = pblock_from_vector(std::vector({1})); + auto *weight3 = pblock_from_vector( + std::vector({1}), this->engine_); this->engine_->AddOpAttr(bn_op_name, "weight_3", *weight3); auto *scale_v = scope.FindVar(op_desc.Input("Scale").front()); PADDLE_ENFORCE_NOT_NULL(scale_v); - auto scale = pblock_from_var(*scale_v); + auto scale = pblock_from_var(*scale_v, this->engine_); this->engine_->AddOpAttr(scale_op_name, "weight_1", *scale); auto *bias_v = scope.FindVar(op_desc.Input("Bias").front()); PADDLE_ENFORCE_NOT_NULL(bias_v); - auto bias = pblock_from_var(*bias_v); + auto bias = pblock_from_var(*bias_v, this->engine_); this->engine_->AddOpAttr(scale_op_name, "weight_2", *bias); } diff --git a/paddle/fluid/inference/anakin/convert/conv2d.cc b/paddle/fluid/inference/anakin/convert/conv2d.cc index 4bd380e7bb23b3..e2ea6290fab1c8 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d.cc +++ b/paddle/fluid/inference/anakin/convert/conv2d.cc @@ -71,8 +71,9 @@ void Conv2dOpConverter::operator()( const float int8_range = 127.; float in_scale = boost::get(op_desc.GetAttr("input_scale")); float weight_scale = boost::get(op_desc.GetAttr("weight_scale")); - auto *weight1 = ::anakin::graph::GraphGlobalMem::Global() - .template new_block<::anakin::AK_INT8>(anakin_shape); + PBlock *weight1 = + new PBlock(anakin_shape, ::anakin::AK_INT8); + this->engine_->RegistBlock(weight1); float *weight_data = weight_tensor->data(); std::vector weight_int8; int weight_num = weight_tensor->numel(); @@ -94,7 +95,8 @@ void Conv2dOpConverter::operator()( {weight_scale / int8_range}, false); this->engine_->AddTensorScale(input_name, in_scale / int8_range); } else { - auto *weight1 = pblock_from_tensor(*weight_tensor, weight_shape); + auto *weight1 = pblock_from_tensor( + *weight_tensor, weight_shape, this->engine_); this->engine_->AddOpAttr(op_name, "weight_1", *weight1); } } diff --git a/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc b/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc index a8ef73d50f2a42..a557c35475d374 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc +++ b/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc @@ -73,8 +73,9 @@ void Conv2dFusionOpConverter::operator()( const float int8_range = 127.; float in_scale = boost::get(op_desc.GetAttr("input_scale")); float weight_scale = boost::get(op_desc.GetAttr("weight_scale")); - auto *weight1 = 
::anakin::graph::GraphGlobalMem::Global() - .template new_block<::anakin::AK_INT8>(anakin_shape); + PBlock *weight1 = + new PBlock(anakin_shape, ::anakin::AK_INT8); + this->engine_->RegistBlock(weight1); float *weight_data = weight_tensor->data(); std::vector weight_int8; int weight_num = weight_tensor->numel(); @@ -98,9 +99,10 @@ void Conv2dFusionOpConverter::operator()( } else { auto weight_tensor = tensor_from_var(*filter_v, platform::CPUPlace()); auto weight_shape = framework::vectorize2int(weight_tensor->dims()); - auto *weight1 = pblock_from_tensor(*weight_tensor, weight_shape); + auto *weight1 = pblock_from_tensor( + *weight_tensor, weight_shape, this->engine_); this->engine_->AddOpAttr(op_name, "weight_1", *weight1); - auto weight2 = pblock_from_var(*b_v); + auto weight2 = pblock_from_var(*b_v, this->engine_); this->engine_->AddOpAttr(op_name, "weight_2", *weight2); } } diff --git a/paddle/fluid/inference/anakin/convert/dropout.cc b/paddle/fluid/inference/anakin/convert/dropout.cc index e779aca7308397..872ebaba3c0bcb 100644 --- a/paddle/fluid/inference/anakin/convert/dropout.cc +++ b/paddle/fluid/inference/anakin/convert/dropout.cc @@ -39,7 +39,8 @@ void DropoutOpConverter::operator()( auto dropout_prob = boost::get(op_desc.GetAttr("dropout_prob")); auto factor = 1 - dropout_prob; - auto *weight1 = pblock_from_vector(std::vector({factor})); + auto *weight1 = pblock_from_vector( + std::vector({factor}), this->engine_); this->engine_->AddOpAttr(op_name, "weight_1", *weight1); this->engine_->AddOpAttr(op_name, "axis", 0); diff --git a/paddle/fluid/inference/anakin/convert/fc.cc b/paddle/fluid/inference/anakin/convert/fc.cc index 10ceb2154b1b7c..04af311992210e 100644 --- a/paddle/fluid/inference/anakin/convert/fc.cc +++ b/paddle/fluid/inference/anakin/convert/fc.cc @@ -77,8 +77,9 @@ void FcBaseOpConverter::operator()( const float int8_range = 127.; float in_scale = boost::get(op_desc.GetAttr("input_scale")); float weight_scale = boost::get(op_desc.GetAttr("weight_scale")); - auto *weight1 = ::anakin::graph::GraphGlobalMem::Global() - .template new_block<::anakin::AK_INT8>(anakin_shape); + PBlock *weight1 = + new PBlock(anakin_shape, ::anakin::AK_INT8); + this->engine_->RegistBlock(weight1); std::vector weight_int8; for (int i = 0; i < weight_num; i++) { bool is_valid_int8 = @@ -98,7 +99,8 @@ void FcBaseOpConverter::operator()( {weight_scale / int8_range}, false); this->engine_->AddTensorScale(input_name, in_scale / int8_range); } else { - auto *weight1 = pblock_from_vector(trans_weight_data); + auto *weight1 = pblock_from_vector(trans_weight_data, + this->engine_); this->engine_->AddOpAttr(op_name, "weight_1", *weight1); } @@ -106,7 +108,7 @@ void FcBaseOpConverter::operator()( if (with_bias) { auto *b_v = scope.FindVar(op_desc.Input("Bias").front()); PADDLE_ENFORCE_NOT_NULL(b_v); - auto weight2 = pblock_from_var(*b_v); + auto weight2 = pblock_from_var(*b_v, this->engine_); this->engine_->AddOpAttr(op_name, "weight_2", *weight2); } } diff --git a/paddle/fluid/inference/anakin/convert/helper.h b/paddle/fluid/inference/anakin/convert/helper.h index 5581f7dd641c57..7b0fb211dcd8aa 100644 --- a/paddle/fluid/inference/anakin/convert/helper.h +++ b/paddle/fluid/inference/anakin/convert/helper.h @@ -20,6 +20,7 @@ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/inference/anakin/engine.h" #include "framework/core/net/net.h" #include "framework/core/types.h" @@ -29,8 +30,8 @@ using anakin::saber::Shape; using anakin::AK_FLOAT; 
+using anakin::AK_INT8; using anakin::PBlock; -using anakin::graph::GraphGlobalMem; namespace paddle { namespace inference { @@ -38,31 +39,34 @@ namespace anakin { std::unique_ptr tensor_from_var( const framework::Variable& var, const platform::Place& place); -template -PBlock* pblock_from_tensor(const framework::LoDTensor& tensor, - std::vector shape) { - while (shape.size() < 4) { - shape.insert(shape.begin(), 1); + +template +PBlock* pblock_from_tensor(const framework::LoDTensor& tensor, + std::vector shape_vec, + AnakinEngine* engine) { + while (shape_vec.size() < 4) { + shape_vec.insert(shape_vec.begin(), 1); } - Shape anakin_shape(shape); - auto* weight = - GraphGlobalMem::Global().template new_block(anakin_shape); + Shape shape(shape_vec); + PBlock* weight = new PBlock(shape, AK_FLOAT); + engine->RegistBlock(weight); float* cpu_data = static_cast(weight->h_tensor().mutable_data()); std::copy_n(tensor.data(), tensor.numel(), cpu_data); - weight->d_tensor().set_shape(anakin_shape); + weight->d_tensor().set_shape(shape); weight->d_tensor().copy_from(weight->h_tensor()); return weight; } -template -PBlock* pblock_from_vector(const std::vector& vec, - std::vector shape_vec) { +template +PBlock* pblock_from_vector(const std::vector& vec, + std::vector shape_vec, + AnakinEngine* engine) { while (shape_vec.size() < 4) { shape_vec.insert(shape_vec.begin(), 1); } Shape shape(shape_vec); - auto* weight = - GraphGlobalMem::Global().template new_block(shape); + PBlock* weight = new PBlock(shape, AK_FLOAT); + engine->RegistBlock(weight); auto* weight_data = static_cast(weight->h_tensor().mutable_data()); std::copy(std::begin(vec), std::end(vec), weight_data); weight->d_tensor().set_shape(shape); @@ -70,17 +74,20 @@ PBlock* pblock_from_vector(const std::vector& vec, return weight; } -template -PBlock* pblock_from_vector(const std::vector& vec) { +template +PBlock* pblock_from_vector(const std::vector& vec, + AnakinEngine* engine) { int size = vec.size(); - return pblock_from_vector(vec, std::vector({1, 1, 1, size})); + return pblock_from_vector( + vec, std::vector({1, 1, 1, size}), engine); } -template -PBlock* pblock_from_var(const framework::Variable& var) { +template +PBlock* pblock_from_var(const framework::Variable& var, + AnakinEngine* engine) { auto tensor = tensor_from_var(var, platform::CPUPlace()); auto shape = framework::vectorize2int(tensor->dims()); - return pblock_from_tensor(*tensor, shape); + return pblock_from_tensor(*tensor, shape, engine); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/engine.cc b/paddle/fluid/inference/anakin/engine.cc index 90bc9c2514c3c7..fdf2f228f59fe8 100644 --- a/paddle/fluid/inference/anakin/engine.cc +++ b/paddle/fluid/inference/anakin/engine.cc @@ -162,6 +162,12 @@ void AnakinEngine::Optimize() { PADDLE_ENFORCE(graph_->Optimize(), "Graph optimization."); } +template +void AnakinEngine::RegistBlock( + ::anakin::PBlock *block_p) { + PADDLE_ENFORCE(graph_->RegistBlock(block_p), "Block register."); +} + template std::unique_ptr> AnakinEngine::Clone() { diff --git a/paddle/fluid/inference/anakin/engine.h b/paddle/fluid/inference/anakin/engine.h index ade15537db838f..5e76331cc56b47 100644 --- a/paddle/fluid/inference/anakin/engine.h +++ b/paddle/fluid/inference/anakin/engine.h @@ -90,6 +90,7 @@ class AnakinEngine { int GetMaxBatchSize() { return max_batch_size_; } void Freeze(); void Optimize(); + void RegistBlock(::anakin::PBlock *block_p); void Save(std::string path) { graph_->save(path); } bool IsInit() { return initialized_; } int 
GetDevice() { return device_; } diff --git a/paddle/fluid/inference/anakin/test_anakin_engine.cc b/paddle/fluid/inference/anakin/test_anakin_engine.cc index 613481a55514f8..422f415a5db62d 100644 --- a/paddle/fluid/inference/anakin/test_anakin_engine.cc +++ b/paddle/fluid/inference/anakin/test_anakin_engine.cc @@ -19,7 +19,6 @@ limitations under the License. */ #include "paddle/fluid/inference/anakin/engine.h" -using anakin::graph::GraphGlobalMem; using anakin::AK_FLOAT; using anakin::Precision; using anakin::saber::NV; @@ -52,11 +51,9 @@ TEST_F(TestAnakinEngine, Execute) { engine_->AddOpAttr("op1", "axis", 1); std::vector shape = {1, 1, 1, 2}; Shape tmp_shape(shape); - // PBlock weight1(tmp_shape); - auto *weight1 = - GraphGlobalMem::Global().template new_block(tmp_shape); - // auto *weight1 = new PBlock(tmp_shape, AK_FLOAT); + PBlock *weight1 = new PBlock(tmp_shape, AK_FLOAT); + engine_->RegistBlock(weight1); float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); cpu_data[0] = 2.; weight1->d_tensor().set_shape(tmp_shape);
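
For reference, a minimal sketch (not part of the patch) of how the extended EnableAnakinEngine() interface introduced in this series might be called from the C++ side to select the INT8 Anakin path. The model directory, input tensor name, and shape below are placeholder assumptions; only the configuration calls mirror the new signature and the AnalysisConfig::Precision::kInt8 mode added above.

#include <memory>

#include "paddle/fluid/inference/api/paddle_analysis_config.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"

std::unique_ptr<paddle::PaddlePredictor> BuildAnakinInt8Predictor() {
  paddle::AnalysisConfig config;
  // Hypothetical model location; any directory holding the saved program.
  config.SetModel("/path/to/anakin_model");
  // Enable the Anakin sub-graph engine with the new INT8 precision mode.
  config.EnableAnakinEngine(
      /*max_batch_size=*/1,
      /*max_input_shape=*/{{"data", {1, 3, 224, 224}}},  // assumed input name
      /*min_subgraph_size=*/6,
      /*precision=*/paddle::AnalysisConfig::Precision::kInt8,
      /*passes_filter=*/{},  // empty: keep every pass in kAnakinSubgraphPasses
      /*ops_filter=*/{});    // empty: no op is excluded from the Anakin teller
  return paddle::CreatePaddlePredictor(config);
}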