Skip to content

Commit 40f9e35

Browse files
committed
NNAdapter supports Intel OpenVINO
1 parent a25fdaf commit 40f9e35

File tree

21 files changed

+341
-251
lines changed

21 files changed

+341
-251
lines changed

lite/backends/nnadapter/nnadapter/src/driver/huawei_ascend_npu/engine.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -60,11 +60,6 @@ class Program {
6060
uint32_t output_count,
6161
core::Argument* output_arguments);
6262

63-
void Init() {
64-
static InferenceEngine::Core
65-
}
66-
67-
6863
private:
6964
void Clear();
7065
int CheckInputsAndOutputs(uint32_t input_count,

lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,4 +24,4 @@ set(DEPS ${NNADAPTER_OPERATIONS} ${NNADAPTER_UTILITIES} ${${DEVICE_NAME}_deps})
2424

2525
add_library(${DEVICE_NAME} SHARED ${SRCS})
2626
target_link_libraries(${DEVICE_NAME} "-Wl,--start-group" ${DEPS} "-Wl,--end-group")
27-
set(NNADAPTER_DEVICES ${NNADAPTER_DEVICES} ${DEVICE_NAME} CACHE INTERNAL "")
27+
set(NNADAPTER_DEVICES ${NNADAPTER_DEVICES} ${DEVICE_NAME} CACHE INTERNAL "")

lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/all.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,4 +38,4 @@ REGISTER_CONVERTER(SOFTMAX, ConvertSoftmax)
3838
REGISTER_CONVERTER(SUB, ConvertElementwise)
3939
REGISTER_CONVERTER(TANH, ConvertUnaryActivations)
4040

41-
#endif
41+
#endif // LITE_BACKENDS_NNADAPTER_NNADAPTER_SRC_DRIVER_INTEL_OPENVINO_CONVERTER_ALL_H_

lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/batch_normalization.cc

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,8 @@
2020
namespace nnadapter {
2121
namespace intel_openvino {
2222

23-
int ConvertBatchNormalization(Converter* converter, core::Operation* operation) {
23+
int ConvertBatchNormalization(Converter* converter,
24+
core::Operation* operation) {
2425
BATCH_NORMALIZATION_OPERATION_EXTRACT_INPUTS_OUTPUTS
2526

2627
// Convert operand to Intel OpenVINO's OutputNode
@@ -33,8 +34,13 @@ int ConvertBatchNormalization(Converter* converter, core::Operation* operation)
3334
auto mean_node = converter->ConvertToOutputNode(mean_operand);
3435
auto variance_node = converter->ConvertToOutputNode(variance_operand);
3536
// Create <BatchNormInference> Node for Intel OpenVINO
36-
std::shared_ptr<Node> node = std::make_shared<default_opset::BatchNormInference>
37-
(*input_node, *gamma_node, *beta_node, *mean_node, *variance_node, epsilon);
37+
std::shared_ptr<Node> node =
38+
std::make_shared<default_opset::BatchNormInference>(*input_node,
39+
*gamma_node,
40+
*beta_node,
41+
*mean_node,
42+
*variance_node,
43+
epsilon);
3844
auto output_node = std::make_shared<OutputNode>(node->output(0));
3945
converter->UpdateOutputNodeMap(output_operand, output_node);
4046
return NNADAPTER_NO_ERROR;

lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/conv2d.cc

Lines changed: 27 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
1+
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
22
//
33
// Licensed under the Apache License, Version 2.0 (the "License");
44
// you may not use this file except in compliance with the License.
@@ -38,7 +38,7 @@ int ConvertConv2D(Converter* converter, core::Operation* operation) {
3838
&pad_width_left,
3939
&pad_width_right,
4040
stride_width,
41-
&dilation_width);
41+
&dilation_width);
4242
}
4343

4444
// Convert operand to Intel OpenVINO's OutputNode
@@ -48,21 +48,35 @@ int ConvertConv2D(Converter* converter, core::Operation* operation) {
4848
}
4949
auto filter_node = converter->ConvertToOutputNode(filter_operand);
5050
auto ov_auto_pad = ConvertToOVPadType(auto_pad);
51-
auto ov_strides = ov::Strides({static_cast<size_t>(stride_height), static_cast<size_t>(stride_width)});
52-
auto ov_diliations = ov::Strides({static_cast<size_t>(dilation_height), static_cast<size_t>(dilation_width)});
53-
auto ov_pads_begin = ov::CoordinateDiff({static_cast<std::ptrdiff_t>(pad_height_top), static_cast<std::ptrdiff_t>(pad_width_left)});
54-
auto ov_pads_end = ov::CoordinateDiff({static_cast<std::ptrdiff_t>(pad_height_bottom), static_cast<std::ptrdiff_t>(pad_width_right)});
51+
auto ov_strides = ov::Strides(
52+
{static_cast<size_t>(stride_height), static_cast<size_t>(stride_width)});
53+
auto ov_diliations = ov::Strides({static_cast<size_t>(dilation_height),
54+
static_cast<size_t>(dilation_width)});
55+
auto ov_pads_begin =
56+
ov::CoordinateDiff({static_cast<std::ptrdiff_t>(pad_height_top),
57+
static_cast<std::ptrdiff_t>(pad_width_left)});
58+
auto ov_pads_end =
59+
ov::CoordinateDiff({static_cast<std::ptrdiff_t>(pad_height_bottom),
60+
static_cast<std::ptrdiff_t>(pad_width_right)});
5561
// Create <Convolution> Node for Intel OpenVINO
5662
std::shared_ptr<OutputNode> output_node{nullptr};
57-
std::shared_ptr<Node> node = std::make_shared<default_opset::Convolution>(*input_node, *filter_node, ov_strides,
58-
ov_pads_begin, ov_pads_end, ov_diliations, ov_auto_pad);
63+
std::shared_ptr<Node> node =
64+
std::make_shared<default_opset::Convolution>(*input_node,
65+
*filter_node,
66+
ov_strides,
67+
ov_pads_begin,
68+
ov_pads_end,
69+
ov_diliations,
70+
ov_auto_pad);
5971
auto conv_output_node = std::make_shared<OutputNode>(node->output(0));
6072
converter->UpdateOutputNodeMap(output_operand, conv_output_node);
6173
output_node = conv_output_node;
6274
NNADAPTER_LOG(INFO) << "Convert conv2d success";
6375
// Bias
64-
auto unsqueeze_node = converter->AddUnsqueezeOutputNode(bias_operand, std::vector<size_t>({3}), std::vector<int64_t>({0,2,3}));
65-
std::shared_ptr<Node> add_node = std::make_shared<default_opset::Add>(*conv_output_node, *unsqueeze_node);
76+
auto unsqueeze_node = converter->AddUnsqueezeOutputNode(
77+
bias_operand, std::vector<size_t>({3}), std::vector<int64_t>({0, 2, 3}));
78+
std::shared_ptr<Node> add_node =
79+
std::make_shared<default_opset::Add>(*conv_output_node, *unsqueeze_node);
6680
auto add_output_node = std::make_shared<OutputNode>(add_node->output(0));
6781
converter->UpdateOutputNodeMap(output_operand, add_output_node);
6882
output_node = add_output_node;
@@ -71,9 +85,10 @@ int ConvertConv2D(Converter* converter, core::Operation* operation) {
7185
switch (fuse_code) {
7286
#define CONVERT_UNARY_ACTIVATION(type, class_name) \
7387
case NNADAPTER_FUSED_##type: { \
74-
std::shared_ptr<Node> act_node = std::make_shared<default_opset::class_name>(*output_node); \
88+
std::shared_ptr<Node> act_node = \
89+
std::make_shared<default_opset::class_name>(*output_node); \
7590
auto act_output_node = std::make_shared<OutputNode>(act_node->output(0)); \
76-
converter->UpdateOutputNodeMap(output_operand, act_output_node); \
91+
converter->UpdateOutputNodeMap(output_operand, act_output_node); \
7792
} break;
7893
CONVERT_UNARY_ACTIVATION(RELU, Relu);
7994
NNADAPTER_LOG(INFO) << " Convert conv2d-relu success!";

lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/converter.cc

Lines changed: 15 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
1+
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
22
//
33
// Licensed under the Apache License, Version 2.0 (the "License");
44
// you may not use this file except in compliance with the License.
@@ -15,6 +15,7 @@
1515
#include "driver/intel_openvino/converter/converter.h"
1616
#include <unistd.h>
1717
#include <algorithm>
18+
#include <utility>
1819
#include <vector>
1920
#include "utility/debug.h"
2021
#include "utility/logging.h"
@@ -69,8 +70,8 @@ std::shared_ptr<OutputNode> Converter::UpdateOutputNodeMap(
6970
core::Operand* operand, std::shared_ptr<OutputNode> output_node) {
7071
auto it = output_nodes_->find(operand);
7172
if (it == output_nodes_->end()) {
72-
auto result = output_nodes_->insert(std::make_pair(
73-
operand, std::vector<std::shared_ptr<OutputNode>>()));
73+
auto result = output_nodes_->insert(
74+
std::make_pair(operand, std::vector<std::shared_ptr<OutputNode>>()));
7475
NNADAPTER_CHECK(result.second);
7576
it = result.first;
7677
}
@@ -87,16 +88,21 @@ std::shared_ptr<OutputNode> Converter::ConvertToOutputNode(
8788
}
8889
}
8990
if (IsConstantOperand(operand)) {
90-
auto constant_node = std::make_shared<default_opset::Constant>(ConvertToOVElementType(operand->type.precision),
91-
ConvertToOVShape(dimensions), operand->buffer);
92-
std::shared_ptr<OutputNode> output_node = std::make_shared<OutputNode>(constant_node->output(0));
91+
auto constant_node = std::make_shared<default_opset::Constant>(
92+
ConvertToOVElementType(operand->type.precision),
93+
ConvertToOVShape(dimensions),
94+
operand->buffer);
95+
std::shared_ptr<OutputNode> output_node =
96+
std::make_shared<OutputNode>(constant_node->output(0));
9397
UpdateOutputNodeMap(operand, output_node);
9498
return output_node;
9599
} else if (IsModelInputOperand(operand)) {
96-
auto parameter_node = std::make_shared<default_opset::Parameter>(ConvertToOVElementType(operand->type.precision),
97-
ConvertToOVShape(dimensions));
100+
auto parameter_node = std::make_shared<default_opset::Parameter>(
101+
ConvertToOVElementType(operand->type.precision),
102+
ConvertToOVShape(dimensions));
98103
parameter_nodes_->push_back(parameter_node);
99-
std::shared_ptr<OutputNode> output_node = std::make_shared<OutputNode>(parameter_node->output(0));
104+
std::shared_ptr<OutputNode> output_node =
105+
std::make_shared<OutputNode>(parameter_node->output(0));
100106
UpdateOutputNodeMap(operand, output_node);
101107
return output_node;
102108
}

lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/converter.h

Lines changed: 20 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -27,32 +27,40 @@ class Converter {
2727
public:
2828
explicit Converter(
2929
std::vector<std::shared_ptr<default_opset::Parameter>>* paramter_nodes,
30-
std::map<core::Operand*, std::vector<std::shared_ptr<OutputNode>>>* output_nodes) : parameter_nodes_(paramter_nodes), output_nodes_(output_nodes) {}
31-
30+
std::map<core::Operand*, std::vector<std::shared_ptr<OutputNode>>>*
31+
output_nodes)
32+
: parameter_nodes_(paramter_nodes), output_nodes_(output_nodes) {}
33+
3234
~Converter() {}
3335

3436
// Convert a NNAdapter model to an intel openvino graph
3537
int Apply(core::Model* model);
3638

3739
// Convert a NNAdapter operand to an intel openvino OutputNode
38-
std::shared_ptr<OutputNode> ConvertToOutputNode(core::Operand* operand, std::vector<int32_t> dimensions = {});
40+
std::shared_ptr<OutputNode> ConvertToOutputNode(
41+
core::Operand* operand, std::vector<int32_t> dimensions = {});
3942

40-
std::shared_ptr<OutputNode> UpdateOutputNodeMap(core::Operand* operand, std::shared_ptr<OutputNode> output_node);
43+
std::shared_ptr<OutputNode> UpdateOutputNodeMap(
44+
core::Operand* operand, std::shared_ptr<OutputNode> output_node);
4145

4246
std::shared_ptr<OutputNode> GetMappedOutputNode(core::Operand* operand);
4347

44-
template<typename T>
45-
std::shared_ptr<OutputNode> AddUnsqueezeOutputNode(core::Operand* operand,
46-
std::vector<size_t> dimensions, std::vector<T> axes) {
47-
auto axes_node = AddConstOutputNode<T>(dimensions, axes);
48-
auto y_node = ConvertToOutputNode(operand);
49-
auto unsqueeze_node = std::make_shared<default_opset::Unsqueeze>(*y_node, *axes_node);
50-
return std::make_shared<OutputNode>(unsqueeze_node->output(0));
48+
template <typename T>
49+
std::shared_ptr<OutputNode> AddUnsqueezeOutputNode(
50+
core::Operand* operand,
51+
std::vector<size_t> dimensions,
52+
std::vector<T> axes) {
53+
auto axes_node = AddConstOutputNode<T>(dimensions, axes);
54+
auto y_node = ConvertToOutputNode(operand);
55+
auto unsqueeze_node =
56+
std::make_shared<default_opset::Unsqueeze>(*y_node, *axes_node);
57+
return std::make_shared<OutputNode>(unsqueeze_node->output(0));
5158
}
5259

5360
private:
5461
std::vector<std::shared_ptr<default_opset::Parameter>>* parameter_nodes_;
55-
std::map<core::Operand*, std::vector<std::shared_ptr<OutputNode>>>* output_nodes_;
62+
std::map<core::Operand*, std::vector<std::shared_ptr<OutputNode>>>*
63+
output_nodes_;
5664
};
5765

5866
} // namespace intel_openvino

lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/elementwise.cc

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ namespace intel_openvino {
2222

2323
int ConvertElementwise(Converter* converter, core::Operation* operation) {
2424
ELEMENTWISE_OPERATION_EXTRACT_INPUTS_OUTPUTS
25-
25+
2626
// Convert operand to Intel OpenVINO's OutputNode
2727
auto input0_node = converter->GetMappedOutputNode(input0_operand);
2828
if (!input0_node) {
@@ -35,11 +35,12 @@ int ConvertElementwise(Converter* converter, core::Operation* operation) {
3535
// Create <ElementWise> Node for Intel OpenVINO
3636
std::shared_ptr<OutputNode> output_node{nullptr};
3737
switch (operation->type) {
38-
#define CONVERT_ELEMENTWISE(type, class_name) \
39-
case NNADAPTER_##type: { \
40-
std::shared_ptr<Node> node = std::make_shared<default_opset::class_name>(*input0_node, *input1_node); \
41-
output_node = std::make_shared<OutputNode>(node->output(0)); \
42-
converter->UpdateOutputNodeMap(output_operand, output_node); \
38+
#define CONVERT_ELEMENTWISE(type, class_name) \
39+
case NNADAPTER_##type: { \
40+
std::shared_ptr<Node> node = std::make_shared<default_opset::class_name>( \
41+
*input0_node, *input1_node); \
42+
output_node = std::make_shared<OutputNode>(node->output(0)); \
43+
converter->UpdateOutputNodeMap(output_operand, output_node); \
4344
} break;
4445
CONVERT_ELEMENTWISE(ADD, Add);
4546
CONVERT_ELEMENTWISE(SUB, Subtract);
@@ -61,9 +62,10 @@ int ConvertElementwise(Converter* converter, core::Operation* operation) {
6162
switch (fuse_code) {
6263
#define CONVERT_UNARY_ACTIVATION(type, class_name) \
6364
case NNADAPTER_FUSED_##type: { \
64-
std::shared_ptr<Node> act_node = std::make_shared<default_opset::class_name>(*output_node); \
65+
std::shared_ptr<Node> act_node = \
66+
std::make_shared<default_opset::class_name>(*output_node); \
6567
auto act_output_node = std::make_shared<OutputNode>(act_node->output(0)); \
66-
converter->UpdateOutputNodeMap(output_operand, act_output_node); \
68+
converter->UpdateOutputNodeMap(output_operand, act_output_node); \
6769
} break;
6870
CONVERT_UNARY_ACTIVATION(RELU, Relu);
6971
#undef CONVERT_UNARY_ACTIVATION

lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/matmul.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,8 @@
1212
// See the License for the specific language governing permissions and
1313
// limitations under the License.
1414

15-
#include "operation/mat_mul.h"
1615
#include "driver/intel_openvino/converter/converter.h"
16+
#include "operation/mat_mul.h"
1717
#include "utility/debug.h"
1818
#include "utility/logging.h"
1919

@@ -33,8 +33,8 @@ int ConvertMatMul(Converter* converter, core::Operation* operation) {
3333
y_node = converter->ConvertToOutputNode(y_operand);
3434
}
3535
// Create <MatMul> Node for Intel OpenVINO
36-
std::shared_ptr<Node> node = std::make_shared<default_opset::MatMul>
37-
(*x_node, *y_node, transpose_x, transpose_y);
36+
std::shared_ptr<Node> node = std::make_shared<default_opset::MatMul>(
37+
*x_node, *y_node, transpose_x, transpose_y);
3838
auto output_node = std::make_shared<OutputNode>(node->output(0));
3939
converter->UpdateOutputNodeMap(output_operand, output_node);
4040
return NNADAPTER_NO_ERROR;

lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/pool2d.cc

Lines changed: 43 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -22,42 +22,68 @@ namespace intel_openvino {
2222

2323
int ConvertPool2D(Converter* converter, core::Operation* operation) {
2424
POOL_2D_OPERATION_EXTRACT_INPUTS_OUTPUTS
25-
25+
2626
// Convert operand to Intel OpenVINO's OutputNode
2727
auto input_node = converter->GetMappedOutputNode(input_operand);
2828
if (!input_node) {
2929
input_node = converter->ConvertToOutputNode(input_operand);
3030
}
3131
auto ov_auto_pad = ConvertToOVPadType(auto_pad);
32-
auto ov_pads_begin = Shape({static_cast<size_t>(pad_height_top), static_cast<size_t>(pad_width_left)});
33-
auto ov_pads_end = Shape({static_cast<size_t>(pad_height_bottom), static_cast<size_t>(pad_width_right)});
34-
auto ov_strides = ov::Strides({static_cast<size_t>(stride_height), static_cast<size_t>(stride_width)});
35-
auto ov_kernel = Shape({static_cast<size_t>(kernel_height), static_cast<size_t>(kernel_width)});
36-
auto rounding_type = ceil_mode ? ov::op::RoundingType::CEIL : ov::op::RoundingType::FLOOR;
32+
auto ov_pads_begin = Shape({static_cast<size_t>(pad_height_top),
33+
static_cast<size_t>(pad_width_left)});
34+
auto ov_pads_end = Shape({static_cast<size_t>(pad_height_bottom),
35+
static_cast<size_t>(pad_width_right)});
36+
auto ov_strides = ov::Strides(
37+
{static_cast<size_t>(stride_height), static_cast<size_t>(stride_width)});
38+
auto ov_kernel = Shape(
39+
{static_cast<size_t>(kernel_height), static_cast<size_t>(kernel_width)});
40+
auto rounding_type =
41+
ceil_mode ? ov::op::RoundingType::CEIL : ov::op::RoundingType::FLOOR;
3742
// Create <Pooling> Node for Intel OpenVINO
3843
std::shared_ptr<Node> node{nullptr};
3944
if (operation->type == NNADAPTER_AVERAGE_POOL_2D) {
4045
if (global_pooling) {
41-
auto axes_node = AddConstOutputNode<int64_t>({2}, std::vector<int64_t>({input_operand->type.dimensions.count - 2, input_operand->type.dimensions.count - 1}));
42-
node = std::make_shared<default_opset::ReduceMean>(*input_node, *axes_node, true);
46+
auto axes_node = AddConstOutputNode<int64_t>(
47+
{2},
48+
std::vector<int64_t>({input_operand->type.dimensions.count - 2,
49+
input_operand->type.dimensions.count - 1}));
50+
node = std::make_shared<default_opset::ReduceMean>(
51+
*input_node, *axes_node, true);
4352
} else {
44-
node = std::make_shared<default_opset::AvgPool>(*input_node, ov_strides,
45-
ov_pads_begin, ov_pads_end, ov_kernel, flag, rounding_type, ov_auto_pad);
53+
node = std::make_shared<default_opset::AvgPool>(*input_node,
54+
ov_strides,
55+
ov_pads_begin,
56+
ov_pads_end,
57+
ov_kernel,
58+
flag,
59+
rounding_type,
60+
ov_auto_pad);
4661
}
4762
} else if (operation->type == NNADAPTER_MAX_POOL_2D) {
48-
if (global_pooling) {
49-
auto axes_node = AddConstOutputNode<int64_t>({2}, std::vector<int64_t>({input_operand->type.dimensions.count - 2, input_operand->type.dimensions.count - 1}));
50-
node = std::make_shared<default_opset::ReduceMax>(*input_node, *axes_node, true);
51-
} else {
52-
node = std::make_shared<default_opset::MaxPool>(*input_node, ov_strides, ov::Strides({1, 1}), ov_pads_begin, ov_pads_end, ov_kernel, rounding_type, ov_auto_pad);
53-
}
63+
if (global_pooling) {
64+
auto axes_node = AddConstOutputNode<int64_t>(
65+
{2},
66+
std::vector<int64_t>({input_operand->type.dimensions.count - 2,
67+
input_operand->type.dimensions.count - 1}));
68+
node = std::make_shared<default_opset::ReduceMax>(
69+
*input_node, *axes_node, true);
70+
} else {
71+
node = std::make_shared<default_opset::MaxPool>(*input_node,
72+
ov_strides,
73+
ov::Strides({1, 1}),
74+
ov_pads_begin,
75+
ov_pads_end,
76+
ov_kernel,
77+
rounding_type,
78+
ov_auto_pad);
79+
}
5480
} else {
5581
NNADAPTER_LOG(FATAL) << "Unsupported pooling operation type "
5682
<< OperationTypeToString(operation->type)
5783
<< " is found.";
5884
}
5985
auto output_node = std::make_shared<OutputNode>(node->output(0));
60-
converter->UpdateOutputNodeMap(output_operand, output_node);
86+
converter->UpdateOutputNodeMap(output_operand, output_node);
6187
return NNADAPTER_NO_ERROR;
6288
}
6389

0 commit comments

Comments (0)