[NNAdapter][IntelOpenVINO] Init support for OpenVINO #8552
[NNAdapter][IntelOpenVINO] Init support for OpenVINO #8552 — hong19860320 merged 6 commits into PaddlePaddle:develop from
Conversation
|
Thanks for your contribution! |
40f9e35 to
da793e4
Compare
da793e4 to
73daabf
Compare
| auto mean_node = converter->ConvertToOutputNode(mean_operand); | ||
| auto variance_node = converter->ConvertToOutputNode(variance_operand); | ||
| // Create <BatchNormInference> Node for Intel OpenVINO | ||
| std::shared_ptr<Node> node = |
| auto beta_node = converter->ConvertToOutputNode(bias_operand); | ||
| auto mean_node = converter->ConvertToOutputNode(mean_operand); | ||
| auto variance_node = converter->ConvertToOutputNode(variance_operand); | ||
| // Create <BatchNormInference> Node for Intel OpenVINO |
| return output_node; | ||
| } | ||
|
|
||
| std::shared_ptr<OutputNode> Converter::ConvertToOutputNode( |
There was a problem hiding this comment.
直接用ConvertOperand就行了,不用强调output node
| core::Operation* operation) { | ||
| BATCH_NORMALIZATION_OPERATION_EXTRACT_INPUTS_OUTPUTS | ||
|
|
||
| // Convert operand to Intel OpenVINO's OutputNode |
There was a problem hiding this comment.
Convert to OpenVINO nodes
| BATCH_NORMALIZATION_OPERATION_EXTRACT_INPUTS_OUTPUTS | ||
|
|
||
| // Convert operand to Intel OpenVINO's OutputNode | ||
| auto input_node = converter->GetMappedOutputNode(input_operand); |
There was a problem hiding this comment.
未修改,OutputNode用来修饰方法,变量名用tensor代替
| std::shared_ptr<ov::Core> ov_core_{nullptr}; | ||
| std::map<core::Operand*, std::vector<std::shared_ptr<OutputNode>>> | ||
| output_nodes_; | ||
| std::vector<std::shared_ptr<default_opset::Parameter>> parameter_nodes_; |
There was a problem hiding this comment.
为啥parameter_nodes_,这里不加 ov_前缀? 是不是得统一下?我建议都不加
| Context* context_{nullptr}; | ||
| std::vector<NNAdapterOperandType> input_types_; | ||
| std::vector<NNAdapterOperandType> output_types_; | ||
| std::shared_ptr<ov::Core> ov_core_{nullptr}; |
| output_nodes_; | ||
| std::vector<std::shared_ptr<default_opset::Parameter>> parameter_nodes_; | ||
| std::vector<std::shared_ptr<Node>> result_nodes_; | ||
| std::shared_ptr<ov::CompiledModel> compiled_ov_model_{nullptr}; |
|
|
||
| ElementType ConvertToOVElementType( | ||
| const NNAdapterOperandPrecisionCode& precision_code) { | ||
| std::map<NNAdapterOperandPrecisionCode, ElementType> precision_map{ |
There was a problem hiding this comment.
为啥要用map,用switch 不是更简单更快吗?
|
|
||
| #define FUNCTION_ADD_CONST_OUTPUT_NODE_DEFINE(type, element_type) \ | ||
| template <> \ | ||
| std::shared_ptr<OutputNode> AddConstOutputNode<type>( \ |
There was a problem hiding this comment.
用CreateOVConstantNode或AddOVConstantNode?
| UpdateOutputNodeMap(operand, output_node); | ||
| return output_node; | ||
| } | ||
| NNADAPTER_LOG(FATAL) << "Only constant and model input operands can be " |
There was a problem hiding this comment.
直接用OpenVINO吧,不要加Intel了,有点显得多余
| } | ||
| return Shape(ov_shape); | ||
| } | ||
| // Add const ov::Node and return ov::Output<ov::Node> |
There was a problem hiding this comment.
为啥不直接在utility.h定义
// Convert C/C++ POD types to ElementType
template <typename T>
ElementType GetElementType() {
  NNADAPTER_LOG(FATAL) << "Unable to convert " << typeid(T).name() << " to ElementType";
  return ov::element::f32;
}
template <>
ElementType GetElementType<float>();
template <>
ElementType GetElementType<int8_t>();
template <>
ElementType GetElementType<int16_t>();
....
template <typename T>
std::shared_ptr<OutputNode> AddConstOutputNode(
    std::vector<size_t> dimensions, std::vector<T> values) {
  auto const_node = std::make_shared<default_opset::Constant>(
      GetElementType<T>(), Shape(dimensions), values);
  return std::make_shared<OutputNode>(const_node->output(0));
}
| ConvertElementwise, | ||
| "huawei_ascend_npu,huawei_kirin_npu,imagination_nna,kunlunxin_xtcl"); | ||
| "kunlunxin_xtcl,cambricon_mlu,android_nnapi,intel_openvino"); | ||
| REGISTER_CONVERTER(elementwise_max, |
558a882 to
8b5ea93
Compare
8b5ea93 to
2fae09d
Compare
| REGISTER_CONVERTER(SUB, ConvertElementwise) | ||
| REGISTER_CONVERTER(TANH, ConvertUnaryActivations) | ||
|
|
||
| #endif // LITE_BACKENDS_NNADAPTER_NNADAPTER_SRC_DRIVER_INTEL_OPENVINO_CONVERTER_ALL_H_ |
There was a problem hiding this comment.
NNADAPTER_DRIVER_INTEL_OPENVINO_CONVERTER_ALL_H
| int ConvertSoftmax(Converter* converter, core::Operation* operation) { | ||
| SOFTMAX_OPERATION_EXTRACT_INPUTS_OUTPUTS | ||
|
|
||
| // Convert operand to OpenVINO Tensor |
There was a problem hiding this comment.
Tensor 第一个字母小写,其它地方都改一下
2fae09d to
1d39f14
Compare
./lite/tools/build_linux.sh --arch=x86 --with_nnadapter=ON --nnadapter_with_intel_openvino=ON --nnadapter_intel_openvino_sdk_root=${nnadapter_intel_openvino_sdk_root}