5 changes: 4 additions & 1 deletion docs/cn/build_and_install/kunlunxin.md
@@ -23,13 +23,16 @@ FastDeploy supports deployment and inference on KunlunXin XPU based on the Paddle Lite backend
| ORT_DIRECTORY | When the ONNX Runtime backend is enabled, specifies the path to the user's local ONNX Runtime library; if not set, the build downloads the ONNX Runtime library automatically |
| OPENCV_DIRECTORY | When ENABLE_VISION=ON, specifies the path to the user's local OpenCV library; if not set, the build downloads the OpenCV library automatically |
| OPENVINO_DIRECTORY | When the OpenVINO backend is enabled, specifies the path to the user's local OpenVINO library; if not set, the build downloads the OpenVINO library automatically |

For more build options, see the [FastDeploy build options guide](./README.md); a minimal sketch of passing these library paths is shown below.
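
For illustration only, a minimal sketch of pointing the build at locally installed libraries; every path below is a placeholder for your own installation, and each `*_DIRECTORY` option only takes effect when the corresponding backend (or `ENABLE_VISION`) is enabled:

```
cmake .. \
      -DORT_DIRECTORY=/path/to/onnxruntime \
      -DOPENCV_DIRECTORY=/path/to/opencv \
      -DOPENVINO_DIRECTORY=/path/to/openvino
```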

## Building the Paddle Lite based C++ FastDeploy Library
- OS: Linux
- gcc/g++: version >= 8.2
- cmake: version >= 3.15

In addition, it is recommended that developers install OpenCV themselves and point the build to the OpenCV in their environment via `-DOPENCV_DIRECTORY` (if -DOPENCV_DIRECTORY is not specified, the prebuilt OpenCV provided by FastDeploy is downloaded automatically, but on the **Linux platform** it cannot read video or provide visualization features such as imshow).

```
sudo apt-get install libopencv-dev
```
@@ -47,7 +50,7 @@ cmake -DWITH_KUNLUNXIN=ON \
      -DENABLE_PADDLE_BACKEND=ON \ # optional, enable the Paddle backend
      -DCMAKE_INSTALL_PREFIX=fastdeploy-kunlunxin \
      -DENABLE_VISION=ON \ # optional, build the deployment module for vision models
-     -DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 \
+     -DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 \ # path to the OpenCV shipped with the system
..

# Build FastDeploy KunlunXin XPU C++ SDK
3 changes: 3 additions & 0 deletions examples/vision/detection/paddledetection/cpp/CMakeLists.txt
@@ -42,6 +42,9 @@ target_link_libraries(infer_yolov6_demo ${FASTDEPLOY_LIBS})
add_executable(infer_yolov7_demo ${PROJECT_SOURCE_DIR}/infer_yolov7.cc)
target_link_libraries(infer_yolov7_demo ${FASTDEPLOY_LIBS})

add_executable(infer_yolov8_demo ${PROJECT_SOURCE_DIR}/infer_yolov8.cc)
target_link_libraries(infer_yolov8_demo ${FASTDEPLOY_LIBS})

add_executable(infer_rtmdet_demo ${PROJECT_SOURCE_DIR}/infer_rtmdet.cc)
target_link_libraries(infer_rtmdet_demo ${FASTDEPLOY_LIBS})

159 changes: 159 additions & 0 deletions examples/vision/detection/paddledetection/cpp/infer_yolov8.cc
@@ -0,0 +1,159 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision.h"

#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif

void CpuInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "infer_cfg.yml";
  auto option = fastdeploy::RuntimeOption();
  option.UseCpu();
  auto model = fastdeploy::vision::detection::PaddleYOLOv8(
      model_file, params_file, config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);
  auto im_bak = im.clone();

  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "infer_cfg.yml";
  auto option = fastdeploy::RuntimeOption();
  option.UseKunlunXin();
  auto model = fastdeploy::vision::detection::PaddleYOLOv8(
      model_file, params_file, config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);
  auto im_bak = im.clone();

  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void GpuInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "infer_cfg.yml";

  auto option = fastdeploy::RuntimeOption();
  option.UseGpu();
  auto model = fastdeploy::vision::detection::PaddleYOLOv8(
      model_file, params_file, config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);
  auto im_bak = im.clone();

  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void TrtInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "infer_cfg.yml";

  auto option = fastdeploy::RuntimeOption();
  option.UseGpu();
  option.UseTrtBackend();
  auto model = fastdeploy::vision::detection::PaddleYOLOv8(
      model_file, params_file, config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

int main(int argc, char* argv[]) {
  if (argc < 4) {
    std::cout
        << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
           "e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
        << std::endl;
    std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
                 "with gpu; 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
              << std::endl;
    return -1;
  }

  if (std::atoi(argv[3]) == 0) {
    CpuInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 1) {
    GpuInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 2) {
    TrtInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 3) {
    KunlunXinInfer(argv[1], argv[2]);
  }
  return 0;
}
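
A usage sketch for the demo above, following its own usage message; the model directory and image names are placeholders, and the binary name `infer_yolov8_demo` comes from the accompanying CMakeLists.txt:

```
# run_option: 0 = CPU, 1 = GPU, 2 = GPU + TensorRT, 3 = KunlunXin XPU
./infer_yolov8_demo ./path/to/yolov8_model_dir ./test.jpeg 0
./infer_yolov8_demo ./path/to/yolov8_model_dir ./test.jpeg 3
```
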
62 changes: 62 additions & 0 deletions examples/vision/detection/paddledetection/python/infer_yolov8.py
@@ -0,0 +1,62 @@
import fastdeploy as fd
import cv2
import os


def parse_arguments():
    import argparse
    import ast
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_dir",
        required=True,
        help="Path of PaddleDetection model directory")
    parser.add_argument(
        "--image", required=True, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use tensorrt.")
    return parser.parse_args()


def build_option(args):
    option = fd.RuntimeOption()

    if args.device.lower() == "kunlunxin":
        option.use_kunlunxin()

    if args.device.lower() == "gpu":
        option.use_gpu()

    if args.use_trt:
        option.use_trt_backend()
    return option


args = parse_arguments()

model_file = os.path.join(args.model_dir, "model.pdmodel")
params_file = os.path.join(args.model_dir, "model.pdiparams")
config_file = os.path.join(args.model_dir, "infer_cfg.yml")

# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.PaddleYOLOv8(
    model_file, params_file, config_file, runtime_option=runtime_option)

# Run detection on the image
im = cv2.imread(args.image)
result = model.predict(im.copy())
print(result)

# Visualize the prediction result
vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result saved in ./visualized_result.jpg")
37 changes: 14 additions & 23 deletions fastdeploy/pybind/runtime.cc
100755 → 100644
@@ -37,12 +37,17 @@ void BindRuntime(pybind11::module& m) {
.def("use_openvino_backend", &RuntimeOption::UseOpenVINOBackend)
.def("use_lite_backend", &RuntimeOption::UseLiteBackend)
.def("set_lite_device_names", &RuntimeOption::SetLiteDeviceNames)
.def("set_lite_context_properties", &RuntimeOption::SetLiteContextProperties)
.def("set_lite_context_properties",
&RuntimeOption::SetLiteContextProperties)
.def("set_lite_model_cache_dir", &RuntimeOption::SetLiteModelCacheDir)
.def("set_lite_dynamic_shape_info", &RuntimeOption::SetLiteDynamicShapeInfo)
.def("set_lite_subgraph_partition_path", &RuntimeOption::SetLiteSubgraphPartitionPath)
.def("set_lite_mixed_precision_quantization_config_path", &RuntimeOption::SetLiteMixedPrecisionQuantizationConfigPath)
.def("set_lite_subgraph_partition_config_buffer", &RuntimeOption::SetLiteSubgraphPartitionConfigBuffer)
.def("set_lite_dynamic_shape_info",
&RuntimeOption::SetLiteDynamicShapeInfo)
.def("set_lite_subgraph_partition_path",
&RuntimeOption::SetLiteSubgraphPartitionPath)
.def("set_lite_mixed_precision_quantization_config_path",
&RuntimeOption::SetLiteMixedPrecisionQuantizationConfigPath)
.def("set_lite_subgraph_partition_config_buffer",
&RuntimeOption::SetLiteSubgraphPartitionConfigBuffer)
.def("set_paddle_mkldnn", &RuntimeOption::SetPaddleMKLDNN)
.def("set_openvino_device", &RuntimeOption::SetOpenVINODevice)
.def("set_openvino_shape_info", &RuntimeOption::SetOpenVINOShapeInfo)
@@ -114,21 +119,7 @@ void BindRuntime(pybind11::module& m) {
.def_readwrite("ipu_available_memory_proportion",
&RuntimeOption::ipu_available_memory_proportion)
.def_readwrite("ipu_enable_half_partial",
&RuntimeOption::ipu_enable_half_partial)
.def_readwrite("kunlunxin_l3_workspace_size",
&RuntimeOption::kunlunxin_l3_workspace_size)
.def_readwrite("kunlunxin_locked",
&RuntimeOption::kunlunxin_locked)
.def_readwrite("kunlunxin_autotune",
&RuntimeOption::kunlunxin_autotune)
.def_readwrite("kunlunxin_autotune_file",
&RuntimeOption::kunlunxin_autotune_file)
.def_readwrite("kunlunxin_precision",
&RuntimeOption::kunlunxin_precision)
.def_readwrite("kunlunxin_adaptive_seqlen",
&RuntimeOption::kunlunxin_adaptive_seqlen)
.def_readwrite("kunlunxin_enable_multi_stream",
&RuntimeOption::kunlunxin_enable_multi_stream);
&RuntimeOption::ipu_enable_half_partial);

pybind11::class_<TensorInfo>(m, "TensorInfo")
.def_readwrite("name", &TensorInfo::name)
@@ -151,9 +142,9 @@ void BindRuntime(pybind11::module& m) {
          auto dtype =
              NumpyDataTypeToFDDataType(warm_datas[i][j].dtype());
          std::vector<int64_t> data_shape;
-         data_shape.insert(data_shape.begin(), warm_datas[i][j].shape(),
-                           warm_datas[i][j].shape() +
-                               warm_datas[i][j].ndim());
+         data_shape.insert(
+             data_shape.begin(), warm_datas[i][j].shape(),
+             warm_datas[i][j].shape() + warm_datas[i][j].ndim());
          warm_tensors[i][j].Resize(data_shape, dtype);
          memcpy(warm_tensors[i][j].MutableData(),
                 warm_datas[i][j].mutable_data(),