From 33d3ada6ef97c5cfdff92c701fdbab8f6aa7802f Mon Sep 17 00:00:00 2001
From: DefTruth <31974251+DefTruth@users.noreply.github.com>
Date: Mon, 15 Aug 2022 13:28:37 +0800
Subject: [PATCH 1/6] [feature][docs] add prebuilt windows python whl and cpp
libs (#113)
[feature][docs] add windows python whl and cpp libs
---
docs/compile/prebuilt_libraries.md | 10 +++++-----
docs/compile/prebuilt_wheels.md | 11 ++++++++---
2 files changed, 13 insertions(+), 8 deletions(-)
diff --git a/docs/compile/prebuilt_libraries.md b/docs/compile/prebuilt_libraries.md
index 6cec3721acd..bd58fc4b0b8 100644
--- a/docs/compile/prebuilt_libraries.md
+++ b/docs/compile/prebuilt_libraries.md
@@ -19,17 +19,17 @@ FastDeploy provides prebuilt C++ deployment libraries on Windows/Linux/Mac; developers
### Windows 10 x64 platform
-| Download link | Hardware |
-| :------------- | :--- |
-| [coming...] | CPU |
-| [coming...] | CPU/GPU |
+| Download link | Hardware | Notes |
+| :------------- | :--- | :--- |
+| [fastdeploy-win-x64-0.2.0](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-win-x64-0.2.0.zip) | CPU | Built with Visual Studio 16 2019 |
+| [fastdeploy-win-x64-gpu-0.2.0](https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-win-x64-gpu-0.2.0.zip) | CPU/GPU | Built with Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |
### Linux aarch64 platform
| Package | Hardware |
| :---- | :-- |
| [coming...] | CPU |
-| [coming...] | Jetson |
+| [coming...] | Jetson |
### Mac OSX platform
diff --git a/docs/compile/prebuilt_wheels.md b/docs/compile/prebuilt_wheels.md
index 14ba7d40044..e3ada892e11 100644
--- a/docs/compile/prebuilt_wheels.md
+++ b/docs/compile/prebuilt_wheels.md
@@ -38,15 +38,20 @@ python -m pip install fastdeploy_python-0.2.0-cp38-cp38-manylinux1_x86_64.whl
| CPU package | Hardware | Python version |
| :---- | :-- | :------ |
-| [coming...] | CPU | 3.8 |
-| [coming...] | CPU | 3.9 |
+| [fastdeploy_python-0.2.0-cp38-cp38-win_amd64.whl](https://bj.bcebos.com/paddlehub/fastdeploy/wheels/fastdeploy_python-0.2.0-cp38-cp38-win_amd64.whl) | CPU | 3.8 |
+| [fastdeploy_python-0.2.0-cp39-cp39-win_amd64.whl](https://bj.bcebos.com/paddlehub/fastdeploy/wheels/fastdeploy_python-0.2.0-cp39-cp39-win_amd64.whl) | CPU | 3.9 |
+
+| GPU package | Hardware | Python version |
+| :---- | :-- | :------ |
+| [fastdeploy_gpu_python-0.2.0-cp38-cp38-win_amd64.whl](https://bj.bcebos.com/paddlehub/fastdeploy/wheels/fastdeploy_gpu_python-0.2.0-cp38-cp38-win_amd64.whl) | CPU/GPU | 3.8 |
+| [fastdeploy_gpu_python-0.2.0-cp39-cp39-win_amd64.whl](https://bj.bcebos.com/paddlehub/fastdeploy/wheels/fastdeploy_gpu_python-0.2.0-cp39-cp39-win_amd64.whl) | CPU/GPU | 3.9 |
### Linux aarch64 platform
| Package | Hardware | Python version |
| :---- | :-- | :------ |
| [coming...] | CPU | 3.7 |
-| [coming...] | CPU | 3.8 |
+| [coming...] | CPU | 3.8 |
| [coming...] | CPU | 3.9 |
### Mac OSX platform
From 835bb39b107779f60defe5084b72364047140227 Mon Sep 17 00:00:00 2001
From: Jack Zhou
Date: Mon, 15 Aug 2022 13:28:48 +0800
Subject: [PATCH 2/6] Add ernie ie task (#100)
---
FastDeploy.cmake.in | 4 +
csrc/fastdeploy/core/config.h.in | 8 +
.../ernie/cpp/CMakeLists.txt | 25 +++
.../information_extraction/ernie/cpp/infer.cc | 182 ++++++++++++++++++
4 files changed, 219 insertions(+)
create mode 100644 examples/text/information_extraction/ernie/cpp/CMakeLists.txt
create mode 100644 examples/text/information_extraction/ernie/cpp/infer.cc
diff --git a/FastDeploy.cmake.in b/FastDeploy.cmake.in
index 082fa30f30b..ccd1039be29 100644
--- a/FastDeploy.cmake.in
+++ b/FastDeploy.cmake.in
@@ -95,6 +95,10 @@ endif()
if (ENABLE_TEXT)
# Add dependency libs later
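+ # Locate the prebuilt faster_tokenizer library shipped inside the SDK and
+ # expose its headers (including bundled third-party headers) to consumers.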
+ find_library(FASTER_TOKENIZER_LIB core_tokenizers ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/faster_tokenizer/lib NO_DEFAULT_PATH)
+ list(APPEND FASTDEPLOY_LIBS ${FASTER_TOKENIZER_LIB})
+ list(APPEND FASTDEPLOY_INCS ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/faster_tokenizer/include)
+ list(APPEND FASTDEPLOY_INCS ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/faster_tokenizer/third_party/include)
endif()
if(ENABLE_PADDLE_FRONTEND)
diff --git a/csrc/fastdeploy/core/config.h.in b/csrc/fastdeploy/core/config.h.in
index 7713925867a..b29113f1fdb 100644
--- a/csrc/fastdeploy/core/config.h.in
+++ b/csrc/fastdeploy/core/config.h.in
@@ -45,6 +45,10 @@
#cmakedefine ENABLE_VISION
#endif
+#ifndef ENABLE_TEXT
+#cmakedefine ENABLE_TEXT
+#endif
+
#ifndef ENABLE_OPENCV_CUDA
#cmakedefine ENABLE_OPENCV_CUDA
#endif
@@ -52,3 +56,7 @@
#ifndef ENABLE_VISION_VISUALIZE
#cmakedefine ENABLE_VISION_VISUALIZE
#endif
+
+#ifndef ENABLE_FDTENSOR_FUNC
+#cmakedefine ENABLE_FDTENSOR_FUNC
+#endif
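These `#cmakedefine` entries become plain `#define`s in the generated `config.h` when the matching CMake option is on. A minimal sketch of how a consumer can branch on the new `ENABLE_TEXT` guard (assuming the generated header is on the include path):

```
#include <iostream>

#include "fastdeploy/core/config.h"  // generated from config.h.in above

int main() {
#ifdef ENABLE_TEXT
  std::cout << "FastDeploy was built with text support." << std::endl;
#else
  std::cout << "Text support is disabled in this build." << std::endl;
#endif
  return 0;
}
```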
diff --git a/examples/text/information_extraction/ernie/cpp/CMakeLists.txt b/examples/text/information_extraction/ernie/cpp/CMakeLists.txt
new file mode 100644
index 00000000000..1189820cb79
--- /dev/null
+++ b/examples/text/information_extraction/ernie/cpp/CMakeLists.txt
@@ -0,0 +1,25 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+PROJECT(infer_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.12)
+
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(infer_ernie_demo ${PROJECT_SOURCE_DIR}/infer.cc)
+target_link_libraries(infer_ernie_demo ${FASTDEPLOY_LIBS})
diff --git a/examples/text/information_extraction/ernie/cpp/infer.cc b/examples/text/information_extraction/ernie/cpp/infer.cc
new file mode 100644
index 00000000000..7f3b9318664
--- /dev/null
+++ b/examples/text/information_extraction/ernie/cpp/infer.cc
@@ -0,0 +1,182 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include <fstream>
+#include <iostream>
+#include <sstream>
+
+#include "fastdeploy/function/reduce.h"
+#include "fastdeploy/function/softmax.h"
+#include "fastdeploy/text.h"
+#include "tokenizers/ernie_faster_tokenizer.h"
+
+using namespace paddlenlp;
+
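+// Reads a whitespace-separated matrix of CRF transition scores from `file`
+// into `transitions` (row-major) and reports the number of rows via num_tags.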
+void LoadTransitionFromFile(const std::string& file,
+ std::vector<float>* transitions, int* num_tags) {
+ std::ifstream fin(file);
+ std::string curr_transition;
+ float transition;
+ int i = 0;
+ while (fin) {
+ std::getline(fin, curr_transition);
+ std::istringstream iss(curr_transition);
+ while (iss) {
+ iss >> transition;
+ transitions->push_back(transition);
+ }
+ if (curr_transition != "") {
+ ++i;
+ }
+ }
+ *num_tags = i;
+}
+
+template <typename T>
+void ViterbiDecode(const fastdeploy::FDTensor& slot_logits,
+ const fastdeploy::FDTensor& trans,
+ fastdeploy::FDTensor* best_path) {
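+ // Standard Viterbi recurrence: M[i][j] scores moving from tag i to tag j at
+ // the current timestep; `paths` keeps the argmax predecessors so the best
+ // tag sequence can be recovered by the backward pass at the end.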
+ int batch_size = slot_logits.shape[0];
+ int seq_len = slot_logits.shape[1];
+ int num_tags = slot_logits.shape[2];
+ best_path->Allocate({batch_size, seq_len}, fastdeploy::FDDataType::INT64);
+
+ const T* slot_logits_ptr = reinterpret_cast<const T*>(slot_logits.Data());
+ const T* trans_ptr = reinterpret_cast<const T*>(trans.Data());
+ int64_t* best_path_ptr = reinterpret_cast<int64_t*>(best_path->Data());
+ std::vector<T> scores(num_tags);
+ std::copy(slot_logits_ptr, slot_logits_ptr + num_tags, scores.begin());
+ std::vector<std::vector<T>> M(num_tags, std::vector<T>(num_tags));
+ for (int b = 0; b < batch_size; ++b) {
+ std::vector<std::vector<int>> paths;
+ const T* curr_slot_logits_ptr = slot_logits_ptr + b * seq_len * num_tags;
+ int64_t* curr_best_path_ptr = best_path_ptr + b * seq_len;
+ for (int t = 1; t < seq_len; t++) {
+ for (size_t i = 0; i < num_tags; i++) {
+ for (size_t j = 0; j < num_tags; j++) {
+ auto trans_idx = i * num_tags * num_tags + j * num_tags;
+ auto slot_logit_idx = t * num_tags + j;
+ M[i][j] = scores[i] + trans_ptr[trans_idx] +
+ curr_slot_logits_ptr[slot_logit_idx];
+ }
+ }
+ std::vector<int> idxs;
+ for (size_t i = 0; i < num_tags; i++) {
+ T max = 0.0f;
+ int idx = 0;
+ for (size_t j = 0; j < num_tags; j++) {
+ if (M[j][i] > max) {
+ max = M[j][i];
+ idx = j;
+ }
+ }
+ scores[i] = max;
+ idxs.push_back(idx);
+ }
+ paths.push_back(idxs);
+ }
+ int scores_max_index = 0;
+ float scores_max = 0.0f;
+ for (size_t i = 0; i < scores.size(); i++) {
+ if (scores[i] > scores_max) {
+ scores_max = scores[i];
+ scores_max_index = i;
+ }
+ }
+ curr_best_path_ptr[seq_len - 1] = scores_max_index;
+ for (int i = seq_len - 2; i >= 0; i--) {
+ int index = curr_best_path_ptr[i + 1];
+ curr_best_path_ptr[i] = paths[i][index];
+ }
+ }
+}
+
+int main() {
+ // 1. Define an ernie faster tokenizer
+ faster_tokenizer::tokenizers_impl::ErnieFasterTokenizer tokenizer(
+ "ernie_vocab.txt");
+ std::vector<std::string> strings_list = {
+ "导航去科技园二号楼", "屏幕亮度为我减小一点吧"};
+ std::vector<faster_tokenizer::core::Encoding> encodings;
+ tokenizer.EncodeBatchStrings(strings_list, &encodings);
+ size_t batch_size = strings_list.size();
+ size_t seq_len = encodings[0].GetLen();
+ for (auto&& encoding : encodings) {
+ std::cout << encoding.DebugString() << std::endl;
+ }
+ // 2. Initialize runtime
+ fastdeploy::RuntimeOption runtime_option;
+ runtime_option.SetModelPath("nano_static/model.pdmodel",
+ "nano_static/model.pdiparams");
+ fastdeploy::Runtime runtime;
+ runtime.Init(runtime_option);
+
+ // 3. Construct input vector
+ // 3.1 Convert encodings to input_ids, token_type_ids
+ std::vector<int64_t> input_ids, token_type_ids;
+ for (int i = 0; i < encodings.size(); ++i) {
+ auto&& curr_input_ids = encodings[i].GetIds();
+ auto&& curr_type_ids = encodings[i].GetTypeIds();
+ input_ids.insert(input_ids.end(), curr_input_ids.begin(),
+ curr_input_ids.end());
+ token_type_ids.insert(token_type_ids.end(), curr_type_ids.begin(),
+ curr_type_ids.end());
+ }
+ // 3.2 Set data to input vector
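+ // SetExternalData wraps the existing buffers without copying, so
+ // input_ids/token_type_ids must stay alive until Infer() returns.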
+ std::vector<fastdeploy::FDTensor> inputs(runtime.NumInputs());
+ void* inputs_ptrs[] = {input_ids.data(), token_type_ids.data()};
+ for (int i = 0; i < runtime.NumInputs(); ++i) {
+ inputs[i].SetExternalData({batch_size, seq_len},
+ fastdeploy::FDDataType::INT64, inputs_ptrs[i]);
+ inputs[i].name = runtime.GetInputInfo(i).name;
+ }
+
+ // 4. Infer
+ std::vector<fastdeploy::FDTensor> outputs(runtime.NumOutputs());
+ runtime.Infer(inputs, &outputs);
+
+ // 5. Postprocess
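+ // Softmax turns the raw logits into probabilities; Max(..., {-1}, true)
+ // keeps the largest probability along the last axis (keepdim).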
+ fastdeploy::FDTensor domain_probs, intent_probs;
+ fastdeploy::Softmax(outputs[0], &domain_probs);
+ fastdeploy::Softmax(outputs[1], &intent_probs);
+
+ fastdeploy::FDTensor domain_max_probs, intent_max_probs;
+ fastdeploy::Max(domain_probs, &domain_max_probs, {-1}, true);
+ fastdeploy::Max(intent_probs, &intent_max_probs, {-1}, true);
+
+ std::vector<float> transition;
+ int num_tags;
+ LoadTransitionFromFile("joint_transition.txt", &transition, &num_tags);
+ fastdeploy::FDTensor trans;
+ trans.SetExternalData({num_tags, num_tags}, fastdeploy::FDDataType::FP32,
+ transition.data());
+
+ fastdeploy::FDTensor best_path;
+ ViterbiDecode<float>(outputs[2], trans, &best_path);
+ // 6. Print result
+ domain_max_probs.PrintInfo();
+ intent_max_probs.PrintInfo();
+
+ batch_size = best_path.shape[0];
+ seq_len = best_path.shape[1];
+ const int64_t* best_path_ptr =
+ reinterpret_cast<const int64_t*>(best_path.Data());
+ for (int i = 0; i < batch_size; ++i) {
+ std::cout << "best_path[" << i << "] = ";
+ for (int j = 0; j < seq_len; ++j) {
+ std::cout << best_path_ptr[i * seq_len + j] << ", ";
+ }
+ std::cout << std::endl;
+ }
+ best_path.PrintInfo();
+ return 0;
+}
From 773d6bb938b77b8753feca11ed8607bf480860c3 Mon Sep 17 00:00:00 2001
From: ziqi-jin <67993288+ziqi-jin@users.noreply.github.com>
Date: Mon, 15 Aug 2022 13:29:23 +0800
Subject: [PATCH 3/6] Modify API Result Docs (#112)
* first commit for yolov7
* pybind for yolov7
* CPP README.md
* CPP README.md
* modified yolov7.cc
* README.md
* python file modify
* delete license in fastdeploy/
* repush the conflict part
* README.md modified
* README.md modified
* file path modified
* file path modified
* file path modified
* file path modified
* file path modified
* README modified
* README modified
* move some helpers to private
* add examples for yolov7
* api.md modified
* api.md modified
* api.md modified
* YOLOv7
* yolov7 release link
* yolov7 release link
* yolov7 release link
* copyright
* change some helpers to private
* change variables to const and fix documents.
* gitignore
* Transfer some functions to private members of class
* Transfer some functions to private members of class
* Merge from develop (#9)
* Fix compile problem in different python version (#26)
* fix some usage problem in linux
* Fix compile problem
Co-authored-by: root
* Add PaddleDetection/PPYOLOE model support (#22)
* add ppdet/ppyoloe
* Add demo code and documents
* add convert processor to vision (#27)
* update .gitignore
* Added checking for cmake include dir
* fixed missing trt_backend option bug when init from trt
* remove un-need data layout and add pre-check for dtype
* changed RGB2BRG to BGR2RGB in ppcls model
* add model_zoo yolov6 c++/python demo
* fixed CMakeLists.txt typos
* update yolov6 cpp/README.md
* add yolox c++/pybind and model_zoo demo
* move some helpers to private
* fixed CMakeLists.txt typos
* add normalize with alpha and beta
* add version notes for yolov5/yolov6/yolox
* add copyright to yolov5.cc
* revert normalize
* fixed some bugs in yolox
* fixed examples/CMakeLists.txt to avoid conflicts
* add convert processor to vision
* format examples/CMakeLists summary
* Fix bug while the inference result is empty with YOLOv5 (#29)
* Add multi-label function for yolov5
* Update README.md
Update doc
* Update fastdeploy_runtime.cc
fix variable option.trt_max_shape wrong name
* Update runtime_option.md
Update resnet model dynamic shape setting name from images to x
* Fix bug when inference result boxes are empty
* Delete detection.py
Co-authored-by: Jason
Co-authored-by: root
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
Co-authored-by: huangjianhui <852142024@qq.com>
* first commit for yolor
* for merge
* Develop (#11)
* Yolor (#16)
* Develop (#13)
* documents
* documents
* documents
* documents
* documents
* documents
* documents
* documents
* documents
* documents
* documents
* documents
* Develop (#14)
Co-authored-by: Jason <928090362@qq.com>
* add is_dynamic for YOLO series (#22)
* first commit test photo
* yolov7 doc
* yolov7 doc
* yolov7 doc
* yolov7 doc
* add yolov5 docs
* modify yolov5 doc
* first commit for retinaface
* first commit for retinaface
* firt commit for ultraface
* firt commit for ultraface
* firt commit for yolov5face
* firt commit for modnet and arcface
* firt commit for modnet and arcface
* first commit for partial_fc
* first commit for partial_fc
* first commit for yolox
* first commit for yolov6
* first commit for nano_det
* first commit for scrfd
* first commit for scrfd
* first commit for retinaface
* first commit for ultraface
* first commit for yolov5face
* first commit for yolox yolov6 nano
* rm jpg
* first commit for modnet and modify nano
* yolor scaledyolov4 v5lite
* first commit for insightface
* first commit for insightface
* first commit for insightface
* docs
* docs
* docs
* docs
* docs
* add print for detect and modify docs
* docs
* docs
* docs
* docs test for insightface
* docs test for insightface again
* docs test for insightface
* modify all wrong expressions in docs
* modify all wrong expressions in docs
* modify all wrong expressions in docs
* modify all wrong expressions in docs
* modify docs expressions
* fix expression of detection part
* fix expression of detection part
* fix expression of detection part
* add face recognition result doc
* modify result docs
* modify result docs
* modify result docs
Co-authored-by: Jason
Co-authored-by: root
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
Co-authored-by: huangjianhui <852142024@qq.com>
Co-authored-by: Jason <928090362@qq.com>
---
docs/api/vision_results/README.md | 5 ++--
.../vision_results/classification_result.md | 4 ++--
docs/api/vision_results/detection_result.md | 6 ++---
.../vision_results/face_detection_result.md | 8 +++----
.../vision_results/face_recognition_result.md | 24 +++++++++++++++++++
docs/api/vision_results/matting_result.md | 10 ++++----
examples/vision/README.md | 1 +
7 files changed, 42 insertions(+), 16 deletions(-)
create mode 100644 docs/api/vision_results/face_recognition_result.md
diff --git a/docs/api/vision_results/README.md b/docs/api/vision_results/README.md
index 844388cca86..64ea4fc671b 100644
--- a/docs/api/vision_results/README.md
+++ b/docs/api/vision_results/README.md
@@ -6,5 +6,6 @@ FastDeploy defines different result structs per vision task type (`csrcs
| :----- | :--- | :---- | :------- |
| ClassificationResult | [C++/Python docs](./classification_result.md) | Image classification result | ResNet50, MobileNetV3, etc. |
| DetectionResult | [C++/Python docs](./detection_result.md) | Object detection result | PPYOLOE, YOLOv7 series, etc. |
-| FaceDetectionResult | [C++/Python docs](./face_detection_result.md) | Object detection result | PPYOLOE, YOLOv7 series, etc. |
-| MattingResult | [C++/Python docs](./matting_result.md) | Object detection result | PPYOLOE, YOLOv7 series, etc. |
+| FaceDetectionResult | [C++/Python docs](./face_detection_result.md) | Face detection result | SCRFD, RetinaFace series, etc. |
+| FaceRecognitionResult | [C++/Python docs](./face_recognition_result.md) | Face recognition result | ArcFace, CosFace series, etc. |
+| MattingResult | [C++/Python docs](./matting_result.md) | Matting result | MODNet series, etc. |
diff --git a/docs/api/vision_results/classification_result.md b/docs/api/vision_results/classification_result.md
index 113db39608a..bf94d0ff159 100644
--- a/docs/api/vision_results/classification_result.md
+++ b/docs/api/vision_results/classification_result.md
@@ -2,7 +2,7 @@
ClassifyResult is defined in `csrcs/fastdeploy/vision/common/result.h` and describes the classification result and confidence of an image.
-## C++ struct
+## C++ definition
`fastdeploy::vision::ClassifyResult`
@@ -20,7 +20,7 @@ struct ClassifyResult {
- **Clear()**: member function that clears the results stored in the struct
- **Str()**: member function that dumps the struct's contents as a string (for debugging)
-## Python struct
+## Python definition
`fastdeploy.vision.ClassifyResult`
diff --git a/docs/api/vision_results/detection_result.md b/docs/api/vision_results/detection_result.md
index e44a27b34c3..a702d49899f 100644
--- a/docs/api/vision_results/detection_result.md
+++ b/docs/api/vision_results/detection_result.md
@@ -2,7 +2,7 @@
DetectionResult is defined in `csrcs/fastdeploy/vision/common/result.h` and describes the detected boxes, categories and confidences in an image.
-## C++ struct
+## C++ definition
`fastdeploy::vision::DetectionResult`
@@ -22,10 +22,10 @@ struct DetectionResult {
- **Clear()**: member function that clears the results stored in the struct
- **Str()**: member function that dumps the struct's contents as a string (for debugging)
-## Python struct
+## Python definition
`fastdeploy.vision.DetectionResult`
- **boxes**(list of list(float)): member variable holding the coordinates of every detected box in a single image. boxes is a list whose elements are length-4 lists, each describing one box with 4 floats: xmin, ymin, xmax, ymax, i.e. the top-left and bottom-right corners
- **scores**(list of float): member variable holding the confidence of every detected object in a single image
-- **label_ids(list of int): member variable holding the category of every detected object in a single image
+- **label_ids**(list of int): member variable holding the category of every detected object in a single image
diff --git a/docs/api/vision_results/face_detection_result.md b/docs/api/vision_results/face_detection_result.md
index 6c9c09f0073..000b42a6be0 100644
--- a/docs/api/vision_results/face_detection_result.md
+++ b/docs/api/vision_results/face_detection_result.md
@@ -1,8 +1,8 @@
# FaceDetectionResult Face Detection Result
-FaceDetectionResult is defined in `csrcs/fastdeploy/vision/common/result.h` and describes the detected boxes, categories and confidences in an image.
+FaceDetectionResult is defined in `csrcs/fastdeploy/vision/common/result.h` and describes the detected face boxes, face landmarks, confidences, and the number of landmarks per face.
-## C++ struct
+## C++ definition
`fastdeploy::vision::FaceDetectionResult`
@@ -11,7 +11,6 @@ struct FaceDetectionResult {
std::vector<std::array<float, 4>> boxes;
std::vector<std::array<float, 2>> landmarks;
std::vector<float> scores;
- ResultType type = ResultType::FACE_DETECTION;
int landmarks_per_face;
void Clear();
std::string Str();
@@ -25,10 +24,11 @@ struct FaceDetectionResult {
- **Clear()**: member function that clears the results stored in the struct
- **Str()**: member function that dumps the struct's contents as a string (for debugging)
-## Python struct
+## Python definition
`fastdeploy.vision.FaceDetectionResult`
- **boxes**(list of list(float)): member variable holding the coordinates of every detected box in a single image. boxes is a list whose elements are length-4 lists, each describing one box with 4 floats: xmin, ymin, xmax, ymax, i.e. the top-left and bottom-right corners
- **scores**(list of float): member variable holding the confidence of every detected face in a single image
- **landmarks**: member variable holding the keypoints of every face detected in a single image
+- **landmarks_per_face**: member variable holding the number of keypoints per face box.
diff --git a/docs/api/vision_results/face_recognition_result.md b/docs/api/vision_results/face_recognition_result.md
new file mode 100644
index 00000000000..83160561843
--- /dev/null
+++ b/docs/api/vision_results/face_recognition_result.md
@@ -0,0 +1,24 @@
+# FaceRecognitionResult 人脸识别结果
+
+FaceRecognitionResult 代码定义在`csrcs/fastdeploy/vision/common/result.h`中,用于表明人脸识别模型对图像特征的embedding。
+## C++ 定义
+
+`fastdeploy::vision::FaceRecognitionResult`
+
+```
+struct FaceRecognitionResult {
+ std::vector embedding;
+ void Clear();
+ std::string Str();
+};
+```
+
+- **embedding**: 成员变量,表示人脸识别模型最终的提取的特征embedding,可以用来计算人脸之间的特征相似度。
+- **Clear()**: 成员函数,用于清除结构体中存储的结果
+- **Str()**: 成员函数,将结构体中的信息以字符串形式输出(用于Debug)
+
+## Python 定义
+
+`fastdeploy.vision.FaceRecognitionResult`
+
+- **embedding**: 成员变量,表示人脸识别模型最终提取的特征embedding,可以用来计算人脸之间的特征相似度。
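Both the C++ and Python definitions expose only the raw `embedding` vector, so comparing two faces is left to the caller. A minimal cosine-similarity sketch over two such embeddings (plain C++, independent of FastDeploy):

```
#include <cmath>
#include <vector>

// Cosine similarity of two face embeddings; values close to 1 suggest the
// same identity. Assumes equal, non-zero length vectors.
float CosineSimilarity(const std::vector<float>& a,
                       const std::vector<float>& b) {
  float dot = 0.f, na = 0.f, nb = 0.f;
  for (size_t i = 0; i < a.size(); ++i) {
    dot += a[i] * b[i];
    na += a[i] * a[i];
    nb += b[i] * b[i];
  }
  return dot / (std::sqrt(na) * std::sqrt(nb) + 1e-12f);
}
```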
diff --git a/docs/api/vision_results/matting_result.md b/docs/api/vision_results/matting_result.md
index 3418400ecaa..67bcbc79d21 100644
--- a/docs/api/vision_results/matting_result.md
+++ b/docs/api/vision_results/matting_result.md
@@ -1,15 +1,15 @@
# MattingResult Matting Result
-MattingResult is defined in `csrcs/fastdeploy/vision/common/result.h` and describes the detected boxes, categories and confidences in an image.
+MattingResult is defined in `csrcs/fastdeploy/vision/common/result.h` and describes the predicted alpha values, the predicted foreground, and related data.
-## C++ struct
+## C++ definition
`fastdeploy::vision::MattingResult`
```
struct MattingResult {
- std::vector<float> alpha; // h x w
- std::vector<float> foreground; // h x w x c (c=3 default)
+ std::vector<float> alpha;
+ std::vector<float> foreground;
std::vector<int64_t> shape;
bool contain_foreground = false;
void Clear();
@@ -25,7 +25,7 @@ struct MattingResult {
- **Str()**: member function that dumps the struct's contents as a string (for debugging)
-## Python struct
+## Python definition
`fastdeploy.vision.MattingResult`
diff --git a/examples/vision/README.md b/examples/vision/README.md
index 9f05d2d7f6d..d95a315d798 100644
--- a/examples/vision/README.md
+++ b/examples/vision/README.md
@@ -8,6 +8,7 @@
| Segmentation | Semantic segmentation: takes an image and returns the class and confidence of each pixel | [SegmentationResult](../../docs/api/vision_results/segmentation_result.md) |
| Classification | Image classification: takes an image and returns its class and confidence | [ClassifyResult](../../docs/api/vision_results/classification_result.md) |
| FaceDetection | Face detection: takes an image, detects face locations, and returns box coordinates and face keypoints | [FaceDetectionResult](../../docs/api/vision_results/face_detection_result.md) |
+| FaceRecognition | Face recognition: takes an image and returns a face feature embedding usable for similarity computation | [FaceRecognitionResult](../../docs/api/vision_results/face_recognition_result.md) |
| Matting | Matting: takes an image and returns the alpha value of each foreground pixel | [MattingResult](../../docs/api/vision_results/matting_result.md) |
## FastDeploy API design
From a016ef99ceb57b461930b837b534317af567f3fd Mon Sep 17 00:00:00 2001
From: huangjianhui <852142024@qq.com>
Date: Mon, 15 Aug 2022 15:24:38 +0800
Subject: [PATCH 4/6] Add PaddleSeg doc and infer.cc demo (#114)
* Update README.md
* Update README.md
* Update README.md
* Create README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Add evaluation calculate time and fix some bugs
* Update classification __init__
* Move to ppseg
* Add segmentation doc
* Add PaddleClas infer.py
* Update PaddleClas infer.py
* Delete .infer.py.swp
* Add PaddleClas infer.cc
* Update README.md
* Update README.md
* Update README.md
* Update infer.py
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Add PaddleSeg doc and infer.cc demo
* Update README.md
* Update README.md
* Update README.md
Co-authored-by: Jason
---
.../vision/segmentation/ppseg/model.cc | 2 +-
.../classification/paddleclas/README.md | 2 +-
.../paddleclas/python/README.md | 15 ++--
.../detection/paddledetection/cpp/README.md | 2 +-
.../vision/segmentation/paddleseg/README.md | 58 +++++-------
.../segmentation/paddleseg/cpp/README.md | 62 +++++++------
.../segmentation/paddleseg/cpp/infer.cc | 89 +++++++++++--------
.../segmentation/paddleseg/python/README.md | 58 ++++++------
.../segmentation/paddleseg/python/infer.py | 19 ++--
.../vision/segmentation/ppseg/__init__.py | 4 +-
10 files changed, 160 insertions(+), 151 deletions(-)
diff --git a/csrc/fastdeploy/vision/segmentation/ppseg/model.cc b/csrc/fastdeploy/vision/segmentation/ppseg/model.cc
index be5c241f7d3..9604d665c13 100644
--- a/csrc/fastdeploy/vision/segmentation/ppseg/model.cc
+++ b/csrc/fastdeploy/vision/segmentation/ppseg/model.cc
@@ -14,7 +14,7 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
const Frontend& model_format) {
config_file_ = config_file;
valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
- valid_gpu_backends = {Backend::PDINFER, Backend::ORT};
+ valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
runtime_option = custom_option;
runtime_option.model_format = model_format;
runtime_option.model_file = model_file;
diff --git a/examples/vision/classification/paddleclas/README.md b/examples/vision/classification/paddleclas/README.md
index 1f837761645..9a62f07f0ba 100644
--- a/examples/vision/classification/paddleclas/README.md
+++ b/examples/vision/classification/paddleclas/README.md
@@ -21,7 +21,7 @@
For PaddleClas model export, see its documentation [Export Model](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/inference_deployment/export_model.md#2-%E5%88%86%E7%B1%BB%E6%A8%A1%E5%9E%8B%E5%AF%BC%E5%87%BA)
-Note: a model exported by PaddleClas contains only the two documents `inference.pdmodel` and `inference.pdiparams`. For deployment you also need the generic [inference_cls.yaml](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/deploy/configs/inference_cls.yaml) file it provides; FastDeploy reads the preprocessing settings required at inference time from this yaml file. Developers can download the file directly, but should adjust its configuration to their own needs, following the infer section of the PaddleClas training [config](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.4/ppcls/configs/ImageNet).
+Note: a model exported by PaddleClas contains only the two files `inference.pdmodel` and `inference.pdiparams`. For deployment you also need the generic [inference_cls.yaml](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/deploy/configs/inference_cls.yaml) file it provides; FastDeploy reads the preprocessing settings required at inference time from this yaml file. Developers can download the file directly, but should adjust its configuration to their own needs, following the infer section of the PaddleClas training [config](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.4/ppcls/configs/ImageNet).
## 下载预训练模型
diff --git a/examples/vision/classification/paddleclas/python/README.md b/examples/vision/classification/paddleclas/python/README.md
index c6bb35d2ee5..77ad787973f 100644
--- a/examples/vision/classification/paddleclas/python/README.md
+++ b/examples/vision/classification/paddleclas/python/README.md
@@ -8,22 +8,21 @@
This directory provides `infer.py`, a quick example of deploying ResNet50_vd on CPU/GPU, and on GPU with TensorRT acceleration. Run the following script to complete it
```
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/classification/paddleclas/python
+
# Download the ResNet50_vd model files and a test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
tar -xvf ResNet50_vd_infer.tgz
wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
-
-#下载部署示例代码
-git clone https://github.com/PaddlePaddle/FastDeploy.git
-cd examples/vision/classification/paddleclas/python
-
# CPU inference
-python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device cpu
+python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device cpu --topk 1
# GPU inference
-python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu
+python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu --topk 1
# TensorRT inference on GPU (note: the first TensorRT run serializes the model, which takes some time; please be patient)
-python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu --use_trt True
+python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu --use_trt True --topk 1
```
After it finishes, the returned result looks like this
diff --git a/examples/vision/detection/paddledetection/cpp/README.md b/examples/vision/detection/paddledetection/cpp/README.md
index 4ed39929b85..64e2dc99734 100644
--- a/examples/vision/detection/paddledetection/cpp/README.md
+++ b/examples/vision/detection/paddledetection/cpp/README.md
@@ -15,7 +15,7 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/libs/0.2.0/fastdeploy-linux-x64-
tar xvf fastdeploy-linux-x64-gpu-0.2.0.tgz
cd fastdeploy-linux-x64-gpu-0.2.0/examples/vision/detection/paddledetection
mkdir build && cd build
-cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/../../../../../../fastdeploy-linux-x64-gpu-0.2.0
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/../../../../../../../fastdeploy-linux-x64-gpu-0.2.0
make -j
# Download the PPYOLOE model files and a test image
diff --git a/examples/vision/segmentation/paddleseg/README.md b/examples/vision/segmentation/paddleseg/README.md
index 1f837761645..413e10b8a2c 100644
--- a/examples/vision/segmentation/paddleseg/README.md
+++ b/examples/vision/segmentation/paddleseg/README.md
@@ -1,54 +1,36 @@
-# PaddleClas model deployment
+# PaddleSeg model deployment
## Model versions
-- [PaddleClas Release/2.4](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.4)
+- [PaddleSeg Release/2.6](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.6)
FastDeploy currently supports deploying the following models
-- [PP-LCNet series models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/PP-LCNet.md)
-- [PP-LCNetV2 series models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/PP-LCNetV2.md)
-- [EfficientNet series models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/EfficientNet_and_ResNeXt101_wsl.md)
-- [GhostNet series models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/Mobile.md)
-- [MobileNet series models (v1, v2, v3)](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/Mobile.md)
-- [ShuffleNet series models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/Mobile.md)
-- [SqueezeNet series models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/Others.md)
-- [Inception series models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/Inception.md)
-- [PP-HGNet series models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/PP-HGNet.md)
-- [ResNet series models (incl. vd variants)](https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/zh_CN/models/ResNet_and_vd.md)
+- [U-Net series models](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/configs/unet/README.md)
+- [PP-LiteSeg series models](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/configs/pp_liteseg/README.md)
+- [PP-HumanSeg series models](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/contrib/PP-HumanSeg/README.md)
+- [FCN series models](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/configs/fcn/README.md)
+- [DeepLabV3 series models](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/configs/deeplabv3/README.md)
-## Prepare PaddleClas deployment models
+## Prepare PaddleSeg deployment models
-For PaddleClas model export, see its documentation [Export Model](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/inference_deployment/export_model.md#2-%E5%88%86%E7%B1%BB%E6%A8%A1%E5%9E%8B%E5%AF%BC%E5%87%BA)
+For PaddleSeg model export, see its documentation [Export Model](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/docs/model_export_cn.md)
-Note: a model exported by PaddleClas contains only the two files `inference.pdmodel` and `inference.pdiparams`. For deployment you also need the generic [inference_cls.yaml](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/deploy/configs/inference_cls.yaml) file it provides; FastDeploy reads the preprocessing settings required at inference time from this yaml file. Developers can download the file directly, but should adjust its configuration to their own needs, following the infer section of the PaddleClas training [config](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.4/ppcls/configs/ImageNet).
+Note: when exporting a PaddleSeg model you may set the `--input_shape` option; if the input image size at prediction time is not fixed, it is recommended to keep the default, i.e. leave the option unset. An exported PaddleSeg model consists of the three files `model.pdmodel`, `model.pdiparams` and `deploy.yaml`; FastDeploy reads the preprocessing settings required at inference time from the yaml file.
## Download pretrained models
-For developers' convenience, some models exported by PaddleClas (with the inference_cls.yaml file included) are provided below for direct download.
-
-| Model | Weights size | Input shape | Top1 | Top5 |
-|:---------------------------------------------------------------- |:----- |:----- | :----- | :----- |
-| [PPLCNet_x1_0](https://bj.bcebos.com/paddlehub/fastdeploy/PPLCNet_x1_0_infer.tgz) | 12MB | 224x224 |71.32% | 90.03% |
-| [PPLCNetV2_base](https://bj.bcebos.com/paddlehub/fastdeploy/PPLCNetV2_base_infer.tgz) | 26MB | 224x224 |77.04% | 93.27% |
-| [EfficientNetB7](https://bj.bcebos.com/paddlehub/fastdeploy/EfficientNetB7_infer.tgz) | 255MB | 600x600 | 84.3% | 96.9% |
-| [EfficientNetB0_small](https://bj.bcebos.com/paddlehub/fastdeploy/EfficientNetB0_small_infer.tgz)| 18MB | 224x224 | 75.8% | 75.8% |
-| [GhostNet_x1_3_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/GhostNet_x1_3_ssld_infer.tgz) | 29MB | 224x224 | 75.7% | 92.5% |
-| [GhostNet_x0_5_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/GhostNet_x0_5_infer.tgz) | 10MB | 224x224 | 66.8% | 86.9% |
-| [MobileNetV1_x0_25](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_x0_25_infer.tgz) | 1.9MB | 224x224 | 51.4% | 75.5% |
-| [MobileNetV1_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_ssld_infer.tgz) | 17MB | 224x224 | 77.9% | 93.9% |
-| [MobileNetV2_x0_25](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV2_x0_25_infer.tgz) | 5.9MB | 224x224 | 53.2% | 76.5% |
-| [MobileNetV2_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV2_ssld_infer.tgz) | 14MB | 224x224 | 76.74% | 93.39% |
-| [MobileNetV3_small_x0_35_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV3_small_x0_35_ssld_infer.tgz) | 6.4MB | 224x224 | 55.55% | 77.71% |
-| [MobileNetV3_large_x1_0_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV3_large_x1_0_ssld_infer.tgz) | 22MB | 224x224 | 78.96% | 94.48% |
-| [ShuffleNetV2_x0_25](https://bj.bcebos.com/paddlehub/fastdeploy/ShuffleNetV2_x0_25_infer.tgz) | 2.4MB | 224x224 | 49.9% | 73.79% |
-| [ShuffleNetV2_x2_0](https://bj.bcebos.com/paddlehub/fastdeploy/ShuffleNetV2_x2_0_infer.tgz) | 29MB | 224x224 | 73.15% | 91.2% |
-| [SqueezeNet1_1](https://bj.bcebos.com/paddlehub/fastdeploy/SqueezeNet1_1_infer.tgz) | 4.8MB | 224x224 | 60.1% | 81.9% |
-| [InceptionV3](https://bj.bcebos.com/paddlehub/fastdeploy/InceptionV3_infer.tgz) | 92MB | 299x299 | 79.14% | 94.59% |
-| [PPHGNet_tiny_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/PPHGNet_tiny_ssld_infer.tgz) | 57MB | 224x224 | 81.95% | 96.12% |
-| [PPHGNet_base_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/PPHGNet_base_ssld_infer.tgz) | 274MB | 224x224 | 85.0% | 97.35% |
-| [ResNet50_vd](https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz) | 98MB | 224x224 | 79.12% | 94.44% |
+For developers' convenience, some models exported by PaddleSeg are provided below for direct download (exported with `input_shape` and `with_softmax` **unset** and `without_argmax` **set**).
+
+| Model | Weights size | Input shape | mIoU | mIoU (flip) | mIoU (ms+flip) |
+|:---------------------------------------------------------------- |:----- |:----- | :----- | :----- | :----- |
+| [Unet-cityscapes](https://bj.bcebos.com/paddlehub/fastdeploy/Unet_cityscapes_without_argmax_infer.tgz) | 52MB | 1024x512 | 65.00% | 66.02% | 66.89% |
+| [PP-LiteSeg-T(STDC1)-cityscapes](https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_T_STDC1_cityscapes_without_argmax_infer.tgz) | 31MB | 1024x512 |73.10% | 73.89% | - |
+| [PP-HumanSegV1-Lite](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV1_Lite_infer.tgz) | 543KB | 192x192 | 86.2% | - | - |
+| [PP-HumanSegV1-Server](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV1_Server_infer.tgz) | 103MB | 512x512 | 96.47% | - | - |
+| [FCN-HRNet-W18-cityscapes](https://bj.bcebos.com/paddlehub/fastdeploy/FCN_HRNet_W18_cityscapes_without_argmax_infer.tgz) | 37MB | 1024x512 | 78.97% | 79.49% | 79.74% |
+| [Deeplabv3-ResNet50-OS8-cityscapes](https://bj.bcebos.com/paddlehub/fastdeploy/Deeplabv3_ResNet50_OS8_cityscapes_without_argmax_infer.tgz) | 150MB | 1024x512 | 79.90% | 80.22% | 80.47% |
## Detailed deployment docs
diff --git a/examples/vision/segmentation/paddleseg/cpp/README.md b/examples/vision/segmentation/paddleseg/cpp/README.md
index 302f3f74f36..8e5a9627b16 100644
--- a/examples/vision/segmentation/paddleseg/cpp/README.md
+++ b/examples/vision/segmentation/paddleseg/cpp/README.md
@@ -1,6 +1,6 @@
-# YOLOv7 C++ deployment example
+# PaddleSeg C++ deployment example
-This directory provides `infer.cc`, a quick example of deploying YOLOv7 on CPU/GPU, and on GPU with TensorRT acceleration.
+This directory provides `infer.cc`, a quick example of deploying Unet on CPU/GPU, and on GPU with TensorRT acceleration.
Before deployment, confirm the following two steps
@@ -12,51 +12,58 @@
```
mkdir build
cd build
-wget https://xxx.tgz
-tar xvf fastdeploy-linux-x64-0.2.0.tgz
-cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0
+wget https://bj.bcebos.com/paddlehub/fastdeploy/libs/0.2.0/fastdeploy-linux-x64-gpu-0.2.0.tgz
+tar xvf fastdeploy-linux-x64-gpu-0.2.0.tgz
+cd fastdeploy-linux-x64-gpu-0.2.0/examples/vision/segmentation/paddleseg/cpp/build
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/../../../../../../../fastdeploy-linux-x64-gpu-0.2.0
make -j
-# Download the officially converted yolov7 model file and a test image
-wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov7.onnx
-wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000087038.jpg
+# Download the Unet model files and a test image
+wget https://bj.bcebos.com/paddlehub/fastdeploy/Unet_cityscapes_without_argmax_infer.tgz
+tar -xvf Unet_cityscapes_without_argmax_infer.tgz
+wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
# CPU inference
-./infer_demo yolov7.onnx 000000087038.jpg 0
+./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 0
# GPU inference
-./infer_demo yolov7.onnx 000000087038.jpg 1
+./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 1
# TensorRT inference on GPU
-./infer_demo yolov7.onnx 000000087038.jpg 2
+./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 2
```
-## YOLOv7 C++ API
+The visualized result after the run is shown below
+
+(visualized segmentation image omitted)
+
-### YOLOv7 class
+## PaddleSeg C++ API
+
+### PaddleSeg class
```
-fastdeploy::vision::detection::YOLOv7(
+fastdeploy::vision::segmentation::PaddleSegModel(
const string& model_file,
const string& params_file = "",
+ const string& config_file,
const RuntimeOption& runtime_option = RuntimeOption(),
- const Frontend& model_format = Frontend::ONNX)
+ const Frontend& model_format = Frontend::PADDLE)
```
-Loads and initializes a YOLOv7 model, where model_file is the exported ONNX model.
+Loads and initializes a PaddleSegModel, where model_file is the exported Paddle model.
**Parameters**
> * **model_file**(str): path to the model file
-> * **params_file**(str): path to the weights file; pass an empty string when the model format is ONNX
+> * **params_file**(str): path to the weights file
+> * **config_file**(str): path to the inference deployment config file
> * **runtime_option**(RuntimeOption): backend inference options; None means the default configuration is used
-> * **model_format**(Frontend): model format, ONNX by default
+> * **model_format**(Frontend): model format, Paddle by default
#### Predict function
> ```
-> YOLOv7::Predict(cv::Mat* im, DetectionResult* result,
-> float conf_threshold = 0.25,
-> float nms_iou_threshold = 0.5)
+> PaddleSegModel::Predict(cv::Mat* im, DetectionResult* result)
> ```
>
> Prediction interface: takes an image and directly returns the results.
@@ -64,13 +71,16 @@ Loads and initializes a YOLOv7 model, where model_file is the exported ONNX
> **Parameters**
>
> > * **im**: input image; note it must be in HWC, BGR format
-> > * **result**: detection result, including detected boxes and the confidence of each box; see [vision model results](../../../../../docs/api/vision_results/) for DetectionResult
-> > * **conf_threshold**: confidence threshold for filtering detected boxes
-> > * **nms_iou_threshold**: IoU threshold used during NMS
+> > * **result**: segmentation result, including the predicted labels and the probability of each label; see [vision model results](../../../../../docs/api/vision_results/) for SegmentationResult
+
+### Class member attributes
+#### Preprocessing parameters
+Users can modify the following preprocessing parameters to fit their own needs, which affects the final inference and deployment result
-### Class member variables
+> > * **is_vertical_screen**(bool): for PP-HumanSeg series models, setting this to `true` indicates the input image is portrait, i.e. its height is greater than its width
-> > * **size**(vector<int>): resize target used during preprocessing, two ints for [width, height]; default is [640, 640]
+#### Postprocessing parameters
+> > * **with_softmax**(bool): when the model was exported without the `with_softmax` flag, setting this to `true` applies softmax normalization to the probability result (score_map) paired with the predicted label_map
- [Model description](../../)
- [Python deployment](../python)
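As a sketch of using the two attributes above (assuming, as the listing suggests, they are plain public members of the model class; verify against the installed headers):

```
#include "fastdeploy/vision.h"

int main() {
  fastdeploy::vision::segmentation::PaddleSegModel model(
      "model.pdmodel", "model.pdiparams", "deploy.yaml");
  // Portrait (height > width) input, relevant for PP-HumanSeg models:
  model.is_vertical_screen = true;
  // Softmax-normalize score_map when the model was exported without it:
  model.with_softmax = true;
  return 0;
}
```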
diff --git a/examples/vision/segmentation/paddleseg/cpp/infer.cc b/examples/vision/segmentation/paddleseg/cpp/infer.cc
index bd07ba988da..1f20ea9c87f 100644
--- a/examples/vision/segmentation/paddleseg/cpp/infer.cc
+++ b/examples/vision/segmentation/paddleseg/cpp/infer.cc
@@ -14,34 +14,45 @@
#include "fastdeploy/vision.h"
-void CpuInfer(const std::string& model_file, const std::string& params_file,
- const std::string& config_file, const std::string& image_file) {
- auto option = fastdeploy::RuntimeOption();
- option.UseCpu() auto model =
- fastdeploy::vision::classification::PaddleClasModel(
- model_file, params_file, config_file, option);
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void CpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "deploy.yaml";
+ auto model = fastdeploy::vision::segmentation::PaddleSegModel(
+ model_file, params_file, config_file);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
- fastdeploy::vision::ClassifyResult res;
+ fastdeploy::vision::SegmentationResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
- // print res
- res.Str();
+ auto vis_im = fastdeploy::vision::Visualize::VisSegmentation(im_bak, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void GpuInfer(const std::string& model_file, const std::string& params_file,
- const std::string& config_file, const std::string& image_file) {
+void GpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "deploy.yaml";
+
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
- auto model = fastdeploy::vision::classification::PaddleClasModel(
+ auto model = fastdeploy::vision::segmentation::PaddleSegModel(
model_file, params_file, config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
@@ -49,25 +60,30 @@ void GpuInfer(const std::string& model_file, const std::string& params_file,
}
auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
- fastdeploy::vision::ClassifyResult res;
+ fastdeploy::vision::SegmentationResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
- // print res
- res.Str();
+ auto vis_im = fastdeploy::vision::Visualize::VisSegmentation(im_bak, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void TrtInfer(const std::string& model_file, const std::string& params_file,
- const std::string& config_file, const std::string& image_file) {
+void TrtInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "deploy.yaml";
+
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
option.UseTrtBackend();
- option.SetTrtInputShape("inputs", [ 1, 3, 224, 224 ], [ 1, 3, 224, 224 ],
- [ 1, 3, 224, 224 ]);
- auto model = fastdeploy::vision::classification::PaddleClasModel(
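+ // Dynamic-shape bounds for the TensorRT engine: the min, opt and max sizes
+ // that the input tensor "x" may take at runtime.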
+ option.SetTrtInputShape("x", {1, 3, 256, 256}, {1, 3, 1024, 1024},
+ {1, 3, 2048, 2048});
+ auto model = fastdeploy::vision::segmentation::PaddleSegModel(
model_file, params_file, config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
@@ -75,40 +91,37 @@ void TrtInfer(const std::string& model_file, const std::string& params_file,
}
auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
- fastdeploy::vision::ClassifyResult res;
+ fastdeploy::vision::SegmentationResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
- // print res
- res.Str();
+ auto vis_im = fastdeploy::vision::Visualize::VisSegmentation(im_bak, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 4) {
- std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
- "e.g ./infer_demo ./ResNet50_vd ./test.jpeg 0"
- << std::endl;
+ std::cout
+ << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
+ "e.g ./infer_model ./ppseg_model_dir ./test.jpeg 0"
+ << std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend."
<< std::endl;
return -1;
}
- std::string model_file =
- argv[1] + "/" + "model.pdmodel" std::string params_file =
- argv[1] + "/" + "model.pdiparams" std::string config_file =
- argv[1] + "/" + "inference_cls.yaml" std::string image_file =
- argv[2] if (std::atoi(argv[3]) == 0) {
- CpuInfer(model_file, params_file, config_file, image_file);
- }
- else if (std::atoi(argv[3]) == 1) {
- GpuInfer(model_file, params_file, config_file, image_file);
- }
- else if (std::atoi(argv[3]) == 2) {
- TrtInfer(model_file, params_file, config_file, image_file);
+ if (std::atoi(argv[3]) == 0) {
+ CpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 1) {
+ GpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 2) {
+ TrtInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/segmentation/paddleseg/python/README.md b/examples/vision/segmentation/paddleseg/python/README.md
index 1259c150992..a4456488491 100644
--- a/examples/vision/segmentation/paddleseg/python/README.md
+++ b/examples/vision/segmentation/paddleseg/python/README.md
@@ -1,46 +1,43 @@
-# PaddleClas Python deployment example
+# PaddleSeg Python deployment example
Before deployment, confirm the following two steps
- 1. The hardware and software environment meets the requirements; see [FastDeploy environment requirements](../../../../../docs/quick_start/requirements.md)
- 2. The FastDeploy Python wheel is installed; see [FastDeploy Python installation](../../../../../docs/quick_start/install.md)
-This directory provides `infer.py`, a quick example of deploying ResNet50_vd on CPU/GPU, and on GPU with TensorRT acceleration. Run the following script to complete it
+This directory provides `infer.py`, a quick example of deploying Unet on CPU/GPU, and on GPU with TensorRT acceleration. Run the following script to complete it
```
-# Download the ResNet50_vd model files and a test image
-wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
-tar -xvf ResNet50_vd_infer.tgz
-wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
-
-
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
-cd examples/vision/classification/paddleclas/python
+cd FastDeploy/examples/vision/segmentation/paddleseg/python
+
+# Download the Unet model files and a test image
+wget https://bj.bcebos.com/paddlehub/fastdeploy/Unet_cityscapes_without_argmax_infer.tgz
+tar -xvf Unet_cityscapes_without_argmax_infer.tgz
+wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
+
# CPU inference
-python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device cpu
+python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device cpu
# GPU inference
-python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu
+python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device gpu
# TensorRT inference on GPU (note: the first TensorRT run serializes the model, which takes some time; please be patient)
-python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu --use_trt True
+python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device gpu --use_trt True
```
-After it finishes, the returned result looks like this
-```
-ClassifyResult(
-label_ids: 153,
-scores: 0.686229,
-)
-```
+The visualized result after the run is shown below
+
+(visualized segmentation image omitted)
+
-## PaddleClasModel Python API
+## PaddleSegModel Python API
```
-fd.vision.classification.PaddleClasModel(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
+fd.vision.segmentation.PaddleSegModel(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
```
-Loads and initializes a PaddleClas model, where model_file and params_file are the Paddle inference files exported from the trained model; see its documentation [Export Model](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/inference_deployment/export_model.md#2-%E5%88%86%E7%B1%BB%E6%A8%A1%E5%9E%8B%E5%AF%BC%E5%87%BA) for details
+Loads and initializes a PaddleSeg model, where model_file, params_file and config_file are the Paddle inference files exported from the trained model; see its documentation [Export Model](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/docs/model_export_cn.md) for details
**Parameters**
@@ -53,7 +50,7 @@ Loads and initializes a PaddleClas model, where model_file and params_file are
### predict function
> ```
-> PaddleClasModel.predict(input_image, topk=1)
+> PaddleSegModel.predict(input_image)
> ```
>
> Prediction interface: takes an image and directly returns the results.
@@ -61,15 +58,22 @@ PaddleClas模型加载和初始化,其中model_file, params_file为训练模
> **Parameters**
>
> > * **input_image**(np.ndarray): input data; note it must be in HWC, BGR format
-> > * **topk**(int): return the topk classification results with the highest predicted probability
> **Returns**
>
-> > Returns a `fastdeploy.vision.ClassifyResult` struct; see [vision model results](../../../../../docs/api/vision_results/) for details
+> > Returns a `fastdeploy.vision.SegmentationResult` struct; see [vision model results](../../../../../docs/api/vision_results/) for details
+
+### Class member attributes
+#### Preprocessing parameters
+Users can modify the following preprocessing parameters to fit their own needs, which affects the final inference and deployment result
+
+> > * **is_vertical_screen**(bool): for PP-HumanSeg series models, setting this to `True` indicates the input image is portrait, i.e. its height is greater than its width
+#### Postprocessing parameters
+> > * **with_softmax**(bool): when the model was exported without the `with_softmax` flag, setting this to `True` applies softmax normalization to the probability result (score_map) paired with the predicted label_map
## Other documents
-- [PaddleClas model description](..)
-- [PaddleClas C++ deployment](../cpp)
+- [PaddleSeg model description](..)
+- [PaddleSeg C++ deployment](../cpp)
- [Model prediction results](../../../../../docs/api/vision_results/)
diff --git a/examples/vision/segmentation/paddleseg/python/infer.py b/examples/vision/segmentation/paddleseg/python/infer.py
index 88c272bcac7..28903e92957 100644
--- a/examples/vision/segmentation/paddleseg/python/infer.py
+++ b/examples/vision/segmentation/paddleseg/python/infer.py
@@ -8,11 +8,9 @@ def parse_arguments():
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
- "--model", required=True, help="Path of PaddleClas model.")
+ "--model", required=True, help="Path of PaddleSeg model.")
parser.add_argument(
"--image", type=str, required=True, help="Path of test image file.")
- parser.add_argument(
- "--topk", type=int, default=1, help="Return topk results.")
parser.add_argument(
"--device",
type=str,
@@ -43,14 +41,17 @@ def build_option(args):
# Configure the runtime and load the model
runtime_option = build_option(args)
-model_file = os.path.join(args.model, "inference.pdmodel")
-params_file = os.path.join(args.model, "inference.pdiparams")
-config_file = os.path.join(args.model, "inference_cls.yaml")
-#model = fd.vision.classification.PaddleClasModel(model_file, params_file, config_file, runtime_option=runtime_option)
-model = fd.vision.classification.ResNet50vd(
+model_file = os.path.join(args.model, "model.pdmodel")
+params_file = os.path.join(args.model, "model.pdiparams")
+config_file = os.path.join(args.model, "deploy.yaml")
+model = fd.vision.segmentation.PaddleSegModel(
model_file, params_file, config_file, runtime_option=runtime_option)
# Predict the segmentation result for the image
im = cv2.imread(args.image)
-result = model.predict(im, args.topk)
+result = model.predict(im)
print(result)
+
+# Visualize the result
+vis_im = fd.vision.visualize.vis_segmentation(im, result)
+cv2.imwrite("vis_img.png", vis_im)
diff --git a/fastdeploy/vision/segmentation/ppseg/__init__.py b/fastdeploy/vision/segmentation/ppseg/__init__.py
index 9d3cfaf4d78..bc516351d0e 100644
--- a/fastdeploy/vision/segmentation/ppseg/__init__.py
+++ b/fastdeploy/vision/segmentation/ppseg/__init__.py
@@ -23,9 +23,9 @@ def __init__(self,
model_file,
params_file,
config_file,
- backend_option=None,
+ runtime_option=None,
model_format=Frontend.PADDLE):
- super(Model, self).__init__(backend_option)
+ super(PaddleSegModel, self).__init__(runtime_option)
assert model_format == Frontend.PADDLE, "PaddleSeg only support model format of Frontend.Paddle now."
self._model = C.vision.segmentation.PaddleSegModel(
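
Since this hunk renames the constructor keyword from `backend_option` to `runtime_option`, a minimal usage sketch may help; the `use_gpu`/`use_trt_backend` switches on RuntimeOption are assumptions here, mirroring the `--device`/`--use_trt` flags handled by `build_option` in the example script above:

```python
import fastdeploy as fd

# Build a RuntimeOption and pass it via the renamed keyword argument
option = fd.RuntimeOption()
option.use_gpu()            # assumption: run on GPU; omit to stay on CPU
# option.use_trt_backend()  # assumption: switch the backend to TensorRT

model = fd.vision.segmentation.PaddleSegModel(
    "model.pdmodel", "model.pdiparams", "deploy.yaml",
    runtime_option=option)  # formerly `backend_option`
```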
From 62760a4eb69de6635e2fc7d89fe58250b4e7ce0e Mon Sep 17 00:00:00 2001
From: ziqi-jin <67993288+ziqi-jin@users.noreply.github.com>
Date: Mon, 15 Aug 2022 16:58:46 +0800
Subject: [PATCH 5/6] Fix the expression of data type for api docs (#115)
* first commit for yolov7
* pybind for yolov7
* CPP README.md
* modified yolov7.cc
* README.md
* python file modify
* delete license in fastdeploy/
* repush the conflict part
* README.md modified
* file path modified
* README modified
* move some helpers to private
* add examples for yolov7
* api.md modified
* YOLOv7
* yolov7 release link
* copyright
* change some helpers to private
* change variables to const and fix documents.
* gitignore
* Transfer some functions to private member of class
* Merge from develop (#9)
* Fix compile problem in different python version (#26)
* fix some usage problem in linux
* Fix compile problem
Co-authored-by: root
* Add PaddleDetetion/PPYOLOE model support (#22)
* add ppdet/ppyoloe
* Add demo code and documents
* add convert processor to vision (#27)
* update .gitignore
* Added checking for cmake include dir
* fixed missing trt_backend option bug when init from trt
* remove un-need data layout and add pre-check for dtype
* changed RGB2BRG to BGR2RGB in ppcls model
* add model_zoo yolov6 c++/python demo
* fixed CMakeLists.txt typos
* update yolov6 cpp/README.md
* add yolox c++/pybind and model_zoo demo
* move some helpers to private
* fixed CMakeLists.txt typos
* add normalize with alpha and beta
* add version notes for yolov5/yolov6/yolox
* add copyright to yolov5.cc
* revert normalize
* fixed some bugs in yolox
* fixed examples/CMakeLists.txt to avoid conflicts
* add convert processor to vision
* format examples/CMakeLists summary
* Fix bug while the inference result is empty with YOLOv5 (#29)
* Add multi-label function for yolov5
* Update README.md
Update doc
* Update fastdeploy_runtime.cc
fix variable option.trt_max_shape wrong name
* Update runtime_option.md
Update resnet model dynamic shape setting name from images to x
* Fix bug when inference result boxes are empty
* Delete detection.py
Co-authored-by: Jason
Co-authored-by: root
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
Co-authored-by: huangjianhui <852142024@qq.com>
* first commit for yolor
* for merge
* Develop (#11)
* Yolor (#16)
* Develop (#11) (#12)
* Develop (#13)
* documents
* Develop (#14)
* add is_dynamic for YOLO series (#22)
* modify api docs
* modify api docs
Co-authored-by: Jason
Co-authored-by: root
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
Co-authored-by: huangjianhui <852142024@qq.com>
Co-authored-by: Jason <928090362@qq.com>
---
docs/api/vision_results/face_detection_result.md | 4 ++--
docs/api/vision_results/face_recognition_result.md | 2 +-
docs/api/vision_results/matting_result.md | 8 ++++----
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/docs/api/vision_results/face_detection_result.md b/docs/api/vision_results/face_detection_result.md
index 000b42a6be0..2e1a13143d8 100644
--- a/docs/api/vision_results/face_detection_result.md
+++ b/docs/api/vision_results/face_detection_result.md
@@ -30,5 +30,5 @@ struct FaceDetectionResult {
- **boxes**(list of list(float)): member variable; the coordinates of all detected bounding boxes in a single image. boxes is a list whose every element is a list of length 4 representing one box, with the 4 float values giving xmin, ymin, xmax, ymax in order, i.e. the top-left and bottom-right corner coordinates
- **scores**(list of float): member variable; the confidence scores of all targets detected in a single image
-- **landmarks**: member variable; the keypoints of all faces detected in a single image
-- **landmarks_per_face**: member variable; the number of keypoints in each face box.
+- **landmarks**(list of list(float)): member variable; the keypoints of all faces detected in a single image
+- **landmarks_per_face**(int): member variable; the number of keypoints in each face box.
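
A minimal sketch of consuming these fields; it assumes `result` is a `fastdeploy.vision.FaceDetectionResult` returned by a face detection model's `predict` call, and that `landmarks` stores each face's points contiguously, `landmarks_per_face` at a time:

```python
# Iterate over detected faces and slice out each face's landmarks
for i, (box, score) in enumerate(zip(result.boxes, result.scores)):
    xmin, ymin, xmax, ymax = box  # top-left and bottom-right corners
    start = i * result.landmarks_per_face
    points = result.landmarks[start:start + result.landmarks_per_face]
    print(f"face {i}: box=({xmin:.1f}, {ymin:.1f}, {xmax:.1f}, {ymax:.1f}), "
          f"score={score:.3f}, {len(points)} landmarks")
```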
diff --git a/docs/api/vision_results/face_recognition_result.md b/docs/api/vision_results/face_recognition_result.md
index 83160561843..54d6eabb145 100644
--- a/docs/api/vision_results/face_recognition_result.md
+++ b/docs/api/vision_results/face_recognition_result.md
@@ -21,4 +21,4 @@ struct FaceRecognitionResult {
`fastdeploy.vision.FaceRecognitionResult`
-- **embedding**: member variable; the final feature embedding extracted by the face recognition model, which can be used to measure feature similarity between faces.
+- **embedding**(list of float): member variable; the final feature embedding extracted by the face recognition model, which can be used to measure feature similarity between faces.
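
Since the embedding is described as usable for similarity comparison, here is a minimal sketch of one common approach, cosine similarity; `result_a` and `result_b` are assumed to be `FaceRecognitionResult` objects from two `predict` calls:

```python
import numpy as np

# Compare two face embeddings with cosine similarity
a = np.asarray(result_a.embedding, dtype=np.float32)
b = np.asarray(result_b.embedding, dtype=np.float32)
cos_sim = float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))
print(f"cosine similarity: {cos_sim:.4f}")  # closer to 1.0 means more similar
```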
diff --git a/docs/api/vision_results/matting_result.md b/docs/api/vision_results/matting_result.md
index 67bcbc79d21..5a61365c268 100644
--- a/docs/api/vision_results/matting_result.md
+++ b/docs/api/vision_results/matting_result.md
@@ -29,7 +29,7 @@ struct MattingResult {
`fastdeploy.vision.MattingResult`
-- **alpha**: a 1-D vector of predicted alpha transparency values in the range [0., 1.], of length h x w, where h and w are the height and width of the input image
-- **foreground**: a 1-D vector of the predicted foreground, with values in [0., 255.] and length h x w x c, where h and w are the input image height and width and c is usually 3; foreground is not always present and is only valid when the model itself predicts a foreground
-- **contain_foreground**: whether the prediction includes a foreground
-- **shape**: the shape of the output; when contain_foreground is false, shape is (h, w); when contain_foreground is true, shape is (h, w, c), with c usually 3
+- **alpha**(list of float): a 1-D vector of predicted alpha transparency values in the range [0., 1.], of length h x w, where h and w are the height and width of the input image
+- **foreground**(list of float): a 1-D vector of the predicted foreground, with values in [0., 255.] and length h x w x c, where h and w are the input image height and width and c is usually 3; foreground is not always present and is only valid when the model itself predicts a foreground
+- **contain_foreground**(bool): whether the prediction includes a foreground
+- **shape**(list of int): the shape of the output; when contain_foreground is false, shape is (h, w); when contain_foreground is true, shape is (h, w, c), with c usually 3
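
Because `alpha` and `foreground` are flattened 1-D vectors, a consumer typically reshapes them using `shape`; a minimal sketch, assuming `result` is a `fastdeploy.vision.MattingResult`:

```python
import numpy as np

# Rebuild arrays from the flattened MattingResult fields
h, w = result.shape[0], result.shape[1]
alpha = np.asarray(result.alpha, dtype=np.float32).reshape(h, w)  # (h, w)
if result.contain_foreground:
    # foreground is only valid when the model itself predicted one
    fg = np.asarray(result.foreground, dtype=np.float32).reshape(result.shape)
```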
From fcf85cfe6222660bd9685e4383f51dd7484454e6 Mon Sep 17 00:00:00 2001
From: DefTruth <31974251+DefTruth@users.noreply.github.com>
Date: Mon, 15 Aug 2022 19:47:31 +0800
Subject: [PATCH 6/6] [feature][docs] update README.md and logo (#116)
* update (#21)
* [feature][docs] add prebuilt windows python whl and cpp libs (#113)
[feature][docs] add windows python whl and cpp libs
* Add ernie ie task (#100)
* Modify API Result Docs (#112)
* first commit test photo
* yolov7 doc
* add yolov5 docs
* modify yolov5 doc
* first commit for retinaface
* first commit for ultraface
* first commit for yolov5face
* first commit for modnet and arcface
* first commit for partial_fc
* first commit for yolox
* first commit for yolov6
* first commit for nano_det
* first commit for scrfd
* first commit for yolox yolov6 nano
* rm jpg
* first commit for modnet and modify nano
* yolor scaledyolov4 v5lite
* first commit for insightface
* docs
* add print for detect and modify docs
* docs test for insightface
* docs test for insightface again
* modify all wrong expressions in docs
* modify docs expressions
* fix expression of detection part
* add face recognition result doc
* modify result docs
Co-authored-by: Jason
Co-authored-by: root
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
Co-authored-by: huangjianhui <852142024@qq.com>
Co-authored-by: Jason <928090362@qq.com>
Co-authored-by: Jack Zhou
Co-authored-by: ziqi-jin <67993288+ziqi-jin@users.noreply.github.com>
Co-authored-by: Jason
Co-authored-by: root
Co-authored-by: huangjianhui <852142024@qq.com>
Co-authored-by: Jason <928090362@qq.com>
* [feature][docs] update README.md and logo
* [feature][win] fix typos and rm un-need file
Co-authored-by: Jack Zhou
Co-authored-by: ziqi-jin <67993288+ziqi-jin@users.noreply.github.com>
Co-authored-by: Jason
Co-authored-by: root
Co-authored-by: huangjianhui <852142024@qq.com>
Co-authored-by: Jason <928090362@qq.com>
---
README.md | 271 +++++++++++-------
.../vision_results/face_detection_result.md | 1 +
docs/compile/prebuilt_libraries.md | 2 +-
docs/logo/fastdeploy-opaque.png | Bin 0 -> 147535 bytes
4 files changed, 162 insertions(+), 112 deletions(-)
create mode 100644 docs/logo/fastdeploy-opaque.png
diff --git a/README.md b/README.md
index 5619db16009..8046fe0cba9 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,7 @@
-# ⚡️FastDeploy
+
-------------------------------------------------------------------------------------------
-
@@ -19,147 +17,196 @@
**⚡️FastDeploy** is an **easy-to-use** inference deployment toolbox. It covers the industry's mainstream **high-quality pre-trained models** and delivers an **out-of-the-box** development experience across tasks such as image classification, object detection, image segmentation, face detection, human keypoint detection, and text recognition, meeting developers' needs for fast deployment in **multiple scenarios**, on **multiple hardware platforms**, and across **multiple operating systems**.
## Release History
-- [v0.2.0] 2022.08.18 Fully open-sourced the server-side deployment code, supporting 40+ vision models on CPU/GPU, with accelerated deployment via GPU TensorRT
-
-## Server-Side Models
-
-| Task | Model | CPU | NVIDIA GPU | TensorRT |
-| -------- | ------------------------------------------------------------ | ------- | ---------- | ------------------- |
-| 图像分类 | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | √ | √ | √ |
-| | [PaddleClas/PPLCNet](./examples/vision/classification/paddleclas) | √ | √ | √ |
-| | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | √ | √ | √ |
-| | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | √ | √ | √ |
-| | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | √ | √ | √ |
-| | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | √ | √ | √ |
-| | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | √ | √ | √ |
-| 目标检测 | [PaddleDetection/PPYOLOE](./examples/vision/detection/paddledetection) | √ | √ | √ |
-| | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | √ | √ | √ |
-| | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | √ | √ | √ |
-| | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | √ | √ | √ |
-| | [PaddleDetection/PPYOLO](./examples/vision/detection/paddledetection) | √ | √ | - |
-| | [PaddleDetection/PPYOLOv2](./examples/vision/detection/paddledetection) | √ | √ | - |
-| | [PaddleDetection/FasterRCNN](./examples/vision/detection/paddledetection) | √ | √ | - |
-| | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | √ | √ | √ |
-
-## Quick Start
-
-#### Install FastDeploy Python
-
-Choose the version matching your development environment; see the [installation docs](docs/quick_start/install.md) for more supported environments.
-
-```
+- [v0.2.0] 2022.08.18 Fully open-sourced the server-side deployment code, supporting 40+ vision models on CPU/GPU, with accelerated deployment via TensorRT
+
+## Table of Contents
+* [Server-Side Model List](#fastdeploy-server-models)
+* [Server-Side Quick Start](#fastdeploy-quick-start)
+  * [Quick Installation](#fastdeploy-quick-start)
+  * [Python Inference Example](#fastdeploy-quick-start-python)
+  * [C++ Inference Example](#fastdeploy-quick-start-cpp)
+* [Lightweight SDK for Fast On-Device AI Deployment](#fastdeploy-edge-sdk)
+  * [Edge-Side Deployment](#fastdeploy-edge-sdk-arm-linux)
+  * [Mobile Deployment](#fastdeploy-edge-sdk-ios-android)
+  * [Custom Model Deployment](#fastdeploy-edge-sdk-custom)
+* [Community](#fastdeploy-community)
+* [Acknowledge](#fastdeploy-acknowledge)
+* [License](#fastdeploy-license)
+## 1. Server-Side Model List 🔥🔥🔥
+
+
+
+Legend: (1) ✅: already supported; (2) ❔: support planned; (3) ❌: not supported yet; (4) contrib: external model
+| Task | Model | API | Linux | Linux | Windows | Windows | MacOS | MacOS | Linux |
+| :--------: | :--------: | :--------: | :--------: | :--------: | :--------: | :--------: | :--------: | :--------: | :--------: |
+| --- | --- | --- | X86 CPU | NVIDIA GPU | Intel CPU | NVIDIA GPU | Intel CPU | Arm CPU | NVIDIA Jetson |
+| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/PPLCNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/PPLCNetv2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/SqueezeNetV1.1](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/PPHGNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Classification | [PaddleClas/SwinTransformer](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [PaddleDetection/PPYOLOE](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [PaddleDetection/PPYOLO](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [PaddleDetection/PPYOLOv2](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [PaddleDetection/FasterRCNN](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [Contrib/YOLOX](./examples/vision/detection/yolox) | [Python](./examples/vision/detection/yolox/python)/[C++](./examples/vision/detection/yolox/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [Contrib/YOLOv7](./examples/vision/detection/yolov7) | [Python](./examples/vision/detection/yolov7/python)/[C++](./examples/vision/detection/yolov7/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [Contrib/YOLOv6](./examples/vision/detection/yolov6) | [Python](./examples/vision/detection/yolov6/python)/[C++](./examples/vision/detection/yolov6/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [Contrib/YOLOv5](./examples/vision/detection/yolov5) | [Python](./examples/vision/detection/yolov5/python)/[C++](./examples/vision/detection/yolov5/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [Contrib/YOLOR](./examples/vision/detection/yolor) | [Python](./examples/vision/detection/yolor/python)/[C++](./examples/vision/detection/yolor/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [Contrib/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | [Python](./examples/vision/detection/scaledyolov4/python)/[C++](./examples/vision/detection/scaledyolov4/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [Contrib/YOLOv5Lite](./examples/vision/detection/yolov5lite) | [Python](./examples/vision/detection/yolov5lite/python)/[C++](./examples/vision/detection/yolov5lite/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Detection | [Contrib/NanoDetPlus](./examples/vision/detection/nanodet_plus) | [Python](./examples/vision/detection/nanodet_plus/python)/[C++](./examples/vision/detection/nanodet_plus/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Segmentation | [PaddleSeg/PPLiteSeg](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Segmentation | [PaddleSeg/PPHumanSegLite](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Segmentation | [PaddleSeg/PPHumanSegServer](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| FaceDetection | [Contrib/RetinaFace](./examples/vision/facedet/retinaface) | [Python](./examples/vision/facedet/retinaface/python)/[C++](./examples/vision/facedet/retinaface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| FaceDetection | [Contrib/UltraFace](./examples/vision/facedet/utltraface) | [Python](./examples/vision/facedet/utltraface/python)/[C++](./examples/vision/facedet/utltraface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| FaceDetection | [Contrib/YOLOv5Face](./examples/vision/facedet/yolov5face) | [Python](./examples/vision/facedet/yolov5face/python)/[C++](./examples/vision/facedet/yolov5face/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| FaceDetection | [Contrib/SCRFD](./examples/vision/facedet/scrfd) | [Python](./examples/vision/facedet/scrfd/python)/[C++](./examples/vision/facedet/scrfd/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| FaceRecognition | [Contrib/ArcFace](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| FaceRecognition | [Contrib/CosFace](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| FaceRecognition | [Contrib/PartialFC](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| FaceRecognition | [Contrib/VPL](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+| Matting | [Contrib/MODNet](./examples/vision/matting/modnet) | [Python](./examples/vision/matting/modnet/python)/[C++](./examples/vision/matting/modnet/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
+
+
+## 2. Server-Side Quick Start
+
+
+
+💡 Quick Installation of the FastDeploy Python/C++ Libraries
+
+Pick the wheel matching your Python version; see the [Python installation docs](docs/compile/prebuilt_wheels.md) for the detailed wheel list.
+
+```bash
pip install https://bj.bcebos.com/paddlehub/fastdeploy/wheels/fastdeploy_python-0.2.0-cp38-cp38-manylinux1_x86_64.whl
```
-
-Prepare an object detection model and a test image
+Or fetch the prebuilt C++ library; see the [C++ prebuilt library downloads](docs/compile/prebuilt_libraries.md) for more available packages
+```bash
+wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-0.2.0.tgz
```
+Prepare an object detection model and a test image
+```bash
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
tar xvf ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
```
+
-Load the model and run prediction
-```
-import fastdeploy.vision as vis
-import cv2
+### 2.1 Python Inference Example
+
-model = vis.detection.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
- "ppyoloe_crn_l_300e_coco/model.pdiparams",
- "ppyoloe_crn_l_300e_coco/infer_cfg.yml")
+```python
+import cv2
+import fastdeploy.vision as vision
+model = vision.detection.PPYOLOE("model.pdmodel", "model.pdiparams", "infer_cfg.yml")
im = cv2.imread("000000014439.jpg")
result = model.predict(im.copy())
print(result)
-vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
+vis_im = vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("vis_image.jpg", vis_im)
```
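
To run the same example on GPU, a RuntimeOption can be passed in; a minimal sketch, assuming the GPU wheel (`fastdeploy_gpu_python`) is installed and that `use_gpu` is the RuntimeOption switch:

```python
import cv2
import fastdeploy as fd
import fastdeploy.vision as vision

# Same PPYOLOE example, with the runtime switched to GPU
option = fd.RuntimeOption()
option.use_gpu()  # assumption: requires the GPU wheel
model = vision.detection.PPYOLOE(
    "ppyoloe_crn_l_300e_coco/model.pdmodel",
    "ppyoloe_crn_l_300e_coco/model.pdiparams",
    "ppyoloe_crn_l_300e_coco/infer_cfg.yml",
    runtime_option=option)
result = model.predict(cv2.imread("000000014439.jpg"))
print(result)
```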
+### 2.2 C++ Inference Example
+
-After prediction, the visualized result is saved to `vis_image.jpg` and the detection results are printed as follows
-```
-DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
-415.047363,89.311523, 506.009613, 283.863129, 0.950423, 0
-163.665710,81.914894, 198.585342, 166.760880, 0.896433, 0
-581.788635,113.027596, 612.623474, 198.521713, 0.842597, 0
-267.217224,89.777321, 298.796051, 169.361496, 0.837951, 0
-104.465599,45.482410, 127.688835, 93.533875, 0.773348, 0
-...
-```
-
-## More Server-Side Deployment Examples
+```C++
+#include "fastdeploy/vision.h"
-FastDeploy provides many deployment examples for developers' reference, supporting model deployment on CPU, GPU, and TensorRT
+int main(int argc, char* argv[]) {
+ namespace vision = fastdeploy::vision;
+ auto model = vision::detection::PPYOLOE("model.pdmodel", "model.pdiparams", "infer_cfg.yml");
+ auto im = cv::imread("000000014439.jpg");
-- [PaddleDetection model deployment](examples/vision/detection/paddledetection)
-- [PaddleClas model deployment](examples/vision/classification/paddleclas)
-- [PaddleSeg model deployment](examples/vision/segmentation/paddleseg)
-- [YOLOv7 deployment](examples/vision/detection/yolov7)
-- [YOLOv6 deployment](examples/vision/detection/yolov6)
-- [YOLOv5 deployment](examples/vision/detection/yolov5)
-- [Face detection model deployment](examples/vision/facedet)
-- [More vision model deployment examples...](examples/vision)
+ vision::DetectionResult res;
+  model.Predict(&im, &res);
-### 📱 Lightweight SDK for Fast On-Device AI Inference Deployment
+ auto vis_im = vision::Visualize::VisDetection(im, res, 0.5);
+ cv::imwrite("vis_image.jpg", vis_im);
+}
+```
+For more deployment examples, see the [vision model deployment examples](examples/vision).
+## 3. Lightweight SDK for Fast On-Device AI Inference Deployment 📱
+
| Task | Model | Size (MB) | Edge | Mobile | Mobile |
-| ------------------ | ---------------------------- | --------------------- | --------------------- | ---------------------- | --------------------- |
-| ---- | --- | --- | Linux | Android | iOS |
-| ----- | ---- | --- | ARM CPU | ARM CPU | ARM CPU |
-| Classfication | PP-LCNet | 11.9 | ✅ | ✅ | ✅ |
-| | PP-LCNetv2 | 26.6 | ✅ | ✅ | ✅ |
-| | EfficientNet | 31.4 | ✅ | ✅ | ✅ |
-| | GhostNet | 20.8 | ✅ | ✅ | ✅ |
-| | MobileNetV1 | 17 | ✅ | ✅ | ✅ |
-| | MobileNetV2 | 14.2 | ✅ | ✅ | ✅ |
-| | MobileNetV3 | 22 | ✅ | ✅ | ✅ |
-| | ShuffleNetV2 | 9.2 | ✅ | ✅ | ✅ |
-| | SqueezeNetV1.1 | 5 | ✅ | ✅ | ✅ |
-| | Inceptionv3 | 95.5 | ✅ | ✅ | ✅ |
-| | PP-HGNet | 59 | ✅ | ✅ | ✅ |
-| | SwinTransformer_224_win7 | 352.7 | ✅ | ✅ | ✅ |
-| Detection | PP-PicoDet_s_320_coco | 4.1 | ✅ | ✅ | ✅ |
-| | PP-PicoDet_s_320_lcnet | 4.9 | ✅ | ✅ | ✅ |
-| | CenterNet | 4.8 | ✅ | ✅ | ✅ |
-| | YOLOv3_MobileNetV3 | 94.6 | ✅ | ✅ | ✅ |
-| | PP-YOLO_tiny_650e_coco | 4.4 | ✅ | ✅ | ✅ |
-| | SSD_MobileNetV1_300_120e_voc | 23.3 | ✅ | ✅ | ✅ |
-| | PP-YOLO_ResNet50vd | 188.5 | ✅ | ✅ | ✅ |
-| | PP-YOLOv2_ResNet50vd | 218.7 | ✅ | ✅ | ✅ |
-| | PP-YOLO_crn_l_300e_coco | 209.1 | ✅ | ✅ | ✅ |
-| | YOLOv5s | 29.3 | ✅ | ✅ | ✅ |
-| Face Detection | BlazeFace | 1.5 | ✅ | ✅ | ✅ |
-| Face Localisation | RetinaFace | 1.7 | ✅ | ❌ | ❌ |
-| Keypoint Detection | PP-TinyPose | 5.5 | ✅ | ✅ | ✅ |
-| Segmentation | PP-LiteSeg(STDC1) | 32.2 | ✅ | ✅ | ✅ |
-| | PP-HumanSeg-Lite | 0.556 | ✅ | ✅ | ✅ |
-| | HRNet-w18 | 38.7 | ✅ | ✅ | ✅ |
-| | PP-HumanSeg-Server | 107.2 | ✅ | ✅ | ✅ |
-| | Unet | 53.7 | ❌ | ✅ | ❌ |
-| OCR | PP-OCRv1 | 2.3+4.4 | ✅ | ✅ | ✅ |
-| | PP-OCRv2 | 2.3+4.4 | ✅ | ✅ | ✅ |
-| | PP-OCRv3 | 2.4+10.6 | ✅ | ✅ | ✅ |
-| | PP-OCRv3-tiny | 2.4+10.7 | ✅ | ✅ | ✅ |
-
-
-#### Edge-Side Deployment
-
-- ARM Linux systems
+| :--------: | :--------: | :--------: | :--------: | :--------: | :--------: |
+| --- | --- | --- | Linux | Android | iOS |
+| --- | --- | --- | ARM CPU | ARM CPU | ARM CPU |
+| Classification | PP-LCNet | 11.9 | ✅ | ✅ | ✅ |
+| Classification | PP-LCNetv2 | 26.6 | ✅ | ✅ | ✅ |
+| Classification | EfficientNet | 31.4 | ✅ | ✅ | ✅ |
+| Classification | GhostNet | 20.8 | ✅ | ✅ | ✅ |
+| Classification | MobileNetV1 | 17 | ✅ | ✅ | ✅ |
+| Classification | MobileNetV2 | 14.2 | ✅ | ✅ | ✅ |
+| Classification | MobileNetV3 | 22 | ✅ | ✅ | ✅ |
+| Classification | ShuffleNetV2 | 9.2 | ✅ | ✅ | ✅ |
+| Classification | SqueezeNetV1.1 | 5 | ✅ | ✅ | ✅ |
+| Classification | Inceptionv3 | 95.5 | ✅ | ✅ | ✅ |
+| Classification | PP-HGNet | 59 | ✅ | ✅ | ✅ |
+| Classification | SwinTransformer_224_win7 | 352.7 | ✅ | ✅ | ✅ |
+| Detection | PP-PicoDet_s_320_coco | 4.1 | ✅ | ✅ | ✅ |
+| Detection | PP-PicoDet_s_320_lcnet | 4.9 | ✅ | ✅ | ✅ |
+| Detection | CenterNet | 4.8 | ✅ | ✅ | ✅ |
+| Detection | YOLOv3_MobileNetV3 | 94.6 | ✅ | ✅ | ✅ |
+| Detection | PP-YOLO_tiny_650e_coco | 4.4 | ✅ | ✅ | ✅ |
+| Detection | SSD_MobileNetV1_300_120e_voc | 23.3 | ✅ | ✅ | ✅ |
+| Detection | PP-YOLO_ResNet50vd | 188.5 | ✅ | ✅ | ✅ |
+| Detection | PP-YOLOv2_ResNet50vd | 218.7 | ✅ | ✅ | ✅ |
+| Detection | PP-YOLO_crn_l_300e_coco | 209.1 | ✅ | ✅ | ✅ |
+| Detection | YOLOv5s | 29.3 | ✅ | ✅ | ✅ |
+| FaceDetection | BlazeFace | 1.5 | ✅ | ✅ | ✅ |
+| FaceDetection | RetinaFace | 1.7 | ✅ | ❌ | ❌ |
+| KeypointsDetection | PP-TinyPose | 5.5 | ✅ | ✅ | ✅ |
+| Segmentation | PP-LiteSeg(STDC1) | 32.2 | ✅ | ✅ | ✅ |
+| Segmentation | PP-HumanSeg-Lite | 0.556 | ✅ | ✅ | ✅ |
+| Segmentation | HRNet-w18 | 38.7 | ✅ | ✅ | ✅ |
+| Segmentation | PP-HumanSeg-Server | 107.2 | ✅ | ✅ | ✅ |
+| Segmentation | Unet | 53.7 | ❌ | ✅ | ❌ |
+| OCR | PP-OCRv1 | 2.3+4.4 | ✅ | ✅ | ✅ |
+| OCR | PP-OCRv2 | 2.3+4.4 | ✅ | ✅ | ✅ |
+| OCR | PP-OCRv3 | 2.4+10.6 | ✅ | ✅ | ✅ |
+| OCR | PP-OCRv3-tiny | 2.4+10.7 | ✅ | ✅ | ✅ |
+
+### 3.1 Edge-Side Deployment
+
+
+- ARM Linux systems
- [C++ Inference deployment (incl. video streams)](./docs/ARM-Linux-CPP-SDK-Inference.md)
- [C++ serving deployment](./docs/ARM-Linux-CPP-SDK-Serving.md)
- [Python Inference deployment](./docs/ARM-Linux-Python-SDK-Inference.md)
- [Python serving deployment](./docs/ARM-Linux-Python-SDK-Serving.md)
-#### Mobile Deployment
+### 3.2 Mobile Deployment
+
- [iOS deployment](./docs/iOS-SDK.md)
- [Android deployment](./docs/Android-SDK.md)
-#### Custom Model Deployment
+### 3.3 Custom Model Deployment
+
- [Quickly swap in your own model](./docs/Replace-Model-With-Anther-One.md)
-## Community
+## 4. Community
+
- **Join the community 👬:** scan the QR code with WeChat, fill in the questionnaire to join the discussion group, and discuss inference deployment pain points with fellow developers
@@ -167,11 +214,13 @@ FastDeploy提供了大量部署示例供开发者参考,支持模型在CPU、G
-## Acknowledge
+## 5. Acknowledge
+
SDK generation and download in this project use the free open capabilities of [EasyEdge](https://ai.baidu.com/easyedge/app/openSource), for which we express our thanks again.
-## License
+## 6. License
+
FastDeploy is licensed under the [Apache-2.0 open-source license](./LICENSE).
diff --git a/docs/api/vision_results/face_detection_result.md b/docs/api/vision_results/face_detection_result.md
index 2e1a13143d8..30029a7f0fa 100644
--- a/docs/api/vision_results/face_detection_result.md
+++ b/docs/api/vision_results/face_detection_result.md
@@ -32,3 +32,4 @@ struct FaceDetectionResult {
- **scores**(list of float): member variable; the confidence scores of all targets detected in a single image
- **landmarks**(list of list(float)): member variable; the keypoints of all faces detected in a single image
- **landmarks_per_face**(int): member variable; the number of keypoints in each face box.
+
diff --git a/docs/compile/prebuilt_libraries.md b/docs/compile/prebuilt_libraries.md
index bd58fc4b0b8..905eb6d5295 100644
--- a/docs/compile/prebuilt_libraries.md
+++ b/docs/compile/prebuilt_libraries.md
@@ -1,4 +1,4 @@
-# FastDeploy Prebuilt Python Wheel Packages
+# FastDeploy Prebuilt C++ Libraries
FastDeploy provides prebuilt C++ deployment libraries for Windows/Linux/Mac; developers can download and use them directly, or build the code themselves.
diff --git a/docs/logo/fastdeploy-opaque.png b/docs/logo/fastdeploy-opaque.png
new file mode 100644
index 0000000000000000000000000000000000000000..16289dcf555f5a1ceb32f720a71054ec6c5200d0
GIT binary patch
literal 147535
zJwwG`Lqm`#uba=w_Ax!Q=o*q7ij8e>yzi&`mw>bd+et-51rVl(#>n%l6V=aWr-9ON
zobQkUyRJ2g5j%4Iy6Bq*sVW)C$)o)|d~R*f^4)AujTD^&M`KUzzYlY?Nb@}Y^wfda
zz!yv$9*cWRKE3K<{j7`|_oPYxn)-0uL&GFm{ex*QgwltqN4g{lwpn+F~tKfOZM8z({o(<@pn$bfoF8v=A4
z_ulH1LVhy5ddBR4BbvuZwOp5XuT(s+mED}0nmVjBgsxqH&2v4c})=5hhfw^Gnm676KPIW2(C%pjMT?MfVrmoOc9uav-N
zbYx8}S!+_&26)hr7hiaN=n1gtyBVdb&sD9&`
z#>>PLFX=dn8nhorwtyHEuchSXGrD_YS)l&r9JB_2GRuHL;hb1>AwF~y;hU+F318Ns
z8!{M%ZgC=^HM}9Rhr)LH6=gKD)>z|MO-L^_7@jMvybdVF?g|ZZp{3%0-EN_RcA19H
zTQoH`#T-RswcZq_lbx!A<`R_Lm_WK`5%4z2Y=?o06s_E5tQm+(e)n}Mz|{`ArS%Jw
z1YAB*GdRq55Vv&8`4PRlnr+Ygnv;e`_4RF-Yz)g{mc1i5XDe_r#-vkXfeSuk1PYEX
zJ2cHadEBaifS{0x^K5#V!6Ng2yqc7nXYn4tg