From 92412eb5ce9354cc61ef5cf9dde37234165851c6 Mon Sep 17 00:00:00 2001 From: Xin Chen Date: Thu, 13 Apr 2023 14:36:13 +0800 Subject: [PATCH 1/3] update docs for mmdeploy main --- projects/mmpose4aigc/README.md | 6 +- projects/mmpose4aigc/README_CN.md | 6 +- .../mmpose4aigc/install_posetracker_linux.sh | 15 ++- projects/mmpose4aigc/mmpose_style_skeleton.sh | 10 +- projects/rtmpose/README.md | 96 ++++++++++--------- projects/rtmpose/README_CN.md | 90 ++++++++--------- projects/rtmpose/benchmark/README.md | 2 +- projects/rtmpose/benchmark/README_CN.md | 2 +- projects/rtmpose/rtmpose/pruning/README.md | 4 +- projects/rtmpose/rtmpose/pruning/README_CN.md | 4 +- 10 files changed, 120 insertions(+), 115 deletions(-) diff --git a/projects/mmpose4aigc/README.md b/projects/mmpose4aigc/README.md index 1c9d268093..c3759d846c 100644 --- a/projects/mmpose4aigc/README.md +++ b/projects/mmpose4aigc/README.md @@ -67,8 +67,8 @@ bash install_posetracker_linux.sh After installation, files are organized as follows: ```shell -|----mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1 -| |----sdk +|----mmdeploy-1.0.0-linux-x86_64-cxx11abi +| |----README.md | |----rtmpose-ort | | |----rtmdet-nano | | |----rtmpose-m @@ -83,7 +83,7 @@ Run the following command to generate a skeleton image: ```shell # generate a skeleton image bash mmpose_style_skeleton.sh \ - mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1/rtmpose-ort/000000147979.jpg + mmdeploy-1.0.0-linux-x86_64-cxx11abi/rtmpose-ort/000000147979.jpg ``` For more details, you can refer to [RTMPose](../rtmpose/README.md). 
diff --git a/projects/mmpose4aigc/README_CN.md b/projects/mmpose4aigc/README_CN.md index bdb943ec17..44bbe2d459 100644 --- a/projects/mmpose4aigc/README_CN.md +++ b/projects/mmpose4aigc/README_CN.md @@ -66,8 +66,8 @@ bash install_posetracker_linux.sh 最终的文件结构如下: ```shell -|----mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1 -| |----sdk +|----mmdeploy-1.0.0-linux-x86_64-cxx11abi +| |----README.md | |----rtmpose-ort | | |----rtmdet-nano | | |----rtmpose-m @@ -82,7 +82,7 @@ bash install_posetracker_linux.sh ```shell # 生成骨架图片 bash mmpose_style_skeleton.sh \ - mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1/rtmpose-ort/000000147979.jpg + mmdeploy-1.0.0-linux-x86_64-cxx11abi/rtmpose-ort/000000147979.jpg ``` 更多详细信息可以查看 [RTMPose](../rtmpose/README_CN.md)。 diff --git a/projects/mmpose4aigc/install_posetracker_linux.sh b/projects/mmpose4aigc/install_posetracker_linux.sh index 3b91409b16..09c91ce9d1 100644 --- a/projects/mmpose4aigc/install_posetracker_linux.sh +++ b/projects/mmpose4aigc/install_posetracker_linux.sh @@ -2,26 +2,23 @@ # Copyright (c) OpenMMLab. All rights reserved. # Download pre-compiled files -wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc3/mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1.tar.gz +wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz # Unzip files -tar -xzvf mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1.tar.gz +tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz # Go to the sdk folder -cd mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1/sdk +cd mmdeploy-1.0.0-linux-x86_64-cxx11abi # Init environment -source env.sh +source set_env.sh # If opencv 3+ is not installed on your system, execute the following command. 
# If it is installed, skip this command -bash opencv.sh +bash install_opencv.sh # Compile executable programs -bash build.sh - -# Go to mmdeploy folder -cd ../ +bash build_sdk.sh # Download models wget https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip diff --git a/projects/mmpose4aigc/mmpose_style_skeleton.sh b/projects/mmpose4aigc/mmpose_style_skeleton.sh index e8c07bef70..afb03ecfc7 100644 --- a/projects/mmpose4aigc/mmpose_style_skeleton.sh +++ b/projects/mmpose4aigc/mmpose_style_skeleton.sh @@ -1,17 +1,17 @@ #!/bin/bash # Copyright (c) OpenMMLab. All rights reserved. -WORKSPACE=mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1/sdk/ +WORKSPACE=mmdeploy-1.0.0-linux-x86_64-cxx11abi export LD_LIBRARY_PATH=${WORKSPACE}/lib:${WORKSPACE}/thirdparty/onnxruntime/lib:$LD_LIBRARY_PATH INPUT_IMAGE=$1 -mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1/sdk/bin/pose_tracker \ - mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1//rtmpose-ort/rtmdet-nano \ - mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1//rtmpose-ort/rtmpose-m \ +${WORKSPACE}/bin/pose_tracker \ + ${WORKSPACE}/rtmpose-ort/rtmdet-nano \ + ${WORKSPACE}/rtmpose-ort/rtmpose-m \ $INPUT_IMAGE \ --background black \ - --skeleton mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1//rtmpose-ort/t2i-adapter_skeleton.txt \ + --skeleton ${WORKSPACE}/rtmpose-ort/t2i-adapter_skeleton.txt \ --output ./skeleton_res.jpg \ --pose_kpt_thr 0.4 \ --show -1 diff --git a/projects/rtmpose/README.md b/projects/rtmpose/README.md index bf7f2f456f..948f900cec 100644 --- a/projects/rtmpose/README.md +++ b/projects/rtmpose/README.md @@ -211,7 +211,7 @@ We provide the UDP pretraining configs of the CSPNeXt backbone. 
Find more detail | CSPNeXt-m | 256x192 | 13.05 | 3.06 | 74.8 | 77.7 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth) | | CSPNeXt-l | 256x192 | 32.44 | 5.33 | 77.2 | 79.9 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth) | -We also provide the ImageNet classification pre-trained weights of the CSPNeXt backbone. Find more details in [RTMDet](https://github.com/open-mmlab/mmdetection/blob/dev-3.x/configs/rtmdet/README.md#classification). +We also provide the ImageNet classification pre-trained weights of the CSPNeXt backbone. Find more details in [RTMDet](https://github.com/open-mmlab/mmdetection/blob/latest/configs/rtmdet/README.md#classification). | Model | Input Size | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | | :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------------------------------------------------------------------: | @@ -249,23 +249,23 @@ Env Requirements: ```shell # Download pre-compiled files -wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc3/mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1.tar.gz +wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz # Unzip files -tar -xzvf mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1.tar.gz +tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz # Go to the sdk folder -cd mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1/sdk +cd mmdeploy-1.0.0-linux-x86_64-cxx11abi # Init environment -source env.sh +source set_env.sh # If opencv 3+ is not installed on your system, execute the following command. 
# If it is installed, skip this command -bash opencv.sh +bash install_opencv.sh # Compile executable programs -bash build.sh +bash build_sdk.sh # Inference for an image # Please pass the folder of the model, not the model file @@ -280,23 +280,23 @@ bash build.sh ```shell # Download pre-compiled files -wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc3/mmdeploy-1.0.0rc3-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz +wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz # Unzip files -tar -xzvf mmdeploy-1.0.0rc3-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz +tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz # Go to the sdk folder -cd mmdeploy-1.0.0rc3-linux-x86_64-cuda11.1-tensorrt8.2.3.0/sdk +cd mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3 # Init environment -source env.sh +source set_env.sh # If opencv 3+ is not installed on your system, execute the following command. # If it is installed, skip this command -bash opencv.sh +bash install_opencv.sh # Compile executable programs -bash build.sh +bash build_sdk.sh # Inference for an image # Please pass the folder of the model, not the model file @@ -313,16 +313,20 @@ For details, see [Pipeline Inference](#-step4-pipeline-inference). ##### Python Inference -1. Download the [pre-compiled SDK](https://github.com/open-mmlab/mmdeploy/releases). -2. Unzip the SDK and go to the `sdk/python` folder. -3. Install `mmdeploy_python` via `.whl` file. +1. 
Install mmdeploy_runtime or mmdeploy_runtime_gpu -```shell -pip install {file_name}.whl +``` +# for onnxruntime +pip install mmdeploy-runtime +download [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) add third party runtime libraries to the PATH + +# for onnxruntime-gpu / tensorrt +pip install mmdeploy-runtime-gpu +download [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) add third party runtime libraries to the PATH ``` -4. Download the [sdk models](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip) and unzip. -5. Inference with `pose_tracker.py`: +2. Download the [sdk models](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip) and unzip. +3. Inference with `pose_tracker.py`: **Note:** @@ -349,14 +353,14 @@ set-ExecutionPolicy RemoteSigned ```shell # in sdk folder: -.\opencv.ps1 +.\install_opencv.ps1 ``` 6. Set environment variables: ```shell # in sdk folder: -.\env.ps1 +. .\set_env.ps1 ``` 7. Compile the SDK: @@ -364,7 +368,7 @@ set-ExecutionPolicy RemoteSigned ```shell # in sdk folder: # (if you installed opencv by .\install_opencv.ps1) -.\build.ps1 +.\build_sdk.ps1 # (if you installed opencv yourself) .\build_sdk.ps1 "path/to/folder/of/OpenCVConfig.cmake" ``` @@ -415,38 +419,38 @@ Please refer to [Train and Test](https://mmpose.readthedocs.io/en/latest/user_gu ## 🏗️ How to Deploy [🔝](#-table-of-contents) -Here is a basic example of deploy RTMPose with [MMDeploy-1.x](https://github.com/open-mmlab/mmdeploy/tree/1.x). +Here is a basic example of deploy RTMPose with [MMDeploy](https://github.com/open-mmlab/mmdeploy/tree/main). ### 🧩 Step1. Install MMDeploy -Before starting the deployment, please make sure you install MMPose-1.x and MMDeploy-1.x correctly. 
+Before starting the deployment, please make sure you install MMPose and MMDeploy correctly. -- Install MMPose-1.x, please refer to the [MMPose-1.x installation guide](https://mmpose.readthedocs.io/en/latest/installation.html). -- Install MMDeploy-1.x, please refer to the [MMDeploy-1.x installation guide](https://mmdeploy.readthedocs.io/en/1.x/get_started.html#installation). +- Install MMPose, please refer to the [MMPose installation guide](https://mmpose.readthedocs.io/en/latest/installation.html). +- Install MMDeploy, please refer to the [MMDeploy installation guide](https://mmdeploy.readthedocs.io/en/latest/get_started.html#installation). Depending on the deployment backend, some backends require compilation of custom operators, so please refer to the corresponding document to ensure the environment is built correctly according to your needs: -- [ONNX RUNTIME SUPPORT](https://mmdeploy.readthedocs.io/en/1.x/05-supported-backends/onnxruntime.html) -- [TENSORRT SUPPORT](https://mmdeploy.readthedocs.io/en/1.x/05-supported-backends/tensorrt.html) -- [OPENVINO SUPPORT](https://mmdeploy.readthedocs.io/en/1.x/05-supported-backends/openvino.html) -- [More](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/05-supported-backends) +- [ONNX RUNTIME SUPPORT](https://mmdeploy.readthedocs.io/en/latest/05-supported-backends/onnxruntime.html) +- [TENSORRT SUPPORT](https://mmdeploy.readthedocs.io/en/latest/05-supported-backends/tensorrt.html) +- [OPENVINO SUPPORT](https://mmdeploy.readthedocs.io/en/latest/05-supported-backends/openvino.html) +- [More](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/05-supported-backends) ### 🛠️ Step2. Convert Model After the installation, you can enjoy the model deployment journey starting from converting PyTorch model to backend model by running MMDeploy's `tools/deploy.py`. 
-The detailed model conversion tutorial please refer to the [MMDeploy document](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/convert_model.html). Here we only give the example of converting RTMPose. +The detailed model conversion tutorial please refer to the [MMDeploy document](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/convert_model.html). Here we only give the example of converting RTMPose. Here we take converting RTMDet-nano and RTMPose-m to ONNX/TensorRT as an example. - If you only want to use ONNX, please use: - - [`detection_onnxruntime_static.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_onnxruntime_static.py) for RTMDet. - - [`pose-detection_simcc_onnxruntime_dynamic.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py) for RTMPose. + - [`detection_onnxruntime_static.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_onnxruntime_static.py) for RTMDet. + - [`pose-detection_simcc_onnxruntime_dynamic.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py) for RTMPose. - If you want to use TensorRT, please use: - - [`detection_tensorrt_static-320x320.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_tensorrt_static-320x320.py) for RTMDet. - - [`pose-detection_simcc_tensorrt_dynamic-256x192.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py) for RTMPose. + - [`detection_tensorrt_static-320x320.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_tensorrt_static-320x320.py) for RTMDet. 
+ - [`pose-detection_simcc_tensorrt_dynamic-256x192.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py) for RTMPose. -If you want to customize the settings in the deployment config for your requirements, please refer to [MMDeploy config tutorial](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/write_config.html). +If you want to customize the settings in the deployment config for your requirements, please refer to [MMDeploy config tutorial](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/write_config.html). In this tutorial, we organize files as follows: @@ -587,7 +591,7 @@ import argparse import cv2 import numpy as np -from mmdeploy_python import PoseDetector +from mmdeploy_runtime import PoseDetector def parse_args(): @@ -701,8 +705,8 @@ target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS}) #### Other languages -- [C# API Examples](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csharp) -- [JAVA API Examples](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/java) +- [C# API Examples](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csharp) +- [JAVA API Examples](https://github.com/open-mmlab/mmdeploy/tree/main/demo/java) ## 🚀 Step4. 
Pipeline Inference @@ -736,8 +740,8 @@ optional arguments: #### API Example -- [`det_pose.py`](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/demo/python/det_pose.py) -- [`det_pose.cxx`](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/demo/csrc/cpp/det_pose.cxx) +- [`det_pose.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/det_pose.py) +- [`det_pose.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/det_pose.cxx) ### Inference for a video @@ -792,8 +796,8 @@ optional arguments: #### API Example -- [`pose_tracker.py`](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/demo/python/pose_tracker.py) -- [`pose_tracker.cxx`](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/demo/csrc/cpp/pose_tracker.cxx) +- [`pose_tracker.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/pose_tracker.py) +- [`pose_tracker.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/pose_tracker.cxx) ## 📚 Common Usage [🔝](#-table-of-contents) @@ -846,7 +850,7 @@ The result is as follows: +--------+------------+---------+ ``` -If you want to learn more details of profiler, you can refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/useful_tools.html#profiler). +If you want to learn more details of profiler, you can refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/useful_tools.html#profiler). ### 📊 Model Test [🔝](#-table-of-contents) @@ -860,7 +864,7 @@ python tools/test.py \ --device cpu ``` -You can also refer to [MMDeploy Docs](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/docs/en/02-how-to-run/profile_model.md) for more details. +You can also refer to [MMDeploy Docs](https://github.com/open-mmlab/mmdeploy/blob/main/docs/en/02-how-to-run/profile_model.md) for more details. 
## 📜 Citation [🔝](#-table-of-contents) diff --git a/projects/rtmpose/README_CN.md b/projects/rtmpose/README_CN.md index 041c05e2b0..1aee2adf13 100644 --- a/projects/rtmpose/README_CN.md +++ b/projects/rtmpose/README_CN.md @@ -206,7 +206,7 @@ RTMPose 是一个长期优化迭代的项目,致力于业务场景下的高性 | CSPNeXt-m | 256x192 | 13.05 | 3.06 | 74.8 | 77.7 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth) | | CSPNeXt-l | 256x192 | 32.44 | 5.33 | 77.2 | 79.9 | [Model](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth) | -我们提供了 ImageNet 分类训练的 CSPNeXt 模型参数,更多细节请参考 [RTMDet](https://github.com/open-mmlab/mmdetection/blob/dev-3.x/configs/rtmdet/README.md#classification)。 +我们提供了 ImageNet 分类训练的 CSPNeXt 模型参数,更多细节请参考 [RTMDet](https://github.com/open-mmlab/mmdetection/blob/latest/configs/rtmdet/README.md#classification)。 | Model | Input Size | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | | :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------------------------------------------------------------------: | @@ -244,22 +244,22 @@ MMDeploy 提供了预编译的 SDK,用于对 RTMPose 项目进行 Pipeline 推 ```shell # 下载预编译包 -wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc3/mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1.tar.gz +wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz # 解压文件 -tar -xzvf mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1.tar.gz +tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi.tar.gz # 切换到 sdk 目录 -cd mmdeploy-1.0.0rc3-linux-x86_64-onnxruntime1.8.1/sdk +cd mmdeploy-1.0.0-linux-x86_64-cxx11abi # 设置环境变量 -source env.sh +source set_env.sh # 如果系统中没有安装 opencv 3+,请执行以下命令。如果已安装,可略过 -bash opencv.sh +bash install_opencv.sh # 编译可执行程序 -bash 
build.sh +bash build_sdk.sh # 图片推理 # 请传入模型目录,而不是模型文件 @@ -274,22 +274,22 @@ bash build.sh ```shell # 下载预编译包 -wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc3/mmdeploy-1.0.0rc3-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz +wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz # 解压文件 -tar -xzvf mmdeploy-1.0.0rc3-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz +tar -xzvf mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3.tar.gz # 切换到 sdk 目录 -cd mmdeploy-1.0.0rc3-linux-x86_64-cuda11.1-tensorrt8.2.3.0/sdk +cd mmdeploy-1.0.0-linux-x86_64-cxx11abi-cuda11.3 # 设置环境变量 -source env.sh +source set_env.sh # 如果系统中没有安装 opencv 3+,请执行以下命令。如果已安装,可略过 -bash opencv.sh +bash install_opencv.sh # 编译可执行程序 -bash build.sh +bash build_sdk.sh # 图片推理 # 请传入模型目录,而不是模型文件 @@ -306,16 +306,20 @@ bash build.sh ##### Python 推理 -1. 下载 [预编译包](https://github.com/open-mmlab/mmdeploy/releases)。 -2. 解压文件,进入 `sdk/python` 目录。 -3. 用 `whl` 安装 `mmdeploy_python`。 +1. 安装 mmdeploy_runtime 或者 mmdeploy_runtime_gpu -```shell -pip install {file_name}.whl +``` +# for onnxruntime +pip install mmdeploy-runtime +下载 [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) 并将 third_party 中第三方推理库的动态库添加到 PATH + +# for onnxruntime-gpu / tensorrt +pip install mmdeploy-runtime-gpu +下载 [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64-cuda11.3.zip) 并将 third_party 中第三方推理库的动态库添加到 PATH ``` -4. 下载 [sdk 模型](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip)并解压。 -5. 使用 `pose_tracker.py` 进行推理: +2. 下载 [sdk 模型](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip)并解压。 +3. 使用 `pose_tracker.py` 进行推理: **提示:** @@ -342,14 +346,14 @@ set-ExecutionPolicy RemoteSigned ```shell # in sdk folder: -.\opencv.ps1 +.\install_opencv.ps1 ``` 6. 
配置环境变量: ```shell # in sdk folder: -.\set_env.ps1 +. .\set_env.ps1 ``` 7. 编译 sdk: @@ -408,41 +412,41 @@ python demo/topdown_demo_with_mmdet.py \ ## 🏗️ 部署教程 [🔝](#-table-of-contents) -本教程将展示如何通过 [MMDeploy-1.x](https://github.com/open-mmlab/mmdeploy/tree/1.x) 部署 RTMPose 项目。 +本教程将展示如何通过 [MMDeploy](https://github.com/open-mmlab/mmdeploy/tree/main) 部署 RTMPose 项目。 ### 🧩 安装 在开始部署之前,首先你需要确保正确安装了 MMPose, MMDetection, MMDeploy,相关安装教程如下: - [安装 MMPose 与 MMDetection](https://mmpose.readthedocs.io/zh_CN/latest/installation.html) -- [安装 MMDeploy](https://mmdeploy.readthedocs.io/zh_CN/1.x/04-supported-codebases/mmpose.html) +- [安装 MMDeploy](https://mmdeploy.readthedocs.io/zh_CN/latest/04-supported-codebases/mmpose.html) 根据部署后端的不同,有的后端需要对自定义算子进行编译,请根据需求前往对应的文档确保环境搭建正确: -- [ONNX](https://mmdeploy.readthedocs.io/zh_CN/1.x/05-supported-backends/onnxruntime.html) -- [TensorRT](https://mmdeploy.readthedocs.io/zh_CN/1.x/05-supported-backends/tensorrt.html) -- [OpenVINO](https://mmdeploy.readthedocs.io/zh_CN/1.x/05-supported-backends/openvino.html) -- [更多](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/05-supported-backends) +- [ONNX](https://mmdeploy.readthedocs.io/zh_CN/latest/05-supported-backends/onnxruntime.html) +- [TensorRT](https://mmdeploy.readthedocs.io/zh_CN/latest/05-supported-backends/tensorrt.html) +- [OpenVINO](https://mmdeploy.readthedocs.io/zh_CN/latest/05-supported-backends/openvino.html) +- [更多](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/05-supported-backends) ### 🛠️ 模型转换 在完成安装之后,你就可以开始模型部署了。通过 MMDeploy 提供的 `tools/deploy.py` 可以方便地将 Pytorch 模型转换到不同的部署后端。 -我们本节演示将 RTMDet 和 RTMPose 模型导出为 ONNX 和 TensorRT 格式,如果你希望了解更多内容请前往 [MMDeploy 文档](https://mmdeploy.readthedocs.io/zh_CN/1.x/02-how-to-run/convert_model.html)。 +我们本节演示将 RTMDet 和 RTMPose 模型导出为 ONNX 和 TensorRT 格式,如果你希望了解更多内容请前往 [MMDeploy 文档](https://mmdeploy.readthedocs.io/zh_CN/latest/02-how-to-run/convert_model.html)。 - ONNX 配置 - \- 
RTMDet:[`detection_onnxruntime_static.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_onnxruntime_static.py) + \- RTMDet:[`detection_onnxruntime_static.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_onnxruntime_static.py) - \- RTMPose:[`pose-detection_simcc_onnxruntime_dynamic.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py) + \- RTMPose:[`pose-detection_simcc_onnxruntime_dynamic.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_onnxruntime_dynamic.py) - TensorRT 配置 - \- RTMDet:[`detection_tensorrt_static-320x320.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_tensorrt_static-320x320.py) + \- RTMDet:[`detection_tensorrt_static-320x320.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_tensorrt_static-320x320.py) - \- RTMPose:[`pose-detection_simcc_tensorrt_dynamic-256x192.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py) + \- RTMPose:[`pose-detection_simcc_tensorrt_dynamic-256x192.py`](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmpose/pose-detection_simcc_tensorrt_dynamic-256x192.py) -如果你需要对部署配置进行修改,请参考 [MMDeploy config tutorial](https://mmdeploy.readthedocs.io/zh_CN/1.x/02-how-to-run/write_config.html). +如果你需要对部署配置进行修改,请参考 [MMDeploy config tutorial](https://mmdeploy.readthedocs.io/zh_CN/latest/02-how-to-run/write_config.html). 
本教程中使用的文件结构如下: @@ -585,7 +589,7 @@ import argparse import cv2 import numpy as np -from mmdeploy_python import PoseDetector +from mmdeploy_runtime import PoseDetector def parse_args(): parser = argparse.ArgumentParser( @@ -694,8 +698,8 @@ target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS}) #### 其他语言 -- [C# API 示例](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csharp) -- [JAVA API 示例](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/java) +- [C# API 示例](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csharp) +- [JAVA API 示例](https://github.com/open-mmlab/mmdeploy/tree/main/demo/java) ### 🚀 Pipeline 推理 @@ -729,9 +733,9 @@ optional arguments: **API** **示例** -\- [`det_pose.py`](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/demo/python/det_pose.py) +\- [`det_pose.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/det_pose.py) -\- [`det_pose.cxx`](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/demo/csrc/cpp/det_pose.cxx) +\- [`det_pose.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/det_pose.cxx) #### 视频推理 @@ -781,9 +785,9 @@ optional arguments: **API** **示例** -\- [`pose_tracker.py`](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/demo/python/pose_tracker.py) +\- [`pose_tracker.py`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/python/pose_tracker.py) -\- [`pose_tracker.cxx`](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/demo/csrc/cpp/pose_tracker.cxx) +\- [`pose_tracker.cxx`](https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/cpp/pose_tracker.cxx) ## 📚 常用功能 [🔝](#-table-of-contents) @@ -836,7 +840,7 @@ python tools/profiler.py \ +--------+------------+---------+ ``` -如果你希望详细了解 profiler 的更多参数设置与功能,可以前往 [Profiler 
Docs](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/useful_tools.html#profiler) +如果你希望详细了解 profiler 的更多参数设置与功能,可以前往 [Profiler Docs](https://mmdeploy.readthedocs.io/en/main/02-how-to-run/useful_tools.html#profiler) ### 📊 精度验证 [🔝](#-table-of-contents) @@ -850,7 +854,7 @@ python tools/test.py \ --device cpu ``` -详细内容请参考 [MMDeploys Docs](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/docs/zh_cn/02-how-to-run/profile_model.md) +详细内容请参考 [MMDeploys Docs](https://github.com/open-mmlab/mmdeploy/blob/main/docs/zh_cn/02-how-to-run/profile_model.md) ## 📜 引用 [🔝](#-table-of-contents) diff --git a/projects/rtmpose/benchmark/README.md b/projects/rtmpose/benchmark/README.md index 13fe9c183f..46c036273c 100644 --- a/projects/rtmpose/benchmark/README.md +++ b/projects/rtmpose/benchmark/README.md @@ -113,4 +113,4 @@ The result is as follows: +--------+------------+---------+ ``` -If you want to learn more details of profiler, you can refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/useful_tools.html#profiler). +If you want to learn more details of profiler, you can refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/useful_tools.html#profiler). diff --git a/projects/rtmpose/benchmark/README_CN.md b/projects/rtmpose/benchmark/README_CN.md index 08578f44f5..e1824d12b7 100644 --- a/projects/rtmpose/benchmark/README_CN.md +++ b/projects/rtmpose/benchmark/README_CN.md @@ -113,4 +113,4 @@ The result is as follows: +--------+------------+---------+ ``` -If you want to learn more details of profiler, you can refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/useful_tools.html#profiler). +If you want to learn more details of profiler, you can refer to the [Profiler Docs](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/useful_tools.html#profiler). 
diff --git a/projects/rtmpose/rtmpose/pruning/README.md b/projects/rtmpose/rtmpose/pruning/README.md index 28be530cc1..0ffea8e1c4 100644 --- a/projects/rtmpose/rtmpose/pruning/README.md +++ b/projects/rtmpose/rtmpose/pruning/README.md @@ -82,7 +82,7 @@ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_test.sh \ ### Deploy -For a pruned model, you only need to use the pruning deploy config to instead the pretrain config to deploy the pruned version of your model. If you are not familiar with mmdeploy, it's recommended to refer to [MMDeploy document](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/convert_model.html). +For a pruned model, you only need to use the pruning deploy config to instead the pretrain config to deploy the pruned version of your model. If you are not familiar with mmdeploy, it's recommended to refer to [MMDeploy document](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/convert_model.html). ```bash python {mmdeploy}/tools/deploy.py \ @@ -107,7 +107,7 @@ The divisor is important for the actual inference speed, and we suggest you to t ## Reference -[GroupFisher in MMRazor](https://github.com/open-mmlab/mmrazor/tree/dev-1.x/configs/pruning/base/group_fisher) +[GroupFisher in MMRazor](https://github.com/open-mmlab/mmrazor/tree/latest/configs/pruning/base/group_fisher) [rp_sa_f]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth [rp_sa_l]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.json diff --git a/projects/rtmpose/rtmpose/pruning/README_CN.md b/projects/rtmpose/rtmpose/pruning/README_CN.md index 945160b246..92aaf8766f 100644 --- a/projects/rtmpose/rtmpose/pruning/README_CN.md +++ b/projects/rtmpose/rtmpose/pruning/README_CN.md @@ -81,7 +81,7 @@ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_test.sh \ 
### Deploy -对于剪枝模型,你只需要使用剪枝部署 config 来代替预训练 config 来部署模型的剪枝版本。如果你不熟悉 MMDeploy,请参看[MMDeploy document](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/convert_model.html)。 +对于剪枝模型,你只需要使用剪枝部署 config 来代替预训练 config 来部署模型的剪枝版本。如果你不熟悉 MMDeploy,请参看[MMDeploy document](https://mmdeploy.readthedocs.io/en/latest/02-how-to-run/convert_model.html)。 ```bash python {mmdeploy}/tools/deploy.py \ @@ -106,7 +106,7 @@ divisor 设置十分重要,我们建议你在尝试 \[1,2,4,8,16,32\],以找 ## Reference -[GroupFisher in MMRazor](https://github.com/open-mmlab/mmrazor/tree/dev-1.x/configs/pruning/base/group_fisher) +[GroupFisher in MMRazor](https://github.com/open-mmlab/mmrazor/tree/latest/configs/pruning/base/group_fisher) [rp_sa_f]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.pth [rp_sa_l]: https://download.openmmlab.com/mmrazor/v1/pruning/group_fisher/rtmpose-s/group_fisher_finetune_rtmpose-s_8xb256-420e_aic-coco-256x192.json From 2d0ff9a26969e087aa850b84ee24e00fb9fa09c9 Mon Sep 17 00:00:00 2001 From: Xin Chen Date: Thu, 13 Apr 2023 15:17:56 +0800 Subject: [PATCH 2/3] remvoe note --- projects/rtmpose/README.md | 6 +----- projects/rtmpose/README_CN.md | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/projects/rtmpose/README.md b/projects/rtmpose/README.md index 948f900cec..196a947a6c 100644 --- a/projects/rtmpose/README.md +++ b/projects/rtmpose/README.md @@ -328,12 +328,8 @@ download [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/m 2. Download the [sdk models](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip) and unzip. 3. Inference with `pose_tracker.py`: -**Note:** - -- If you meet `ImportError: DLL load failed while importing mmdeploy_python`, please copy `thirdparty/onnxruntime/lib/onnxruntime.dll` to `site-packages/mmdeploy_python/` of your current Python env. 
- ```shell -# go to ./sdk/example/python +# go to ./example/python # Please pass the folder of the model, not the model file python pose_tracker.py cpu {det work-dir} {pose work-dir} {your_video.mp4} ``` diff --git a/projects/rtmpose/README_CN.md b/projects/rtmpose/README_CN.md index 1aee2adf13..59e98b6961 100644 --- a/projects/rtmpose/README_CN.md +++ b/projects/rtmpose/README_CN.md @@ -321,12 +321,8 @@ pip install mmdeploy-runtime-gpu 2. 下载 [sdk 模型](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip)并解压。 3. 使用 `pose_tracker.py` 进行推理: -**提示:** - -- 如果遇到 `ImportError: DLL load failed while importing mmdeploy_python`,请复制 `thirdparty/onnxruntime/lib/onnxruntime.dll` 到当前环境中 python 安装目录的 `site-packages/mmdeploy_python/`。 - ```shell -# 进入 ./sdk/example/python +# 进入 ./example/python # 请传入模型目录,而不是模型文件 python pose_tracker.py cpu {det work-dir} {pose work-dir} {your_video.mp4} ``` From f93e32ad4f7dfe23478ccd60fcef18982050daf1 Mon Sep 17 00:00:00 2001 From: Xin Chen Date: Thu, 13 Apr 2023 15:51:45 +0800 Subject: [PATCH 3/3] add # --- projects/rtmpose/README.md | 4 ++-- projects/rtmpose/README_CN.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/projects/rtmpose/README.md b/projects/rtmpose/README.md index 196a947a6c..12de1ad1fe 100644 --- a/projects/rtmpose/README.md +++ b/projects/rtmpose/README.md @@ -318,11 +318,11 @@ For details, see [Pipeline Inference](#-step4-pipeline-inference). 
``` # for onnxruntime pip install mmdeploy-runtime -download [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) add third party runtime libraries to the PATH +# download [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) and add third party runtime libraries to the PATH # for onnxruntime-gpu / tensorrt pip install mmdeploy-runtime-gpu -download [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) add third party runtime libraries to the PATH +# download [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64-cuda11.3.zip) and add third party runtime libraries to the PATH ``` 2. Download the [sdk models](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip) and unzip. diff --git a/projects/rtmpose/README_CN.md b/projects/rtmpose/README_CN.md index 59e98b6961..5478bed4e4 100644 --- a/projects/rtmpose/README_CN.md +++ b/projects/rtmpose/README_CN.md @@ -311,11 +311,11 @@ bash build_sdk.sh ``` # for onnxruntime pip install mmdeploy-runtime -下载 [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) 并将 third_party 中第三方推理库的动态库添加到 PATH +# 下载 [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64.zip) 并将 third_party 中第三方推理库的动态库添加到 PATH # for onnxruntime-gpu / tensorrt pip install mmdeploy-runtime-gpu -下载 [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64-cuda11.3.zip) 并将 third_party 中第三方推理库的动态库添加到 PATH +# 下载 [sdk](https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-windows-amd64-cuda11.3.zip) 并将 third_party 中第三方推理库的动态库添加到 PATH ``` 2. 
下载 [sdk 模型](https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-cpu.zip)并解压。