-
Notifications
You must be signed in to change notification settings - Fork 676
Modify PPMatting backend and docs #182
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 74 commits
1684b05
71c00d9
21ab2f9
d63e862
7b3b0e2
d039e80
a34a815
eb010a8
39f64f2
d071b37
d5026ca
fb376ad
4b8737c
ce922a0
6e00b82
8c359fb
906c730
80c1223
6072757
2c6e6a4
48136f0
6feca92
ae70d4f
f591b85
f0def41
15b9160
4706e8c
dc83584
086debd
4f980b9
2e61c95
80beadf
8103772
f5f7a86
e6cec25
e25e4f2
e8a8439
a182893
3aa015f
d6b98aa
871cfc6
013921a
7a5a6d9
c996117
0aefe32
2330414
4660161
033c18e
6c94d65
85fb256
90ca4cb
f6a4ed2
3682091
ca1e110
93ba6a6
767842e
cc32733
2771a3b
a1e29ac
5ecc6fe
2780588
c00be81
9082178
4b14f56
4876b82
9cebb1f
d1e3b29
69cf0d2
2ff10e1
a673a2c
832d777
e513eac
ded2054
19db925
15be4a6
3a5b93a
f765853
c2332b0
950f948
64a13c9
09c073d
99969b6
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -50,39 +50,6 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file, | |
| << std::endl; | ||
| } | ||
|
|
||
| void GpuInfer(const std::string& model_dir, const std::string& image_file, | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. 这里保留GpuInfer,但是设置一下backend为paddle option.UsePaddleBackend() |
||
| const std::string& background_file) { | ||
| auto model_file = model_dir + sep + "model.pdmodel"; | ||
| auto params_file = model_dir + sep + "model.pdiparams"; | ||
| auto config_file = model_dir + sep + "deploy.yaml"; | ||
|
|
||
| auto option = fastdeploy::RuntimeOption(); | ||
| option.UseGpu(); | ||
| auto model = fastdeploy::vision::matting::PPMatting(model_file, params_file, | ||
| config_file, option); | ||
| if (!model.Initialized()) { | ||
| std::cerr << "Failed to initialize." << std::endl; | ||
| return; | ||
| } | ||
|
|
||
| auto im = cv::imread(image_file); | ||
| auto im_bak = im.clone(); | ||
| cv::Mat bg = cv::imread(background_file); | ||
| fastdeploy::vision::MattingResult res; | ||
| if (!model.Predict(&im, &res)) { | ||
| std::cerr << "Failed to predict." << std::endl; | ||
| return; | ||
| } | ||
| auto vis_im = fastdeploy::vision::Visualize::VisMattingAlpha(im_bak, res); | ||
| auto vis_im_with_bg = | ||
| fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res); | ||
| cv::imwrite("visualized_result.jpg", vis_im_with_bg); | ||
| cv::imwrite("visualized_result_fg.jpg", vis_im); | ||
| std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " | ||
| "and ./visualized_result_fg.jpgg" | ||
| << std::endl; | ||
| } | ||
|
|
||
| void TrtInfer(const std::string& model_dir, const std::string& image_file, | ||
| const std::string& background_file) { | ||
| auto model_file = model_dir + sep + "model.pdmodel"; | ||
|
|
@@ -131,8 +98,6 @@ int main(int argc, char* argv[]) { | |
| } | ||
| if (std::atoi(argv[4]) == 0) { | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. 同上,保留gpu推理案例 |
||
| CpuInfer(argv[1], argv[2], argv[3]); | ||
| } else if (std::atoi(argv[4]) == 1) { | ||
| GpuInfer(argv[1], argv[2], argv[3]); | ||
| } else if (std::atoi(argv[4]) == 2) { | ||
| TrtInfer(argv[1], argv[2], argv[3]); | ||
| } | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -19,8 +19,6 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg | |
| wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_bgr.jpg | ||
| # CPU推理 | ||
| python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device cpu | ||
| # GPU推理 (TODO: ORT-GPU 推理会报错) | ||
| python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. 同上 |
||
| # GPU上使用TensorRT推理 (注意:TensorRT推理第一次运行,有序列化模型的操作,有一定耗时,需要耐心等待) | ||
| python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu --use_trt True | ||
| ``` | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
这里保留GpuInfer,但是设置一下backend为paddle