Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
109 commits
Select commit Hold shift + click to select a range
1684b05
first commit for yolov7
ziqi-jin Jul 13, 2022
71c00d9
pybind for yolov7
ziqi-jin Jul 14, 2022
21ab2f9
CPP README.md
ziqi-jin Jul 14, 2022
d63e862
CPP README.md
ziqi-jin Jul 14, 2022
7b3b0e2
modified yolov7.cc
ziqi-jin Jul 14, 2022
d039e80
README.md
ziqi-jin Jul 15, 2022
a34a815
python file modify
ziqi-jin Jul 18, 2022
eb010a8
merge test
ziqi-jin Jul 18, 2022
39f64f2
delete license in fastdeploy/
ziqi-jin Jul 18, 2022
d071b37
repush the conflict part
ziqi-jin Jul 18, 2022
d5026ca
README.md modified
ziqi-jin Jul 18, 2022
fb376ad
README.md modified
ziqi-jin Jul 18, 2022
4b8737c
file path modified
ziqi-jin Jul 18, 2022
ce922a0
file path modified
ziqi-jin Jul 18, 2022
6e00b82
file path modified
ziqi-jin Jul 18, 2022
8c359fb
file path modified
ziqi-jin Jul 18, 2022
906c730
file path modified
ziqi-jin Jul 18, 2022
80c1223
README modified
ziqi-jin Jul 18, 2022
6072757
README modified
ziqi-jin Jul 18, 2022
2c6e6a4
move some helpers to private
ziqi-jin Jul 18, 2022
48136f0
add examples for yolov7
ziqi-jin Jul 18, 2022
6feca92
api.md modified
ziqi-jin Jul 18, 2022
ae70d4f
api.md modified
ziqi-jin Jul 18, 2022
f591b85
api.md modified
ziqi-jin Jul 18, 2022
f0def41
YOLOv7
ziqi-jin Jul 18, 2022
15b9160
yolov7 release link
ziqi-jin Jul 18, 2022
4706e8c
yolov7 release link
ziqi-jin Jul 18, 2022
dc83584
yolov7 release link
ziqi-jin Jul 18, 2022
086debd
copyright
ziqi-jin Jul 18, 2022
4f980b9
change some helpers to private
ziqi-jin Jul 18, 2022
2e61c95
Merge branch 'develop' into develop
ziqi-jin Jul 19, 2022
80beadf
change variables to const and fix documents.
ziqi-jin Jul 19, 2022
8103772
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Jul 19, 2022
f5f7a86
gitignore
ziqi-jin Jul 19, 2022
e6cec25
Transfer some funtions to private member of class
ziqi-jin Jul 19, 2022
e25e4f2
Transfer some funtions to private member of class
ziqi-jin Jul 19, 2022
e8a8439
Merge from develop (#9)
ziqi-jin Jul 20, 2022
a182893
first commit for yolor
ziqi-jin Jul 20, 2022
3aa015f
for merge
ziqi-jin Jul 20, 2022
d6b98aa
Develop (#11)
ziqi-jin Jul 20, 2022
871cfc6
Merge branch 'yolor' into develop
ziqi-jin Jul 20, 2022
013921a
Yolor (#16)
ziqi-jin Jul 21, 2022
7a5a6d9
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Jul 21, 2022
c996117
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Jul 22, 2022
0aefe32
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Jul 26, 2022
2330414
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Jul 26, 2022
4660161
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Jul 27, 2022
033c18e
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Jul 28, 2022
6c94d65
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Jul 28, 2022
85fb256
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Jul 29, 2022
90ca4cb
add is_dynamic for YOLO series (#22)
ziqi-jin Jul 29, 2022
f6a4ed2
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 1, 2022
3682091
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 3, 2022
ca1e110
Merge remote-tracking branch 'upstream/develop' into develop
ziqi-jin Aug 8, 2022
93ba6a6
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 9, 2022
767842e
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 10, 2022
cc32733
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 10, 2022
2771a3b
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 11, 2022
a1e29ac
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 11, 2022
5ecc6fe
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 11, 2022
2780588
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 12, 2022
c00be81
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 15, 2022
9082178
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 15, 2022
4b14f56
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 15, 2022
4876b82
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 16, 2022
9cebb1f
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 18, 2022
d1e3b29
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 19, 2022
69cf0d2
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 22, 2022
2ff10e1
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 23, 2022
a673a2c
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 25, 2022
832d777
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 25, 2022
e513eac
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Aug 29, 2022
ded2054
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Sep 1, 2022
19db925
modify ppmatting backend and docs
ziqi-jin Sep 1, 2022
15be4a6
modify ppmatting docs
ziqi-jin Sep 1, 2022
3a5b93a
fix the PPMatting size problem
ziqi-jin Sep 3, 2022
f765853
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Sep 3, 2022
c2332b0
fix LimitShort's log
ziqi-jin Sep 3, 2022
950f948
retrigger ci
ziqi-jin Sep 4, 2022
64a13c9
modify PPMatting docs
ziqi-jin Sep 4, 2022
09c073d
modify the way for dealing with LimitShort
ziqi-jin Sep 6, 2022
99969b6
Merge branch 'develop' into develop
jiangjiajun Sep 6, 2022
cf248de
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Sep 8, 2022
9d4a4c9
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Sep 13, 2022
622fbf7
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Sep 15, 2022
d1cf1ad
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Sep 19, 2022
ff9a07e
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Sep 21, 2022
2707b03
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Sep 22, 2022
896d1d9
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Oct 8, 2022
25ee7e2
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Oct 12, 2022
79068d3
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Oct 17, 2022
74b3ee0
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Oct 21, 2022
a75c0c4
add python comments for external models
ziqi-jin Oct 21, 2022
985d273
modify resnet c++ comments
ziqi-jin Oct 21, 2022
e32a25c
modify C++ comments for external models
ziqi-jin Oct 21, 2022
8a73af6
modify python comments and add result class comments
ziqi-jin Oct 21, 2022
2aa7939
Merge branch 'develop' into doc_python
jiangjiajun Oct 22, 2022
887c53a
Merge branch 'develop' into doc_python
jiangjiajun Oct 23, 2022
963b9b9
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Oct 24, 2022
337e8c0
fix comments compile error
ziqi-jin Oct 24, 2022
d1d6890
modify result.h comments
ziqi-jin Oct 24, 2022
67234dd
Merge branch 'develop' into doc_python
jiangjiajun Oct 24, 2022
440e2a9
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Oct 24, 2022
ac35141
Merge branch 'doc_python' into develop
ziqi-jin Oct 24, 2022
3d83785
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Oct 24, 2022
363a485
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Oct 25, 2022
dc44eac
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Oct 26, 2022
07717b4
Merge branch 'PaddlePaddle:develop' into develop
ziqi-jin Oct 26, 2022
5ecf0ee
add default values for public variables in comments
ziqi-jin Oct 26, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 7 additions & 3 deletions fastdeploy/vision/classification/contrib/resnet.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,12 +50,16 @@ class FASTDEPLOY_DECL ResNet : public FastDeployModel {
*/
virtual bool Predict(cv::Mat* im, ClassifyResult* result, int topk = 1);
/*! @brief
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {224, 224}
*/
std::vector<int> size;
/// Mean parameters for normalize, size should be the the same as channels
/*! @brief
Mean parameters for normalize, size should be the same as channels, default mean_vals = {0.485f, 0.456f, 0.406f}
*/
std::vector<float> mean_vals;
/// Std parameters for normalize, size should be the the same as channels
/*! @brief
Std parameters for normalize, size should be the same as channels, default std_vals = {0.229f, 0.224f, 0.225f}
*/
std::vector<float> std_vals;


Expand Down
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/contrib/nanodet_plus.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ class FASTDEPLOY_DECL NanoDetPlus : public FastDeployModel {
float nms_iou_threshold = 0.5f);

/*! @brief
Argument for image preprocessing step, tuple of input size (width, height), e.g (320, 320)
Argument for image preprocessing step, tuple of input size (width, height), default (320, 320)
*/
std::vector<int> size;
// padding value, size should be the same as channels
Expand Down
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/contrib/scaledyolov4.h
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ class FASTDEPLOY_DECL ScaledYOLOv4 : public FastDeployModel {
float nms_iou_threshold = 0.5);

/*! @brief
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
*/
std::vector<int> size;
// padding value, size should be the same as channels
Expand Down
4 changes: 2 additions & 2 deletions fastdeploy/vision/detection/contrib/yolor.h
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ class FASTDEPLOY_DECL YOLOR : public FastDeployModel {
virtual std::string ModelName() const { return "YOLOR"; }
/** \brief Predict the detection result for an input image
*
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
* \param[in] im The input image data, comes from cv::imread()
* \param[in] result The output detection result will be written to this structure
* \param[in] conf_threshold confidence threshold for postprocessing, default is 0.25
* \param[in] nms_iou_threshold iou threshold for NMS, default is 0.5
Expand All @@ -50,7 +50,7 @@ class FASTDEPLOY_DECL YOLOR : public FastDeployModel {
float nms_iou_threshold = 0.5);

/*! @brief
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
*/
std::vector<int> size;
// padding value, size should be the same as channels
Expand Down
6 changes: 4 additions & 2 deletions fastdeploy/vision/detection/contrib/yolov5.h
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ class FASTDEPLOY_DECL YOLOv5 : public FastDeployModel {
float max_wh = 7680.0);

/*! @brief
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
*/
std::vector<int> size_;
// padding value, size should be the same as channels
Expand All @@ -96,7 +96,9 @@ class FASTDEPLOY_DECL YOLOv5 : public FastDeployModel {
int stride_;
// for offseting the boxes by classes when using NMS
float max_wh_;
/// for different strategies to get boxes when postprocessing
/*! @brief
Argument for image preprocessing step, for different strategies to get boxes when postprocessing, default true
*/
bool multi_label_;

private:
Expand Down
4 changes: 2 additions & 2 deletions fastdeploy/vision/detection/contrib/yolov5lite.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ class FASTDEPLOY_DECL YOLOv5Lite : public FastDeployModel {
void UseCudaPreprocessing(int max_img_size = 3840 * 2160);

/*! @brief
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, size = {640, 640}
*/
std::vector<int> size;
// padding value, size should be the same as channels
Expand Down Expand Up @@ -84,7 +84,7 @@ class FASTDEPLOY_DECL YOLOv5Lite : public FastDeployModel {
decode module. Please set it 'true' manually if the model file
was exported with decode module.
false : ONNX files without decode module.
true : ONNX file with decode module.
true : ONNX file with decode module. default false.
*/
bool is_decode_exported;

Expand Down
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/contrib/yolov6.h
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ class FASTDEPLOY_DECL YOLOv6 : public FastDeployModel {
void UseCudaPreprocessing(int max_img_size = 3840 * 2160);

/*! @brief
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640};
*/
std::vector<int> size;
// padding value, size should be the same as channels
Expand Down
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/contrib/yolov7.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
void UseCudaPreprocessing(int max_img_size = 3840 * 2160);

/*! @brief
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
*/
std::vector<int> size;
// padding value, size should be the same as channels
Expand Down
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/contrib/yolov7end2end_ort.h
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ class FASTDEPLOY_DECL YOLOv7End2EndORT : public FastDeployModel {
float conf_threshold = 0.25);

/*! @brief
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
*/
std::vector<int> size;
// padding value, size should be the same as channels
Expand Down
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/contrib/yolov7end2end_trt.h
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ class FASTDEPLOY_DECL YOLOv7End2EndTRT : public FastDeployModel {
void UseCudaPreprocessing(int max_img_size = 3840 * 2160);

/*! @brief
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
*/
std::vector<int> size;
// padding value, size should be the same as channels
Expand Down
4 changes: 2 additions & 2 deletions fastdeploy/vision/detection/contrib/yolox.h
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ class FASTDEPLOY_DECL YOLOX : public FastDeployModel {
float nms_iou_threshold = 0.5);

/*! @brief
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
*/
std::vector<int> size;
// padding value, size should be the same as channels
Expand All @@ -61,7 +61,7 @@ class FASTDEPLOY_DECL YOLOX : public FastDeployModel {
whether the model_file was exported with decode module. The official
YOLOX/tools/export_onnx.py script will export ONNX file without
decode module. Please set it 'true' manually if the model file
was exported with decode module.
was exported with decode module. default false.
*/
bool is_decode_exported;
// downsample strides for YOLOX to generate anchors,
Expand Down
2 changes: 1 addition & 1 deletion fastdeploy/vision/facedet/contrib/retinaface.h
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ class FASTDEPLOY_DECL RetinaFace : public FastDeployModel {
*/
std::vector<int> downsample_strides;
/*! @brief
Argument for image postprocessing step, min sizes, width and height for each anchor
Argument for image postprocessing step, min sizes, width and height for each anchor, default min_sizes = {{16, 32}, {64, 128}, {256, 512}}
*/
std::vector<std::vector<int>> min_sizes;
/*! @brief
Expand Down
8 changes: 5 additions & 3 deletions fastdeploy/vision/facedet/contrib/scrfd.h
Original file line number Diff line number Diff line change
Expand Up @@ -77,14 +77,16 @@ class FASTDEPLOY_DECL SCRFD : public FastDeployModel {
*/
int landmarks_per_face;
/*! @brief
Argument for image postprocessing step, the outputs of onnx file with key points features or not
Argument for image postprocessing step, the outputs of onnx file with key points features or not, default true
*/
bool use_kps;
/*! @brief
Argument for image postprocessing step, the upperbond number of boxes processed by nms
Argument for image postprocessing step, the upper bound number of boxes processed by nms, default 30000
*/
int max_nms;
/// Argument for image postprocessing step, anchor number of each stride
/*! @brief
Argument for image postprocessing step, anchor number of each stride, default 2
*/
unsigned int num_anchors;

private:
Expand Down
4 changes: 2 additions & 2 deletions fastdeploy/vision/facedet/contrib/yolov5face.h
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ class FASTDEPLOY_DECL YOLOv5Face : public FastDeployModel {
float nms_iou_threshold = 0.5);

/*! @brief
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
*/
std::vector<int> size;
// padding value, size should be the same as channels
Expand All @@ -72,7 +72,7 @@ class FASTDEPLOY_DECL YOLOv5Face : public FastDeployModel {
/*! @brief
Argument for image postprocessing step, setup the number of landmarks for per face (if have), default 5 in
official yolov5face; note that the output tensor's shape must be:
(1,n,4+1+2*landmarks_per_face+1=box+obj+landmarks+cls)
(1,n,4+1+2*landmarks_per_face+1=box+obj+landmarks+cls), default 5
*/
int landmarks_per_face;

Expand Down
8 changes: 6 additions & 2 deletions fastdeploy/vision/faceid/contrib/insightface_rec.h
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,13 @@ class FASTDEPLOY_DECL InsightFaceRecognitionModel : public FastDeployModel {
Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default (112, 112)
*/
std::vector<int> size;
/// Argument for image preprocessing step, alpha values for normalization
/*! @brief
Argument for image preprocessing step, alpha values for normalization, default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
*/
std::vector<float> alpha;
/// Argument for image preprocessing step, beta values for normalization
/*! @brief
Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
*/
std::vector<float> beta;
/*! @brief
Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
Expand Down
4 changes: 2 additions & 2 deletions fastdeploy/vision/matting/contrib/modnet.h
Original file line number Diff line number Diff line change
Expand Up @@ -44,11 +44,11 @@ class FASTDEPLOY_DECL MODNet : public FastDeployModel {
*/
std::vector<int> size;
/*! @brief
Argument for image preprocessing step, parameters for normalization, size should be the the same as channels
Argument for image preprocessing step, parameters for normalization, size should be the same as channels, default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f}
*/
std::vector<float> alpha;
/*! @brief
Argument for image preprocessing step, parameters for normalization, size should be the the same as channels
Argument for image preprocessing step, parameters for normalization, size should be the same as channels, default beta = {-1.f, -1.f, -1.f}
*/
std::vector<float> beta;
/*! @brief
Expand Down
6 changes: 3 additions & 3 deletions python/fastdeploy/vision/classification/contrib/resnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,21 +56,21 @@ def predict(self, input_image, topk=1):
@property
def size(self):
"""
Returns the preprocess image size
Returns the preprocess image size, default size = [224, 224];
"""
return self._model.size

@property
def mean_vals(self):
"""
Returns the mean value of normlization
Returns the mean value of normalization, default mean_vals = [0.485, 0.456, 0.406]
"""
return self._model.mean_vals

@property
def std_vals(self):
"""
Returns the std value of normlization
Returns the std value of normalization, default std_vals = [0.229, 0.224, 0.225]
"""
return self._model.std_vals

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ def predict(self, input_image, topk=1):
@property
def size(self):
"""
Returns the preprocess image size
Returns the preprocess image size, default is (224, 224)
"""
return self._model.size

Expand Down
2 changes: 1 addition & 1 deletion python/fastdeploy/vision/detection/contrib/nanodet_plus.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (320, 320)
"""
return self._model.size

Expand Down
3 changes: 2 additions & 1 deletion python/fastdeploy/vision/detection/contrib/scaled_yolov4.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,8 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]

"""
return self._model.size

Expand Down
2 changes: 1 addition & 1 deletion python/fastdeploy/vision/detection/contrib/yolor.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
"""
return self._model.size

Expand Down
5 changes: 4 additions & 1 deletion python/fastdeploy/vision/detection/contrib/yolov5.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ def postprocess(infer_result,
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
"""
return self._model.size

Expand Down Expand Up @@ -117,6 +117,9 @@ def max_wh(self):

@property
def multi_label(self):
"""
Argument for image preprocessing step, for different strategies to get boxes when postprocessing, default True
"""
return self._model.multi_label

@size.setter
Expand Down
5 changes: 3 additions & 2 deletions python/fastdeploy/vision/detection/contrib/yolov5lite.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
"""
return self._model.size

Expand Down Expand Up @@ -96,7 +96,8 @@ def is_decode_exported(self):
whether the model_file was exported with decode module.
The official YOLOv5Lite/export.py script will export ONNX file without decode module.
Please set it 'true' manually if the model file was exported with decode module.
false : ONNX files without decode module. true : ONNX file with decode module.
False : ONNX files without decode module. True : ONNX file with decode module.
default False
"""
return self._model.is_decode_exported

Expand Down
2 changes: 1 addition & 1 deletion python/fastdeploy/vision/detection/contrib/yolov6.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
"""
return self._model.size

Expand Down
2 changes: 1 addition & 1 deletion python/fastdeploy/vision/detection/contrib/yolov7.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
"""
return self._model.size

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ def predict(self, input_image, conf_threshold=0.25):
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
"""
return self._model.size

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ def predict(self, input_image, conf_threshold=0.25):
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
"""
return self._model.size

Expand Down
3 changes: 2 additions & 1 deletion python/fastdeploy/vision/detection/contrib/yolox.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
"""
return self._model.size

Expand All @@ -71,6 +71,7 @@ def is_decode_exported(self):
whether the model_file was exported with decode module.
The official YOLOX/tools/export_onnx.py script will export ONNX file without decode module.
Please set it 'true' manually if the model file was exported with decode module.
Default False.
"""
return self._model.is_decode_exported

Expand Down
4 changes: 2 additions & 2 deletions python/fastdeploy/vision/facedet/contrib/retinaface.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.7, nms_iou_threshold=0.3):
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (640, 640)
"""
return self._model.size

Expand All @@ -77,7 +77,7 @@ def downsample_strides(self):
@property
def min_sizes(self):
"""
Argument for image postprocessing step, min sizes, width and height for each anchor
Argument for image postprocessing step, min sizes, width and height for each anchor, default min_sizes = [[16, 32], [64, 128], [256, 512]]
"""
return self._model.min_sizes

Expand Down
Loading