diff --git a/fastdeploy/vision/classification/contrib/resnet.h b/fastdeploy/vision/classification/contrib/resnet.h
index f5db8b1bed6..fa557c71567 100644
--- a/fastdeploy/vision/classification/contrib/resnet.h
+++ b/fastdeploy/vision/classification/contrib/resnet.h
@@ -50,12 +50,16 @@ class FASTDEPLOY_DECL ResNet : public FastDeployModel {
   */
  virtual bool Predict(cv::Mat* im, ClassifyResult* result, int topk = 1);
  /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {224, 224}
  */
  std::vector<int> size;
- /// Mean parameters for normalize, size should be the the same as channels
+ /*! @brief
+ Mean parameters for normalize, size should be the same as channels, default mean_vals = {0.485f, 0.456f, 0.406f}
+ */
  std::vector<float> mean_vals;
- /// Std parameters for normalize, size should be the the same as channels
+ /*! @brief
+ Std parameters for normalize, size should be the same as channels, default std_vals = {0.229f, 0.224f, 0.225f}
+ */
  std::vector<float> std_vals;
diff --git a/fastdeploy/vision/detection/contrib/nanodet_plus.h b/fastdeploy/vision/detection/contrib/nanodet_plus.h
index 9923e4d37dd..45ed40fe727 100644
--- a/fastdeploy/vision/detection/contrib/nanodet_plus.h
+++ b/fastdeploy/vision/detection/contrib/nanodet_plus.h
@@ -54,7 +54,7 @@ class FASTDEPLOY_DECL NanoDetPlus : public FastDeployModel {
                       float nms_iou_threshold = 0.5f);
  /*! @brief
- Argument for image preprocessing step, tuple of input size (width, height), e.g (320, 320)
+ Argument for image preprocessing step, tuple of input size (width, height), default (320, 320)
  */
  std::vector<int> size;
  // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/scaledyolov4.h b/fastdeploy/vision/detection/contrib/scaledyolov4.h
index a0108cfa57b..c7b5fb57d27 100644
--- a/fastdeploy/vision/detection/contrib/scaledyolov4.h
+++ b/fastdeploy/vision/detection/contrib/scaledyolov4.h
@@ -51,7 +51,7 @@ class FASTDEPLOY_DECL ScaledYOLOv4 : public FastDeployModel {
                       float nms_iou_threshold = 0.5);
  /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
  */
  std::vector<int> size;
  // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/yolor.h b/fastdeploy/vision/detection/contrib/yolor.h
index 0f8e23537e8..e98da3ee1c6 100644
--- a/fastdeploy/vision/detection/contrib/yolor.h
+++ b/fastdeploy/vision/detection/contrib/yolor.h
@@ -39,7 +39,7 @@ class FASTDEPLOY_DECL YOLOR : public FastDeployModel {
  virtual std::string ModelName() const { return "YOLOR"; }
  /** \brief Predict the detection result for an input image
   *
-  * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+  * \param[in] im The input image data, comes from cv::imread()
   * \param[in] result The output detection result will be writen to this structure
   * \param[in] conf_threshold confidence threashold for postprocessing, default is 0.25
   * \param[in] nms_iou_threshold iou threashold for NMS, default is 0.5
@@ -50,7 +50,7 @@ class FASTDEPLOY_DECL YOLOR : public FastDeployModel {
                       float nms_iou_threshold = 0.5);
  /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
  */
  std::vector<int> size;
  // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/yolov5.h b/fastdeploy/vision/detection/contrib/yolov5.h
index 15d98e6f239..7be906a9dda 100644
--- a/fastdeploy/vision/detection/contrib/yolov5.h
+++ b/fastdeploy/vision/detection/contrib/yolov5.h
@@ -78,7 +78,7 @@ class FASTDEPLOY_DECL YOLOv5 : public FastDeployModel {
                       float max_wh = 7680.0);
  /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
  */
  std::vector<int> size_;
  // padding value, size should be the same as channels
@@ -96,7 +96,9 @@ class FASTDEPLOY_DECL YOLOv5 : public FastDeployModel {
  int stride_;
  // for offseting the boxes by classes when using NMS
  float max_wh_;
- /// for different strategies to get boxes when postprocessing
+ /*! @brief
+ Argument for image postprocessing step, for different strategies to get boxes when postprocessing, default true
+ */
  bool multi_label_;

 private:
diff --git a/fastdeploy/vision/detection/contrib/yolov5lite.h b/fastdeploy/vision/detection/contrib/yolov5lite.h
index 8bbcf331ae2..edaa18a63ed 100644
--- a/fastdeploy/vision/detection/contrib/yolov5lite.h
+++ b/fastdeploy/vision/detection/contrib/yolov5lite.h
@@ -54,7 +54,7 @@ class FASTDEPLOY_DECL YOLOv5Lite : public FastDeployModel {
  void UseCudaPreprocessing(int max_img_size = 3840 * 2160);
  /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
  */
  std::vector<int> size;
  // padding value, size should be the same as channels
@@ -84,7 +84,7 @@ class FASTDEPLOY_DECL YOLOv5Lite : public FastDeployModel {
  decode module. Please set it 'true' manually if the model file
  was exported with decode module.
  false : ONNX files without decode module.
- true : ONNX file with decode module.
+ true : ONNX file with decode module. default false.
  */
  bool is_decode_exported;
diff --git a/fastdeploy/vision/detection/contrib/yolov6.h b/fastdeploy/vision/detection/contrib/yolov6.h
index 1e0af6fd33a..bb6c988cc58 100644
--- a/fastdeploy/vision/detection/contrib/yolov6.h
+++ b/fastdeploy/vision/detection/contrib/yolov6.h
@@ -57,7 +57,7 @@ class FASTDEPLOY_DECL YOLOv6 : public FastDeployModel {
  void UseCudaPreprocessing(int max_img_size = 3840 * 2160);
  /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
  */
  std::vector<int> size;
  // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/yolov7.h b/fastdeploy/vision/detection/contrib/yolov7.h
index 2eb038f719d..cdf56969e40 100644
--- a/fastdeploy/vision/detection/contrib/yolov7.h
+++ b/fastdeploy/vision/detection/contrib/yolov7.h
@@ -54,7 +54,7 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
  void UseCudaPreprocessing(int max_img_size = 3840 * 2160);
  /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
  */
  std::vector<int> size;
  // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/yolov7end2end_ort.h b/fastdeploy/vision/detection/contrib/yolov7end2end_ort.h
index 9434d69edb9..b685895799e 100644
--- a/fastdeploy/vision/detection/contrib/yolov7end2end_ort.h
+++ b/fastdeploy/vision/detection/contrib/yolov7end2end_ort.h
@@ -48,7 +48,7 @@ class FASTDEPLOY_DECL YOLOv7End2EndORT : public FastDeployModel {
                       float conf_threshold = 0.25);
  /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
  */
  std::vector<int> size;
  // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/yolov7end2end_trt.h b/fastdeploy/vision/detection/contrib/yolov7end2end_trt.h
index f6ce6e943f2..10b95d02a68 100644
--- a/fastdeploy/vision/detection/contrib/yolov7end2end_trt.h
+++ b/fastdeploy/vision/detection/contrib/yolov7end2end_trt.h
@@ -53,7 +53,7 @@ class FASTDEPLOY_DECL YOLOv7End2EndTRT : public FastDeployModel {
  void UseCudaPreprocessing(int max_img_size = 3840 * 2160);
  /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
  */
  std::vector<int> size;
  // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/yolox.h b/fastdeploy/vision/detection/contrib/yolox.h
index c040c28a8ec..8ad029b9558 100644
--- a/fastdeploy/vision/detection/contrib/yolox.h
+++ b/fastdeploy/vision/detection/contrib/yolox.h
@@ -52,7 +52,7 @@ class FASTDEPLOY_DECL YOLOX : public FastDeployModel {
                       float nms_iou_threshold = 0.5);
  /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
  */
  std::vector<int> size;
  // padding value, size should be the same as channels
@@ -61,7 +61,7 @@ class FASTDEPLOY_DECL YOLOX : public FastDeployModel {
  whether the model_file was exported with decode module. The official
  YOLOX/tools/export_onnx.py script will export ONNX file without
  decode module. Please set it 'true' manually if the model file
- was exported with decode module.
+ was exported with decode module. default false.
  */
  bool is_decode_exported;
  // downsample strides for YOLOX to generate anchors,
diff --git a/fastdeploy/vision/facedet/contrib/retinaface.h b/fastdeploy/vision/facedet/contrib/retinaface.h
index e7011df89d0..c05deedff5f 100644
--- a/fastdeploy/vision/facedet/contrib/retinaface.h
+++ b/fastdeploy/vision/facedet/contrib/retinaface.h
@@ -65,7 +65,7 @@ class FASTDEPLOY_DECL RetinaFace : public FastDeployModel {
  */
  std::vector<int> downsample_strides;
  /*! @brief
- Argument for image postprocessing step, min sizes, width and height for each anchor
+ Argument for image postprocessing step, min sizes, width and height for each anchor, default min_sizes = {{16, 32}, {64, 128}, {256, 512}}
  */
  std::vector<std::vector<int>> min_sizes;
  /*! @brief
diff --git a/fastdeploy/vision/facedet/contrib/scrfd.h b/fastdeploy/vision/facedet/contrib/scrfd.h
index 58dd8807b9e..38da3af4258 100644
--- a/fastdeploy/vision/facedet/contrib/scrfd.h
+++ b/fastdeploy/vision/facedet/contrib/scrfd.h
@@ -77,14 +77,16 @@ class FASTDEPLOY_DECL SCRFD : public FastDeployModel {
  */
  int landmarks_per_face;
  /*! @brief
- Argument for image postprocessing step, the outputs of onnx file with key points features or not
+ Argument for image postprocessing step, whether the outputs of the ONNX file contain key point features, default true
  */
  bool use_kps;
  /*! @brief
- Argument for image postprocessing step, the upperbond number of boxes processed by nms
+ Argument for image postprocessing step, the upper bound number of boxes processed by nms, default 30000
  */
  int max_nms;
- /// Argument for image postprocessing step, anchor number of each stride
+ /*! @brief
+ Argument for image postprocessing step, anchor number of each stride, default 2
+ */
  unsigned int num_anchors;

 private:
diff --git a/fastdeploy/vision/facedet/contrib/yolov5face.h b/fastdeploy/vision/facedet/contrib/yolov5face.h
index 10479052de9..199ed35df5e 100644
--- a/fastdeploy/vision/facedet/contrib/yolov5face.h
+++ b/fastdeploy/vision/facedet/contrib/yolov5face.h
@@ -51,7 +51,7 @@ class FASTDEPLOY_DECL YOLOv5Face : public FastDeployModel {
                       float nms_iou_threshold = 0.5);
  /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
  */
  std::vector<int> size;
  // padding value, size should be the same as channels
@@ -72,7 +72,7 @@ class FASTDEPLOY_DECL YOLOv5Face : public FastDeployModel {
  /*! @brief
  Argument for image postprocessing step, setup the number of landmarks for per face (if have), default 5 in official yolov5face
  note that, the outupt tensor's shape must be:
- (1,n,4+1+2*landmarks_per_face+1=box+obj+landmarks+cls)
+ (1,n,4+1+2*landmarks_per_face+1=box+obj+landmarks+cls), default 5
  */
  int landmarks_per_face;
diff --git a/fastdeploy/vision/faceid/contrib/insightface_rec.h b/fastdeploy/vision/faceid/contrib/insightface_rec.h
index 12f882d7aa2..2e66d3d71fe 100644
--- a/fastdeploy/vision/faceid/contrib/insightface_rec.h
+++ b/fastdeploy/vision/faceid/contrib/insightface_rec.h
@@ -44,9 +44,13 @@ class FASTDEPLOY_DECL InsightFaceRecognitionModel : public FastDeployModel {
  Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default (112, 112)
  */
  std::vector<int> size;
- /// Argument for image preprocessing step, alpha values for normalization
+ /*! @brief
+ Argument for image preprocessing step, alpha values for normalization, default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f}
+ */
  std::vector<float> alpha;
- /// Argument for image preprocessing step, beta values for normalization
+ /*! @brief
+ Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
+ */
  std::vector<float> beta;
  /*! @brief
  Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
diff --git a/fastdeploy/vision/matting/contrib/modnet.h b/fastdeploy/vision/matting/contrib/modnet.h
index 75148b60a12..09810a62ef4 100644
--- a/fastdeploy/vision/matting/contrib/modnet.h
+++ b/fastdeploy/vision/matting/contrib/modnet.h
@@ -44,11 +44,11 @@ class FASTDEPLOY_DECL MODNet : public FastDeployModel {
  */
  std::vector<int> size;
  /*! @brief
- Argument for image preprocessing step, parameters for normalization, size should be the the same as channels
+ Argument for image preprocessing step, parameters for normalization, size should be the same as channels, default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f}
  */
  std::vector<float> alpha;
  /*! @brief
- Argument for image preprocessing step, parameters for normalization, size should be the the same as channels
+ Argument for image preprocessing step, parameters for normalization, size should be the same as channels, default beta = {-1.f, -1.f, -1.f}
  */
  std::vector<float> beta;
  /*! @brief
diff --git a/python/fastdeploy/vision/classification/contrib/resnet.py b/python/fastdeploy/vision/classification/contrib/resnet.py
index 52f45933ba2..46383c15916 100644
--- a/python/fastdeploy/vision/classification/contrib/resnet.py
+++ b/python/fastdeploy/vision/classification/contrib/resnet.py
@@ -56,21 +56,21 @@ def predict(self, input_image, topk=1):
     @property
     def size(self):
         """
-        Returns the preprocess image size
+        Returns the preprocess image size, default size = [224, 224]
         """
         return self._model.size

     @property
     def mean_vals(self):
         """
-        Returns the mean value of normlization
+        Returns the mean value of normalization, default mean_vals = [0.485, 0.456, 0.406]
         """
         return self._model.mean_vals

     @property
     def std_vals(self):
         """
-        Returns the std value of normlization
+        Returns the std value of normalization, default std_vals = [0.229, 0.224, 0.225]
         """
         return self._model.std_vals
diff --git a/python/fastdeploy/vision/classification/contrib/yolov5cls.py b/python/fastdeploy/vision/classification/contrib/yolov5cls.py
index 8a4744e56fc..5f401fa1dbf 100644
--- a/python/fastdeploy/vision/classification/contrib/yolov5cls.py
+++ b/python/fastdeploy/vision/classification/contrib/yolov5cls.py
@@ -52,7 +52,7 @@ def predict(self, input_image, topk=1):
     @property
     def size(self):
         """
-        Returns the preprocess image size
+        Returns the preprocess image size, default is (224, 224)
         """
         return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/nanodet_plus.py b/python/fastdeploy/vision/detection/contrib/nanodet_plus.py
index b5a83fe2b25..30dfd12573c 100644
--- a/python/fastdeploy/vision/detection/contrib/nanodet_plus.py
+++ b/python/fastdeploy/vision/detection/contrib/nanodet_plus.py
@@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (320, 320)
         """
         return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/scaled_yolov4.py b/python/fastdeploy/vision/detection/contrib/scaled_yolov4.py
index 1e46ba1a147..f9466fe808e 100644
--- a/python/fastdeploy/vision/detection/contrib/scaled_yolov4.py
+++ b/python/fastdeploy/vision/detection/contrib/scaled_yolov4.py
@@ -56,7 +56,8 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+
         """
         return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolor.py b/python/fastdeploy/vision/detection/contrib/yolor.py
index 6326630e1ad..1e24544191d 100644
--- a/python/fastdeploy/vision/detection/contrib/yolor.py
+++ b/python/fastdeploy/vision/detection/contrib/yolor.py
@@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
         """
         return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolov5.py b/python/fastdeploy/vision/detection/contrib/yolov5.py
index a5068df5e7b..5ecef307bc8 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov5.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov5.py
@@ -81,7 +81,7 @@ def postprocess(infer_result,
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
         """
         return self._model.size
@@ -117,6 +117,9 @@ def max_wh(self):
     @property
     def multi_label(self):
+        """
+        Argument for image postprocessing step, for different strategies to get boxes when postprocessing, default True
+        """
         return self._model.multi_label

     @size.setter
diff --git a/python/fastdeploy/vision/detection/contrib/yolov5lite.py b/python/fastdeploy/vision/detection/contrib/yolov5lite.py
index 606dc98c43a..c04a348a6db 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov5lite.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov5lite.py
@@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
         """
         return self._model.size
@@ -96,7 +96,8 @@ def is_decode_exported(self):
         whether the model_file was exported with decode module.
         The official YOLOv5Lite/export.py script will export ONNX file without decode module.
         Please set it 'true' manually if the model file was exported with decode module.
-        false : ONNX files without decode module. true : ONNX file with decode module.
+        False : ONNX files without decode module. True : ONNX file with decode module.
+        default False
         """
         return self._model.is_decode_exported
diff --git a/python/fastdeploy/vision/detection/contrib/yolov6.py b/python/fastdeploy/vision/detection/contrib/yolov6.py
index 9f953311440..61a9c072891 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov6.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov6.py
@@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
         """
         return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolov7.py b/python/fastdeploy/vision/detection/contrib/yolov7.py
index 53ef24a1019..0334504851b 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov7.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov7.py
@@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
         """
         return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolov7end2end_ort.py b/python/fastdeploy/vision/detection/contrib/yolov7end2end_ort.py
index e16ec6a901c..47a07feff7a 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov7end2end_ort.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov7end2end_ort.py
@@ -54,7 +54,7 @@ def predict(self, input_image, conf_threshold=0.25):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
         """
         return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolov7end2end_trt.py b/python/fastdeploy/vision/detection/contrib/yolov7end2end_trt.py
index 4a2621b44cd..7059c9d1d5f 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov7end2end_trt.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov7end2end_trt.py
@@ -54,7 +54,7 @@ def predict(self, input_image, conf_threshold=0.25):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
         """
         return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolox.py b/python/fastdeploy/vision/detection/contrib/yolox.py
index c121cd80260..ae042b150d9 100644
--- a/python/fastdeploy/vision/detection/contrib/yolox.py
+++ b/python/fastdeploy/vision/detection/contrib/yolox.py
@@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
         """
         return self._model.size
@@ -71,6 +71,7 @@ def is_decode_exported(self):
         whether the model_file was exported with decode module.
         The official YOLOX/tools/export_onnx.py script will export ONNX file without decode module.
         Please set it 'true' manually if the model file was exported with decode module.
+        Default False.
         """
         return self._model.is_decode_exported
diff --git a/python/fastdeploy/vision/facedet/contrib/retinaface.py b/python/fastdeploy/vision/facedet/contrib/retinaface.py
index 9afa5055c64..895aeebf275 100644
--- a/python/fastdeploy/vision/facedet/contrib/retinaface.py
+++ b/python/fastdeploy/vision/facedet/contrib/retinaface.py
@@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.7, nms_iou_threshold=0.3):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (640, 640)
         """
         return self._model.size
@@ -77,7 +77,7 @@ def downsample_strides(self):
     @property
     def min_sizes(self):
         """
-        Argument for image postprocessing step, min sizes, width and height for each anchor
+        Argument for image postprocessing step, min sizes, width and height for each anchor, default min_sizes = [[16, 32], [64, 128], [256, 512]]
         """
         return self._model.min_sizes
diff --git a/python/fastdeploy/vision/facedet/contrib/scrfd.py b/python/fastdeploy/vision/facedet/contrib/scrfd.py
index fa8e1cfd7d4..96171088cd0 100644
--- a/python/fastdeploy/vision/facedet/contrib/scrfd.py
+++ b/python/fastdeploy/vision/facedet/contrib/scrfd.py
@@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.7, nms_iou_threshold=0.3):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (640, 640)
         """
         return self._model.size
@@ -87,22 +87,40 @@ def stride(self):
     @property
     def downsample_strides(self):
+        """
+        Argument for image postprocessing step,
+        downsample strides (namely, steps) for SCRFD to generate anchors,
+        will take (8,16,32) as default values
+        """
         return self._model.downsample_strides

     @property
     def landmarks_per_face(self):
+        """
+        Argument for image postprocessing step, landmarks_per_face, default 5 in SCRFD
+        """
         return self._model.landmarks_per_face

     @property
     def use_kps(self):
+        """
+        Argument for image postprocessing step,
+        whether the outputs of the ONNX file contain key point features, default True
+        """
         return self._model.use_kps

     @property
     def max_nms(self):
+        """
+        Argument for image postprocessing step, the upper bound number of boxes processed by nms, default 30000
+        """
         return self._model.max_nms

     @property
     def num_anchors(self):
+        """
+        Argument for image postprocessing step, anchor number of each stride, default 2
+        """
         return self._model.num_anchors

     @size.setter
diff --git a/python/fastdeploy/vision/facedet/contrib/ultraface.py b/python/fastdeploy/vision/facedet/contrib/ultraface.py
index 8d84a6d86f7..d4a007c1759 100644
--- a/python/fastdeploy/vision/facedet/contrib/ultraface.py
+++ b/python/fastdeploy/vision/facedet/contrib/ultraface.py
@@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.7, nms_iou_threshold=0.3):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (320, 240)
         """
         return self._model.size
diff --git a/python/fastdeploy/vision/facedet/contrib/yolov5face.py b/python/fastdeploy/vision/facedet/contrib/yolov5face.py
index be09e840a8a..50acb20120d 100644
--- a/python/fastdeploy/vision/facedet/contrib/yolov5face.py
+++ b/python/fastdeploy/vision/facedet/contrib/yolov5face.py
@@ -56,7 +56,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
         """
         return self._model.size
diff --git a/python/fastdeploy/vision/faceid/contrib/adaface.py b/python/fastdeploy/vision/faceid/contrib/adaface.py
index c0b6d9b1dd5..140cdb50472 100644
--- a/python/fastdeploy/vision/faceid/contrib/adaface.py
+++ b/python/fastdeploy/vision/faceid/contrib/adaface.py
@@ -52,35 +52,36 @@ def predict(self, input_image):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
         """
         return self._model.size

     @property
     def alpha(self):
         """
-        Argument for image preprocessing step, alpha value for normalization
+        Argument for image preprocessing step, alpha value for normalization, default alpha = [1.0 / 127.5, 1.0 / 127.5, 1.0 / 127.5]
         """
         return self._model.alpha

     @property
     def beta(self):
         """
-        Argument for image preprocessing step, beta value for normalization
+        Argument for image preprocessing step, beta values for normalization, default beta = [-1.0, -1.0, -1.0]
+
         """
         return self._model.beta

     @property
     def swap_rb(self):
         """
-        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
+        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
         """
         return self._model.swap_rb

     @property
     def l2_normalize(self):
         """
-        Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default;
+        Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False
         """
         return self._model.l2_normalize
diff --git a/python/fastdeploy/vision/faceid/contrib/arcface.py b/python/fastdeploy/vision/faceid/contrib/arcface.py
index be0a09d954c..f4341b17094 100644
--- a/python/fastdeploy/vision/faceid/contrib/arcface.py
+++ b/python/fastdeploy/vision/faceid/contrib/arcface.py
@@ -54,35 +54,35 @@ def predict(self, input_image):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
         """
         return self._model.size

     @property
     def alpha(self):
         """
-        Argument for image preprocessing step, alpha value for normalization
+        Argument for image preprocessing step, alpha value for normalization, default alpha = [1.0 / 127.5, 1.0 / 127.5, 1.0 / 127.5]
         """
         return self._model.alpha

     @property
     def beta(self):
         """
-        Argument for image preprocessing step, beta value for normalization
+        Argument for image preprocessing step, beta values for normalization, default beta = [-1.0, -1.0, -1.0]
         """
         return self._model.beta

     @property
     def swap_rb(self):
         """
-        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
+        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
         """
         return self._model.swap_rb

     @property
     def l2_normalize(self):
         """
-        Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default;
+        Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False
         """
         return self._model.l2_normalize
diff --git a/python/fastdeploy/vision/faceid/contrib/cosface.py b/python/fastdeploy/vision/faceid/contrib/cosface.py
index 982f3c48106..61d1f2cb927 100644
--- a/python/fastdeploy/vision/faceid/contrib/cosface.py
+++ b/python/fastdeploy/vision/faceid/contrib/cosface.py
@@ -53,28 +53,28 @@ def predict(self, input_image):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
         """
         return self._model.size

     @property
     def alpha(self):
         """
-        Argument for image preprocessing step, alpha value for normalization
+        Argument for image preprocessing step, alpha value for normalization, default alpha = [1.0 / 127.5, 1.0 / 127.5, 1.0 / 127.5]
         """
         return self._model.alpha

     @property
     def beta(self):
         """
-        Argument for image preprocessing step, beta value for normalization
+        Argument for image preprocessing step, beta values for normalization, default beta = [-1.0, -1.0, -1.0]
         """
         return self._model.beta

     @property
     def swap_rb(self):
         """
-        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
+        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
         """
         return self._model.swap_rb
diff --git a/python/fastdeploy/vision/faceid/contrib/insightface_rec.py b/python/fastdeploy/vision/faceid/contrib/insightface_rec.py
index 2793b88f4ea..ea4aed81ad6 100644
--- a/python/fastdeploy/vision/faceid/contrib/insightface_rec.py
+++ b/python/fastdeploy/vision/faceid/contrib/insightface_rec.py
@@ -53,28 +53,28 @@ def predict(self, input_image):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
         """
         return self._model.size

     @property
     def alpha(self):
         """
-        Argument for image preprocessing step, alpha value for normalization
+        Argument for image preprocessing step, alpha value for normalization, default alpha = [1.0 / 127.5, 1.0 / 127.5, 1.0 / 127.5]
         """
         return self._model.alpha

     @property
     def beta(self):
         """
-        Argument for image preprocessing step, beta value for normalization
+        Argument for image preprocessing step, beta values for normalization, default beta = [-1.0, -1.0, -1.0]
         """
         return self._model.beta

     @property
     def swap_rb(self):
         """
-        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
+        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
         """
         return self._model.swap_rb
diff --git a/python/fastdeploy/vision/faceid/contrib/partial_fc.py b/python/fastdeploy/vision/faceid/contrib/partial_fc.py
index de31b0a2730..0798af56e4c 100644
--- a/python/fastdeploy/vision/faceid/contrib/partial_fc.py
+++ b/python/fastdeploy/vision/faceid/contrib/partial_fc.py
@@ -53,28 +53,28 @@ def predict(self, input_image):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
         """
         return self._model.size

     @property
     def alpha(self):
         """
-        Argument for image preprocessing step, alpha value for normalization
+        Argument for image preprocessing step, alpha value for normalization, default alpha = [1.0 / 127.5, 1.0 / 127.5, 1.0 / 127.5]
         """
         return self._model.alpha

     @property
     def beta(self):
         """
-        Argument for image preprocessing step, beta value for normalization
+        Argument for image preprocessing step, beta values for normalization, default beta = [-1.0, -1.0, -1.0]
         """
         return self._model.beta

     @property
     def swap_rb(self):
         """
-        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
+        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
         """
         return self._model.swap_rb
diff --git a/python/fastdeploy/vision/faceid/contrib/vpl.py b/python/fastdeploy/vision/faceid/contrib/vpl.py
index 3a8df5f169f..5db5b4e672f 100644
--- a/python/fastdeploy/vision/faceid/contrib/vpl.py
+++ b/python/fastdeploy/vision/faceid/contrib/vpl.py
@@ -53,28 +53,28 @@ def predict(self, input_image):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
         """
         return self._model.size

     @property
     def alpha(self):
         """
-        Argument for image preprocessing step, alpha value for normalization
+        Argument for image preprocessing step, alpha value for normalization, default alpha = [1.0 / 127.5, 1.0 / 127.5, 1.0 / 127.5]
         """
         return self._model.alpha

     @property
     def beta(self):
         """
-        Argument for image preprocessing step, beta value for normalization
+        Argument for image preprocessing step, beta values for normalization, default beta = [-1.0, -1.0, -1.0]
         """
         return self._model.beta

     @property
     def swap_rb(self):
         """
-        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
+        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
         """
         return self._model.swap_rb
diff --git a/python/fastdeploy/vision/matting/contrib/modnet.py b/python/fastdeploy/vision/matting/contrib/modnet.py
index 33fb5a025d2..da8f6c1d020 100644
--- a/python/fastdeploy/vision/matting/contrib/modnet.py
+++ b/python/fastdeploy/vision/matting/contrib/modnet.py
@@ -53,28 +53,28 @@ def predict(self, input_image):
     @property
     def size(self):
         """
-        Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [256, 256]
         """
         return self._model.size

     @property
     def alpha(self):
         """
-        Argument for image preprocessing step, alpha value for normalization
+        Argument for image preprocessing step, alpha value for normalization, default alpha = [1.0 / 127.5, 1.0 / 127.5, 1.0 / 127.5]
         """
         return self._model.alpha

     @property
     def beta(self):
         """
-        Argument for image preprocessing step, beta value for normalization
+        Argument for image preprocessing step, beta value for normalization, default beta = [-1.0, -1.0, -1.0]
         """
         return self._model.beta

     @property
     def swap_rb(self):
         """
-        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
+        Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
         """
         return self._model.swap_rb