diff --git a/paddle/phi/kernels/funcs/jit/helper.h b/paddle/phi/kernels/funcs/jit/helper.h
index 7e3394dffd4a2a..c230738db9a5dd 100644
--- a/paddle/phi/kernels/funcs/jit/helper.h
+++ b/paddle/phi/kernels/funcs/jit/helper.h
@@ -107,7 +107,7 @@ inline typename KernelTuple::func_type GetReferFunc() {
   PADDLE_ENFORCE_NOT_NULL(
       p,
       phi::errors::InvalidArgument("Get the reference code of kernel in CPU "
-                                   "failed. The Refer kernel should exsit."));
+                                   "failed. The Refer kernel should exist."));
   return p->GetFunc();
 }
 
@@ -115,7 +115,7 @@ inline typename KernelTuple::func_type GetReferFunc() {
 template <typename KernelTuple, typename PlaceType>
 std::vector<typename KernelTuple::func_type> GetAllCandidateKernels(
     const typename KernelTuple::attr_type& attr) {
-  // the search order shoudl be jitcode > more > refer
+  // the search order should be jitcode > more > refer
   std::vector<typename KernelTuple::func_type> res;
   auto jitker = GetJitCode<KernelTuple, PlaceType>(attr);
   if (jitker) {
diff --git a/paddle/phi/kernels/funcs/jit/more/mkl/mkl.h b/paddle/phi/kernels/funcs/jit/more/mkl/mkl.h
index 017fd7980039dc..a43e8adcac90a0 100644
--- a/paddle/phi/kernels/funcs/jit/more/mkl/mkl.h
+++ b/paddle/phi/kernels/funcs/jit/more/mkl/mkl.h
@@ -118,18 +118,18 @@ void EmbSeqPool(const T* table,
         idx[i],
         attr->table_height,
         phi::errors::InvalidArgument(
-            "The idx shoud be lower than the attribute table_height of "
+            "The idx should be lower than the attribute table_height of "
             "EmbSeqPool. But %dth of idx is %d and table_height is %d.",
             i,
             idx[i],
             attr->table_height));
-    PADDLE_ENFORCE_GE(
-        idx[i],
-        0,
-        phi::errors::InvalidArgument("The idx shoud be equal to or larger than "
-                                     "the 0. But %dth of idx is %d.",
-                                     i,
-                                     idx[i]));
+    PADDLE_ENFORCE_GE(idx[i],
+                      0,
+                      phi::errors::InvalidArgument(
+                          "The idx should be equal to or larger than "
+                          "the 0. But %dth of idx is %d.",
+                          i,
+                          idx[i]));
   };
 
   for (int64_t w = 0; w != attr->index_width; ++w) {
diff --git a/paddle/phi/kernels/funcs/jit/refer/refer.h b/paddle/phi/kernels/funcs/jit/refer/refer.h
index c7c3835f890682..97551159d1b1d5 100644
--- a/paddle/phi/kernels/funcs/jit/refer/refer.h
+++ b/paddle/phi/kernels/funcs/jit/refer/refer.h
@@ -418,18 +418,18 @@ void EmbSeqPool(const T* table,
         idx[i],
         attr->table_height,
         phi::errors::InvalidArgument(
-            "The idx shoud be lower than the attribute table_height of "
+            "The idx should be lower than the attribute table_height of "
             "EmbSeqPool. But %dth of idx is %d and table_height is %d.",
             i,
             idx[i],
             attr->table_height));
-    PADDLE_ENFORCE_GE(
-        idx[i],
-        0,
-        phi::errors::InvalidArgument("The idx shoud be equal to or larger than "
-                                     "the 0. But %dth of idx is %d.",
-                                     i,
-                                     idx[i]));
+    PADDLE_ENFORCE_GE(idx[i],
+                      0,
+                      phi::errors::InvalidArgument(
+                          "The idx should be equal to or larger than "
+                          "the 0. But %dth of idx is %d.",
+                          i,
+                          idx[i]));
   };
 
   for (int64_t w = 0; w != attr->index_width; ++w) {
diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py
index e87ab0068ff2c3..5ef30f909939a1 100644
--- a/python/paddle/metric/metrics.py
+++ b/python/paddle/metric/metrics.py
@@ -77,7 +77,7 @@ class Metric(metaclass=abc.ABCMeta):
     :code:`pred` and :code:`label` in :code:`compute`.
     For examples, prediction results contains 10 classes, while :code:`pred`
     shape is [N, 10], :code:`label` shape is [N, 1], N is mini-batch size,
-    and we only need to calculate accurary of top-1 and top-5, we could
+    and we only need to calculate accuracy of top-1 and top-5, we could
     calculate the correct prediction matrix of the top-5 scores of
     the prediction of each sample like follows, while the correct prediction
     matrix shape is [N, 5].
@@ -160,7 +160,7 @@ def name(self):
 
     def compute(self, *args):
         """
-        This API is advanced usage to accelerate metric calculating, calulations
+        This API is advanced usage to accelerate metric calculating, calculations
         from outputs of model to the states which should be updated by Metric can
         be defined here, where Paddle OPs is also supported. Outputs of this API
         will be the inputs of "Metric.update".
diff --git a/python/paddle/nn/clip.py b/python/paddle/nn/clip.py
index 775a11fd8e398b..8e3282e766fffa 100644
--- a/python/paddle/nn/clip.py
+++ b/python/paddle/nn/clip.py
@@ -392,7 +392,7 @@ class ClipGradByValue(ClipGradBase):
 
     Note:
         ``need_clip`` of ``ClipGradByValue`` HAS BEEN DEPRECATED since 2.0.
-        Please use ``need_clip`` in ``ParamAttr`` to speficiy the clip scope.
+        Please use ``need_clip`` in ``ParamAttr`` to specify the clip scope.
 
     Args:
         max (float): The maximum value to clip by.
@@ -499,7 +499,7 @@ class ClipGradByNorm(ClipGradBase):
 
     Note:
         ``need_clip`` of ``ClipGradByNorm`` HAS BEEN DEPRECATED since 2.0.
-        Please use ``need_clip`` in ``ParamAttr`` to speficiy the clip scope.
+        Please use ``need_clip`` in ``ParamAttr`` to specify the clip scope.
 
     Args:
         clip_norm(float): The maximum norm value.
@@ -630,7 +630,7 @@ class ClipGradByGlobalNorm(ClipGradBase):
 
     Note:
         ``need_clip`` of ``ClipGradyGlobalNorm`` HAS BEEN DEPRECATED since 2.0.
-        Please use ``need_clip`` in ``ParamAttr`` to speficiy the clip scope.
+        Please use ``need_clip`` in ``ParamAttr`` to specify the clip scope.
 
     Args:
         clip_norm (float): The maximum norm value.
diff --git a/python/paddle/quantization/ptq.py b/python/paddle/quantization/ptq.py
index 6a5562abb94076..0971996dcda5ad 100644
--- a/python/paddle/quantization/ptq.py
+++ b/python/paddle/quantization/ptq.py
@@ -118,7 +118,7 @@ def quantize(self, model: Layer, inplace=False):
         _model.eval()
         assert (
             not model.training
-        ), "Post-Training Quantization shoud not work on training models. Please set evaluation mode by model.eval()."
+        ), "Post-Training Quantization should not work on training models. Please set evaluation mode by model.eval()."
         self._config._specify(_model)
         self._convert_to_quant_layers(_model, self._config)
         self._insert_activation_observers(_model, self._config)
diff --git a/python/paddle/quantization/qat.py b/python/paddle/quantization/qat.py
index e762400c316f7f..a90a03e39cbe8a 100644
--- a/python/paddle/quantization/qat.py
+++ b/python/paddle/quantization/qat.py
@@ -110,7 +110,7 @@ def quantize(self, model: Layer, inplace=False):
         """
         assert (
             model.training
-        ), "Quantization-Aware Training shoud work on training models. Please set training mode by model.train()."
+        ), "Quantization-Aware Training should work on training models. Please set training mode by model.train()."
         _model = model if inplace else copy.deepcopy(model)
         self._config._specify(_model)
         self._convert_to_quant_layers(_model, self._config)
diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py
index 7243e3a67cb905..eea8925367ab5e 100755
--- a/python/paddle/vision/ops.py
+++ b/python/paddle/vision/ops.py
@@ -826,7 +826,7 @@ def deform_conv2d(
             dilation_H = dilation_W = dilation. Default: 1.
         deformable_groups (int): The number of deformable group partitions.
             Default: 1.
-        groups (int, optonal): The groups number of the deformable conv layer. According to
+        groups (int, optional): The groups number of the deformable conv layer. According to
             grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
             the first half of the filters is only connected to the first half
             of the input channels, while the second half of the filters is only
@@ -2255,7 +2255,7 @@ def matrix_nms(
     than score_threshold (if provided), then the top k candidate is selected if
     nms_top_k is larger than -1. Score of the remaining candidate are then
     decayed according to the Matrix NMS scheme.
-    Aftern NMS step, at most keep_top_k number of total bboxes are to be kept
+    After NMS step, at most keep_top_k number of total bboxes are to be kept
     per image if keep_top_k is larger than -1.
 
     Args: