Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions paddle/phi/kernels/funcs/jit/helper.h
Original file line number Diff line number Diff line change
Expand Up @@ -107,15 +107,15 @@ inline typename KernelTuple::func_type GetReferFunc() {
PADDLE_ENFORCE_NOT_NULL(
p,
phi::errors::InvalidArgument("Get the reference code of kernel in CPU "
"failed. The Refer kernel should exsit."));
"failed. The Refer kernel should exist."));
return p->GetFunc();
}

// Return all Kernels that can be used
template <typename KernelTuple, typename PlaceType>
std::vector<const Kernel*> GetAllCandidateKernels(
const typename KernelTuple::attr_type& attr) {
// the search order shoudl be jitcode > more > refer
// the search order should be jitcode > more > refer
std::vector<const Kernel*> res;
auto jitker = GetJitCode<KernelTuple, PlaceType>(attr);
if (jitker) {
Expand Down
16 changes: 8 additions & 8 deletions paddle/phi/kernels/funcs/jit/more/mkl/mkl.h
Original file line number Diff line number Diff line change
Expand Up @@ -118,18 +118,18 @@ void EmbSeqPool(const T* table,
idx[i],
attr->table_height,
phi::errors::InvalidArgument(
"The idx shoud be lower than the attribute table_height of "
"The idx should be lower than the attribute table_height of "
"EmbSeqPool. But %dth of idx is %d and table_height is %d.",
i,
idx[i],
attr->table_height));
PADDLE_ENFORCE_GE(
idx[i],
0,
phi::errors::InvalidArgument("The idx shoud be equal to or larger than "
"the 0. But %dth of idx is %d.",
i,
idx[i]));
PADDLE_ENFORCE_GE(idx[i],
0,
phi::errors::InvalidArgument(
"The idx should be equal to or larger than "
"the 0. But %dth of idx is %d.",
i,
idx[i]));
};

for (int64_t w = 0; w != attr->index_width; ++w) {
Expand Down
16 changes: 8 additions & 8 deletions paddle/phi/kernels/funcs/jit/refer/refer.h
Original file line number Diff line number Diff line change
Expand Up @@ -418,18 +418,18 @@ void EmbSeqPool(const T* table,
idx[i],
attr->table_height,
phi::errors::InvalidArgument(
"The idx shoud be lower than the attribute table_height of "
"The idx should be lower than the attribute table_height of "
"EmbSeqPool. But %dth of idx is %d and table_height is %d.",
i,
idx[i],
attr->table_height));
PADDLE_ENFORCE_GE(
idx[i],
0,
phi::errors::InvalidArgument("The idx shoud be equal to or larger than "
"the 0. But %dth of idx is %d.",
i,
idx[i]));
PADDLE_ENFORCE_GE(idx[i],
0,
phi::errors::InvalidArgument(
"The idx should be equal to or larger than "
"the 0. But %dth of idx is %d.",
i,
idx[i]));
};

for (int64_t w = 0; w != attr->index_width; ++w) {
Expand Down
4 changes: 2 additions & 2 deletions python/paddle/metric/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ class Metric(metaclass=abc.ABCMeta):
:code:`pred` and :code:`label` in :code:`compute`.
For examples, prediction results contains 10 classes, while :code:`pred`
shape is [N, 10], :code:`label` shape is [N, 1], N is mini-batch size,
and we only need to calculate accurary of top-1 and top-5, we could
and we only need to calculate accuracy of top-1 and top-5, we could
calculate the correct prediction matrix of the top-5 scores of the
prediction of each sample like follows, while the correct prediction
matrix shape is [N, 5].
Expand Down Expand Up @@ -160,7 +160,7 @@ def name(self):

def compute(self, *args):
"""
This API is advanced usage to accelerate metric calculating, calulations
This API is advanced usage to accelerate metric calculating, calculations
from outputs of model to the states which should be updated by Metric can
be defined here, where Paddle OPs is also supported. Outputs of this API
will be the inputs of "Metric.update".
Expand Down
6 changes: 3 additions & 3 deletions python/paddle/nn/clip.py
Original file line number Diff line number Diff line change
Expand Up @@ -392,7 +392,7 @@ class ClipGradByValue(ClipGradBase):

Note:
``need_clip`` of ``ClipGradByValue`` HAS BEEN DEPRECATED since 2.0.
Please use ``need_clip`` in ``ParamAttr`` to speficiy the clip scope.
Please use ``need_clip`` in ``ParamAttr`` to specify the clip scope.

Args:
max (float): The maximum value to clip by.
Expand Down Expand Up @@ -499,7 +499,7 @@ class ClipGradByNorm(ClipGradBase):

Note:
``need_clip`` of ``ClipGradByNorm`` HAS BEEN DEPRECATED since 2.0.
Please use ``need_clip`` in ``ParamAttr`` to speficiy the clip scope.
Please use ``need_clip`` in ``ParamAttr`` to specify the clip scope.

Args:
clip_norm(float): The maximum norm value.
Expand Down Expand Up @@ -630,7 +630,7 @@ class ClipGradByGlobalNorm(ClipGradBase):

Note:
``need_clip`` of ``ClipGradByGlobalNorm`` HAS BEEN DEPRECATED since 2.0.
Please use ``need_clip`` in ``ParamAttr`` to speficiy the clip scope.
Please use ``need_clip`` in ``ParamAttr`` to specify the clip scope.

Args:
clip_norm (float): The maximum norm value.
Expand Down
2 changes: 1 addition & 1 deletion python/paddle/quantization/ptq.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ def quantize(self, model: Layer, inplace=False):
_model.eval()
assert (
not model.training
), "Post-Training Quantization shoud not work on training models. Please set evaluation mode by model.eval()."
), "Post-Training Quantization should not work on training models. Please set evaluation mode by model.eval()."
self._config._specify(_model)
self._convert_to_quant_layers(_model, self._config)
self._insert_activation_observers(_model, self._config)
Expand Down
2 changes: 1 addition & 1 deletion python/paddle/quantization/qat.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ def quantize(self, model: Layer, inplace=False):
"""
assert (
model.training
), "Quantization-Aware Training shoud work on training models. Please set training mode by model.train()."
), "Quantization-Aware Training should work on training models. Please set training mode by model.train()."
_model = model if inplace else copy.deepcopy(model)
self._config._specify(_model)
self._convert_to_quant_layers(_model, self._config)
Expand Down
4 changes: 2 additions & 2 deletions python/paddle/vision/ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -826,7 +826,7 @@ def deform_conv2d(
dilation_H = dilation_W = dilation. Default: 1.
deformable_groups (int): The number of deformable group partitions.
Default: 1.
groups (int, optonal): The groups number of the deformable conv layer. According to
groups (int, optional): The groups number of the deformable conv layer. According to
grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
Expand Down Expand Up @@ -2255,7 +2255,7 @@ def matrix_nms(
than score_threshold (if provided), then the top k candidate is selected if
nms_top_k is larger than -1. Score of the remaining candidate are then
decayed according to the Matrix NMS scheme.
Aftern NMS step, at most keep_top_k number of total bboxes are to be kept
After NMS step, at most keep_top_k number of total bboxes are to be kept
per image if keep_top_k is larger than -1.

Args:
Expand Down