diff --git a/paddle/phi/kernels/compare_kernel.h b/paddle/phi/kernels/compare_kernel.h
index 5d1bb040febb1f..34daa653995978 100644
--- a/paddle/phi/kernels/compare_kernel.h
+++ b/paddle/phi/kernels/compare_kernel.h
@@ -18,29 +18,29 @@ limitations under the License. */
 
 namespace phi {
 
-#define DECALRE_COMPARE_KERNEL(name)      \
+#define DECLARE_COMPARE_KERNEL(name)      \
   template <typename T, typename Context> \
   void name##Kernel(const Context& ctx,   \
                     const DenseTensor& x, \
                     const DenseTensor& y, \
                     DenseTensor* out);
 
-DECALRE_COMPARE_KERNEL(LessThan)
-DECALRE_COMPARE_KERNEL(LessEqual)
-DECALRE_COMPARE_KERNEL(GreaterThan)
-DECALRE_COMPARE_KERNEL(GreaterEqual)
-DECALRE_COMPARE_KERNEL(Equal)
-DECALRE_COMPARE_KERNEL(NotEqual)
-#undef DECALRE_COMPARE_KERNEL
+DECLARE_COMPARE_KERNEL(LessThan)
+DECLARE_COMPARE_KERNEL(LessEqual)
+DECLARE_COMPARE_KERNEL(GreaterThan)
+DECLARE_COMPARE_KERNEL(GreaterEqual)
+DECLARE_COMPARE_KERNEL(Equal)
+DECLARE_COMPARE_KERNEL(NotEqual)
+#undef DECLARE_COMPARE_KERNEL
 
-#define DECALRE_COMPARE_ALL_KERNEL(compare_all) \
+#define DECLARE_COMPARE_ALL_KERNEL(compare_all) \
   template <typename T, typename Context>       \
   void compare_all##Kernel(const Context& ctx,  \
                            const DenseTensor& x, \
                            const DenseTensor& y, \
                            DenseTensor* out);
 
-DECALRE_COMPARE_ALL_KERNEL(EqualAll)
-#undef DECALRE_COMPARE_KERNEL
+DECLARE_COMPARE_ALL_KERNEL(EqualAll)
+#undef DECLARE_COMPARE_KERNEL
 
 }  // namespace phi
diff --git a/paddle/phi/kernels/impl/slogdeterminant_grad_kernel_impl.h b/paddle/phi/kernels/impl/slogdeterminant_grad_kernel_impl.h
index c964f91c690037..ddf8e4e68da6d5 100644
--- a/paddle/phi/kernels/impl/slogdeterminant_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/slogdeterminant_grad_kernel_impl.h
@@ -98,7 +98,7 @@ void SlogDeterminantGradKernel(const Context& dev_ctx,
   auto grad_vec = out_grad.Split(1, 0);
   auto det_grad = grad_vec[1];
 
-  // remmove useless first dimension
+  // remove useless first dimension
   int det_grad_size = det_grad.dims().size();
   std::vector<int> det_grad_vec;
   for (int i = 1; i < det_grad_size; ++i) {
diff --git a/paddle/phi/kernels/impl/solve_grad_kernel_impl.h b/paddle/phi/kernels/impl/solve_grad_kernel_impl.h
index fa25f2a0887972..0f9ff413ec91eb 100644
--- a/paddle/phi/kernels/impl/solve_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/solve_grad_kernel_impl.h
@@ -34,7 +34,7 @@ limitations under the License. */
 namespace phi {
 
 template <typename Context, typename T>
-struct ReduceSumForSolvelGrad {
+struct ReduceSumForSolveGrad {
   void operator()(const Context& dev_ctx,
                   const DenseTensor& input,
                   DenseTensor* output,
@@ -43,7 +43,7 @@ struct ReduceSumForSolvelGrad {
 };
 
 template <typename T>
-struct ReduceSumForSolvelGrad<CPUContext, T> {
+struct ReduceSumForSolveGrad<CPUContext, T> {
   void operator()(const CPUContext& dev_ctx,
                   const DenseTensor& input,
                   DenseTensor* output,
@@ -58,7 +58,7 @@ struct ReduceSumForSolvelGrad {
 
 #if defined(__NVCC__) || defined(__HIPCC__)
 template <typename T>
-struct ReduceSumForSolvelGrad<GPUContext, T> {
+struct ReduceSumForSolveGrad<GPUContext, T> {
   void operator()(const GPUContext& dev_ctx,
                   const DenseTensor& input,
                   DenseTensor* output,
@@ -210,7 +210,7 @@ void SolveGradKernel(const Context& dev_ctx,
       if (dy_help.dims().size() != dy->dims().size()) {
         keep_dim = false;
       }
-      ReduceSumForSolvelGrad<Context, T>()(
+      ReduceSumForSolveGrad<Context, T>()(
          dev_ctx, dy_help, dy, dy_reduce_dims, keep_dim);
     }
     dy->Resize(y.dims());
@@ -257,7 +257,7 @@ void SolveGradKernel(const Context& dev_ctx,
       if (dx_help.dims().size() != dx->dims().size()) {
         keep_dim = false;
       }
-      ReduceSumForSolvelGrad<Context, T>()(
+      ReduceSumForSolveGrad<Context, T>()(
          dev_ctx, dx_help, dx, dx_reduce_dims, keep_dim);
     }
     dx->Resize(x.dims());
diff --git a/paddle/phi/kernels/impl/triangular_solve_grad_kernel_impl.h b/paddle/phi/kernels/impl/triangular_solve_grad_kernel_impl.h
index 8faca812a0218b..483694335a7020 100644
--- a/paddle/phi/kernels/impl/triangular_solve_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/triangular_solve_grad_kernel_impl.h
@@ -55,7 +55,7 @@ void TriangularSolveGradKernel(const Context& dev_ctx,
       x.data<T>(), x.numel(), dev_ctx.template Alloc<T>(&x_conj));
   x_for_range(x_functor);
 
-  // reuse forward to get dy_bst, and the result has been broadcated already.
+  // reuse forward to get dy_bst, and the result has been broadcasted already.
   TriangularSolveKernel<T, Context>(
       dev_ctx, x_conj, dout, upper, !transpose, unitriangular, &dy_bst);
 
diff --git a/paddle/phi/kernels/impl/warprnnt_kernel_impl.h b/paddle/phi/kernels/impl/warprnnt_kernel_impl.h
index 80ccf6e21b5377..52d3d394d36672 100644
--- a/paddle/phi/kernels/impl/warprnnt_kernel_impl.h
+++ b/paddle/phi/kernels/impl/warprnnt_kernel_impl.h
@@ -115,7 +115,7 @@ class WarpRNNTFunctor {
    * \param D number of vocab symbols, w/ blank.
    * \param B number of example.
    * \param blank blank label used in rnnt loss function.
-   * \param cpu_losss loss of each example in CPU memory.
+   * \param cpu_loss loss of each example in CPU memory.
    */
   void operator()(const Context& dev_ctx,
                   const T* input,
diff --git a/paddle/phi/kernels/onednn/conv_grad_kernel.cc b/paddle/phi/kernels/onednn/conv_grad_kernel.cc
index ffec237c8a9885..8c7121740370e2 100644
--- a/paddle/phi/kernels/onednn/conv_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/conv_grad_kernel.cc
@@ -96,7 +96,7 @@ void ConvGradKernel(const Context& dev_ctx,
         handler.AcquireDiffDstMemoryWithReorderFromWeightsPrimitive(
             &out_grad);
 
-    // For convoluition with groups write filter grad into
+    // For convolution with groups write filter grad into
     // oneDNN buffer and then we reorder it into filter_grad tensor
     int g = std::max(groups, 1);
     auto diff_weights_memory_p =
diff --git a/paddle/phi/kernels/primitive/functor_primitives_xpu2.h b/paddle/phi/kernels/primitive/functor_primitives_xpu2.h
index 35cea6e6927871..08e47840f60096 100644
--- a/paddle/phi/kernels/primitive/functor_primitives_xpu2.h
+++ b/paddle/phi/kernels/primitive/functor_primitives_xpu2.h
@@ -184,7 +184,7 @@ struct DivFunctor<T,
   inline T initial() { return static_cast<T>(1.0f); }
 
   inline HOSTDEVICE T operator()(const T& a, const T& b) const {
-    // For int32/int64, need to check whether the divison is zero.
+    // For int32/int64, need to check whether the division is zero.
     PADDLE_ENFORCE_NE(b,
                       0,
                       phi::errors::InvalidArgument(
diff --git a/paddle/phi/kernels/sparse/cpu/conv_kernel.cc b/paddle/phi/kernels/sparse/cpu/conv_kernel.cc
index 9f51885a94e1c0..52695d8b4a3ffb 100644
--- a/paddle/phi/kernels/sparse/cpu/conv_kernel.cc
+++ b/paddle/phi/kernels/sparse/cpu/conv_kernel.cc
@@ -137,7 +137,7 @@ void Conv3dCooCPUKernel(const CPUContext& dev_ctx,
     Gather<T, IntT>(
         x.values().data<T>(), rulebook_ptr + n, n, in_channels, in_features_ptr);
 
-    // 3. call gemm for every werght
+    // 3. call gemm for every weight
     auto blas = phi::funcs::GetBlas<CPUContext, T>(dev_ctx);
     int offset = 0;
     for (int i = 0; i < kernel_size; i++) {
diff --git a/paddle/phi/kernels/sparse/cpu/softmax_kernel.cc b/paddle/phi/kernels/sparse/cpu/softmax_kernel.cc
index ea790508ab1679..cb6e89f3612951 100644
--- a/paddle/phi/kernels/sparse/cpu/softmax_kernel.cc
+++ b/paddle/phi/kernels/sparse/cpu/softmax_kernel.cc
@@ -160,7 +160,7 @@ void SoftmaxCooCPUKernel(const Context& dev_ctx,
   }
 }
 
-// cpu kerenel
+// cpu kernel
 template <typename T, typename Context>
 void SoftmaxCooKernel(const Context& dev_ctx,
                       const SparseCooTensor& x,
diff --git a/paddle/phi/kernels/stride/slice_kernel.cc b/paddle/phi/kernels/stride/slice_kernel.cc
index 4e693ab4b0d32d..3981f16ab6c424 100644
--- a/paddle/phi/kernels/stride/slice_kernel.cc
+++ b/paddle/phi/kernels/stride/slice_kernel.cc
@@ -75,7 +75,7 @@ void SliceStridedKernel(const Context& ctx,
     }
   }
   if (FLAGS_set_to_1d && new_shape.size() == 0) {
-    // NOTE(zoooo0820): Hack procssing to 1-D, when axes decrease to 0-D in
+    // NOTE(zoooo0820): Hack processing to 1-D, when axes decrease to 0-D in
     // slice. This will remove in release 2.6.
     new_shape.push_back(1);
     new_stride.push_back(0);