Merged
22 changes: 11 additions & 11 deletions paddle/phi/kernels/compare_kernel.h
@@ -18,29 +18,29 @@ limitations under the License. */
 
 namespace phi {
 
-#define DECALRE_COMPARE_KERNEL(name)        \
+#define DECLARE_COMPARE_KERNEL(name)        \
   template <typename T, typename Context>   \
   void name##Kernel(const Context& ctx,     \
                     const DenseTensor& x,   \
                     const DenseTensor& y,   \
                     DenseTensor* out);
 
-DECALRE_COMPARE_KERNEL(LessThan)
-DECALRE_COMPARE_KERNEL(LessEqual)
-DECALRE_COMPARE_KERNEL(GreaterThan)
-DECALRE_COMPARE_KERNEL(GreaterEqual)
-DECALRE_COMPARE_KERNEL(Equal)
-DECALRE_COMPARE_KERNEL(NotEqual)
-#undef DECALRE_COMPARE_KERNEL
+DECLARE_COMPARE_KERNEL(LessThan)
+DECLARE_COMPARE_KERNEL(LessEqual)
+DECLARE_COMPARE_KERNEL(GreaterThan)
+DECLARE_COMPARE_KERNEL(GreaterEqual)
+DECLARE_COMPARE_KERNEL(Equal)
+DECLARE_COMPARE_KERNEL(NotEqual)
+#undef DECLARE_COMPARE_KERNEL
 
-#define DECALRE_COMPARE_ALL_KERNEL(compare_all) \
+#define DECLARE_COMPARE_ALL_KERNEL(compare_all) \
   template <typename T, typename Context>       \
   void compare_all##Kernel(const Context& ctx,  \
                            const DenseTensor& x, \
                            const DenseTensor& y, \
                            DenseTensor* out);
 
-DECALRE_COMPARE_ALL_KERNEL(EqualAll)
-#undef DECALRE_COMPARE_KERNEL
+DECLARE_COMPARE_ALL_KERNEL(EqualAll)
+#undef DECLARE_COMPARE_KERNEL
 
 } // namespace phi
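
The block above is the X-macro declaration pattern: one macro stamps out a whole family of kernel declarations and is then #undef'd so the name cannot leak into other headers. A minimal standalone sketch of the same pattern (toy int signatures in place of the phi tensor types):

#include <cstdio>

// One macro generates one declaration per comparison op.
#define DECLARE_COMPARE_KERNEL(name) void name##Kernel(int x, int y);

DECLARE_COMPARE_KERNEL(LessThan)  // -> void LessThanKernel(int x, int y);
DECLARE_COMPARE_KERNEL(Equal)     // -> void EqualKernel(int x, int y);
#undef DECLARE_COMPARE_KERNEL     // retire the macro once the family exists

void LessThanKernel(int x, int y) { std::printf("%d\n", x < y); }
void EqualKernel(int x, int y) { std::printf("%d\n", x == y); }

int main() {
  LessThanKernel(1, 2);  // prints 1
  EqualKernel(1, 2);     // prints 0
  return 0;
}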
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/slogdeterminant_grad_kernel_impl.h
@@ -98,7 +98,7 @@ void SlogDeterminantGradKernel(const Context& dev_ctx,
   auto grad_vec = out_grad.Split(1, 0);
   auto det_grad = grad_vec[1];
 
-  // remmove useless first dimension
+  // remove useless first dimension
   int det_grad_size = det_grad.dims().size();
   std::vector<int> det_grad_vec;
   for (int i = 1; i < det_grad_size; ++i) {
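
For context, det_grad comes from Split(1, 0) on a tensor whose leading dimension stacks sign and log|det|, so each piece keeps a useless leading 1 that the loop above drops before the Resize. A standalone sketch of that squeeze (plain std::vector standing in for the DDim API):

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> dims = {1, 4, 4};  // leading 1 left over from the split
  std::vector<int> det_grad_vec(dims.begin() + 1, dims.end());  // {4, 4}
  std::printf("rank %zu -> rank %zu\n", dims.size(), det_grad_vec.size());
  return 0;
}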
10 changes: 5 additions & 5 deletions paddle/phi/kernels/impl/solve_grad_kernel_impl.h
@@ -34,7 +34,7 @@ limitations under the License. */
 namespace phi {
 
 template <typename Context, typename T>
-struct ReduceSumForSolvelGrad {
+struct ReduceSumForSolveGrad {
   void operator()(const Context& dev_ctx,
                   const DenseTensor& input,
                   DenseTensor* output,
@@ -43,7 +43,7 @@ struct ReduceSumForSolvelGrad {
 };
 
 template <typename T>
-struct ReduceSumForSolvelGrad<CPUContext, T> {
+struct ReduceSumForSolveGrad<CPUContext, T> {
   void operator()(const CPUContext& dev_ctx,
                   const DenseTensor& input,
                   DenseTensor* output,
@@ -58,7 +58,7 @@ struct ReduceSumForSolvelGrad<CPUContext, T> {
 
 #if defined(__NVCC__) || defined(__HIPCC__)
 template <typename T>
-struct ReduceSumForSolvelGrad<GPUContext, T> {
+struct ReduceSumForSolveGrad<GPUContext, T> {
   void operator()(const GPUContext& dev_ctx,
                   const DenseTensor& input,
                   DenseTensor* output,
@@ -210,7 +210,7 @@ void SolveGradKernel(const Context& dev_ctx,
     if (dy_help.dims().size() != dy->dims().size()) {
       keep_dim = false;
     }
-    ReduceSumForSolvelGrad<Context, T>()(
+    ReduceSumForSolveGrad<Context, T>()(
         dev_ctx, dy_help, dy, dy_reduce_dims, keep_dim);
   }
   dy->Resize(y.dims());
@@ -257,7 +257,7 @@ void SolveGradKernel(const Context& dev_ctx,
     if (dx_help.dims().size() != dx->dims().size()) {
      keep_dim = false;
     }
-    ReduceSumForSolvelGrad<Context, T>()(
+    ReduceSumForSolveGrad<Context, T>()(
        dev_ctx, dx_help, dx, dx_reduce_dims, keep_dim);
   }
   dx->Resize(x.dims());
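
The renamed ReduceSumForSolveGrad follows a common phi pattern: a functor templated on the device-context type, with one specialization per backend selected at compile time. A self-contained sketch of that dispatch (empty tag structs and std::vector in place of the real contexts and DenseTensor):

#include <cstdio>
#include <numeric>
#include <vector>

struct CPUContext {};  // stand-ins for the phi context types
struct GPUContext {};

// Primary template: only declared, so an unsupported backend fails to compile.
template <typename Context, typename T>
struct ReduceSum;

// CPU specialization: a plain accumulate.
template <typename T>
struct ReduceSum<CPUContext, T> {
  T operator()(const CPUContext&, const std::vector<T>& v) const {
    return std::accumulate(v.begin(), v.end(), T{0});
  }
};

int main() {
  CPUContext ctx;
  std::vector<int> v = {1, 2, 3, 4};
  std::printf("%d\n", ReduceSum<CPUContext, int>()(ctx, v));  // prints 10
  return 0;
}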
@@ -55,7 +55,7 @@ void TriangularSolveGradKernel(const Context& dev_ctx,
       x.data<T>(), x.numel(), dev_ctx.template Alloc<T>(&x_conj));
   x_for_range(x_functor);
 
-  // reuse forward to get dy_bst, and the result has been broadcated already.
+  // reuse forward to get dy_bst, and the result has been broadcasted already.
   TriangularSolveKernel<T, Context>(
       dev_ctx, x_conj, dout, upper, !transpose, unitriangular, &dy_bst);
 
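
The comment fixed here points at a neat trick: the backward pass of a triangular solve is itself a triangular solve. If the forward op solves x * out = y, then dL/dy = x^{-H} * dL/dout, which is exactly the forward kernel run on conj(x) with the transpose flag flipped, as in the call above. A tiny standalone check of that identity for the real 2x2 case:

#include <cstdio>

int main() {
  // Lower-triangular x and an upstream gradient dout.
  double x[2][2] = {{2.0, 0.0}, {1.0, 3.0}};
  double dout[2] = {4.0, 6.0};

  // dL/dy solves x^T * dy = dout; x^T is upper-triangular, so
  // back-substitute from the last row.
  double dy[2];
  dy[1] = dout[1] / x[1][1];                      // 3 * dy1 = 6       -> dy1 = 2
  dy[0] = (dout[0] - x[1][0] * dy[1]) / x[0][0];  // 2 * dy0 + dy1 = 4 -> dy0 = 1
  std::printf("dy = (%g, %g)\n", dy[0], dy[1]);
  return 0;
}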
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/warprnnt_kernel_impl.h
@@ -115,7 +115,7 @@ class WarpRNNTFunctor {
    * \param D number of vocab symbols, w/ blank.
    * \param B number of example.
    * \param blank blank label used in rnnt loss function.
-   * \param cpu_losss loss of each example in CPU memory.
+   * \param cpu_loss loss of each example in CPU memory.
    */
   void operator()(const Context& dev_ctx,
                   const T* input,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/onednn/conv_grad_kernel.cc
@@ -96,7 +96,7 @@ void ConvGradKernel(const Context& dev_ctx,
         handler.AcquireDiffDstMemoryWithReorderFromWeightsPrimitive(
             &out_grad);
 
-    // For convoluition with groups write filter grad into
+    // For convolution with groups write filter grad into
     // oneDNN buffer and then we reorder it into filter_grad tensor
     int g = std::max(groups, 1);
     auto diff_weights_memory_p =
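
For background, oneDNN keeps grouped-convolution weights in a grouped layout with an extra leading groups dimension, which is why the filter gradient is first written into a oneDNN buffer and only then reordered into the filter_grad tensor. A rough illustration of the dimension split (illustrative sizes, not the oneDNN API):

#include <cstdio>

int main() {
  // Plain filter dims (O, I/g, H, W) with g groups are viewed as
  // (g, O/g, I/g, H, W), i.e. oneDNN's grouped ("goihw"-style) layout.
  int O = 8, I_per_g = 3, H = 3, W = 3, g = 2;
  std::printf("oihw:  (%d, %d, %d, %d)\n", O, I_per_g, H, W);
  std::printf("goihw: (%d, %d, %d, %d, %d)\n", g, O / g, I_per_g, H, W);
  return 0;
}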
2 changes: 1 addition & 1 deletion paddle/phi/kernels/primitive/functor_primitives_xpu2.h
@@ -184,7 +184,7 @@ struct DivFunctor<T,
   inline T initial() { return static_cast<T>(1.0f); }
 
   inline HOSTDEVICE T operator()(const T& a, const T& b) const {
-    // For int32/int64, need to check whether the divison is zero.
+    // For int32/int64, need to check whether the division is zero.
     PADDLE_ENFORCE_NE(b,
                       0,
                       phi::errors::InvalidArgument(
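
The comment being fixed guards integer division: for int32/int64 a zero divisor is undefined behavior, so the functor checks it with PADDLE_ENFORCE_NE, while floating-point division is left unguarded. A standalone sketch of the same guard (a std exception in place of PADDLE_ENFORCE_NE):

#include <cstdio>
#include <stdexcept>
#include <type_traits>

template <typename T>
T SafeDiv(T a, T b) {
  if constexpr (std::is_integral_v<T>) {
    // Integer division by zero is UB, so reject it up front.
    if (b == 0) throw std::invalid_argument("division by zero");
  }
  return a / b;  // floats may yield inf/nan, which is well defined
}

int main() {
  std::printf("%d\n", SafeDiv(10, 3));     // prints 3
  std::printf("%g\n", SafeDiv(1.0, 0.0));  // prints inf
  try {
    SafeDiv(1, 0);
  } catch (const std::exception& e) {
    std::printf("caught: %s\n", e.what());
  }
  return 0;
}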
2 changes: 1 addition & 1 deletion paddle/phi/kernels/sparse/cpu/conv_kernel.cc
@@ -137,7 +137,7 @@ void Conv3dCooCPUKernel(const CPUContext& dev_ctx,
   Gather<T, IntT>(
       x.values().data<T>(), rulebook_ptr + n, n, in_channels, in_features_ptr);
 
-  // 3. call gemm for every werght
+  // 3. call gemm for every weight
   auto blas = phi::funcs::GetBlas<CPUContext, T>(dev_ctx);
   int offset = 0;
   for (int i = 0; i < kernel_size; i++) {
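
The fixed comment names the gather-GEMM step of sparse convolution: features picked out by the rulebook are gathered into a dense buffer, then each kernel offset gets its own matrix multiply against that offset's weight slice. A toy standalone version of the loop (naive GEMM and made-up shapes; the real kernel calls into BLAS):

#include <cstdio>
#include <vector>

// Naive row-major GEMM: C(m x n) += A(m x k) * B(k x n).
void Gemm(int m, int k, int n, const float* A, const float* B, float* C) {
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j)
      for (int p = 0; p < k; ++p) C[i * n + j] += A[i * k + p] * B[p * n + j];
}

int main() {
  const int kernel_size = 2, in_c = 3, out_c = 4;
  const int counts[kernel_size] = {2, 1};  // gathered rows per kernel offset
  std::vector<float> in(3 * in_c, 1.0f);   // gathered input features
  std::vector<float> w(kernel_size * in_c * out_c, 0.5f);  // per-offset weights
  std::vector<float> out(3 * out_c, 0.0f);
  int offset = 0;
  for (int i = 0; i < kernel_size; ++i) {  // one GEMM per kernel offset
    Gemm(counts[i], in_c, out_c, in.data() + offset * in_c,
         w.data() + i * in_c * out_c, out.data() + offset * out_c);
    offset += counts[i];
  }
  std::printf("out[0] = %g\n", out[0]);  // 3 terms * 1.0 * 0.5 = 1.5
  return 0;
}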
2 changes: 1 addition & 1 deletion paddle/phi/kernels/sparse/cpu/softmax_kernel.cc
@@ -160,7 +160,7 @@ void SoftmaxCooCPUKernel(const Context& dev_ctx,
   }
 }
 
-// cpu kerenel
+// cpu kernel
 template <typename T, typename Context>
 void SoftmaxCooKernel(const Context& dev_ctx,
                       const SparseCooTensor& x,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/stride/slice_kernel.cc
@@ -75,7 +75,7 @@ void SliceStridedKernel(const Context& ctx,
     }
   }
   if (FLAGS_set_to_1d && new_shape.size() == 0) {
-    // NOTE(zoooo0820): Hack procssing to 1-D, when axes decrease to 0-D in
+    // NOTE(zoooo0820): Hack processing to 1-D, when axes decrease to 0-D in
     // slice. This will remove in release 2.6.
     new_shape.push_back(1);
     new_stride.push_back(0);
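
The NOTE above covers a transitional corner case: if every sliced axis is squeezed away, new_shape ends up empty, i.e. a 0-D view, and under the legacy FLAGS_set_to_1d behavior it is padded back to a one-element 1-D shape. A standalone sketch of that fallback:

#include <cstdio>
#include <vector>

int main() {
  bool flags_set_to_1d = true;  // stand-in for FLAGS_set_to_1d
  std::vector<int> new_shape;   // empty: the slice collapsed to 0-D
  std::vector<int> new_stride;
  if (flags_set_to_1d && new_shape.empty()) {
    new_shape.push_back(1);   // present the result as shape [1]
    new_stride.push_back(0);  // single element, so the stride never matters
  }
  std::printf("rank = %zu\n", new_shape.size());  // prints rank = 1
  return 0;
}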