paddle/phi/kernels/cpu/eig.h (1 addition & 1 deletion)
@@ -218,7 +218,7 @@ void ApplyEigKernel(const DenseTensor& input,
   int num_dims = input.dims().size();
 
   // transfer to column-major memory layout i.e. common::make_ddim from
-  // tranposed_input: [batch,row,col]->[batch,col,row]
+  // transposed_input: [batch,row,col]->[batch,col,row]
   TransposeTwoAxis<T, Context>(
       input, &input_column_major, num_dims - 1, num_dims - 2, dev_ctx);
   // make sure 'vectors_row_major' holds memory before passed to LapackEig()
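Why this transpose exists: LAPACK routines such as the one behind LapackEig() expect column-major (Fortran-order) storage, while the tensor here is row-major. Transposing the last two axes of a row-major buffer is exactly a conversion to column-major. A minimal standalone sketch of the idea (plain C++, not the phi API; rowMajorToColMajor is a hypothetical helper):

#include <cassert>
#include <vector>

// Hypothetical helper: transpose the last two axes of a row-major
// [rows, cols] buffer. The result, read linearly, is the column-major
// layout of the original matrix, which is the layout LAPACK expects.
std::vector<double> rowMajorToColMajor(const std::vector<double>& a,
                                       int rows, int cols) {
  std::vector<double> out(a.size());
  for (int r = 0; r < rows; ++r)
    for (int c = 0; c < cols; ++c)
      out[c * rows + r] = a[r * cols + c];  // out is [cols, rows] row-major
  return out;
}

int main() {
  // 2x3 row-major matrix {{1,2,3},{4,5,6}}.
  std::vector<double> a = {1, 2, 3, 4, 5, 6};
  std::vector<double> cm = rowMajorToColMajor(a, 2, 3);
  // Column-major order walks columns first: 1,4,2,5,3,6.
  assert((cm == std::vector<double>{1, 4, 2, 5, 3, 6}));
  return 0;
}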
paddle/phi/kernels/cpu/embedding_grad_kernel.cc (2 additions & 2 deletions)
@@ -115,7 +115,7 @@ void EmbeddingGradKernel(const Context& ctx,
     functor.template apply<int64_t>();
   } else {
     PADDLE_THROW(phi::errors::Unimplemented(
-        "emebdding input only support int32 and int64"));
+        "embedding input only support int32 and int64"));
   }
 }
 
@@ -196,7 +196,7 @@ void EmbeddingSparseGradKernel(const Context& ctx,
     functor.template apply<int64_t>();
   } else {
     PADDLE_THROW(phi::errors::Unimplemented(
-        "emebdding input only support int32 and int64"));
+        "embedding input only support int32 and int64"));
   }
 }
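For context on the branch being fixed: the dispatch above picks the lookup kernel from the runtime dtype of the input ids and rejects anything else. A simplified standalone sketch of that pattern (illustrative names only, not the phi API):

#include <cstdint>
#include <stdexcept>

enum class DataType { INT32, INT64, FLOAT32 };

template <typename IdT>
void LookupRows(/* table, ids, out, ... */) {
  // Gather rows of the embedding table by id of type IdT.
}

// Dispatch on the runtime index dtype; embedding ids must be integral,
// so any other dtype is reported as unimplemented.
void EmbeddingDispatch(DataType ids_dtype) {
  if (ids_dtype == DataType::INT32) {
    LookupRows<int32_t>();
  } else if (ids_dtype == DataType::INT64) {
    LookupRows<int64_t>();
  } else {
    throw std::runtime_error(
        "embedding input only support int32 and int64");
  }
}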
paddle/phi/kernels/cpu/matrix_nms_kernel.cc (1 addition & 1 deletion)
@@ -24,7 +24,7 @@ namespace phi {
 template <class T>
 static inline T BBoxArea(const T* box, const bool normalized) {
   if (box[2] < box[0] || box[3] < box[1]) {
-    // If coordinate values are is invalid
+    // If coordinate values are invalid
     // (e.g. xmax < xmin or ymax < ymin), return 0.
     return static_cast<T>(0.);
   } else {
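The fixed comment covers the degenerate branch; for orientation, a self-contained sketch of the whole area computation, assuming the usual detection convention of +1 for inclusive, unnormalized pixel coordinates (the exact phi body may differ):

template <class T>
T BBoxAreaSketch(const T* box, const bool normalized) {
  if (box[2] < box[0] || box[3] < box[1]) {
    // Invalid box (xmax < xmin or ymax < ymin): define its area as 0 so
    // degenerate boxes never contribute to IoU.
    return static_cast<T>(0.);
  }
  const T w = box[2] - box[0];
  const T h = box[3] - box[1];
  if (normalized) {
    return w * h;  // coordinates already in [0, 1]
  }
  // Integer pixel coordinates are inclusive, hence the +1 on each side.
  return (w + static_cast<T>(1)) * (h + static_cast<T>(1));
}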
paddle/phi/kernels/cpu/mode_grad_kernel.cc (1 addition & 1 deletion)
@@ -147,7 +147,7 @@ void ModeGradKernel(const Context& dev_ctx,
       common::slice_ddim(trans_in_shape, 0, trans_in_shape.size() - 1));
   const int64_t input_width = trans_in_shape[trans_in_shape.size() - 1];
 
-  // Assign the out_grad to tranpose input_grad
+  // Assign the out_grad to transpose input_grad
   DenseTensor tmp_out;
   tmp_out.Resize(trans_in_shape);
   T* t_out = dev_ctx.template Alloc<T>(&tmp_out);
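What "assign the out_grad to transpose input_grad" means concretely: with the reduced axis transposed to the end, each row of the gradient is zero except at the recorded mode index. A simplified sketch of that scatter over flat arrays (a hypothetical helper, not the phi code; the real kernel transposes tmp_out back to input layout afterwards):

#include <cstdint>
#include <cstring>

// For each row of length `width`, write the single incoming gradient
// value at the position recorded in `indices`; everything else stays 0.
// This is the scatter that precedes the transpose back to input layout.
void ScatterLastDimGrad(const float* out_grad,   // [height]
                        const int64_t* indices,  // [height]
                        float* in_grad,          // [height * width]
                        int64_t height, int64_t width) {
  std::memset(in_grad, 0, sizeof(float) * height * width);
  for (int64_t i = 0; i < height; ++i) {
    in_grad[i * width + indices[i]] = out_grad[i];
  }
}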
paddle/phi/kernels/cpu/mode_kernel.cc (1 addition & 1 deletion)
@@ -49,7 +49,7 @@ void ModeKernel(const Context& dev_ctx,
   }
 
   // if axis is not the last dim, transpose it to the last dim, do the
-  // calculation, then tranpose it back to original axis.
+  // calculation, then transpose it back to original axis.
   if (axis == in_dims.size() - 1) {
     const int64_t& input_height =
         common::product(common::slice_ddim(in_dims, 0, in_dims.size() - 1));
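The transpose in this comment is a single axis swap: build a permutation that exchanges `axis` with the last dimension and keeps every other axis in place. A sketch of that construction, assumed from the `trans` loop visible in the topk kernels below:

#include <vector>

// Permutation that swaps `axis` with the last dimension, e.g. for
// ndims = 4, axis = 1 it yields {0, 3, 2, 1}. Because it is a pure swap,
// applying it twice restores the original order.
std::vector<int> SwapWithLastDim(int ndims, int axis) {
  std::vector<int> trans;
  trans.reserve(ndims);
  for (int i = 0; i < axis; ++i) trans.emplace_back(i);
  trans.emplace_back(ndims - 1);
  for (int i = axis + 1; i < ndims - 1; ++i) trans.emplace_back(i);
  trans.emplace_back(axis);
  return trans;
}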
paddle/phi/kernels/cpu/svd_kernel.cc (1 addition & 1 deletion)
@@ -121,7 +121,7 @@ void SvdKernel(const Context& dev_ctx,
   BatchSvd<T>(x_data, U_out, VH_out, S_out, rows, cols, batches, full);
   /* let C[m, n] as a col major matrix with m rows and n cols.
    * let R[m, n] is row major matrix with m rows and n cols.
-   * then we have: R[m,n] = C[m, n].resize((n,m)).tranpose_last_two()
+   * then we have: R[m,n] = C[m, n].resize((n,m)).transpose_last_two()
    * */
   auto col_major_to_row_major = [&dev_ctx](DenseTensor* out) {
     auto origin_dim = out->dims();
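The identity in the fixed comment, unpacked: reading a column-major m-by-n buffer as a row-major n-by-m matrix moves no data, and transposing the last two axes of that view yields the row-major m-by-n original. A tiny numeric check in plain C++, with the transpose written out by hand:

#include <cassert>
#include <vector>

int main() {
  // C[2, 3]: column-major storage of {{1,2,3},{4,5,6}}.
  std::vector<int> c = {1, 4, 2, 5, 3, 6};
  // Step 1 ("resize((n, m))"): reinterpret the same buffer as a
  // row-major 3x2 matrix. No data movement, only a new shape.
  const int n = 3, m = 2;
  // Step 2 ("transpose_last_two"): r[i][j] = view[j][i].
  std::vector<int> r(m * n);
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j)
      r[i * n + j] = c[j * m + i];
  // Result is the row-major R[2, 3] = {{1,2,3},{4,5,6}}.
  assert((r == std::vector<int>{1, 2, 3, 4, 5, 6}));
  return 0;
}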
paddle/phi/kernels/cpu/top_k_grad_kernel.cc (1 addition & 1 deletion)
@@ -124,7 +124,7 @@ void TopkGradKernel(const Context& dev_ctx,
       common::slice_ddim(trans_in_dims, 0, trans_in_dims.size() - 1));
   const int64_t input_width = trans_in_dims[trans_in_dims.size() - 1];
 
-  // Assign the out_grad to tranpose input_grad
+  // Assign the out_grad to transpose input_grad
   DenseTensor tmp_out;
   tmp_out.Resize(trans_in_dims);
   T* t_out = dev_ctx.template Alloc<T>(&tmp_out);
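Same scatter idea as sketched for the mode gradient above, except topk carries k gradient values per row, routed by the k indices saved in the forward pass. A hypothetical flat-array sketch, not the phi code:

#include <cstdint>
#include <cstring>

// Topk variant of the gradient scatter: each row receives k values,
// placed at the k positions recorded by the forward pass.
void ScatterTopkGrad(const float* out_grad,   // [height * k]
                     const int64_t* indices,  // [height * k]
                     float* in_grad,          // [height * width]
                     int64_t height, int64_t width, int64_t k) {
  std::memset(in_grad, 0, sizeof(float) * height * width);
  for (int64_t i = 0; i < height; ++i)
    for (int64_t j = 0; j < k; ++j)
      in_grad[i * width + indices[i * k + j]] = out_grad[i * k + j];
}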
paddle/phi/kernels/cpu/top_k_kernel.cc (1 addition & 1 deletion)
@@ -184,7 +184,7 @@ void TopkKernel(const Context& dev_ctx,
                 largest,
                 sorted);
   } else {
-    // if the topk dims is not last dim, will tranpose and do topk
+    // if the topk dims is not last dim, will transpose and do topk
     std::vector<int> trans;
     for (int i = 0; i < axis; i++) {
       trans.emplace_back(i);
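Once the topk axis is last, the selection runs independently over contiguous rows. A reference sketch of that per-row step using std::partial_sort (a simplified stand-in for the phi kernel, not its actual implementation):

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Top-k over the last dimension of a flattened [height, width] buffer.
// Each row is contiguous, so rows can be processed independently.
void TopkLastDim(const float* in, float* out_vals, int64_t* out_inds,
                 int64_t height, int64_t width, int64_t k, bool largest) {
  std::vector<std::pair<float, int64_t>> row(width);
  for (int64_t i = 0; i < height; ++i) {
    for (int64_t j = 0; j < width; ++j) row[j] = {in[i * width + j], j};
    auto cmp = [largest](const std::pair<float, int64_t>& a,
                         const std::pair<float, int64_t>& b) {
      return largest ? a.first > b.first : a.first < b.first;
    };
    // Order only the first k elements; the rest can stay unsorted.
    std::partial_sort(row.begin(), row.begin() + k, row.end(), cmp);
    for (int64_t j = 0; j < k; ++j) {
      out_vals[i * k + j] = row[j].first;
      out_inds[i * k + j] = row[j].second;
    }
  }
}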
paddle/phi/kernels/gpu/mode_kernel.cu (2 additions & 2 deletions)
@@ -93,7 +93,7 @@ void ModeKernel(const Context& dev_ctx,
   }
   trans_out_shape[in_dims.size() - 1] = 1;
 
-  // second step, tranpose the input
+  // second step, transpose the input
   DenseTensor trans_input;
   trans_input.Resize(trans_shape);
   dev_ctx.template Alloc<T>(&trans_input);
@@ -118,7 +118,7 @@
       input_height,
       trans_out_data,
       trans_ind_data);
-  // last step, tranpose back the indices and output
+  // last step, transpose back the indices and output
   funcs::TransCompute<Context, int64_t>(
       ndims, dev_ctx, trans_ind, indices, trans_axis);
   funcs::TransCompute<Context, T>(ndims, dev_ctx, trans_out, out, trans_axis);
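For completeness, the per-row computation those transposes bracket: the mode of a row can be found by sorting a copy and scanning for the longest run of equal values. A CPU reference sketch (the tie-break toward the larger value is an illustrative choice, not necessarily phi's):

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Mode of one contiguous row: sort a copy, then take the value with the
// longest run. Returns {mode, index of its last occurrence in `row`}.
std::pair<float, int64_t> ModeOfRow(const std::vector<float>& row) {
  std::vector<float> sorted = row;
  std::sort(sorted.begin(), sorted.end());
  float mode = sorted[0];
  int64_t best = 1, run = 1;
  for (size_t j = 1; j < sorted.size(); ++j) {
    run = (sorted[j] == sorted[j - 1]) ? run + 1 : 1;
    if (run >= best) {  // >= so the later (larger) value wins ties
      best = run;
      mode = sorted[j];
    }
  }
  // Report where the mode sits in the original row.
  int64_t idx = 0;
  for (size_t j = 0; j < row.size(); ++j)
    if (row[j] == mode) idx = static_cast<int64_t>(j);
  return {mode, idx};
}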
paddle/phi/kernels/gpu/top_k_kernel.cu (5 additions & 5 deletions)
@@ -228,10 +228,10 @@ void TopkKernel(const Context& dev_ctx,
           "the input data shape has error in the topk cuda kernel."));
     }
   } else {
-    // if get topK not from the last axis, will tranpose the tensor and get
+    // if get topK not from the last axis, will transpose the tensor and get
     // TopK
 
-    // first step, prepare the trans args for the tranpose
+    // first step, prepare the trans args for the transpose
     std::vector<int> trans;
     for (int i = 0; i < axis; i++) {
       trans.emplace_back(i);
@@ -248,7 +248,7 @@ void TopkKernel(const Context& dev_ctx,
       trans_dims[i] = in_dims[trans[i]];
       trans_out_dims[i] = out_dims[trans[i]];
     }
-    // second step, tranpose the input
+    // second step, transpose the input
    DenseTensor trans_input;
    trans_input.Resize(trans_dims);
    dev_ctx.template Alloc<T>(&trans_input);
@@ -282,7 +282,7 @@ void TopkKernel(const Context& dev_ctx,
             &trans_out,
             &trans_ind,
             largest)) {
-      // last step, tranpose back the indices and output
+      // last step, transpose back the indices and output
       funcs::TransCompute<phi::GPUContext, int64_t>(
           ndims, dev_ctx, trans_ind, indices, trans);
       funcs::TransCompute<phi::GPUContext, T>(
@@ -337,7 +337,7 @@ void TopkKernel(const Context& dev_ctx,
           "the input data shape has error in the topk cuda kernel."));
     }
 
-    // last step, tranpose back the indices and output
+    // last step, transpose back the indices and output
     funcs::TransCompute<phi::GPUContext, int64_t>(
         ndims, dev_ctx, trans_ind, indices, trans);
     funcs::TransCompute<phi::GPUContext, T>(
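A detail that makes the "transpose back" steps above cheap to reason about: the `trans` permutation only swaps `axis` with the last dimension, so it is its own inverse, and the same vector can be passed to TransCompute for the return trip. A quick standalone check:

#include <cassert>
#include <numeric>
#include <vector>

int main() {
  // trans swaps axis 1 with the last axis of a 4-D tensor: {0, 3, 2, 1}.
  std::vector<int> trans = {0, 3, 2, 1};
  // Composing the permutation with itself yields the identity, which is
  // why the same `trans` undoes the transpose it performed.
  std::vector<int> twice(trans.size());
  for (size_t i = 0; i < trans.size(); ++i) twice[i] = trans[trans[i]];
  std::vector<int> identity(trans.size());
  std::iota(identity.begin(), identity.end(), 0);
  assert(twice == identity);
  return 0;
}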