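Note on the pattern shared by every hunk below (this note is not part of the original PR page): C++ concatenates adjacent string literals at compile time, so when an error message is split across source lines, the first literal needs a trailing space or punctuation at the break; otherwise the rendered message runs words together ("...out of rangeWhile maximum..."). A minimal, self-contained sketch of the effect, illustrative only and not code from this PR:

#include <cstdio>

int main() {
  // Adjacent literals are merged into one string by the compiler.
  const char* before =
      "Detected reduce size: %d out of range"    // no trailing space
      "While maximum supported is: 5";           // -> "...rangeWhile maximum..."
  const char* after =
      "Detected reduce size: %d out of range. "  // trailing ". " as added by the PR
      "While maximum supported is: 5";           // -> "...range. While maximum..."
  // "%d" is printed literally here; the point is only the concatenation.
  std::printf("%s\n%s\n", before, after);
  return 0;
}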
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/broadcast_tensors_grad_kernel.cc
@@ -189,7 +189,7 @@ void BroadcastTensorsGradKernel(const Context& dev_ctx,

default: {
PADDLE_THROW(
errors::InvalidArgument("Detected reduce size: %d out of range"
errors::InvalidArgument("Detected reduce size: %d out of range. "
"While maximum supported is: 5",
reduce_size));
}
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/lookup_table_grad_kernel.cc
@@ -85,7 +85,7 @@ void LookupTableGradKernel(const Context &dev_ctx,
0,
common::errors::InvalidArgument(
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input"
"expected >= 0 and < %ld, but got %ld. Please check input "
"value.",
N,
ids_data[i]));
3 changes: 2 additions & 1 deletion paddle/phi/kernels/cpu/tdm_sampler_kernel.cc
@@ -218,7 +218,8 @@ void TDMSamplerInner(const Context &dev_ctx,
layer_data[layer_offset[layer_idx] + sample_res],
node_id_max,
common::errors::InvalidArgument(
"Negative node id of OP(fluid.layers.tdm_sampler) at layer %ld"
"Negative node id of OP(fluid.layers.tdm_sampler) at layer "
"%ld, "
"expected >= %ld and <= %ld, but got %ld. Please check input "
"tdm tree structure and tdm travel info.",
layer_idx,
4 changes: 2 additions & 2 deletions paddle/phi/kernels/funcs/blas/blas_impl.h
@@ -1872,7 +1872,7 @@ void Blas<DeviceContext>::MatMulWithHead(const phi::DenseTensor &mat_a,
dim_a.width_ % head_number,
0,
common::errors::InvalidArgument(
"The first input width must be some times the head number"
"The first input width must be some times the head number, "
"but received first input width %d"
", head_number %d",
dim_a.width_,
@@ -1908,7 +1908,7 @@ void Blas<DeviceContext>::MatMulWithHead(const phi::DenseTensor &mat_a,
dim_a.width_ % head_number,
0,
common::errors::InvalidArgument(
"The second input width should be some times the head number"
"The second input width should be some times the head number, "
"but received second input width %d"
", head_number %d",
dim_b.width_,
12 changes: 6 additions & 6 deletions paddle/phi/kernels/funcs/cublaslt.h
@@ -61,7 +61,7 @@ class CublasLtHelper {
status,
CUBLAS_STATUS_SUCCESS,
common::errors::External(
"cublasLtMatmulDescCreate execution error"
"cublasLtMatmulDescCreate execution error, "
"refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
"information"));
cublasOperation_t op_transpose = CUBLAS_OP_T;
@@ -73,7 +73,7 @@ class CublasLtHelper {
status,
CUBLAS_STATUS_SUCCESS,
common::errors::External(
"cublasLtMatmulDescSetAttribute execution error"
"cublasLtMatmulDescSetAttribute execution error, "
"refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
"information"));

@@ -83,7 +83,7 @@ class CublasLtHelper {
status,
CUBLAS_STATUS_SUCCESS,
common::errors::External(
"cublasLtMatrixLayoutCreate execution error"
"cublasLtMatrixLayoutCreate execution error, "
"refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
"information"));

@@ -92,7 +92,7 @@ class CublasLtHelper {
status,
CUBLAS_STATUS_SUCCESS,
common::errors::External(
"cublasLtMatrixLayoutCreate execution error"
"cublasLtMatrixLayoutCreate execution error, "
"refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
"information"));

@@ -101,7 +101,7 @@ class CublasLtHelper {
status,
CUBLAS_STATUS_SUCCESS,
common::errors::External(
"cublasLtMatrixLayoutCreate execution error"
"cublasLtMatrixLayoutCreate execution error, "
"refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
"information"));

@@ -204,7 +204,7 @@ class CublasLtHelper {
status,
CUBLAS_STATUS_SUCCESS,
common::errors::External(
"cublasLtMatmul execution error"
"cublasLtMatmul execution error, "
"refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
"information"));
}
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/fused_gate_attention.h
@@ -993,7 +993,7 @@ class FlashAttnWithGating {

PADDLE_ENFORCE_NOT_NULL(
qkv_transpose_out,
- common::errors::NotFound("The input qkv_transpose_out can not be"
+ common::errors::NotFound("The input qkv_transpose_out can not be "
"nullptr when merge_qkv is true."));

int64_t q_size = config->GetQuerySize();
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/values_vectors_functor.h
@@ -47,7 +47,7 @@ static void CheckEighResult(const int batch, const int info) {
info,
0,
common::errors::PreconditionNotMet(
"For batch [%d]: the [%d] off-diagonal elements of an intermediate"
"For batch [%d]: the [%d] off-diagonal elements of an intermediate "
"tridiagonal form did not converge to zero",
batch,
info));
4 changes: 2 additions & 2 deletions paddle/phi/kernels/funcs/xpufft_util.h
@@ -83,8 +83,8 @@ class FFTConfig {
for (int i = 0; i < signal_ndim; ++i) {
if (signal_sizes[i] <= 8) {
PADDLE_THROW(phi::errors::InvalidArgument(
"XPU FFT requires all axes to have greater than 8 elements,"
"but axis %d has size %d.Set XFFT_DEBUG=1 environment variable"
"XPU FFT requires all axes to have greater than 8 elements, "
"but axis %d has size %d.Set XFFT_DEBUG=1 environment variable "
"to inspect dimensions.",
i,
signal_sizes[i]));
2 changes: 1 addition & 1 deletion paddle/phi/kernels/fusion/gpu/fmha_ref.h
@@ -671,7 +671,7 @@ class FMHARef {
qk_out_grad_tensor);
} else {
PADDLE_THROW(errors::InvalidArgument(
"Only used for the backward elementwise_add op when"
"Only used for the backward elementwise_add op when "
"dy is not needed and dx is not reduce"));
return;
}
2 changes: 1 addition & 1 deletion paddle/phi/kernels/fusion/gpu/fused_attention_utils.h
@@ -39,7 +39,7 @@ static void AllReduce(phi::DenseTensor &tensor, // NOLINT
pg = map->get(ring_id);
} else {
PADDLE_THROW(common::errors::Unimplemented(
"ring_id %d is not in ProcessGroupMap, please check related"
"ring_id %d is not in ProcessGroupMap, please check related "
"configurations and retry.",
ring_id));
}
@@ -225,7 +225,7 @@ void CrossAttentionXPUKernel(
return;
}
PADDLE_THROW(common::errors::Unimplemented(
"Not support q_dtype is %s, k_dtype is %s, k_dtype is %s"
"Not support q_dtype is %s, k_dtype is %s, k_dtype is %s "
"and qkv_dtype is %s.",
DataTypeToString(input_q.dtype()),
DataTypeToString(input_kv.dtype()),
2 changes: 1 addition & 1 deletion paddle/phi/kernels/fusion/xpu/qkv_attention_xpu_kernel.cc
@@ -321,7 +321,7 @@ void QKVAttentionXPUKernel(const Context& dev_ctx,

} else {
PADDLE_THROW(common::errors::Unimplemented(
"Not support q_dtype is %s, k_dtype is %s, k_dtype is %s"
"Not support q_dtype is %s, k_dtype is %s, k_dtype is %s "
"and qkv_dtype is %s.",
DataTypeToString(q.dtype()),
DataTypeToString(k.dtype()),
6 changes: 3 additions & 3 deletions paddle/phi/kernels/impl/beam_search_decode_kernel_impl.h
@@ -127,7 +127,7 @@ void BeamSearchDecodeOpKernel(const Context& dev_ctx,
step_num,
0UL,
common::errors::InvalidArgument(
"beam search steps, which is the"
"beam search steps, which is the "
"size of Input(Ids) TensorArray. beam search steps should "
"be larger than 0, but received %d. ",
step_num));
@@ -136,9 +136,9 @@ void BeamSearchDecodeOpKernel(const Context& dev_ctx,
source_num,
0UL,
common::errors::InvalidArgument(
"source_num is the sequence number of the"
"source_num is the sequence number of the "
"first decoding step, indicating by Input(Ids)[0].lod[0].size. "
"The number of source_num should be larger than"
"The number of source_num should be larger than "
"0, but received %d. ",
source_num));

2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/broadcast_tensors_kernel_impl.h
@@ -116,7 +116,7 @@ void BroadcastTensorsKernel(const Context& dev_ctx,
SWITCH_OUT_RANK_CASE(6)
default: {
PADDLE_THROW(common::errors::InvalidArgument(
"Target tensor rank out of range"
"Target tensor rank out of range. "
"Maximum supported rank for broadcast is: 6"));
}
}
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/kron_kernel_impl.h
@@ -36,7 +36,7 @@ inline DenseTensor UnsqueezeTo(const DenseTensor &src, int ndims) {
rank,
ndims,
errors::InvalidArgument(
"The input Tensor's rank should be less than or equal to ndims"
"The input Tensor's rank should be less than or equal to ndims. "
"Received input Tensor's rank = %d, ndims = %d",
rank,
ndims));
22 changes: 11 additions & 11 deletions paddle/phi/kernels/impl/matmul_kernel_impl.h
@@ -242,8 +242,8 @@ void MatMulFunctionImplWithBlas(
PADDLE_ENFORCE_EQ(
x_dims[x_ndim - 2],
N,
common::errors::InvalidArgument("Input(X) has error dim."
"X'dims[%d] must be equal to %d"
common::errors::InvalidArgument("Input(X) has error dim. "
"X'dims[%d] must be equal to %d. "
"But received X'dims[%d] is %d",
x_ndim - 2,
N,
@@ -253,8 +253,8 @@ void MatMulFunctionImplWithBlas(
PADDLE_ENFORCE_EQ(
x_dims[x_ndim - 1],
N,
common::errors::InvalidArgument("Input(X) has error dim."
"X'dims[%d] must be equal to %d"
common::errors::InvalidArgument("Input(X) has error dim. "
"X'dims[%d] must be equal to %d. "
"But received X'dims[%d] is %d",
x_ndim - 1,
N,
@@ -639,7 +639,7 @@ void MatMulFunctionImplWithCublasLt(
x_dims[x_ndim - 2],
N,
common::errors::InvalidArgument("Input(X) has error dim."
"X'dims[%d] must be equal to %d"
"X'dims[%d] must be equal to %d. "
"But received X'dims[%d] is %d",
x_ndim - 2,
N,
@@ -1034,7 +1034,7 @@ bool inline MatMulInt8Function(const phi::GPUContext& dev_ctx,
N,
common::errors::InvalidArgument(
"X's numbers must be equal to Y's numbers, "
"when X/Y's dims =1. But received X has [%d] elements, s"
"when X/Y's dims =1. But received X has [%d] elements, "
"received Y has [%d] elements",
M,
N));
@@ -1574,8 +1574,8 @@ bool inline MatMulInt8Function(const phi::GPUContext& dev_ctx,
PADDLE_ENFORCE_EQ(
x_dims[x_ndim - 2],
N,
common::errors::InvalidArgument("Input(X) has error dim."
"X'dims[%d] must be equal to %d"
common::errors::InvalidArgument("Input(X) has error dim. "
"X'dims[%d] must be equal to %d, "
"But received X'dims[%d] is %d",
x_ndim - 2,
N,
@@ -1585,8 +1585,8 @@ bool inline MatMulInt8Function(const phi::GPUContext& dev_ctx,
PADDLE_ENFORCE_EQ(
x_dims[x_ndim - 1],
N,
common::errors::InvalidArgument("Input(X) has error dim."
"X'dims[%d] must be equal to %d"
common::errors::InvalidArgument("Input(X) has error dim. "
"X'dims[%d] must be equal to %d, "
"But received X'dims[%d] is %d",
x_ndim - 1,
N,
@@ -2109,7 +2109,7 @@ void MatmulWithFlattenKernelInt8Impl(const Context& dev_ctx,
true,
common::errors::InvalidArgument(
"The dimension size K used in int8 mul must be a "
"multiple of 4 does not match the size (%d) currently"
"multiple of 4 does not match the size (%d) currently "
"contained in the container.",
x_matrix.dims()[1]));
