From bd67dd81520be4ad82108c4cff0d9a0b4b3d99f5 Mon Sep 17 00:00:00 2001
From: ooooo <3164076421@qq.com>
Date: Sun, 3 Aug 2025 18:14:00 +0800
Subject: [PATCH] refine error messages

---
 .../cpu/broadcast_tensors_grad_kernel.cc      |  2 +-
 .../kernels/cpu/lookup_table_grad_kernel.cc   |  2 +-
 paddle/phi/kernels/cpu/tdm_sampler_kernel.cc  |  3 ++-
 paddle/phi/kernels/funcs/blas/blas_impl.h     |  4 ++--
 paddle/phi/kernels/funcs/cublaslt.h           | 12 +++++-----
 .../phi/kernels/funcs/fused_gate_attention.h  |  2 +-
 .../kernels/funcs/values_vectors_functor.h    |  2 +-
 paddle/phi/kernels/funcs/xpufft_util.h        |  4 ++--
 paddle/phi/kernels/fusion/gpu/fmha_ref.h      |  2 +-
 .../fusion/gpu/fused_attention_utils.h        |  2 +-
 .../fusion/xpu/cross_attention_xpu_kernel.cc  |  2 +-
 .../fusion/xpu/qkv_attention_xpu_kernel.cc    |  2 +-
 .../impl/beam_search_decode_kernel_impl.h     |  6 ++---
 .../impl/broadcast_tensors_kernel_impl.h      |  2 +-
 paddle/phi/kernels/impl/kron_kernel_impl.h    |  2 +-
 paddle/phi/kernels/impl/matmul_kernel_impl.h  | 22 +++++++++----------
 16 files changed, 36 insertions(+), 35 deletions(-)

diff --git a/paddle/phi/kernels/cpu/broadcast_tensors_grad_kernel.cc b/paddle/phi/kernels/cpu/broadcast_tensors_grad_kernel.cc
index d731a12b1fc10b..ef661fe8019cd6 100644
--- a/paddle/phi/kernels/cpu/broadcast_tensors_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/broadcast_tensors_grad_kernel.cc
@@ -189,7 +189,7 @@ void BroadcastTensorsGradKernel(const Context& dev_ctx,
     default: {
       PADDLE_THROW(
-          errors::InvalidArgument("Detected reduce size: %d out of range"
+          errors::InvalidArgument("Detected reduce size: %d out of range. "
                                   "While maximum supported is: 5",
                                   reduce_size));
     }
diff --git a/paddle/phi/kernels/cpu/lookup_table_grad_kernel.cc b/paddle/phi/kernels/cpu/lookup_table_grad_kernel.cc
index c5e0dd4fe66e36..87d9c2248606ee 100644
--- a/paddle/phi/kernels/cpu/lookup_table_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/lookup_table_grad_kernel.cc
@@ -85,7 +85,7 @@ void LookupTableGradKernel(const Context &dev_ctx,
           0,
           common::errors::InvalidArgument(
               "Variable value (input) of OP(fluid.layers.embedding) "
-              "expected >= 0 and < %ld, but got %ld. Please check input"
+              "expected >= 0 and < %ld, but got %ld. Please check input "
               "value.",
               N,
               ids_data[i]));
diff --git a/paddle/phi/kernels/cpu/tdm_sampler_kernel.cc b/paddle/phi/kernels/cpu/tdm_sampler_kernel.cc
index f64e90decbe0a3..79b45e4b2e7ab0 100644
--- a/paddle/phi/kernels/cpu/tdm_sampler_kernel.cc
+++ b/paddle/phi/kernels/cpu/tdm_sampler_kernel.cc
@@ -218,7 +218,8 @@ void TDMSamplerInner(const Context &dev_ctx,
           layer_data[layer_offset[layer_idx] + sample_res],
           node_id_max,
           common::errors::InvalidArgument(
-              "Negative node id of OP(fluid.layers.tdm_sampler) at layer %ld"
+              "Negative node id of OP(fluid.layers.tdm_sampler) at layer "
+              "%ld, "
               "expected >= %ld and <= %ld, but got %ld. Please check input "
               "tdm tree structure and tdm travel info.",
               layer_idx,
diff --git a/paddle/phi/kernels/funcs/blas/blas_impl.h b/paddle/phi/kernels/funcs/blas/blas_impl.h
index 97d43ffd26771f..af6ea0b9c517c2 100644
--- a/paddle/phi/kernels/funcs/blas/blas_impl.h
+++ b/paddle/phi/kernels/funcs/blas/blas_impl.h
@@ -1872,7 +1872,7 @@ void Blas<DeviceContext>::MatMulWithHead(const phi::DenseTensor &mat_a,
       dim_a.width_ % head_number,
       0,
       common::errors::InvalidArgument(
-          "The first input width must be some times the head number"
+          "The first input width must be a multiple of the head number, "
           "but received first input width %d"
           ", head_number %d",
           dim_a.width_,
@@ -1908,7 +1908,7 @@ void Blas<DeviceContext>::MatMulWithHead(const phi::DenseTensor &mat_a,
       dim_a.width_ % head_number,
       0,
       common::errors::InvalidArgument(
-          "The second input width should be some times the head number"
+          "The second input width should be a multiple of the head number, "
          "but received second input width %d"
          ", head_number %d",
          dim_b.width_,
diff --git a/paddle/phi/kernels/funcs/cublaslt.h b/paddle/phi/kernels/funcs/cublaslt.h
index ecd40e5842426d..f3a497b4b5b7b8 100644
--- a/paddle/phi/kernels/funcs/cublaslt.h
+++ b/paddle/phi/kernels/funcs/cublaslt.h
@@ -61,7 +61,7 @@ class CublasLtHelper {
         status,
         CUBLAS_STATUS_SUCCESS,
         common::errors::External(
-            "cublasLtMatmulDescCreate execution error"
+            "cublasLtMatmulDescCreate execution error, "
             "refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
             "information"));
     cublasOperation_t op_transpose = CUBLAS_OP_T;
@@ -73,7 +73,7 @@ class CublasLtHelper {
         status,
         CUBLAS_STATUS_SUCCESS,
         common::errors::External(
-            "cublasLtMatmulDescSetAttribute execution error"
+            "cublasLtMatmulDescSetAttribute execution error, "
             "refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
             "information"));

@@ -83,7 +83,7 @@ class CublasLtHelper {
         status,
         CUBLAS_STATUS_SUCCESS,
         common::errors::External(
-            "cublasLtMatrixLayoutCreate execution error"
+            "cublasLtMatrixLayoutCreate execution error, "
            "refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
            "information"));

@@ -92,7 +92,7 @@ class CublasLtHelper {
         status,
         CUBLAS_STATUS_SUCCESS,
         common::errors::External(
-            "cublasLtMatrixLayoutCreate execution error"
+            "cublasLtMatrixLayoutCreate execution error, "
            "refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
            "information"));

@@ -101,7 +101,7 @@ class CublasLtHelper {
         status,
         CUBLAS_STATUS_SUCCESS,
         common::errors::External(
-            "cublasLtMatrixLayoutCreate execution error"
+            "cublasLtMatrixLayoutCreate execution error, "
            "refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
            "information"));

@@ -204,7 +204,7 @@ class CublasLtHelper {
         status,
         CUBLAS_STATUS_SUCCESS,
         common::errors::External(
-            "cublasLtMatmul execution error"
+            "cublasLtMatmul execution error, "
            "refer https://docs.nvidia.com/cuda/cublas/index.html to get more "
            "information"));
   }
diff --git a/paddle/phi/kernels/funcs/fused_gate_attention.h b/paddle/phi/kernels/funcs/fused_gate_attention.h
index 89070a1e26ce62..87b64411453b90 100644
--- a/paddle/phi/kernels/funcs/fused_gate_attention.h
+++ b/paddle/phi/kernels/funcs/fused_gate_attention.h
@@ -993,7 +993,7 @@ class FlashAttnWithGating {
     PADDLE_ENFORCE_NOT_NULL(
         qkv_transpose_out,
-        common::errors::NotFound("The input qkv_transpose_out can not be"
+        common::errors::NotFound("The input qkv_transpose_out cannot be "
                                  "nullptr when merge_qkv is true."));

     int64_t q_size = config->GetQuerySize();
diff --git a/paddle/phi/kernels/funcs/values_vectors_functor.h b/paddle/phi/kernels/funcs/values_vectors_functor.h
index 1258910267e085..0d93bad2524f26 100644
--- a/paddle/phi/kernels/funcs/values_vectors_functor.h
+++ b/paddle/phi/kernels/funcs/values_vectors_functor.h
@@ -47,7 +47,7 @@ static void CheckEighResult(const int batch, const int info) {
       info,
       0,
       common::errors::PreconditionNotMet(
-          "For batch [%d]: the [%d] off-diagonal elements of an intermediate"
+          "For batch [%d]: the [%d] off-diagonal elements of an intermediate "
           "tridiagonal form did not converge to zero",
           batch,
           info));
diff --git a/paddle/phi/kernels/funcs/xpufft_util.h b/paddle/phi/kernels/funcs/xpufft_util.h
index f8bfdbe3fdafd3..14f21dde763490 100644
--- a/paddle/phi/kernels/funcs/xpufft_util.h
+++ b/paddle/phi/kernels/funcs/xpufft_util.h
@@ -83,8 +83,8 @@ class FFTConfig {
     for (int i = 0; i < signal_ndim; ++i) {
       if (signal_sizes[i] <= 8) {
         PADDLE_THROW(phi::errors::InvalidArgument(
-            "XPU FFT requires all axes to have greater than 8 elements,"
-            "but axis %d has size %d.Set XFFT_DEBUG=1 environment variable"
+            "XPU FFT requires all axes to have more than 8 elements, "
+            "but axis %d has size %d. Set XFFT_DEBUG=1 environment variable "
             "to inspect dimensions.",
             i,
             signal_sizes[i]));
diff --git a/paddle/phi/kernels/fusion/gpu/fmha_ref.h b/paddle/phi/kernels/fusion/gpu/fmha_ref.h
index a41582c7076b3b..98e456f177e27e 100644
--- a/paddle/phi/kernels/fusion/gpu/fmha_ref.h
+++ b/paddle/phi/kernels/fusion/gpu/fmha_ref.h
@@ -671,7 +671,7 @@ class FMHARef {
           qk_out_grad_tensor);
     } else {
       PADDLE_THROW(errors::InvalidArgument(
-          "Only used for the backward elementwise_add op when"
+          "Only used for the backward elementwise_add op when "
           "dy is not needed and dx is not reduce"));
       return;
     }
diff --git a/paddle/phi/kernels/fusion/gpu/fused_attention_utils.h b/paddle/phi/kernels/fusion/gpu/fused_attention_utils.h
index 3b05db752e2d8f..a35c54aaea8e34 100644
--- a/paddle/phi/kernels/fusion/gpu/fused_attention_utils.h
+++ b/paddle/phi/kernels/fusion/gpu/fused_attention_utils.h
@@ -39,7 +39,7 @@ static void AllReduce(phi::DenseTensor &tensor,  // NOLINT
     pg = map->get(ring_id);
   } else {
     PADDLE_THROW(common::errors::Unimplemented(
-        "ring_id %d is not in ProcessGroupMap, please check related"
+        "ring_id %d is not in ProcessGroupMap, please check related "
         "configurations and retry.",
         ring_id));
   }
diff --git a/paddle/phi/kernels/fusion/xpu/cross_attention_xpu_kernel.cc b/paddle/phi/kernels/fusion/xpu/cross_attention_xpu_kernel.cc
index 6aae1f7d18c2bb..40e067c227b13e 100644
--- a/paddle/phi/kernels/fusion/xpu/cross_attention_xpu_kernel.cc
+++ b/paddle/phi/kernels/fusion/xpu/cross_attention_xpu_kernel.cc
@@ -225,7 +225,7 @@ void CrossAttentionXPUKernel(
     return;
   }
   PADDLE_THROW(common::errors::Unimplemented(
-      "Not support q_dtype is %s, k_dtype is %s, k_dtype is %s"
+      "Unsupported dtypes: q_dtype is %s, k_dtype is %s, v_dtype is %s "
      "and qkv_dtype is %s.",
      DataTypeToString(input_q.dtype()),
      DataTypeToString(input_kv.dtype()),
diff --git a/paddle/phi/kernels/fusion/xpu/qkv_attention_xpu_kernel.cc b/paddle/phi/kernels/fusion/xpu/qkv_attention_xpu_kernel.cc
index 3bdc06f6ed50a2..f5484361278086 100644
--- a/paddle/phi/kernels/fusion/xpu/qkv_attention_xpu_kernel.cc
+++ b/paddle/phi/kernels/fusion/xpu/qkv_attention_xpu_kernel.cc
@@ -321,7 +321,7 @@ void QKVAttentionXPUKernel(const Context& dev_ctx,

   } else {
     PADDLE_THROW(common::errors::Unimplemented(
-        "Not support q_dtype is %s, k_dtype is %s, k_dtype is %s"
+        "Unsupported dtypes: q_dtype is %s, k_dtype is %s, v_dtype is %s "
        "and qkv_dtype is %s.",
        DataTypeToString(q.dtype()),
        DataTypeToString(k.dtype()),
diff --git a/paddle/phi/kernels/impl/beam_search_decode_kernel_impl.h b/paddle/phi/kernels/impl/beam_search_decode_kernel_impl.h
index ab40e26f83c219..59d4a4fa1ed4a4 100644
--- a/paddle/phi/kernels/impl/beam_search_decode_kernel_impl.h
+++ b/paddle/phi/kernels/impl/beam_search_decode_kernel_impl.h
@@ -127,7 +127,7 @@ void BeamSearchDecodeOpKernel(const Context& dev_ctx,
       step_num,
       0UL,
       common::errors::InvalidArgument(
-          "beam search steps, which is the"
+          "beam search steps, which is the "
           "size of Input(Ids) TensorArray. beam search steps should "
           "be larger than 0, but received %d. ",
           step_num));
@@ -136,9 +136,9 @@ void BeamSearchDecodeOpKernel(const Context& dev_ctx,
       source_num,
       0UL,
       common::errors::InvalidArgument(
-          "source_num is the sequence number of the"
+          "source_num is the sequence number of the "
           "first decoding step, indicating by Input(Ids)[0].lod[0].size. "
-          "The number of source_num should be larger than"
+          "The number of source_num should be larger than "
           "0, but received %d. ",
           source_num));

diff --git a/paddle/phi/kernels/impl/broadcast_tensors_kernel_impl.h b/paddle/phi/kernels/impl/broadcast_tensors_kernel_impl.h
index 66821809c53bd9..fe734bfb3dc441 100644
--- a/paddle/phi/kernels/impl/broadcast_tensors_kernel_impl.h
+++ b/paddle/phi/kernels/impl/broadcast_tensors_kernel_impl.h
@@ -116,7 +116,7 @@ void BroadcastTensorsKernel(const Context& dev_ctx,
     SWITCH_OUT_RANK_CASE(6)
     default: {
       PADDLE_THROW(common::errors::InvalidArgument(
-          "Target tensor rank out of range"
+          "Target tensor rank out of range. "
           "Maximum supported rank for broadcast is: 6"));
     }
   }
diff --git a/paddle/phi/kernels/impl/kron_kernel_impl.h b/paddle/phi/kernels/impl/kron_kernel_impl.h
index bf5eba3f7de0c9..28d503fa3c1921 100644
--- a/paddle/phi/kernels/impl/kron_kernel_impl.h
+++ b/paddle/phi/kernels/impl/kron_kernel_impl.h
@@ -36,7 +36,7 @@ inline DenseTensor UnsqueezeTo(const DenseTensor &src, int ndims) {
       rank,
       ndims,
       errors::InvalidArgument(
-          "The input Tensor's rank should be less than or equal to ndims"
+          "The input Tensor's rank should be less than or equal to ndims. "
           "Received input Tensor's rank = %d, ndims = %d",
           rank,
           ndims));
diff --git a/paddle/phi/kernels/impl/matmul_kernel_impl.h b/paddle/phi/kernels/impl/matmul_kernel_impl.h
index 55ed029126d31d..957e33aab09c3c 100644
--- a/paddle/phi/kernels/impl/matmul_kernel_impl.h
+++ b/paddle/phi/kernels/impl/matmul_kernel_impl.h
@@ -242,8 +242,8 @@ void MatMulFunctionImplWithBlas(
     PADDLE_ENFORCE_EQ(
         x_dims[x_ndim - 2],
         N,
-        common::errors::InvalidArgument("Input(X) has error dim."
-                                        "X'dims[%d] must be equal to %d"
+        common::errors::InvalidArgument("Input(X) has error dim. "
+                                        "X'dims[%d] must be equal to %d. "
                                         "But received X'dims[%d] is %d",
                                         x_ndim - 2,
                                         N,
@@ -253,8 +253,8 @@ void MatMulFunctionImplWithBlas(
     PADDLE_ENFORCE_EQ(
         x_dims[x_ndim - 1],
         N,
-        common::errors::InvalidArgument("Input(X) has error dim."
-                                        "X'dims[%d] must be equal to %d"
+        common::errors::InvalidArgument("Input(X) has error dim. "
+                                        "X'dims[%d] must be equal to %d. "
                                         "But received X'dims[%d] is %d",
                                         x_ndim - 1,
                                         N,
@@ -639,7 +639,7 @@ void MatMulFunctionImplWithCublasLt(
         x_dims[x_ndim - 2],
         N,
         common::errors::InvalidArgument("Input(X) has error dim."
-                                        "X'dims[%d] must be equal to %d"
+                                        "X'dims[%d] must be equal to %d. "
                                         "But received X'dims[%d] is %d",
                                         x_ndim - 2,
                                         N,
@@ -1034,7 +1034,7 @@ bool inline MatMulInt8Function(const phi::GPUContext& dev_ctx,
         N,
         common::errors::InvalidArgument(
             "X's numbers must be equal to Y's numbers, "
-            "when X/Y's dims =1. But received X has [%d] elements, s"
+            "when X/Y's dims = 1. But received X has [%d] elements, "
             "received Y has [%d] elements",
             M,
             N));
@@ -1574,8 +1574,8 @@ bool inline MatMulInt8Function(const phi::GPUContext& dev_ctx,
     PADDLE_ENFORCE_EQ(
         x_dims[x_ndim - 2],
         N,
-        common::errors::InvalidArgument("Input(X) has error dim."
-                                        "X'dims[%d] must be equal to %d"
+        common::errors::InvalidArgument("Input(X) has error dim. "
+                                        "X'dims[%d] must be equal to %d. "
                                         "But received X'dims[%d] is %d",
                                         x_ndim - 2,
                                         N,
@@ -1585,8 +1585,8 @@ bool inline MatMulInt8Function(const phi::GPUContext& dev_ctx,
     PADDLE_ENFORCE_EQ(
         x_dims[x_ndim - 1],
         N,
-        common::errors::InvalidArgument("Input(X) has error dim."
-                                        "X'dims[%d] must be equal to %d"
+        common::errors::InvalidArgument("Input(X) has error dim. "
+                                        "X'dims[%d] must be equal to %d. "
                                         "But received X'dims[%d] is %d",
                                         x_ndim - 1,
                                         N,
@@ -2109,7 +2109,7 @@ void MatmulWithFlattenKernelInt8Impl(const Context& dev_ctx,
       true,
       common::errors::InvalidArgument(
          "The dimension size K used in int8 mul must be a "
-          "multiple of 4 does not match the size (%d) currently"
+          "multiple of 4 does not match the size (%d) currently "
          "contained in the container.",
          x_matrix.dims()[1]));