diff --git a/paddle/cinn/hlir/framework/pir/utils.cc b/paddle/cinn/hlir/framework/pir/utils.cc
index 42a3ab376f374e..2d7d01b168e809 100644
--- a/paddle/cinn/hlir/framework/pir/utils.cc
+++ b/paddle/cinn/hlir/framework/pir/utils.cc
@@ -139,7 +139,7 @@ class OpTransInfo {
       "pool2d",
       "pool2d_grad",
       "pool3d",
-      "pool3d_grad"
+      "pool3d_grad",
       "split",
       "matmul",
       "matmul_grad",
diff --git a/paddle/cinn/hlir/op/custom_call.cc b/paddle/cinn/hlir/op/custom_call.cc
index cd32cd0a793f2a..364224e4327110 100644
--- a/paddle/cinn/hlir/op/custom_call.cc
+++ b/paddle/cinn/hlir/op/custom_call.cc
@@ -212,7 +212,7 @@ std::vector<ir::Expr> CustomCallArgsForCublas(
       (x_num_col_dims > 0 && y_num_col_dims > 0),
       true,
       ::common::errors::InvalidArgument(
-          "x_num_col_dims and y_num_cole_dims should both be 0 or positive"
+          "x_num_col_dims and y_num_cole_dims should both be 0 or positive, "
           "now x_num_col_dims is %d and y_num_col_dims is %d",
           x_num_col_dims,
           y_num_col_dims));
@@ -634,13 +634,13 @@ std::vector<ir::Expr> CustomCallArgsForCudnnConvBackwardData(
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding"),
       true,
-      ::common::errors::NotFound("The CudnnConvBackwardData custom_call"
+      ::common::errors::NotFound("The CudnnConvBackwardData custom_call "
                                  "must has attribute \"padding\""));
   auto padding = std::get<std::vector<int>>(attr_store.at("padding"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride"),
       true,
-      ::common::errors::NotFound("The CudnnConvBackwardData custom_call"
+      ::common::errors::NotFound("The CudnnConvBackwardData custom_call "
                                  "must has attribute \"stride\""));
   auto stride = std::get<std::vector<int>>(attr_store.at("stride"));
   auto dilation = attr_store.count("dilation")
@@ -716,13 +716,13 @@ std::vector<ir::Expr> CustomCallArgsForCudnnConvBackwardFilter(
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding"),
       true,
-      ::common::errors::NotFound("The CudnnConvBackwardFilter custom_call"
+      ::common::errors::NotFound("The CudnnConvBackwardFilter custom_call "
                                  "must has attribute \"padding\""));
   auto padding = std::get<std::vector<int>>(attr_store.at("padding"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride"),
       true,
-      ::common::errors::NotFound("The CudnnConvBackwardFilter custom_call"
+      ::common::errors::NotFound("The CudnnConvBackwardFilter custom_call "
                                  "must has attribute \"stride\""));
   auto stride = std::get<std::vector<int>>(attr_store.at("stride"));
   auto dilation = attr_store.count("dilation")
@@ -799,31 +799,31 @@ std::vector<ir::Expr> CustomCallArgsForCudnnPoolForward(
   PADDLE_ENFORCE_EQ(
       attr_store.count("kernel_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolForward custom_call"
+      ::common::errors::NotFound("The CudnnPoolForward custom_call "
                                  "must has attribute \"kernel_size\""));
   auto kernel = std::get<std::vector<int>>(attr_store.at("kernel_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolForward custom_call"
+      ::common::errors::NotFound("The CudnnPoolForward custom_call "
                                  "must has attribute \"padding_size\""));
   auto padding = std::get<std::vector<int>>(attr_store.at("padding_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolForward custom_call"
+      ::common::errors::NotFound("The CudnnPoolForward custom_call "
                                  "must has attribute \"stride_size\""));
   auto stride = std::get<std::vector<int>>(attr_store.at("stride_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("pool_type"),
       true,
-      ::common::errors::NotFound("The CudnnPoolForward custom_call"
+      ::common::errors::NotFound("The CudnnPoolForward custom_call "
                                  "must has attribute \"pool_type\""));
   auto pool_type = std::get<std::string>(attr_store.at("pool_type"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("data_format"),
       true,
-      ::common::errors::NotFound("The CudnnPoolForward custom_call"
+      ::common::errors::NotFound("The CudnnPoolForward custom_call "
                                  "must has attribute \"data_format\""));
   std::string data_format =
       std::get<std::string>(attr_store.at("data_format"));
@@ -893,31 +893,31 @@ std::vector<ir::Expr> CustomCallArgsForCudnnPoolBackward(
   PADDLE_ENFORCE_EQ(
       attr_store.count("kernel_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolBackward custom_call"
+      ::common::errors::NotFound("The CudnnPoolBackward custom_call "
                                  "must has attribute \"kernel_size\""));
   auto kernel = std::get<std::vector<int>>(attr_store.at("kernel_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolBackward custom_call"
+      ::common::errors::NotFound("The CudnnPoolBackward custom_call "
                                  "must has attribute \"padding_size\""));
   auto padding = std::get<std::vector<int>>(attr_store.at("padding_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolBackward custom_call"
+      ::common::errors::NotFound("The CudnnPoolBackward custom_call "
                                  "must has attribute \"stride_size\""));
   auto stride = std::get<std::vector<int>>(attr_store.at("stride_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("pool_type"),
       true,
-      ::common::errors::NotFound("The CudnnPoolBackward custom_call"
+      ::common::errors::NotFound("The CudnnPoolBackward custom_call "
                                  "must has attribute \"pool_type\""));
   auto pool_type = std::get<std::string>(attr_store.at("pool_type"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("data_format"),
       true,
-      ::common::errors::NotFound("The CudnnPoolBackward custom_call"
+      ::common::errors::NotFound("The CudnnPoolBackward custom_call "
                                  "must has attribute \"data_format\""));
   std::string data_format =
       std::get<std::string>(attrs.attr_store.at("data_format"));
@@ -1124,21 +1124,22 @@ std::vector<ir::Expr> CustomCallArgsForTriangularSolve(
   PADDLE_ENFORCE_EQ(
       attr_store.count("left_side"),
       true,
-      ::common::errors::NotFound("The TriangularSolve custom_call"
+      ::common::errors::NotFound("The TriangularSolve custom_call "
                                  "must has attribute \"left_side\""));
-  PADDLE_ENFORCE_EQ(attr_store.count("upper"),
-                    true,
-                    ::common::errors::NotFound("The TriangularSolve custom_call"
-                                               "must has attribute \"upper\""));
+  PADDLE_ENFORCE_EQ(
+      attr_store.count("upper"),
+      true,
+      ::common::errors::NotFound("The TriangularSolve custom_call "
+                                 "must has attribute \"upper\""));
   PADDLE_ENFORCE_EQ(
       attr_store.count("transpose_a"),
       true,
-      ::common::errors::NotFound("The TriangularSolve custom_call"
+      ::common::errors::NotFound("The TriangularSolve custom_call "
                                  "must has attribute \"transpose_a\""));
   PADDLE_ENFORCE_EQ(
       attr_store.count("unit_diagonal"),
       true,
-      ::common::errors::NotFound("The TriangularSolve custom_call"
+      ::common::errors::NotFound("The TriangularSolve custom_call "
                                  "must has attribute \"unit_diagonal\""));
 
   ir::Tensor a = inputs[0];
diff --git a/paddle/cinn/hlir/op/elementwise.cc b/paddle/cinn/hlir/op/elementwise.cc
index 2d643d6c09d946..c8a85070a2ab32 100644
--- a/paddle/cinn/hlir/op/elementwise.cc
+++ b/paddle/cinn/hlir/op/elementwise.cc
@@ -92,7 +92,7 @@ std::shared_ptr<OpStrategy> StrategyForElementwise(
   PADDLE_ENFORCE_EQ(
       pack_args.size(),
       2U,
-      ::common::errors::InvalidArgument("the size of pack_args should be"
+      ::common::errors::InvalidArgument("the size of pack_args should be "
                                         "equal to 2, but got %d.",
                                         pack_args.size()));
   PADDLE_ENFORCE_EQ(pack_args[1].is_string(),
diff --git a/paddle/cinn/hlir/op/nn.cc b/paddle/cinn/hlir/op/nn.cc
index 94f62ba9a1067d..a524d644697278 100644
--- a/paddle/cinn/hlir/op/nn.cc
+++ b/paddle/cinn/hlir/op/nn.cc
@@ -424,7 +424,7 @@ std::shared_ptr<OpStrategy> StrategyForConv2d(
   PADDLE_ENFORCE(out.size() == 3U || out.size() == 2U || out.size() == 5U ||
                      out.size() == 12U,
                  ::common::errors::InvalidArgument(
-                     "The output tensor sizes of conv2d op in conv2d op"
+                     "The output tensor sizes of conv2d op in conv2d op "
                      "should be 2 or 3 or 5."));
 
   *ret = CINNValuePack{res};
diff --git a/paddle/cinn/lang/builtin.cc b/paddle/cinn/lang/builtin.cc
index 70802871621316..8b37c4d8ea16c0 100644
--- a/paddle/cinn/lang/builtin.cc
+++ b/paddle/cinn/lang/builtin.cc
@@ -156,7 +156,7 @@ Expr min_value(const Type& type) {
   PADDLE_ENFORCE_EQ(type.lanes(),
                     1,
                     ::common::errors::InvalidArgument(
-                        "The value of min type's lanes is incorrect"
+                        "The value of min type's lanes is incorrect. "
                         "Expected value is 1, but receive %d. ",
                         type.lanes()));
 #define FOR_CASE(type__) \
@@ -183,7 +183,7 @@ Expr max_value(const Type& type) {
   PADDLE_ENFORCE_EQ(type.lanes(),
                     1,
                     ::common::errors::InvalidArgument(
-                        "The value of max type's lanes is incorrect"
+                        "The value of max type's lanes is incorrect. "
                         "Expected value is 1, but receive %d. ",
                         type.lanes()));
 
@@ -213,7 +213,7 @@ Expr Epsilon(const Type& type) {
   PADDLE_ENFORCE_EQ(type.lanes(),
                     1,
                     ::common::errors::InvalidArgument(
-                        "The value of epsilon type's lanes is incorrect"
+                        "The value of epsilon type's lanes is incorrect. "
                         "Expected value is 1, but receive %d. ",
                         type.lanes()));
 
@@ -280,7 +280,7 @@ Expr Infinity(const Type& type) {
   PADDLE_ENFORCE_EQ(type.lanes(),
                     1U,
                     ::common::errors::InvalidArgument(
-                        "The value of infinity type's lanes is incorrect"
+                        "The value of infinity type's lanes is incorrect. "
                         "Expected value is 1, but receive %d. ",
                         type.lanes()));
   if (type.is_float()) {
diff --git a/paddle/cinn/lang/compute.cc b/paddle/cinn/lang/compute.cc
index 8fa4f0397fcb3e..efd37c7b62ef81 100644
--- a/paddle/cinn/lang/compute.cc
+++ b/paddle/cinn/lang/compute.cc
@@ -44,7 +44,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     1,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 1, but receive %d. ",
                         axis.size()));
   return fn(axis[0]);
@@ -63,7 +63,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     2,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 2, but receive %d. ",
                         axis.size()));
   return fn(axis[0], axis[1]);
@@ -82,7 +82,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     3,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 3, but receive %d. ",
                         axis.size()));
   return fn(axis[0], axis[1], axis[2]);
@@ -101,7 +101,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     4,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 4, but receive %d. ",
                         axis.size()));
   return fn(axis[0], axis[1], axis[2], axis[3]);
@@ -120,7 +120,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     5,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 5, but receive %d. ",
                         axis.size()));
   return fn(axis[0], axis[1], axis[2], axis[3], axis[4]);
@@ -139,7 +139,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     6,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 6, but receive %d. ",
                         axis.size()));
   return fn(axis[0], axis[1], axis[2], axis[3], axis[4], axis[5]);
diff --git a/paddle/common/flags.cc b/paddle/common/flags.cc
index 92f81f256424d8..7f00be545923e1 100644
--- a/paddle/common/flags.cc
+++ b/paddle/common/flags.cc
@@ -63,7 +63,7 @@ PHI_DEFINE_EXPORTED_int32(paddle_num_threads,
  */
 PHI_DEFINE_EXPORTED_int32(low_precision_op_list,
                           0,
-                          "Setting the level of low precision op"
+                          "Setting the level of low precision op "
                           "list printing. It will be return the "
                           "low precision op list of current module.");
 
@@ -171,8 +171,8 @@ PHI_DEFINE_EXPORTED_string(
     "This option is useful when doing multi process training and "
     "each process have only one device (GPU). If you want to use "
     "all visible devices, set this to empty string. NOTE: the "
-    "reason of doing this is that we want to use P2P communication"
-    "between GPU devices, use CUDA_VISIBLE_DEVICES can only use"
+    "reason of doing this is that we want to use P2P communication "
+    "between GPU devices, use CUDA_VISIBLE_DEVICES can only use "
     "share-memory only.");
 
 #endif
@@ -759,7 +759,7 @@ PHI_DEFINE_EXPORTED_int32(
     0,
     "The maximum number of inplace grad_add. When doing "
     "gradient accumulation, if the number of gradients need to that "
-    "less FLAGS_max_inplace_grad_add, than it will be use several grad_add"
+    "less FLAGS_max_inplace_grad_add, than it will be use several grad_add "
    "instead of sum. Default is 0.");
 
 /**
@@ -1360,7 +1360,7 @@ PHI_DEFINE_EXPORTED_bool(
     false,
     "Doing memory benchmark. It will make deleting scope synchronized, "
     "and add some memory usage logs."
-    "Default cuda is asynchronous device, set to True will"
+    "Default cuda is asynchronous device, set to True will "
     "force op run in synchronous mode.");
 
 PHI_DEFINE_EXPORTED_bool(eager_communication_connection,
diff --git a/paddle/fluid/distributed/collective/bkcl_tools.h b/paddle/fluid/distributed/collective/bkcl_tools.h
index 31d1f7ee42e287..ba984a10099a6a 100644
--- a/paddle/fluid/distributed/collective/bkcl_tools.h
+++ b/paddle/fluid/distributed/collective/bkcl_tools.h
@@ -72,7 +72,7 @@ class XPUEventManager {
     PADDLE_ENFORCE_EQ(device_index,
                       device_index_,
                       common::errors::PreconditionNotMet(
-                          "XPUContext's device %d does not match"
+                          "XPUContext's device %d does not match. "
                           "Event's device %d",
                           device_index,
                           device_index_));
diff --git a/paddle/fluid/distributed/collective/reducer.cc b/paddle/fluid/distributed/collective/reducer.cc
index cf6d8211eedd14..a387e43a336f00 100644
--- a/paddle/fluid/distributed/collective/reducer.cc
+++ b/paddle/fluid/distributed/collective/reducer.cc
@@ -753,13 +753,13 @@ void EagerReducer::PrepareForBackward(const std::vector<Tensor> &outputs) {
 }
 
 void EagerReducer::AddDistHook(size_t var_index) {
-  PADDLE_ENFORCE_LT(
-      var_index,
-      variable_locators_.size(),
-      common::errors::OutOfRange("Out of bounds variable index. it must be less"
-                                 "than %d, but it is %d",
-                                 variable_locators_.size(),
-                                 var_index));
+  PADDLE_ENFORCE_LT(var_index,
+                    variable_locators_.size(),
+                    common::errors::OutOfRange(
+                        "Out of bounds variable index. it must be less "
+                        "than %d, but it is %d",
+                        variable_locators_.size(),
+                        var_index));
 
   // gradient synchronization is not required when grad_need_hooks_ is false.
   if (!grad_need_hooks_) {
diff --git a/paddle/fluid/eager/backward.cc b/paddle/fluid/eager/backward.cc
index 8bd7d0a5ed528d..01de85e3b69fbf 100644
--- a/paddle/fluid/eager/backward.cc
+++ b/paddle/fluid/eager/backward.cc
@@ -186,7 +186,7 @@ std::vector<paddle::Tensor> RunBackward(
   PADDLE_ENFORCE(
       grad_tensors.size() == tensors.size(),
       common::errors::Fatal(
-          "Detected size mismatch between tensors and grad_tensors"
+          "Detected size mismatch between tensors and grad_tensors, "
           "grad_tensors should either have "
           "size = 0 or same size as tensors."));
   // Feed given tensor if it's provided
@@ -419,7 +419,7 @@ std::vector<paddle::Tensor> RunBackward(
       PADDLE_ENFORCE(
           node_in_degree_map[next_node] >= 0,
           common::errors::Fatal(
-              "Detected in-degree value smaller than zero. For Node: %s"
+              "Detected in-degree value smaller than zero. For Node: %s, "
              "Node's in-degree cannot be negative.",
              next_node->name()));
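
Background for the changes above: C++ concatenates adjacent string literals at compile time, so an error message split across source lines needs an explicit trailing space, and a brace-initializer list needs its trailing comma, or the pieces silently fuse. The following standalone snippet is illustrative only (not part of the patch, and the strings are copied from the messages above merely as examples); it reproduces both failure modes:

// why_adjacent_literals_matter.cc -- illustrative sketch, not Paddle code.
#include <cstdio>
#include <string>
#include <unordered_set>

int main() {
  // Without a trailing space, the two literals fuse into
  // "...custom_callmust has attribute...".
  const char* broken = "The CudnnPoolForward custom_call"
                       "must has attribute \"kernel_size\"";
  const char* fixed = "The CudnnPoolForward custom_call "
                      "must has attribute \"kernel_size\"";
  std::printf("%s\n%s\n", broken, fixed);

  // Without the comma after "pool3d_grad", the initializer list holds a
  // single element "pool3d_gradsplit" instead of the two intended op names.
  std::unordered_set<std::string> ops = {"pool3d_grad"
                                         "split"};
  std::printf("ops.size() = %zu\n", ops.size());  // prints 1, not 2
  return 0;
}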