2 changes: 1 addition & 1 deletion paddle/cinn/hlir/framework/pir/utils.cc
@@ -139,7 +139,7 @@ class OpTransInfo {
       "pool2d",
       "pool2d_grad",
       "pool3d",
-      "pool3d_grad"
+      "pool3d_grad",
       "split",
       "matmul",
       "matmul_grad",
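Every hunk in this PR fixes the same C++ pitfall: adjacent string literals separated only by whitespace are concatenated into a single literal at compile time, so a missing comma silently drops a list element and a missing trailing space glues two words together in an error message. A minimal standalone sketch of both failure modes (illustrative code, not from the PR; the literals are borrowed from the hunks above and below):

#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Missing comma: "pool3d_grad" and "split" fuse into one literal, so the
  // initializer list holds three entries instead of four.
  std::vector<std::string> ops = {
      "pool3d",
      "pool3d_grad"  // <- no comma, as in the pre-fix code above
      "split",
      "matmul",
  };
  std::printf("ops.size() = %zu\n", ops.size());  // prints 3
  std::printf("ops[1] = %s\n", ops[1].c_str());   // pool3d_gradsplit

  // Missing trailing space: a message wrapped across source lines loses the
  // word boundary at the join point.
  const char* msg =
      "The CudnnPoolForward custom_call"  // <- no trailing space
      "must has attribute \"kernel_size\"";
  std::printf("%s\n", msg);  // ...custom_callmust has...
  return 0;
}

Restoring the comma keeps all four list entries, and restoring the trailing space keeps the word boundary in the rendered message, which is exactly what the remaining hunks do.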
45 changes: 23 additions & 22 deletions paddle/cinn/hlir/op/custom_call.cc
@@ -212,7 +212,7 @@ std::vector<ir::Expr> CustomCallArgsForCublas(
       (x_num_col_dims > 0 && y_num_col_dims > 0),
       true,
       ::common::errors::InvalidArgument(
-          "x_num_col_dims and y_num_cole_dims should both be 0 or positive"
+          "x_num_col_dims and y_num_cole_dims should both be 0 or positive, "
           "now x_num_col_dims is %d and y_num_col_dims is %d",
           x_num_col_dims,
           y_num_col_dims));
@@ -634,13 +634,13 @@ std::vector<ir::Expr> CustomCallArgsForCudnnConvBackwardData(
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding"),
       true,
-      ::common::errors::NotFound("The CudnnConvBackwardData custom_call"
+      ::common::errors::NotFound("The CudnnConvBackwardData custom_call "
                                  "must has attribute \"padding\""));
   auto padding = std::get<std::vector<int>>(attr_store.at("padding"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride"),
       true,
-      ::common::errors::NotFound("The CudnnConvBackwardData custom_call"
+      ::common::errors::NotFound("The CudnnConvBackwardData custom_call "
                                  "must has attribute \"stride\""));
   auto stride = std::get<std::vector<int>>(attr_store.at("stride"));
   auto dilation = attr_store.count("dilation")
@@ -716,13 +716,13 @@ std::vector<ir::Expr> CustomCallArgsForCudnnConvBackwardFilter(
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding"),
       true,
-      ::common::errors::NotFound("The CudnnConvBackwardFilter custom_call"
+      ::common::errors::NotFound("The CudnnConvBackwardFilter custom_call "
                                  "must has attribute \"padding\""));
   auto padding = std::get<std::vector<int>>(attr_store.at("padding"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride"),
       true,
-      ::common::errors::NotFound("The CudnnConvBackwardFilter custom_call"
+      ::common::errors::NotFound("The CudnnConvBackwardFilter custom_call "
                                  "must has attribute \"stride\""));
   auto stride = std::get<std::vector<int>>(attr_store.at("stride"));
   auto dilation = attr_store.count("dilation")
@@ -799,31 +799,31 @@ std::vector<ir::Expr> CustomCallArgsForCudnnPoolForward(
   PADDLE_ENFORCE_EQ(
       attr_store.count("kernel_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolForward custom_call"
+      ::common::errors::NotFound("The CudnnPoolForward custom_call "
                                  "must has attribute \"kernel_size\""));
   auto kernel = std::get<std::vector<int>>(attr_store.at("kernel_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolForward custom_call"
+      ::common::errors::NotFound("The CudnnPoolForward custom_call "
                                  "must has attribute \"padding_size\""));
   auto padding = std::get<std::vector<int>>(attr_store.at("padding_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolForward custom_call"
+      ::common::errors::NotFound("The CudnnPoolForward custom_call "
                                  "must has attribute \"stride_size\""));
   auto stride = std::get<std::vector<int>>(attr_store.at("stride_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("pool_type"),
       true,
-      ::common::errors::NotFound("The CudnnPoolForward custom_call"
+      ::common::errors::NotFound("The CudnnPoolForward custom_call "
                                  "must has attribute \"pool_type\""));
   auto pool_type = std::get<std::string>(attr_store.at("pool_type"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("data_format"),
       true,
-      ::common::errors::NotFound("The CudnnPoolForward custom_call"
+      ::common::errors::NotFound("The CudnnPoolForward custom_call "
                                  "must has attribute \"data_format\""));
   std::string data_format = std::get<std::string>(attr_store.at("data_format"));

@@ -893,31 +893,31 @@ std::vector<ir::Expr> CustomCallArgsForCudnnPoolBackward(
   PADDLE_ENFORCE_EQ(
       attr_store.count("kernel_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolBackward custom_call"
+      ::common::errors::NotFound("The CudnnPoolBackward custom_call "
                                  "must has attribute \"kernel_size\""));
   auto kernel = std::get<std::vector<int>>(attr_store.at("kernel_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolBackward custom_call"
+      ::common::errors::NotFound("The CudnnPoolBackward custom_call "
                                  "must has attribute \"padding_size\""));
   auto padding = std::get<std::vector<int>>(attr_store.at("padding_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride_size"),
       true,
-      ::common::errors::NotFound("The CudnnPoolBackward custom_call"
+      ::common::errors::NotFound("The CudnnPoolBackward custom_call "
                                  "must has attribute \"stride_size\""));
   auto stride = std::get<std::vector<int>>(attr_store.at("stride_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("pool_type"),
       true,
-      ::common::errors::NotFound("The CudnnPoolBackward custom_call"
+      ::common::errors::NotFound("The CudnnPoolBackward custom_call "
                                  "must has attribute \"pool_type\""));
   auto pool_type = std::get<std::string>(attr_store.at("pool_type"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("data_format"),
       true,
-      ::common::errors::NotFound("The CudnnPoolBackward custom_call"
+      ::common::errors::NotFound("The CudnnPoolBackward custom_call "
                                  "must has attribute \"data_format\""));
   std::string data_format =
       std::get<std::string>(attrs.attr_store.at("data_format"));
@@ -1124,21 +1124,22 @@ std::vector<ir::Expr> CustomCallArgsForTriangularSolve(
   PADDLE_ENFORCE_EQ(
       attr_store.count("left_side"),
       true,
-      ::common::errors::NotFound("The TriangularSolve custom_call"
+      ::common::errors::NotFound("The TriangularSolve custom_call "
                                  "must has attribute \"left_side\""));
-  PADDLE_ENFORCE_EQ(attr_store.count("upper"),
-                    true,
-                    ::common::errors::NotFound("The TriangularSolve custom_call"
-                                               "must has attribute \"upper\""));
+  PADDLE_ENFORCE_EQ(
+      attr_store.count("upper"),
+      true,
+      ::common::errors::NotFound("The TriangularSolve custom_call "
+                                 "must has attribute \"upper\""));
   PADDLE_ENFORCE_EQ(
       attr_store.count("transpose_a"),
       true,
-      ::common::errors::NotFound("The TriangularSolve custom_call"
+      ::common::errors::NotFound("The TriangularSolve custom_call "
                                  "must has attribute \"transpose_a\""));
   PADDLE_ENFORCE_EQ(
       attr_store.count("unit_diagonal"),
       true,
-      ::common::errors::NotFound("The TriangularSolve custom_call"
+      ::common::errors::NotFound("The TriangularSolve custom_call "
                                  "must has attribute \"unit_diagonal\""));

   ir::Tensor a = inputs[0];
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/op/elementwise.cc
@@ -92,7 +92,7 @@ std::shared_ptr<OpStrategy> StrategyForElementwise(
   PADDLE_ENFORCE_EQ(
       pack_args.size(),
       2U,
-      ::common::errors::InvalidArgument("the size of pack_args should be"
+      ::common::errors::InvalidArgument("the size of pack_args should be "
                                         "equal to 2, but got %d.",
                                         pack_args.size()));
   PADDLE_ENFORCE_EQ(pack_args[1].is_string(),
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/op/nn.cc
@@ -424,7 +424,7 @@ std::shared_ptr<OpStrategy> StrategyForConv2d(
   PADDLE_ENFORCE(out.size() == 3U || out.size() == 2U || out.size() == 5U ||
                      out.size() == 12U,
                  ::common::errors::InvalidArgument(
-                     "The output tensor sizes of conv2d op in conv2d op"
+                     "The output tensor sizes of conv2d op in conv2d op "
                      "should be 2 or 3 or 5."));

   *ret = CINNValuePack{res};
8 changes: 4 additions & 4 deletions paddle/cinn/lang/builtin.cc
@@ -156,7 +156,7 @@ Expr min_value(const Type& type) {
   PADDLE_ENFORCE_EQ(type.lanes(),
                     1,
                     ::common::errors::InvalidArgument(
-                        "The value of min type's lanes is incorrect"
+                        "The value of min type's lanes is incorrect. "
                         "Expected value is 1, but receive %d. ",
                         type.lanes()));
 #define FOR_CASE(type__) \
@@ -183,7 +183,7 @@ Expr max_value(const Type& type) {
   PADDLE_ENFORCE_EQ(type.lanes(),
                     1,
                     ::common::errors::InvalidArgument(
-                        "The value of max type's lanes is incorrect"
+                        "The value of max type's lanes is incorrect. "
                         "Expected value is 1, but receive %d. ",
                         type.lanes()));

@@ -213,7 +213,7 @@ Expr Epsilon(const Type& type) {
   PADDLE_ENFORCE_EQ(type.lanes(),
                     1,
                     ::common::errors::InvalidArgument(
-                        "The value of epsilon type's lanes is incorrect"
+                        "The value of epsilon type's lanes is incorrect. "
                         "Expected value is 1, but receive %d. ",
                         type.lanes()));

@@ -280,7 +280,7 @@ Expr Infinity(const Type& type) {
   PADDLE_ENFORCE_EQ(type.lanes(),
                     1U,
                     ::common::errors::InvalidArgument(
-                        "The value of infinity type's lanes is incorrect"
+                        "The value of infinity type's lanes is incorrect. "
                         "Expected value is 1, but receive %d. ",
                         type.lanes()));
   if (type.is_float()) {
12 changes: 6 additions & 6 deletions paddle/cinn/lang/compute.cc
@@ -44,7 +44,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     1,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 1, but receive %d. ",
                         axis.size()));
   return fn(axis[0]);
@@ -63,7 +63,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     2,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 2, but receive %d. ",
                         axis.size()));
   return fn(axis[0], axis[1]);
@@ -82,7 +82,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     3,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 3, but receive %d. ",
                         axis.size()));
   return fn(axis[0], axis[1], axis[2]);
@@ -101,7 +101,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     4,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 4, but receive %d. ",
                         axis.size()));
   return fn(axis[0], axis[1], axis[2], axis[3]);
@@ -120,7 +120,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     5,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 5, but receive %d. ",
                         axis.size()));
   return fn(axis[0], axis[1], axis[2], axis[3], axis[4]);
@@ -139,7 +139,7 @@ ir::Tensor Compute(const std::vector<Expr> &domain,
   PADDLE_ENFORCE_EQ(axis.size(),
                     6,
                     ::common::errors::InvalidArgument(
-                        "The size of axis vector is incorrect"
+                        "The size of axis vector is incorrect. "
                         "Expected value is 6, but receive %d. ",
                         axis.size()));
   return fn(axis[0], axis[1], axis[2], axis[3], axis[4], axis[5]);
10 changes: 5 additions & 5 deletions paddle/common/flags.cc
@@ -63,7 +63,7 @@ PHI_DEFINE_EXPORTED_int32(paddle_num_threads,
  */
 PHI_DEFINE_EXPORTED_int32(low_precision_op_list,
                           0,
-                          "Setting the level of low precision op"
+                          "Setting the level of low precision op "
                           "list printing. It will be return the "
                           "low precision op list of current module.");

@@ -171,8 +171,8 @@ PHI_DEFINE_EXPORTED_string(
     "This option is useful when doing multi process training and "
     "each process have only one device (GPU). If you want to use "
     "all visible devices, set this to empty string. NOTE: the "
-    "reason of doing this is that we want to use P2P communication"
-    "between GPU devices, use CUDA_VISIBLE_DEVICES can only use"
+    "reason of doing this is that we want to use P2P communication "
+    "between GPU devices, use CUDA_VISIBLE_DEVICES can only use "
     "share-memory only.");
 #endif

@@ -759,7 +759,7 @@ PHI_DEFINE_EXPORTED_int32(
     0,
     "The maximum number of inplace grad_add. When doing "
     "gradient accumulation, if the number of gradients need to that "
-    "less FLAGS_max_inplace_grad_add, than it will be use several grad_add"
+    "less FLAGS_max_inplace_grad_add, than it will be use several grad_add "
     "instead of sum. Default is 0.");

 /**
@@ -1360,7 +1360,7 @@ PHI_DEFINE_EXPORTED_bool(
     false,
     "Doing memory benchmark. It will make deleting scope synchronized, "
     "and add some memory usage logs."
-    "Default cuda is asynchronous device, set to True will"
+    "Default cuda is asynchronous device, set to True will "
     "force op run in synchronous mode.");

 PHI_DEFINE_EXPORTED_bool(eager_communication_connection,
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/collective/bkcl_tools.h
@@ -72,7 +72,7 @@ class XPUEventManager {
     PADDLE_ENFORCE_EQ(device_index,
                       device_index_,
                       common::errors::PreconditionNotMet(
-                          "XPUContext's device %d does not match"
+                          "XPUContext's device %d does not match. "
                           "Event's device %d",
                           device_index,
                           device_index_));
14 changes: 7 additions & 7 deletions paddle/fluid/distributed/collective/reducer.cc
@@ -753,13 +753,13 @@ void EagerReducer::PrepareForBackward(const std::vector<Tensor> &outputs) {
 }

 void EagerReducer::AddDistHook(size_t var_index) {
-  PADDLE_ENFORCE_LT(
-      var_index,
-      variable_locators_.size(),
-      common::errors::OutOfRange("Out of bounds variable index. it must be less"
-                                 "than %d, but it is %d",
-                                 variable_locators_.size(),
-                                 var_index));
+  PADDLE_ENFORCE_LT(var_index,
+                    variable_locators_.size(),
+                    common::errors::OutOfRange(
+                        "Out of bounds variable index. it must be less "
+                        "than %d, but it is %d",
+                        variable_locators_.size(),
+                        var_index));

   // gradient synchronization is not required when grad_need_hooks_ is false.
   if (!grad_need_hooks_) {
4 changes: 2 additions & 2 deletions paddle/fluid/eager/backward.cc
@@ -186,7 +186,7 @@ std::vector<paddle::Tensor> RunBackward(
   PADDLE_ENFORCE(
       grad_tensors.size() == tensors.size(),
       common::errors::Fatal(
-          "Detected size mismatch between tensors and grad_tensors"
+          "Detected size mismatch between tensors and grad_tensors, "
          "grad_tensors should either have "
          "size = 0 or same size as tensors."));
   // Feed given tensor if it's provided
@@ -419,7 +419,7 @@ std::vector<paddle::Tensor> RunBackward(
   PADDLE_ENFORCE(
       node_in_degree_map[next_node] >= 0,
       common::errors::Fatal(
-          "Detected in-degree value smaller than zero. For Node: %s"
+          "Detected in-degree value smaller than zero. For Node: %s, "
          "Node's in-degree cannot be negative.",
          next_node->name()));
