diff --git a/paddle/cinn/frontend/op_mappers/paddle/arg_min_max.cc b/paddle/cinn/frontend/op_mappers/paddle/arg_min_max.cc index dbae77e4c71182..991c18a827d54c 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/arg_min_max.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/arg_min_max.cc @@ -15,6 +15,7 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" #include "paddle/cinn/frontend/var_type_utils.h" +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { @@ -47,24 +48,39 @@ Variable ArgImpl(NetBuilder* builder, template void ArgOpMapperHelper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("The input of Argmax/Argmin op must be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument( + "The output of Argmax/Argmin op must be 1.")); auto out_name = op_desc.Output("Out").front(); auto x = ctx.GetVar(x_name); auto axis = utils::GetAttrOrDefault(op_desc, "axis", -1); - CHECK(op_desc.HasAttr("axis")) - << "Argmax/Argmin op should has attribute \"axis\"! Please check."; + PADDLE_ENFORCE_EQ( + op_desc.HasAttr("axis"), + true, + phi::errors::InvalidArgument("Argmax/Argmin op should has attribute " + "\"axis\"! Please check.")); auto keepdims = utils::GetAttrOrDefault(op_desc, "keepdims", false); - CHECK(op_desc.HasAttr("keepdims")) - << "Argmax/Argmin op should has attribute \"keepdims\"! Please check."; + PADDLE_ENFORCE_EQ( + op_desc.HasAttr("keepdims"), + true, + phi::errors::InvalidArgument("Argmax/Argmin op should has attribute" + " \"keepdims\"! 
Please check.")); auto flatten = utils::GetAttrOrDefault(op_desc, "flatten", false); - CHECK(op_desc.HasAttr("flatten")) - << "Argmax/Argmin op should has attribute \"flatten\"! Please check."; + PADDLE_ENFORCE_EQ( + op_desc.HasAttr("flatten"), + true, + phi::errors::InvalidArgument("Argmax/Argmin op should has attribute" + " \"flatten\"! Please check.")); auto dtype = utils::GetPaddleDtype( op_desc, "dtype", paddle::cpp::VarDescAPI::Type::INT64); diff --git a/paddle/cinn/frontend/op_mappers/paddle/argsort.cc b/paddle/cinn/frontend/op_mappers/paddle/argsort.cc index b861c7ed407a5f..71a1b20250cb7b 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/argsort.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/argsort.cc @@ -17,20 +17,29 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" #include "paddle/cinn/utils/string.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { void ArgsortOpMapper(const paddle::cpp::OpDesc& op_desc, const cinn::frontend::OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("The input of Argsort op must be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument( + "The output of Argsort op must be 1.")); auto out_name = op_desc.Output("Out").front(); - CHECK_EQ(op_desc.Output("Indices").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output("Indices").size(), + 1UL, + phi::errors::InvalidArgument( + "The Indices output of Argsort op must be 1.")); auto indices_name = op_desc.Output("Indices").front(); auto is_ascend = diff --git a/paddle/cinn/frontend/op_mappers/paddle/atan.cc b/paddle/cinn/frontend/op_mappers/paddle/atan.cc index 961e643ae88e16..997aa37a8c6c3d 100644 --- 
a/paddle/cinn/frontend/op_mappers/paddle/atan.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/atan.cc @@ -17,18 +17,27 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" #include "paddle/cinn/utils/string.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { void Atan2OpMapper(const paddle::cpp::OpDesc& op_desc, const cinn::frontend::OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X1").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X1").size(), + 1UL, + phi::errors::InvalidArgument("The input of Atan2 op must be 1.")); auto x1_name = op_desc.Input("X1").front(); - CHECK_EQ(op_desc.Input("X2").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X2").size(), + 1UL, + phi::errors::InvalidArgument("The input of Atan2 op must be 1.")); auto x2_name = op_desc.Input("X2").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("The output of Atan2 op must be 1.")); auto out_name = op_desc.Output("Out").front(); auto x1 = ctx.GetVar(x1_name); diff --git a/paddle/cinn/frontend/op_mappers/paddle/batchnorm.cc b/paddle/cinn/frontend/op_mappers/paddle/batchnorm.cc index af93bf23db0c2e..ca6aa7a98baf57 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/batchnorm.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/batchnorm.cc @@ -14,7 +14,7 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { @@ -29,7 +29,10 @@ void BatchNormOpMapper(const paddle::cpp::OpDesc& op_desc, << op_desc.Type(); return; } - CHECK_EQ(op_desc.Output(pd_param_name).size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output(pd_param_name).size(), + 1UL, + phi::errors::InvalidArgument("The output of batch_norm op must be 1.")); auto output_name = 
op_desc.Output(pd_param_name).front(); VLOG(4) << "The " << op_desc.Type() << "'s output " << pd_param_name @@ -39,15 +42,30 @@ void BatchNormOpMapper(const paddle::cpp::OpDesc& op_desc, ctx.AddVarModelToProgram(output_name, out->id, can_inplace); }; - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("The input of batch_norm op must be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Input("Scale").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("Scale").size(), + 1UL, + phi::errors::InvalidArgument("The input of batch_norm op must be 1.")); auto scale_name = op_desc.Input("Scale").front(); - CHECK_EQ(op_desc.Input("Bias").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("Bias").size(), + 1UL, + phi::errors::InvalidArgument("The input of batch_norm op must be 1.")); auto bias_name = op_desc.Input("Bias").front(); - CHECK_EQ(op_desc.Input("Mean").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("Mean").size(), + 1UL, + phi::errors::InvalidArgument("The input of batch_norm op must be 1.")); auto mean_name = op_desc.Input("Mean").front(); - CHECK_EQ(op_desc.Input("Variance").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("Variance").size(), + 1UL, + phi::errors::InvalidArgument("The input of batch_norm op must be 1.")); auto variance_name = op_desc.Input("Variance").front(); auto epsilon = utils::GetAttrOrDefault(op_desc, "epsilon", 1e-5f); @@ -105,8 +123,11 @@ void BatchNormOpMapper(const paddle::cpp::OpDesc& op_desc, add_output("VarianceOut", variance_out, true); } else { VLOG(4) << "Invoke batch_norm OpMapper with train mode"; - CHECK_EQ(outs.size(), 5U) - << "batch_norm in train mode should only has 5 output! Please check."; + PADDLE_ENFORCE_EQ(outs.size(), + 5U, + phi::errors::InvalidArgument( + "batch_norm in train mode should only has 5 output! 
" + "Please check.")); add_output("Y", outs[0]); add_output("SavedMean", outs[1]); @@ -122,7 +143,10 @@ void BatchNormGradOpMapper(const paddle::cpp::OpDesc& op_desc, std::unordered_map input_names_map; auto get_input_var = [&op_desc, &ctx, &input_names_map](const std::string& op_name) { - CHECK_EQ(op_desc.Input(op_name).size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input(op_name).size(), + 1UL, + phi::errors::InvalidArgument( + "The input of batch_norm_grad op must be 1.")); auto var_name = op_desc.Input(op_name).front(); input_names_map.emplace(op_name, var_name); return ctx.GetVar(var_name); @@ -132,12 +156,17 @@ void BatchNormGradOpMapper(const paddle::cpp::OpDesc& op_desc, auto get_output_name = [&op_desc, &output_names_map](const std::string& op_name) -> std::string { if (op_desc.Output(op_name).empty()) { - CHECK_NE(op_name, paddle::GradVarName("X")) - << "The input X should not empty."; + PADDLE_ENFORCE_NE( + op_name, + paddle::GradVarName("X"), + phi::errors::InvalidArgument("The input X should not empty.")); return ""; } - CHECK_EQ(op_desc.Output(op_name).size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output(op_name).size(), + 1UL, + phi::errors::InvalidArgument( + "The output of batch_norm_grad op must be 1.")); auto var_name = op_desc.Output(op_name).front(); output_names_map.emplace(op_name, var_name); return var_name; @@ -174,8 +203,10 @@ void BatchNormGradOpMapper(const paddle::cpp::OpDesc& op_desc, // batch norm grad, output(grad_x, grad_scale, grad_bias) auto outs = ctx.Builder()->BatchNormGrad( dy, x, scale, saved_mean, saved_variance, epsilon, data_layout); - CHECK_EQ(outs.size(), 3ul) - << "batch_norm_grad APIs should return 3 Variable!"; + PADDLE_ENFORCE_EQ(outs.size(), + 3ul, + phi::errors::InvalidArgument( + "batch_norm_grad APIs should return 3 Variable!")); for (int i = 0; i < outs.size(); i++) { if (output_names[i].empty()) { diff --git a/paddle/cinn/frontend/op_mappers/paddle/binary.cc b/paddle/cinn/frontend/op_mappers/paddle/binary.cc index 
02016c28354394..bc416bbc616257 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/binary.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/binary.cc @@ -14,25 +14,34 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { -#define BINARY_OPMAPPER_FUNCTION(OP_NAME) \ - void OP_NAME##OpMapper(const paddle::cpp::OpDesc& op_desc, \ - const OpMapperContext& ctx) { \ - CHECK_EQ(op_desc.Input("X").size(), 1UL); \ - auto x_name = op_desc.Input("X").front(); \ - CHECK_EQ(op_desc.Input("Y").size(), 1UL); \ - auto y_name = op_desc.Input("Y").front(); \ - CHECK_EQ(op_desc.Output("Out").size(), 1UL); \ - auto out_name = op_desc.Output("Out").front(); \ - auto x = ctx.GetVar(x_name); \ - auto y = ctx.GetVar(y_name); \ - auto out = ctx.Builder()->OP_NAME(x, y); \ - ctx.AddVar(out_name, out); \ - ctx.AddVarModelToProgram(out_name, out->id); \ +#define BINARY_OPMAPPER_FUNCTION(OP_NAME) \ + void OP_NAME##OpMapper(const paddle::cpp::OpDesc& op_desc, \ + const OpMapperContext& ctx) { \ + PADDLE_ENFORCE_EQ( \ + op_desc.Input("X").size(), \ + 1UL, \ + phi::errors::InvalidArgument("The input of op must be 1.")); \ + auto x_name = op_desc.Input("X").front(); \ + PADDLE_ENFORCE_EQ( \ + op_desc.Input("Y").size(), \ + 1UL, \ + phi::errors::InvalidArgument("The input of op must be 1.")); \ + auto y_name = op_desc.Input("Y").front(); \ + PADDLE_ENFORCE_EQ( \ + op_desc.Output("Out").size(), \ + 1UL, \ + phi::errors::InvalidArgument("The output of op must be 1.")); \ + auto out_name = op_desc.Output("Out").front(); \ + auto x = ctx.GetVar(x_name); \ + auto y = ctx.GetVar(y_name); \ + auto out = ctx.Builder()->OP_NAME(x, y); \ + ctx.AddVar(out_name, out); \ + ctx.AddVarModelToProgram(out_name, out->id); \ } BINARY_OPMAPPER_FUNCTION(LogicalAnd) diff --git a/paddle/cinn/frontend/op_mappers/paddle/cholesky.cc 
b/paddle/cinn/frontend/op_mappers/paddle/cholesky.cc index 51c8a2fb1dc531..325c9d27d7facc 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/cholesky.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/cholesky.cc @@ -14,16 +14,22 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { void CholeskyOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("The input of cholesky op must be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("The output of cholesky op must be 1.")); auto out_name = op_desc.Output("Out").front(); auto upper = utils::GetAttrOrDefault(op_desc, "upper", false); diff --git a/paddle/cinn/frontend/op_mappers/paddle/clip.cc b/paddle/cinn/frontend/op_mappers/paddle/clip.cc index f060ec4175fc99..84dd148908b0d0 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/clip.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/clip.cc @@ -14,28 +14,37 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { void ClipOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("The input of clip op must be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("The output of clip op must be 
1.")); auto out_name = op_desc.Output("Out").front(); auto x = ctx.GetVar(x_name); auto builder = ctx.Builder(); if (op_desc.HasInput("Min") && op_desc.Input("Min").size() > 0) { - CHECK_EQ(op_desc.Input("Min").size(), 1) - << "clip op should have only one input for Min"; + PADDLE_ENFORCE_EQ(op_desc.Input("Min").size(), + 1UL, + phi::errors::InvalidArgument( + "clip op should have only one input for Min")); auto min_val_name = op_desc.Input("Min").front(); auto min_val_tensor = ctx.GetVar(min_val_name); - CHECK(min_val_tensor->shape == cinn::utils::ShapeType{1}) - << "The [Min] tensor shape of clip op should be [1], but here [" - << cinn::utils::Join(min_val_tensor->shape, ", ") << "]"; + PADDLE_ENFORCE_EQ(min_val_tensor->shape == cinn::utils::ShapeType{1}, + true, + phi::errors::InvalidArgument( + "The [Min] tensor shape of clip op should be [1].")); if (x->type != min_val_tensor->type) { min_val_tensor = builder->Cast(min_val_tensor, cinn::common::Type2Str(x->type)); @@ -43,8 +52,11 @@ void ClipOpMapper(const paddle::cpp::OpDesc& op_desc, min_val_tensor = builder->BroadcastTo(min_val_tensor, x->shape); x = builder->Max(x, min_val_tensor); } else { - CHECK(op_desc.HasAttr("min")) - << "The clip op should has [min] attribute or [Min] tensor input."; + PADDLE_ENFORCE_EQ( + op_desc.HasAttr("min"), + true, + phi::errors::InvalidArgument( + "The clip op should has [min] attribute or [Min] tensor input.")); auto min_value = op_desc.GetAttr("min"); auto min_val_tensor = builder->FillConstant(x->shape, @@ -55,13 +67,16 @@ void ClipOpMapper(const paddle::cpp::OpDesc& op_desc, } if (op_desc.HasInput("Max") && op_desc.Input("Max").size() > 0) { - CHECK_EQ(op_desc.Input("Max").size(), 1) - << "clip op should have only one input for Max"; + PADDLE_ENFORCE_EQ(op_desc.Input("Max").size(), + 1UL, + phi::errors::InvalidArgument( + "clip op should have only one input for Max")); auto max_val_name = op_desc.Input("Max").front(); auto max_val_tensor = ctx.GetVar(max_val_name); - 
CHECK(max_val_tensor->shape == cinn::utils::ShapeType{1}) - << "The [Max] tensor shape of clip op should be [1], but here [" - << cinn::utils::Join(max_val_tensor->shape, ", ") << "]"; + PADDLE_ENFORCE_EQ(max_val_tensor->shape == cinn::utils::ShapeType{1}, + true, + phi::errors::InvalidArgument( + "The [Max] tensor shape of clip op should be [1].")); if (x->type != max_val_tensor->type) { max_val_tensor = builder->Cast(max_val_tensor, cinn::common::Type2Str(x->type)); @@ -69,8 +84,11 @@ void ClipOpMapper(const paddle::cpp::OpDesc& op_desc, max_val_tensor = builder->BroadcastTo(max_val_tensor, x->shape); x = builder->Min(x, max_val_tensor); } else { - CHECK(op_desc.HasAttr("max")) - << "The clip op should has [max] attribute or [Max] tensor input."; + PADDLE_ENFORCE_EQ( + op_desc.HasAttr("max"), + true, + phi::errors::InvalidArgument( + "The clip op should has [max] attribute or [Max] tensor input.")); auto max_value = op_desc.GetAttr("max"); auto max_val_tensor = builder->FillConstant(x->shape, diff --git a/paddle/cinn/frontend/op_mappers/paddle/compare.cc b/paddle/cinn/frontend/op_mappers/paddle/compare.cc index ecf0313de6d50b..83855bfcec43e5 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/compare.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/compare.cc @@ -14,7 +14,7 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { @@ -28,19 +28,29 @@ static const std::string& GetCompareDebugString(const std::string& compare_op) { {"Equal", " == "}, {"NotEqual", " != "}, }; - CHECK_GT(compare_debug_map.count(compare_op), 0) - << "Unsupported compare op " << compare_op; + PADDLE_ENFORCE_GT(compare_debug_map.count(compare_op), + 0, + phi::errors::InvalidArgument("Unsupported compare op")); return compare_debug_map[compare_op]; } #define COMPARE_OPMAPPER_FUNCTION(OP_NAME) \ void OP_NAME##OpMapper(const 
paddle::cpp::OpDesc& op_desc, \ const OpMapperContext& ctx) { \ - CHECK_EQ(op_desc.Input("X").size(), 1UL); \ + PADDLE_ENFORCE_EQ( \ + op_desc.Input("X").size(), \ + 1UL, \ + phi::errors::InvalidArgument("The input of op must be 1.")); \ auto x_name = op_desc.Input("X").front(); \ - CHECK_EQ(op_desc.Input("Y").size(), 1UL); \ + PADDLE_ENFORCE_EQ( \ + op_desc.Input("Y").size(), \ + 1UL, \ + phi::errors::InvalidArgument("The input of op must be 1.")); \ auto y_name = op_desc.Input("Y").front(); \ - CHECK_EQ(op_desc.Output("Out").size(), 1UL); \ + PADDLE_ENFORCE_EQ( \ + op_desc.Output("Out").size(), \ + 1UL, \ + phi::errors::InvalidArgument("The output of op must be 1.")); \ auto out_name = op_desc.Output("Out").front(); \ auto axis = utils::GetAttrOrDefault(op_desc, "axis", -1); \ VLOG(4) << out_name << " = " << x_name << GetCompareDebugString(#OP_NAME) \ diff --git a/paddle/cinn/frontend/op_mappers/paddle/concat.cc b/paddle/cinn/frontend/op_mappers/paddle/concat.cc index d7181f3ac1a60b..0372fcc9a40d69 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/concat.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/concat.cc @@ -16,16 +16,22 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { void ConcatOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_GE(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_GE(op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument( + "The input of concat op must be at least 1.")); auto x_names = op_desc.Input("X"); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("The output of concat op must be 1.")); auto out_name = op_desc.Output("Out").front(); auto axis = utils::GetAttrOrDefault(op_desc, "axis", 0); @@ -38,11 +44,10 @@ void ConcatOpMapper(const 
paddle::cpp::OpDesc& op_desc, auto err_x = std::find_if(xs.begin(), xs.end(), [&](Variable x) { return x->type != xs.front()->type; }); - CHECK(err_x == xs.end()) - << "All input's dtype of [concat] should be the same, be the input " - << (*err_x)->id << "'s dtype [" << (*err_x)->type - << "] not equal to the first input " << xs.front()->id << "'s dtype [" - << xs.front()->type << "]"; + PADDLE_ENFORCE_EQ(err_x == xs.end(), + true, + phi::errors::InvalidArgument( + "All input's dtype of [concat] should be the same.")); auto out = ctx.Builder()->Concat(xs, axis); @@ -52,15 +57,24 @@ void ConcatOpMapper(const paddle::cpp::OpDesc& op_desc, void StackOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_GE(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_GE(op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument( + "The input of stack op must be at least 1.")); auto x_names = op_desc.Input("X"); std::string out_name; if (op_desc.HasOutput("Out")) { - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("The output of stack op must be 1.")); out_name = op_desc.Output("Out").front(); } else if (op_desc.HasOutput("Y")) { - CHECK_EQ(op_desc.Output("Y").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Y").size(), + 1UL, + phi::errors::InvalidArgument("The output of stack op must be 1.")); out_name = op_desc.Output("Y").front(); } else { PADDLE_THROW(phi::errors::InvalidArgument( @@ -77,12 +91,10 @@ void StackOpMapper(const paddle::cpp::OpDesc& op_desc, std::vector xs; for (const auto& name : x_names) { auto x = ctx.GetVar(name); - CHECK(x->shape == input_shape) - << "All input shape of [stack] should be the same, be the input " - << x->id << "'s shape [" << cinn::utils::Join(x->shape, ", ") - << "] not equal to " - << "the first input " << ctx.GetVar(x_names.front())->id << "'s shape [" - << cinn::utils::Join(input_shape, ", ") << "]"; + 
PADDLE_ENFORCE_EQ(x->shape == input_shape, + true, + phi::errors::InvalidArgument( + "All input shape of [stack] should be the same.")); xs.emplace_back(ctx.Builder()->Reshape(x, output_shape)); } @@ -90,11 +102,10 @@ void StackOpMapper(const paddle::cpp::OpDesc& op_desc, auto err_x = std::find_if(xs.begin(), xs.end(), [&](Variable x) { return x->type != xs.front()->type; }); - CHECK(err_x == xs.end()) - << "All input's dtype of [concat] should be the same, be the input " - << (*err_x)->id << "'s dtype [" << (*err_x)->type - << "] not equal to the first input " << xs.front()->id << "'s dtype [" - << xs.front()->type << "]"; + PADDLE_ENFORCE_EQ(err_x == xs.end(), + true, + phi::errors::InvalidArgument( + "All input's dtype of [stack] should be the same.")); auto concat_out = ctx.Builder()->Concat(xs, axis); @@ -104,10 +115,16 @@ void StackOpMapper(const paddle::cpp::OpDesc& op_desc, void SplitOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("The input of split op must be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_GE(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_GE(op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument( + "The output of split op must be at least 1.")); auto out_names = op_desc.Output("Out"); auto x = ctx.GetVar(x_name); @@ -129,38 +146,50 @@ void SplitOpMapper(const paddle::cpp::OpDesc& op_desc, CHECK(num != 0 || !sections.empty()) << "The [num_or_sections] in split op should not empty! 
Please check."; if (num != 0) { - CHECK(dim % num == 0) << "The num_or_sections:" << num - << " cannot divided by the split axis:" << axis - << " 's dimension:" << dim; + PADDLE_ENFORCE_EQ( + dim % num == 0, + true, + phi::errors::InvalidArgument( + "The num_or_sections cannot divided by the split axis")); sections.clear(); sections.resize(num, dim / num); } - CHECK_EQ(sections.size(), out_names.size()) - << "The output number of split op should be " << sections.size() - << ", but actual " << out_names.size(); + PADDLE_ENFORCE_EQ(sections.size(), + out_names.size(), + phi::errors::InvalidArgument( + "The output number of split op should be same")); int neg_idx = -1, sum = 0; for (int i = 0; i < sections.size(); ++i) { if (sections[i] < 0) { - CHECK_LT(neg_idx, 0) - << "The [num_or_sections] should only has one -1! But here " - << cinn::utils::Join(sections, ", "); + PADDLE_ENFORCE_LT( + neg_idx, + 0, + phi::errors::InvalidArgument( + "The [num_or_sections] should only has one -1! But here " + "found more than one.")); neg_idx = i; } else { sum += sections[i]; } } if (neg_idx > 0) { - CHECK_LT(sum, dim) << "The sum of [num_or_sections] should less than to " + PADDLE_ENFORCE_LT(sum, + dim, + phi::errors::InvalidArgument( + "The sum of [num_or_sections] should less than to " "the dimension of split [axis] when -1 " "found in [num_or_sections]! But here " - << cinn::utils::Join(sections, ", "); + "found more than one.")); sections[neg_idx] = dim - sum; } else { - CHECK_EQ(sum, dim) << "The sum of [num_or_sections] should equal to the " + PADDLE_ENFORCE_EQ(sum, + dim, + phi::errors::InvalidArgument( + "The sum of [num_or_sections] should equal to the " "dimension of split [axis]! 
But here " - << cinn::utils::Join(sections, ", "); + "found more than one.")); } auto outs = ctx.Builder()->Split(x, sections, axis); diff --git a/paddle/cinn/frontend/op_mappers/paddle/constant.cc b/paddle/cinn/frontend/op_mappers/paddle/constant.cc index dde799eef8e88e..822ae52329adc1 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/constant.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/constant.cc @@ -23,16 +23,21 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" #include "paddle/cinn/frontend/var_type_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { void AssignOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("The input of op must be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("The output of op must be 1.")); auto out_name = op_desc.Output("Out").front(); auto x = ctx.GetVar(x_name); @@ -44,9 +49,14 @@ void AssignOpMapper(const paddle::cpp::OpDesc& op_desc, void ShapeOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("Input").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), + 1UL, + phi::errors::InvalidArgument("The input of op must be 1.")); auto x_name = op_desc.Input("Input").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("The output of op must be 1.")); auto out_name = op_desc.Output("Out").front(); auto x = ctx.GetVar(x_name); @@ -59,7 +69,10 @@ void ShapeOpMapper(const paddle::cpp::OpDesc& op_desc, void FillConstantOpMapper(const paddle::cpp::OpDesc& op_desc, const 
OpMapperContext& ctx) { - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("The output of op must be 1.")); auto y_name = op_desc.Output("Out").front(); const auto& cinn_name = cinn::utils::TransValidVarName(y_name); @@ -80,7 +93,10 @@ void FillConstantOpMapper(const paddle::cpp::OpDesc& op_desc, absl::optional out; if (op_desc.HasInput("ValueTensor") && !op_desc.Input("ValueTensor").empty()) { - CHECK_EQ(op_desc.Input("ValueTensor").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("ValueTensor").size(), + 1UL, + phi::errors::InvalidArgument("The input of ValueTensor should be 1.")); auto value_name = op_desc.Input("ValueTensor").front(); auto value_tensor = ctx.GetVar(value_name); @@ -88,9 +104,10 @@ void FillConstantOpMapper(const paddle::cpp::OpDesc& op_desc, << " with shape (" << cinn::utils::Join(shape, ",") << ") and dtype [" << dtype << "]"; - CHECK(value_tensor->shape == cinn::utils::ShapeType{1}) - << "The shape of [ValueTensor] should be [1], but here [" - << cinn::utils::Join(value_tensor->shape, ", ") << "]"; + PADDLE_ENFORCE_EQ( + value_tensor->shape == cinn::utils::ShapeType{1}, + true, + phi::errors::InvalidArgument("The shape of ValueTensor should be 1.")); if (cinn::common::Type2Str(value_tensor->type) != dtype) { value_tensor = ctx.Builder()->Cast(value_tensor, dtype); } @@ -118,11 +135,16 @@ void FillConstantOpMapper(const paddle::cpp::OpDesc& op_desc, void FillAnyLikeOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("The input of op must be 1.")); auto x_name = op_desc.Input("X").front(); auto x = ctx.GetVar(x_name); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("The output of op must be 1.")); auto y_name = 
op_desc.Output("Out").front(); auto shape = utils::ToShapeType(x->shape); @@ -163,7 +185,10 @@ std::pair IsArithmeticSequence(const std::vector& vec) { void AssignValueOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("The output of op must be 1.")); auto out_name = op_desc.Output("Out").front(); const auto& cinn_out_name = cinn::utils::TransValidVarName(out_name); diff --git a/paddle/cinn/frontend/op_mappers/paddle/conv2d.cc b/paddle/cinn/frontend/op_mappers/paddle/conv2d.cc index c44c77e6f0a1f0..65569883f9cfe6 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/conv2d.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/conv2d.cc @@ -15,19 +15,28 @@ #include "paddle/cinn/backends/cuda_util.h" #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { void Conv2dOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("Input").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("Input").size(), + 1UL, + phi::errors::InvalidArgument("Input size of conv2d op should be 1.")); auto x_name = op_desc.Input("Input").front(); - CHECK_EQ(op_desc.Input("Filter").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("Filter").size(), + 1UL, + phi::errors::InvalidArgument("Filter size of conv2d op should be 1.")); auto y_name = op_desc.Input("Filter").front(); - CHECK_EQ(op_desc.Output("Output").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Output").size(), + 1UL, + phi::errors::InvalidArgument("Output size of conv2d op should be 1.")); auto out_name = op_desc.Output("Output").front(); auto strides = @@ -49,12 +58,16 @@ void Conv2dOpMapper(const paddle::cpp::OpDesc& op_desc, auto x = ctx.GetVar(x_name); auto y = 
ctx.GetVar(y_name); - CHECK_EQ(x->shape.size(), 4) << "CINN conv2d operator only support 4-D " - "tensor now, but Input's shape is [" - << cinn::utils::Join(x->shape, ", ") << "]"; - CHECK_EQ(y->shape.size(), 4) << "CINN conv2d operator only support 4-D " - "tensor now, but Filter's shape is [" - << cinn::utils::Join(y->shape, ", ") << "]"; + PADDLE_ENFORCE_EQ( + x->shape.size(), + 4UL, + phi::errors::InvalidArgument( + "CINN conv2d operator's x only support 4-D tensor now.")); + PADDLE_ENFORCE_EQ( + y->shape.size(), + 4, + phi::errors::InvalidArgument( + "CINN conv2d operator's y only support 4-D tensor now.")); if (data_format == "NHWC") { // the weight in paddle always be NCHW, but cudnn need the same as input, // transpose before @@ -82,12 +95,22 @@ void DepthwiseConv2dOpMapperImpl(common::UnknownArch, void DepthwiseConv2dOpMapperImpl(common::X86Arch, const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("Input").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), + 1UL, + phi::errors::InvalidArgument( + "Input size of depthwise_conv2d op should be 1.")); auto x_name = op_desc.Input("Input").front(); - CHECK_EQ(op_desc.Input("Filter").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), + 1UL, + phi::errors::InvalidArgument( + "Filter size of depthwise_conv2d op should be 1.")); + auto y_name = op_desc.Input("Filter").front(); - CHECK_EQ(op_desc.Output("Output").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output("Output").size(), + 1UL, + phi::errors::InvalidArgument( + "Output size of depthwise_conv2d op should be 1.")); auto out_name = op_desc.Output("Output").front(); auto strides = @@ -131,12 +154,21 @@ void DepthwiseConv2dOpMapperImpl(common::ARMArch, void DepthwiseConv2dOpMapperImpl(common::NVGPUArch, const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("Input").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), + 1UL, + 
phi::errors::InvalidArgument( + "Input size of depthwise_conv2d op should be 1.")); auto x_name = op_desc.Input("Input").front(); - CHECK_EQ(op_desc.Input("Filter").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), + 1UL, + phi::errors::InvalidArgument( + "Filter size of depthwise_conv2d op should be 1.")); auto y_name = op_desc.Input("Filter").front(); - CHECK_EQ(op_desc.Output("Output").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output("Output").size(), + 1UL, + phi::errors::InvalidArgument( + "Output size of depthwise_conv2d op should be 1.")); auto out_name = op_desc.Output("Output").front(); auto strides = @@ -190,24 +222,39 @@ void DepthwiseConv2dOpMapper(const paddle::cpp::OpDesc& op_desc, void Conv2dGradOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { // get dy - CHECK_EQ(op_desc.Input(paddle::GradVarName("Output")).size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input(paddle::GradVarName("Output")).size(), + 1UL, + phi::errors::InvalidArgument( + "Input size of conv2d_grad op should be 1.")); auto dy_name = op_desc.Input(paddle::GradVarName("Output")).front(); // get intput input,filter - CHECK_EQ(op_desc.Input("Input").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), + 1UL, + phi::errors::InvalidArgument( + "Input size of conv2d_grad op should be 1.")); auto x_name = op_desc.Input("Input").front(); - CHECK_EQ(op_desc.Input("Filter").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), + 1UL, + phi::errors::InvalidArgument( + "Filter size of conv2d_grad op should be 1.")); auto w_name = op_desc.Input("Filter").front(); // get d_x std::string dx_name; bool has_dx = !op_desc.Output(paddle::GradVarName("Input")).empty(); if (has_dx) { - CHECK_EQ(op_desc.Output(paddle::GradVarName("Input")).size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output(paddle::GradVarName("Input")).size(), + 1UL, + phi::errors::InvalidArgument( + "Output size of conv2d_grad op should be 1.")); dx_name = 
op_desc.Output(paddle::GradVarName("Input")).front(); } // get d_filter - CHECK_EQ(op_desc.Output(paddle::GradVarName("Filter")).size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output(paddle::GradVarName("Filter")).size(), + 1UL, + phi::errors::InvalidArgument( + "Output size of conv2d_grad op should be 1.")); auto dw_name = op_desc.Output(paddle::GradVarName("Filter")).front(); auto strides = @@ -231,17 +278,22 @@ void Conv2dGradOpMapper(const paddle::cpp::OpDesc& op_desc, auto x = ctx.GetVar(x_name); auto weight = ctx.GetVar(w_name); - CHECK_EQ(x->shape.size(), 4) << "CINN conv2d_grad operator only support 4-D " - "tensor now, but Input's shape is [" - << cinn::utils::Join(x->shape, ", ") << "]"; - CHECK_EQ(dy->shape.size(), 4) - << "CINN conv2d_grad operator only support 4-D tensor now, but " - << paddle::GradVarName("Output") << "'s shape is [" - << cinn::utils::Join(dy->shape, ", ") << "]"; - CHECK_EQ(weight->shape.size(), 4) - << "CINN conv2d_grad operator only support 4-D tensor now, but Filter's " - "shape is [" - << cinn::utils::Join(weight->shape, ", ") << "]"; + PADDLE_ENFORCE_EQ( + x->shape.size(), + 4UL, + phi::errors::InvalidArgument( + "CINN conv2d_grad operator's x only support 4-D tensor now.")); + PADDLE_ENFORCE_EQ( + dy->shape.size(), + 4UL, + phi::errors::InvalidArgument( + "CINN conv2d_grad operator's dy only support 4-D tensor now.")); + PADDLE_ENFORCE_EQ( + weight->shape.size(), + 4UL, + phi::errors::InvalidArgument( + "CINN conv2d_grad operator's weight only support 4-D tensor now.")); + if (data_format == "NHWC") { // the weight in paddle always be NCHW, but cudnn need the same as input, // transpose before diff --git a/paddle/cinn/frontend/op_mappers/paddle/cumsum.cc b/paddle/cinn/frontend/op_mappers/paddle/cumsum.cc index 3482bacded2216..5bd0de5cf5a2e8 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/cumsum.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/cumsum.cc @@ -15,17 +15,23 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" 
#include "paddle/cinn/frontend/op_mappers/common_utils.h" #include "paddle/cinn/frontend/var_type_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { void CumsumOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("Input(X) of cumsum op should be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("Output(Out) of cumsum op should be 1.")); auto out_name = op_desc.Output("Out").front(); auto input = ctx.GetVar(x_name); diff --git a/paddle/cinn/frontend/op_mappers/paddle/dropout.cc b/paddle/cinn/frontend/op_mappers/paddle/dropout.cc index 0e88af83c718d6..b27b2396d3ed9a 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/dropout.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/dropout.cc @@ -14,16 +14,22 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { void DropoutInferOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("Input(X) of dropout op should be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("Output(Out) of dropout op should be 1.")); auto out_name = op_desc.Output("Out").front(); auto dropout_prob = diff --git a/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc b/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc index 63f9316fc99907..48a7f0f10d90c5 100644 
--- a/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc @@ -17,7 +17,7 @@ #include "paddle/cinn/frontend/op_mappers/common_utils.h" #include "paddle/cinn/frontend/paddle/cpp/desc_api.h" #include "paddle/cinn/frontend/var_type_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { @@ -75,11 +75,20 @@ ELTWISE_SPEC(EltwiseType::kMin, NetBuilder::Min); void AddOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("Input(X) of add op should be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Input("Y").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("Y").size(), + 1UL, + phi::errors::InvalidArgument("Input(Y) of add op should be 1.")); auto y_name = op_desc.Input("Y").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("Output(Out) of add op should be 1.")); auto out_name = op_desc.Output("Out").front(); VLOG(4) << out_name << " = " << x_name << " + " << y_name; @@ -96,11 +105,20 @@ template void ElementwiseOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { VLOG(5) << "Elementwise operator mapping type: " << static_cast(Type); - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("Input(X) of elementwise op should be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Input("Y").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("Y").size(), + 1UL, + phi::errors::InvalidArgument("Input(Y) of elementwise op should be 1.")); auto y_name = op_desc.Input("Y").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), + 1UL, + 
phi::errors::InvalidArgument( + "Output(Out) of elementwise op should be 1.")); auto out_name = op_desc.Output("Out").front(); auto axis = utils::GetAttrOrDefault(op_desc, "axis", -1); @@ -118,11 +136,21 @@ void ElementwiseOpMapper(const paddle::cpp::OpDesc& op_desc, void ElementwiseAddGradOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument( + "Input(X) of elementwise_add_grad op should be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Input("Y").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), + 1UL, + phi::errors::InvalidArgument( + "Input(Y) of elementwise_add_grad op should be 1.")); auto y_name = op_desc.Input("Y").front(); - CHECK_EQ(op_desc.Input(paddle::GradVarName("Out")).size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input(paddle::GradVarName("Out")).size(), + 1UL, + phi::errors::InvalidArgument( + "Input(Out@GRAD) of elementwise_add_grad op should be " + "1.")); auto dout_name = op_desc.Input(paddle::GradVarName("Out")).front(); std::string dx_name, dy_name; @@ -145,7 +173,10 @@ void ElementwiseAddGradOpMapper(const paddle::cpp::OpDesc& op_desc, auto y = ctx.GetVar(y_name); auto dout = ctx.GetVar(dout_name); auto outs = ctx.Builder()->ElementwiseAddGrad(dout, x, y, axis); - CHECK_EQ(outs.size(), 2) << "elementwise_add_grad should return 2 variables"; + PADDLE_ENFORCE_EQ(outs.size(), + 2UL, + phi::errors::InvalidArgument( + "elementwise_add_grad should return 2 variables")); if (has_dx) { auto dx = outs.front(); @@ -161,9 +192,15 @@ void ElementwiseAddGradOpMapper(const paddle::cpp::OpDesc& op_desc, void SumOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_GE(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_GE( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("Input(X) of sum op should be at least 1.")); auto x_names = 
op_desc.Input("X"); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("Output(Out) of sum op should be 1.")); auto out_name = op_desc.Output("Out").front(); std::vector xs; @@ -181,13 +218,21 @@ void SumOpMapper(const paddle::cpp::OpDesc& op_desc, void CastOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("Input(X) of cast op should be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("Output(Out) of cast op should be 1.")); auto out_name = op_desc.Output("Out").front(); - CHECK(op_desc.HasAttr("out_dtype")) - << "The cast op should has [out_dtype] attribute!"; + PADDLE_ENFORCE_EQ(op_desc.HasAttr("out_dtype"), + true, + phi::errors::InvalidArgument( + "The cast op should has [out_dtype] attribute!")); auto dtype = utils::GetPaddleDtype( op_desc, "out_dtype", paddle::cpp::VarDescAPI::Type::FP32); CHECK(!dtype.empty()) << "The op \"cast\"'s attribute \"out_dtype\" should " @@ -205,16 +250,25 @@ void CastOpMapper(const paddle::cpp::OpDesc& op_desc, void PowOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("Input(X) of pow op should be 1.")); auto x_name = op_desc.Input("X").front(); auto x = ctx.GetVar(x_name); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("Output(Out) of pow op should be 1.")); auto out_name = op_desc.Output("Out").front(); absl::optional y; if (op_desc.HasInput("FactorTensor") && !op_desc.Input("FactorTensor").empty()) { - 
CHECK_EQ(op_desc.Input("FactorTensor").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("FactorTensor").size(), + 1UL, + phi::errors::InvalidArgument( + "Input(FactorTensor) of pow op should be 1.")); auto y_name = op_desc.Input("FactorTensor").front(); y = ctx.GetVar(y_name); @@ -231,9 +285,11 @@ void PowOpMapper(const paddle::cpp::OpDesc& op_desc, } VLOG(4) << out_name << " = pow(" << x_name << ", " << y.value()->id << ")"; - CHECK_EQ(x->type, y.value()->type) - << "The data type of pow's inputs should be equal, but here x:" << x->type - << " != y:" << y.value()->type; + PADDLE_ENFORCE_EQ( + x->type, + y.value()->type, + phi::errors::InvalidArgument( + "The data type of pow's input x should be equal to that of y.")); auto out = ctx.Builder()->Pow(x, y.value()); @@ -243,19 +299,34 @@ void PowOpMapper(const paddle::cpp::OpDesc& op_desc, void FloorDivideOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("Input(X) of floor_divide op should be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Input("Y").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("Y").size(), + 1UL, + phi::errors::InvalidArgument("Input(Y) of floor_divide op should be 1.")); auto y_name = op_desc.Input("Y").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument( + "Output(Out) of floor_divide op should be 1.")); auto out_name = op_desc.Output("Out").front(); auto x = ctx.GetVar(x_name); auto y = ctx.GetVar(y_name); VLOG(4) << out_name << " = ⌊ " << x_name << " / " << y_name << " ⌋"; - CHECK_EQ(x->type, y->type) << "Type of input x and y must be the same."; - CHECK(x->type.is_int()) << "Type of inputs must be int32 or int64."; + PADDLE_ENFORCE_EQ( + x->type, + y->type, + phi::errors::InvalidArgument("Type of input x and y must be the same.")); + 
PADDLE_ENFORCE_EQ( + x->type.is_int(), + true, + phi::errors::InvalidArgument("Type of inputs must be int32 or int64.")); auto out = ctx.Builder()->FloorDivide(x, y); diff --git a/paddle/cinn/frontend/op_mappers/paddle/expand.cc b/paddle/cinn/frontend/op_mappers/paddle/expand.cc index e05e573b82fae6..31e487e1c99f6f 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/expand.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/expand.cc @@ -14,19 +14,28 @@ #include "paddle/cinn/frontend/op_mapper_registry.h" #include "paddle/cinn/frontend/op_mappers/common_utils.h" - +#include "paddle/common/enforce.h" namespace cinn { namespace frontend { namespace paddle_mappers { void ExpandOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("Input(X) of expand op should be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("Output(Out) of expand op should be 1.")); auto out_name = op_desc.Output("Out").front(); - CHECK(op_desc.HasAttr("expand_times")); + PADDLE_ENFORCE_EQ( + op_desc.HasAttr("expand_times"), + true, + phi::errors::InvalidArgument("expand op should have attr expand_times.")); auto expand_times = utils::GetAttrOrDefault>(op_desc, "expand_times"); @@ -37,9 +46,11 @@ void ExpandOpMapper(const paddle::cpp::OpDesc& op_desc, VLOG(4) << "expand: attr expand_times: " << cinn::utils::Join(expand_times, ", "); - CHECK_EQ(expand_times.size(), x_shape.size()) - << "The size of `expand_times' should == the rank[" << x_shape.size() - << "] of x's shape, but got " << expand_times.size(); + PADDLE_ENFORCE_EQ(expand_times.size(), + x_shape.size(), + phi::errors::InvalidArgument( + "The size of `expand_times' should be equal to the " + "rank of x's shape.")); std::vector out_shape(x_shape.size()); for (size_t i = 
0; i < x_shape.size(); ++i) { @@ -56,12 +67,21 @@ void ExpandOpMapper(const paddle::cpp::OpDesc& op_desc, void ExpandV2OpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) { - CHECK_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Input("X").size(), + 1UL, + phi::errors::InvalidArgument("Input(X) of expand_v2 op should be 1.")); auto x_name = op_desc.Input("X").front(); - CHECK_EQ(op_desc.Output("Out").size(), 1UL); + PADDLE_ENFORCE_EQ( + op_desc.Output("Out").size(), + 1UL, + phi::errors::InvalidArgument("Output(Out) of expand_v2 op should be 1.")); auto out_name = op_desc.Output("Out").front(); - CHECK(op_desc.HasAttr("shape")); + PADDLE_ENFORCE_EQ( + op_desc.HasAttr("shape"), + true, + phi::errors::InvalidArgument("expand_v2 op should have attr shape.")); auto shape = utils::GetAttrOrDefault>(op_desc, "shape"); auto x = ctx.GetVar(x_name); @@ -70,9 +90,11 @@ void ExpandV2OpMapper(const paddle::cpp::OpDesc& op_desc, VLOG(4) << "expand_v2: x shape: " << cinn::utils::Join(x_shape, ", "); VLOG(4) << "expand_v2: attr shape: " << cinn::utils::Join(shape, ", "); - CHECK_GE(shape.size(), x_shape.size()) - << "The size of `shape' should >= the rank[" << x_shape.size() - << "] of x's shape, but got " << shape.size(); + PADDLE_ENFORCE_GE( + shape.size(), + x_shape.size(), + phi::errors::InvalidArgument( + "The size of `shape' should be greater than or equal to the rank of x's shape.")); auto diff = shape.size() - x_shape.size(); x_shape.insert(x_shape.begin(), diff, 1); @@ -80,28 +102,37 @@ void ExpandV2OpMapper(const paddle::cpp::OpDesc& op_desc, std::vector out_shape(x_shape.size()); for (size_t i = 0; i < x_shape.size(); ++i) { - CHECK_NE(shape[i], 0) << "The " << i - << "th element in shape cannot be zero."; + PADDLE_ENFORCE_NE(shape[i], + 0, + phi::errors::InvalidArgument("The element in shape " + "cannot be zero.")); if (i < diff) { - CHECK_GT(shape[i], 0) - << "The " << i << "th element[" << shape[i] - << "] for non-existing dimensions must be
positive."; + PADDLE_ENFORCE_GT( + shape[i], + 0, + phi::errors::InvalidArgument("The element for non-existing " + "dimensions must be " + "positive.")); out_shape[i] = shape[i]; } else if (shape[i] > 0) { if (x_shape[i] != 1) { - CHECK_EQ(shape[i], x_shape[i]) - << "The " << i << "th element[" << shape[i] - << "] of the non-singleton dimension does not match" - " the corresponding element[" - << x_shape[i] << "] in x's shape."; + PADDLE_ENFORCE_EQ(shape[i], + x_shape[i], + phi::errors::InvalidArgument( + "The element of the non-singleton " + "dimension does not match the corresponding " + "element in x's shape.")); + out_shape[i] = shape[i]; } else { out_shape[i] = shape[i]; } } else { - CHECK_EQ(shape[i], -1) << "When the element in shape is negative for " - "expand_v2 op, only -1 is supported, but got " - << shape[i]; + PADDLE_ENFORCE_EQ(shape[i], + -1, + phi::errors::InvalidArgument( + "When the element in shape is negative for " + "expand_v2 op, only -1 is supported.")); out_shape[i] = x_shape[i]; } }