diff --git a/paddle/cinn/adt/adt.h b/paddle/cinn/adt/adt.h
index 5af2a25cdd5972..2ab5837d24a04f 100644
--- a/paddle/cinn/adt/adt.h
+++ b/paddle/cinn/adt/adt.h
@@ -283,7 +283,7 @@ struct Ok final {
   bool operator!=(const Ok&) const { return false; }
 };
 
-#define ADT_TODO() LOG(FATAL) << "TODO"
+#define ADT_TODO() PADDLE_THROW(phi::errors::Fatal("TODO"))
 
 inline std::size_t hash_combine(std::size_t lhs, std::size_t rhs) {
   return lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2);
diff --git a/paddle/cinn/adt/equation_solver.cc b/paddle/cinn/adt/equation_solver.cc
index 90675fb3db1615..b0eff3dc8355cc 100644
--- a/paddle/cinn/adt/equation_solver.cc
+++ b/paddle/cinn/adt/equation_solver.cc
@@ -273,7 +273,8 @@ void CheckEquationsSolvable(
       [&](const auto& opt_old_value, const auto& simplified_value) {
         LOG(ERROR) << "old_value: " << ToTxtString(opt_old_value);
         LOG(ERROR) << "simplified_value: " << ToTxtString(simplified_value);
-        LOG(FATAL) << "CheckEquationsSolvable Failed";
+        PADDLE_THROW(
+            phi::errors::InvalidArgument("CheckEquationsSolvable Failed"));
         return tValueInferSuccess{false};
       });
 };
diff --git a/paddle/cinn/adt/get_sub_reshape_dim_ranges.cc b/paddle/cinn/adt/get_sub_reshape_dim_ranges.cc
index f7f84a6e15e3a9..8dc63e319e6903 100644
--- a/paddle/cinn/adt/get_sub_reshape_dim_ranges.cc
+++ b/paddle/cinn/adt/get_sub_reshape_dim_ranges.cc
@@ -82,7 +82,7 @@ GetSubReshapeDimRanges(const List& lhs_dims,
     } else if (LhsAcc() > RhsAcc()) {
       rhs_end++;
     } else {
-      LOG(FATAL) << "Dead code";
+      PADDLE_THROW(phi::errors::Fatal("Dead code"));
     }
   }
   CHECK(lhs_end == lhs_dims->size() && rhs_end == rhs_dims->size());
diff --git a/paddle/cinn/adt/igroup.cc b/paddle/cinn/adt/igroup.cc
index 333721815d3489..328d194c11ba24 100644
--- a/paddle/cinn/adt/igroup.cc
+++ b/paddle/cinn/adt/igroup.cc
@@ -102,10 +102,10 @@ List IGroup::GetIndexIterators(const Index& index) const {
     } else if (arg_pos.Has()) {
       // do nothing
     } else {
-      LOG(FATAL) << "Dead code";
+      PADDLE_THROW(phi::errors::Fatal("Dead code"));
     }
   }
-  LOG(FATAL) << "Can not find anchor iterators";
+  PADDLE_THROW(phi::errors::Fatal("Can not find anchor iterators"));
 }
 
 }  // namespace cinn::adt
diff --git a/paddle/cinn/adt/m_ir.cc b/paddle/cinn/adt/m_ir.cc
index 003b6880c813a6..5e4ffabd715488 100644
--- a/paddle/cinn/adt/m_ir.cc
+++ b/paddle/cinn/adt/m_ir.cc
@@ -38,12 +38,12 @@ void CollectTensorIndexIterators(const TensorIndexExpr& tensor_index_expr,
 
 void CollectTensorIndexIteratorsImpl(const Undefined& tensor_index_expr,
                                      std::unordered_set* ret) {
-  LOG(FATAL) << "Not Implemented";
+  PADDLE_THROW(phi::errors::Unimplemented("Not Implemented"));
 }
 
 void CollectTensorIndexIteratorsImpl(const Ok& ok,
                                      std::unordered_set* ret) {
-  LOG(FATAL) << "Not Implemented";
+  PADDLE_THROW(phi::errors::Unimplemented("Not Implemented"));
 }
 
 void CollectTensorIndexIteratorsImpl(const Iterator& iterator,
@@ -134,7 +134,7 @@ LoopIterators GetAnchorTensorLoopIterators(
 namespace {
 
 Tensor GetTensorImpl(const OpStmt& op_stmt, const Undefined& undefined) {
-  LOG(FATAL) << "position not found";
+  PADDLE_THROW(phi::errors::Fatal("position not found"));
 }
 
 Tensor GetTensorImpl(const OpStmt& op_stmt, const tIn& pos) {
diff --git a/paddle/cinn/adt/naive_op_equation_context.cc b/paddle/cinn/adt/naive_op_equation_context.cc
index a65ba537a68bc1..bc1dc11c7c3f96 100644
--- a/paddle/cinn/adt/naive_op_equation_context.cc
+++ b/paddle/cinn/adt/naive_op_equation_context.cc
@@ -240,7 +240,7 @@ std::optional GetArgDimSizeImpl(
     const Undefined&,
     const GetArgStaticDimT& GetInDim,
     const GetArgStaticDimT& GetOutDim) {
-  LOG(FATAL) << "position not found";
+  PADDLE_THROW(phi::errors::Fatal("position not found"));
 }
 
 std::optional GetArgDimSize(const OpArgDimPos& arg_dim_pos,
diff --git a/paddle/cinn/adt/print_utils/print_map_expr.cc b/paddle/cinn/adt/print_utils/print_map_expr.cc
index 5d57bd457aaa45..1548771f139627 100644
--- a/paddle/cinn/adt/print_utils/print_map_expr.cc
+++ b/paddle/cinn/adt/print_utils/print_map_expr.cc
@@ -71,7 +71,7 @@ std::string ToTxtStringImpl(const adapter::DynamicTensor& tensor) {
 }
 
 std::string ToTxtStringImpl(const TempStorage& tensor) {
-  LOG(FATAL) << "Not supported yet";
+  PADDLE_THROW(phi::errors::Unimplemented("Not supported yet"));
 }
 
 }  // namespace
diff --git a/paddle/cinn/adt/schedule_dim.cc b/paddle/cinn/adt/schedule_dim.cc
index 4205bebef1aebc..6cc9ee0e66fffd 100644
--- a/paddle/cinn/adt/schedule_dim.cc
+++ b/paddle/cinn/adt/schedule_dim.cc
@@ -188,7 +188,7 @@ List GetReduceAxis(const List& loop_sizes) {
     } else if (sched_dim.Has>()) {
       // do nothing
     } else {
-      LOG(FATAL) << "Dead code";
+      PADDLE_THROW(phi::errors::Fatal("Dead code"));
     }
   }
   return reduce_axis;
@@ -203,7 +203,7 @@ List GetInjectiveAxis(const List& loop_sizes) {
     } else if (sched_dim.Has>()) {
      injective_axis->emplace_back(i);
     } else {
-      LOG(FATAL) << "Dead code";
+      PADDLE_THROW(phi::errors::Fatal("Dead code"));
     }
   }
   return injective_axis;
diff --git a/paddle/cinn/adt/schedule_mesh.cc b/paddle/cinn/adt/schedule_mesh.cc
index 29665b918ed08f..6fe319e09e9925 100644
--- a/paddle/cinn/adt/schedule_mesh.cc
+++ b/paddle/cinn/adt/schedule_mesh.cc
@@ -370,7 +370,8 @@ std::tuple> CreateOptimizedScheduleMesh(
       return policy->Optimize(loop_sizes);
     }
   }
-  LOG(FATAL) << "Dead code, no valid schedule mesh policy found";
+  PADDLE_THROW(
+      phi::errors::Fatal("Dead code, no valid schedule mesh policy found"));
 }
 
 ScheduleMesh MeshReshape(const ScheduleMesh& sched_mesh,
diff --git a/paddle/cinn/adt/simplify_value.cc b/paddle/cinn/adt/simplify_value.cc
index 923fdf6326ce1e..07420e7e647438 100644
--- a/paddle/cinn/adt/simplify_value.cc
+++ b/paddle/cinn/adt/simplify_value.cc
@@ -67,7 +67,7 @@ struct SimplifyRedundantBroadcastedIterator {
       const auto& simplified_bd = DimExpr{symbol::SimplifyDimExpr(bd)};
       return BroadcastedIterator{inner_iterator, simplified_bd};
     }
-    LOG(FATAL) << "Dead code";
+    PADDLE_THROW(phi::errors::Fatal("Dead code"));
   }
 };
 
@@ -368,7 +368,7 @@ struct SymbolicDim_SimplifyDotUndot {
       return IndexDotValue>{
          SimplifyValue(list_get_item_values, ctx), dot_dims};
     }
-    LOG(FATAL) << "Dead code";
+    PADDLE_THROW(phi::errors::Fatal("Dead code"));
   }
 };
 
@@ -415,7 +415,7 @@ struct SymbolicDim_SimplifyDotUndot_DimExpr {
       return IndexDotValue>{
          SimplifyValue(list_get_item_values, ctx), dot_dims};
     }
-    LOG(FATAL) << "Dead code";
+    PADDLE_THROW(phi::errors::Fatal("Dead code"));
   }
 };
 
diff --git a/paddle/cinn/adt/tree.h b/paddle/cinn/adt/tree.h
index 9dfc4d66d31c4c..0e93e456720534 100644
--- a/paddle/cinn/adt/tree.h
+++ b/paddle/cinn/adt/tree.h
@@ -15,9 +15,9 @@
 #pragma once
 
 #include 
-
 #include "paddle/cinn/adt/adt.h"
 #include "paddle/cinn/adt/tags.h"
+#include "paddle/common/enforce.h"
 
 namespace cinn::adt {
 
@@ -144,7 +144,7 @@ List MergeTwoInnerTreeImpl(
         List{new_lhs, new_rhs});
     return List{ret};
   } else {
-    LOG(FATAL) << "Dead code";
+    PADDLE_THROW(phi::errors::Fatal("Dead code"));
   }
 }
 
diff --git a/paddle/cinn/auto_schedule/database/database.cc b/paddle/cinn/auto_schedule/database/database.cc
index 24d071a7df4e14..2036b44a83fef6 100644
--- a/paddle/cinn/auto_schedule/database/database.cc
+++
b/paddle/cinn/auto_schedule/database/database.cc @@ -54,7 +54,7 @@ std::unique_ptr Database::Make(const DatabaseConfig& config) { config.capacity_per_task, config.record_file_path, true); } - LOG(FATAL) << "Unimplemented database type."; + PADDLE_THROW(phi::errors::Unimplemented("Unimplemented database type.")); return nullptr; } diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/reduction_factoring.h b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/reduction_factoring.h index 90963e831075c5..15422b1803e31e 100644 --- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/reduction_factoring.h +++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/reduction_factoring.h @@ -36,7 +36,8 @@ class ReductionFactoring : public AutoGenRule { } // In the future, we will no longer use this interface. void Apply(int index) override { - LOG(FATAL) << "This is a deprecated interface, please do not use it."; + PADDLE_THROW(phi::errors::InvalidArgument( + "This is a deprecated interface, please do not use it.")); return; } diff --git a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/test_helper.cc b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/test_helper.cc index 67d4c4ae3a0f74..994027dba0ee4d 100644 --- a/paddle/cinn/auto_schedule/search_space/auto_gen_rule/test_helper.cc +++ b/paddle/cinn/auto_schedule/search_space/auto_gen_rule/test_helper.cc @@ -145,7 +145,7 @@ void MemoryCopy(const float* src, float* dst, int numel, std::string type) { dst[i] = src[i]; } } else { - LOG(FATAL) << "Unknown memory copy type"; + PADDLE_THROW(phi::errors::InvalidArgument("Unknown memory copy type")); } } diff --git a/paddle/cinn/auto_schedule/search_space/block_sampler.cc b/paddle/cinn/auto_schedule/search_space/block_sampler.cc index 26b00d3a89fb39..93de31e6a5e368 100644 --- a/paddle/cinn/auto_schedule/search_space/block_sampler.cc +++ b/paddle/cinn/auto_schedule/search_space/block_sampler.cc @@ -40,7 +40,9 @@ std::unique_ptr BlockSampler::Make( all_blocks, default_remove_policy, rand_seed, weights); } - LOG(FATAL) << "Unimplemented strategy:" << strategy; + std::stringstream ss; + ss << "Unimplemented strategy:" << strategy; + PADDLE_THROW(phi::errors::Unimplemented(ss.str())); return nullptr; } diff --git a/paddle/cinn/auto_schedule/search_space/rule_sampler.cc b/paddle/cinn/auto_schedule/search_space/rule_sampler.cc index 500ae91deb89b5..3c0868d0748e5c 100644 --- a/paddle/cinn/auto_schedule/search_space/rule_sampler.cc +++ b/paddle/cinn/auto_schedule/search_space/rule_sampler.cc @@ -35,7 +35,9 @@ std::unique_ptr RuleSampler::Make( potential_rules, default_remove_policy, rand_seed, weights); } - LOG(FATAL) << "Unimplemented strategy:" << strategy; + std::stringstream ss; + ss << "Unimplemented strategy:" << strategy; + PADDLE_THROW(phi::errors::Unimplemented(ss.str())); return nullptr; } diff --git a/paddle/cinn/auto_schedule/search_space/search_space.cc b/paddle/cinn/auto_schedule/search_space/search_space.cc index eb672a78a6521e..650e1d572f8318 100644 --- a/paddle/cinn/auto_schedule/search_space/search_space.cc +++ b/paddle/cinn/auto_schedule/search_space/search_space.cc @@ -261,7 +261,8 @@ std::vector SearchSpace::GenerateSketches( } else if (strategy == "random_prune") { sketches = InitSketchWithRandomPrunedStrategy(); } else { - LOG(FATAL) << "Unimplemented init sketch strategy"; + PADDLE_THROW( + phi::errors::Unimplemented("Unimplemented init sketch strategy")); } // the more rules are applied, the greater the possibility of good results, diff --git 
a/paddle/cinn/auto_schedule/search_strategy/mutate_rule/mutate_rule.cc b/paddle/cinn/auto_schedule/search_strategy/mutate_rule/mutate_rule.cc index 9d41301df614c7..94fedc9f021e0e 100644 --- a/paddle/cinn/auto_schedule/search_strategy/mutate_rule/mutate_rule.cc +++ b/paddle/cinn/auto_schedule/search_strategy/mutate_rule/mutate_rule.cc @@ -23,7 +23,9 @@ std::unique_ptr MutateRule::Make(const std::string& name) { if (name == "mutate_tile_size") { return std::make_unique(); } else { - LOG(FATAL) << "MutateRule " << name << " is not supported."; + std::stringstream ss; + ss << "MutateRule " << name << " is not supported."; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return nullptr; } diff --git a/paddle/cinn/auto_schedule/task_scheduler/task_scheduler.cc b/paddle/cinn/auto_schedule/task_scheduler/task_scheduler.cc index eed2ad3d669702..a8961e45b980d7 100644 --- a/paddle/cinn/auto_schedule/task_scheduler/task_scheduler.cc +++ b/paddle/cinn/auto_schedule/task_scheduler/task_scheduler.cc @@ -34,7 +34,9 @@ std::unique_ptr TaskScheduler::Make( return std::make_unique(tasks, config); } - LOG(FATAL) << "Unimplemented strategy:" << strategy; + std::stringstream ss; + ss << "Unimplemented strategy:" << strategy; + PADDLE_THROW(phi::errors::Unimplemented(ss.str())); return nullptr; } diff --git a/paddle/cinn/backends/codegen_c.cc b/paddle/cinn/backends/codegen_c.cc index ca80bcdddd0c01..84a92d65e94be0 100644 --- a/paddle/cinn/backends/codegen_c.cc +++ b/paddle/cinn/backends/codegen_c.cc @@ -76,7 +76,7 @@ std::string CodeGenC::Compile(const ir::Module &module, Compile(func); } } else { - LOG(FATAL) << "Not supported OutputKind"; + PADDLE_THROW(phi::errors::Unimplemented("Not supported OutputKind")); } return str_; } @@ -526,8 +526,9 @@ void CodeGenC::Visit(const ir::Let *op) { } void CodeGenC::Visit(const ir::Reduce *op) { - LOG(FATAL) << "Reduce IR is just for internal representation, should not be " - "used for CodeGen."; + PADDLE_THROW(phi::errors::InvalidArgument( + "Reduce IR is just for internal representation, should not be " + "used for CodeGen.")); } void CodeGenC::Visit(const ir::Ramp *op) { @@ -731,7 +732,8 @@ void CodeGenC::PrintRuntimeType(const cinn_type_t &type) { } else if (type == cinn_float64_t()) { str_ += "cinn_float64_t()"; } else { - LOG(FATAL) << "Unknown type is not supported to print"; + PADDLE_THROW( + phi::errors::InvalidArgument("Unknown type is not supported to print")); } } @@ -806,7 +808,9 @@ void CodeGenC::Visit(const ir::intrinsics::PodValueToX *op) { } else if (to_type == type_of()) { str_ += runtime::intrinsic::pod_value_to_buffer_p; } else { - LOG(FATAL) << "Not supported type: " << to_type; + std::stringstream ss; + ss << "Not supported type: " << to_type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } str_ += "("; diff --git a/paddle/cinn/backends/codegen_cuda_dev.cc b/paddle/cinn/backends/codegen_cuda_dev.cc index aa58470ef93de2..6b6597b2e208c2 100644 --- a/paddle/cinn/backends/codegen_cuda_dev.cc +++ b/paddle/cinn/backends/codegen_cuda_dev.cc @@ -292,7 +292,7 @@ std::string CodeGenCUDA_Dev::Compile(const ir::Module &module, Compile(func); } } else { - LOG(FATAL) << "Not supported OutputKind"; + PADDLE_THROW(phi::errors::InvalidArgument("Not supported OutputKind")); } if (for_nvrtc_) { @@ -372,8 +372,10 @@ void CodeGenCUDA_Dev::PrintTempBufferCreation(const ir::Buffer &buffer) { print_gpu_memory(""); } } else { - LOG(FATAL) << "CUDA device codegen not support memory " << buffer->name - << ", type " << buffer->memory_type; + std::stringstream 
ss; + ss << "CUDA device codegen not support memory " << buffer->name << ", type " + << buffer->memory_type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } diff --git a/paddle/cinn/backends/cuda_util.h b/paddle/cinn/backends/cuda_util.h index 5175ba8e819c6b..26d4110b0a10c6 100644 --- a/paddle/cinn/backends/cuda_util.h +++ b/paddle/cinn/backends/cuda_util.h @@ -26,63 +26,76 @@ #include #include "paddle/cinn/runtime/cinn_runtime.h" - -#define CUDA_DRIVER_CALL(func) \ - { \ - auto status = func; \ - if (status != CUDA_SUCCESS) { \ - const char* msg; \ - cuGetErrorString(status, &msg); \ - LOG(FATAL) << "CUDA Driver Error: " #func " failed with error: " << msg; \ - } \ +#include "paddle/common/enforce.h" + +#define CUDA_DRIVER_CALL(func) \ + { \ + auto status = func; \ + if (status != CUDA_SUCCESS) { \ + const char* msg; \ + cuGetErrorString(status, &msg); \ + std::stringstream ss; \ + ss << "CUDA Driver Error: " #func " failed with error: " << msg; \ + PADDLE_THROW(phi::errors::Fatal(ss.str())); \ + } \ } -#define CUDA_CALL(func) \ - { \ - auto status = func; \ - if (status != cudaSuccess) { \ - LOG(FATAL) << "CUDA Error : " << cudaGetErrorString(status); \ - } \ +#define CUDA_CALL(func) \ + { \ + auto status = func; \ + if (status != cudaSuccess) { \ + std::stringstream ss; \ + ss << "CUDA Error : " << cudaGetErrorString(status); \ + PADDLE_THROW(phi::errors::Fatal(ss.str())); \ + } \ } -#define CURAND_CALL(func) \ - { \ - auto status = func; \ - if (status != CURAND_STATUS_SUCCESS) { \ - LOG(FATAL) << "CURAND Error : " << status; \ - } \ +#define CURAND_CALL(func) \ + { \ + auto status = func; \ + if (status != CURAND_STATUS_SUCCESS) { \ + std::stringstream ss; \ + ss << "CURAND Error : " << status; \ + PADDLE_THROW(phi::errors::Fatal(ss.str())); \ + } \ } #define CUSOLVER_CALL(func) \ { \ auto status = func; \ if (status != CUSOLVER_STATUS_SUCCESS) { \ - LOG(FATAL) << "CUSOLVER Error: " << status; \ + std::stringstream ss; \ + ss << "CUSOLVER Error: " << status; \ + PADDLE_THROW(phi::errors::Fatal(ss.str())); \ } \ } -#define CUBLAS_CALL(func) \ - { \ - auto status = func; \ - if (status != CUBLAS_STATUS_SUCCESS) { \ - LOG(FATAL) << "CUBLAS Error!"; \ - } \ +#define CUBLAS_CALL(func) \ + { \ + auto status = func; \ + if (status != CUBLAS_STATUS_SUCCESS) { \ + PADDLE_THROW(phi::errors::Fatal("CUBLAS Error!")); \ + } \ } -#define CUDNN_CALL(func) \ - { \ - auto status = func; \ - if (status != CUDNN_STATUS_SUCCESS) { \ - LOG(FATAL) << "CUDNN Error : " << cudnnGetErrorString(status); \ - } \ +#define CUDNN_CALL(func) \ + { \ + auto status = func; \ + if (status != CUDNN_STATUS_SUCCESS) { \ + std::stringstream ss; \ + ss << "CUDNN Error : " << cudnnGetErrorString(status); \ + PADDLE_THROW(phi::errors::Fatal(ss.str())); \ + } \ } -#define NVRTC_CALL(func) \ - { \ - auto status = func; \ - if (status != NVRTC_SUCCESS) { \ - LOG(FATAL) << "NVRTC Error : " << nvrtcGetErrorString(status); \ - } \ +#define NVRTC_CALL(func) \ + { \ + auto status = func; \ + if (status != NVRTC_SUCCESS) { \ + std::stringstream ss; \ + ss << "NVRTC Error : " << nvrtcGetErrorString(status); \ + PADDLE_THROW(phi::errors::Fatal(ss.str())); \ + } \ } namespace cinn { diff --git a/paddle/cinn/backends/llvm/codegen_llvm.cc b/paddle/cinn/backends/llvm/codegen_llvm.cc index 6147940075d8ab..e24b5220919cb5 100644 --- a/paddle/cinn/backends/llvm/codegen_llvm.cc +++ b/paddle/cinn/backends/llvm/codegen_llvm.cc @@ -264,7 +264,7 @@ llvm::Value *CodeGenLLVM::Visit(const ir::FloatImm *op) { } else if 
(op->type().is_float16()) { return llvm::ConstantFP::get(b_->getHalfTy(), op->value); } else { - LOG(FATAL) << "illegal float type."; + PADDLE_THROW(phi::errors::InvalidArgument("illegal float type.")); } return nullptr; } @@ -1379,7 +1379,7 @@ void CodeGenLLVM::InitTarget(const Target &target) { } else if (target.bits == Target::Bit::k64) { naive_vec_alignment_ = 512; } else { - LOG(FATAL) << "get unknown bits"; + PADDLE_THROW(phi::errors::InvalidArgument("get unknown bits")); } break; case Target::Arch::ARM: @@ -1389,7 +1389,7 @@ void CodeGenLLVM::InitTarget(const Target &target) { naive_vec_alignment_ = 128; break; case Target::Arch::Unk: - LOG(FATAL) << "unknown Arch found"; + PADDLE_THROW(phi::errors::InvalidArgument("unknown Arch found")); break; } } @@ -1669,7 +1669,9 @@ llvm::Value *CodeGenLLVM::Visit(const ir::intrinsics::PodValueToX *op) { } else if (to_type == type_of()) { callee = m_->getFunction(runtime::intrinsic::pod_value_to_buffer_p); } else { - LOG(FATAL) << "Not supported type: " << to_type; + std::stringstream ss; + ss << "Not supported type: " << to_type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } CHECK(callee); diff --git a/paddle/cinn/backends/nvrtc/nvrtc_util.cc b/paddle/cinn/backends/nvrtc/nvrtc_util.cc index 7af601f4ead232..4a68b9a82f61df 100644 --- a/paddle/cinn/backends/nvrtc/nvrtc_util.cc +++ b/paddle/cinn/backends/nvrtc/nvrtc_util.cc @@ -75,10 +75,12 @@ std::vector Compiler::FindCUDAIncludePaths() { return {cuda_include_path}; } #endif - LOG(FATAL) << "Cannot find cuda include path." - << "CUDA_PATH is not set or CUDA is not installed in the default " - "installation path." - << "In other than linux, it is necessary to set CUDA_PATH."; + std::stringstream ss; + ss << "Cannot find cuda include path." + << "CUDA_PATH is not set or CUDA is not installed in the default " + "installation path." 
+ << "In other than linux, it is necessary to set CUDA_PATH."; + PADDLE_THROW(phi::errors::Fatal(ss.str())); return {cuda_include_path}; } diff --git a/paddle/cinn/common/cas.cc b/paddle/cinn/common/cas.cc index f2e93286a04a77..fac9e08befee9c 100644 --- a/paddle/cinn/common/cas.cc +++ b/paddle/cinn/common/cas.cc @@ -854,7 +854,7 @@ void CasSimplifyMutator::UnfoldBound(Expr* lower_bound, AddBaseAndSimplify(lower_bound, var); AddBaseAndSimplify(upper_bound, var); } else { - LOG(FATAL) << "can't get the bound"; + PADDLE_THROW(phi::errors::InvalidArgument("can't get the bound")); } } diff --git a/paddle/cinn/common/common.h b/paddle/cinn/common/common.h index 34623d904515b3..e5bb5d29cf181b 100644 --- a/paddle/cinn/common/common.h +++ b/paddle/cinn/common/common.h @@ -24,6 +24,8 @@ #include "paddle/cinn/common/shared.h" #include "paddle/cinn/common/target.h" #include "paddle/cinn/common/type.h" +#include "paddle/cinn/utils/error.h" +#include "paddle/common/enforce.h" namespace cinn { diff --git a/paddle/cinn/common/dim_expr_converter.cc b/paddle/cinn/common/dim_expr_converter.cc index c0cb71f408ddc3..a7c3eae14ccb3b 100644 --- a/paddle/cinn/common/dim_expr_converter.cc +++ b/paddle/cinn/common/dim_expr_converter.cc @@ -94,8 +94,8 @@ struct DimExprToIrExprVisitor { } ir::Expr operator()(const Broadcast& dim_expr) { - LOG(FATAL) - << "no support for converting from Broadcast to ir::Expr"; + PADDLE_THROW(phi::errors::Fatal( + "no support for converting from Broadcast to ir::Expr")); } }; diff --git a/paddle/cinn/common/float16_bfloat16_cuda_test.cu b/paddle/cinn/common/float16_bfloat16_cuda_test.cu index e8d9c7f534cc12..fd6c39cc51f8fc 100644 --- a/paddle/cinn/common/float16_bfloat16_cuda_test.cu +++ b/paddle/cinn/common/float16_bfloat16_cuda_test.cu @@ -17,19 +17,21 @@ #include #include - #include "paddle/cinn/common/bfloat16.h" #include "paddle/cinn/common/float16.h" +#include "paddle/common/enforce.h" namespace cinn { namespace common { -#define CUDA_CALL(func) \ - { \ - auto status = func; \ - if (status != cudaSuccess) { \ - LOG(FATAL) << "CUDA Error : " << cudaGetErrorString(status); \ - } \ +#define CUDA_CALL(func) \ + { \ + auto status = func; \ + if (status != cudaSuccess) { \ + std::stringstream ss; \ + ss << "CUDA Error : " << cudaGetErrorString(status); \ + PADDLE_THROW(phi::errors::Fatal(ss.str())); \ + } \ } class CudaMem { diff --git a/paddle/cinn/common/graph_utils.cc b/paddle/cinn/common/graph_utils.cc index 446c124124b9a1..b1110e8ca8aa0f 100755 --- a/paddle/cinn/common/graph_utils.cc +++ b/paddle/cinn/common/graph_utils.cc @@ -32,7 +32,7 @@ namespace { void DFSSortUtil(const GraphNode *node, std::vector *order) {} std::vector DFSSort(const std::vector &nodes) { - LOG(FATAL) << "not implemented"; + PADDLE_THROW(phi::errors::Unimplemented("Not Implemented")); return {}; } diff --git a/paddle/cinn/common/integer_set.cc b/paddle/cinn/common/integer_set.cc index 8c9998122373f4..5a1bbc6c625a9a 100644 --- a/paddle/cinn/common/integer_set.cc +++ b/paddle/cinn/common/integer_set.cc @@ -292,7 +292,7 @@ std::optional SymbolicExprAnalyzer::ProveDivisible( case cinn::ir::IrNodeTy::Minus: return ProveDivisible(lhs.As()->v(), rhs); default: - LOG(FATAL) << "Not supported yet!"; + PADDLE_THROW(phi::errors::InvalidArgument("Not supported yet!")); break; } } diff --git a/paddle/cinn/common/macros.h b/paddle/cinn/common/macros.h index dbae22549331c0..52d91c922ad6fd 100644 --- a/paddle/cinn/common/macros.h +++ b/paddle/cinn/common/macros.h @@ -23,7 +23,8 @@ void operator=(const TypeName&) = delete 
#ifndef CINN_NOT_IMPLEMENTED -#define CINN_NOT_IMPLEMENTED LOG(FATAL) << "Not Implemented"; +#define CINN_NOT_IMPLEMENTED \ + PADDLE_THROW(phi::errors::Unimplemented("Not Implemented")); #endif #define CINN_RESULT_SHOULD_USE __attribute__((warn_unused_result)) diff --git a/paddle/cinn/common/target.cc b/paddle/cinn/common/target.cc index fc01a56db481d4..c24c89c29ae1aa 100644 --- a/paddle/cinn/common/target.cc +++ b/paddle/cinn/common/target.cc @@ -24,6 +24,7 @@ #include "paddle/cinn/backends/cuda_util.h" #include "paddle/cinn/common/target.h" #include "paddle/cinn/runtime/cinn_runtime.h" +#include "paddle/common/enforce.h" namespace cinn { namespace common { @@ -51,7 +52,7 @@ int Target::runtime_arch() const { case Arch::ARM: return cinn_arm_device; default: - LOG(FATAL) << "Not supported arch"; + PADDLE_THROW(phi::errors::InvalidArgument("Not supported arch")); } return -1; } @@ -106,7 +107,7 @@ int Target::get_target_bits() const { case Bit::Unk: return 0; default: - LOG(FATAL) << "Not supported Bit"; + PADDLE_THROW(phi::errors::InvalidArgument("Not supported Bit")); } return -1; } diff --git a/paddle/cinn/common/type.cc b/paddle/cinn/common/type.cc index 67ee1b25a09e9b..41cfd9e638f904 100644 --- a/paddle/cinn/common/type.cc +++ b/paddle/cinn/common/type.cc @@ -18,7 +18,7 @@ #include #include #include - +#include "paddle/common/enforce.h" namespace cinn { namespace common { @@ -600,7 +600,9 @@ std::string Type2Str(const Type &type) { return "unk"; default: - LOG(FATAL) << "Not support type [" << type << "] ! Please Check.\n"; + std::stringstream ss; + ss << "Not support type [" << type << "] ! Please Check.\n"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return "unk"; } diff --git a/paddle/cinn/frontend/computation.cc b/paddle/cinn/frontend/computation.cc index 90c889c5996900..ee7d2ce6b3a826 100644 --- a/paddle/cinn/frontend/computation.cc +++ b/paddle/cinn/frontend/computation.cc @@ -251,9 +251,11 @@ hlir::framework::Tensor CinnComputation::GetTensor(const std::string &tname) { } auto it = context_->varmap_paddle2program.find(tname); if (it == context_->varmap_paddle2program.end()) { - LOG(FATAL) << "No variable called [" << tname - << "] found in computation\nThe existing vars: " - << utils::Join(context_->scope->var_names(), ", "); + std::stringstream ss; + ss << "No variable called [" << tname + << "] found in computation\nThe existing vars: " + << utils::Join(context_->scope->var_names(), ", "); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return context_->scope->GetTensor(it->second); } diff --git a/paddle/cinn/frontend/decomposer/batch_norm.cc b/paddle/cinn/frontend/decomposer/batch_norm.cc index b2d59053e43dee..5e40fddac7a012 100644 --- a/paddle/cinn/frontend/decomposer/batch_norm.cc +++ b/paddle/cinn/frontend/decomposer/batch_norm.cc @@ -42,7 +42,9 @@ struct BatchNormHelper { reduce_dim = {0, 1, 2}; element_count = x_shape[0] * x_shape[1] * x_shape[2]; } else { - LOG(FATAL) << data_layout << " setting is not support!"; + std::stringstream ss; + ss << data_layout << " setting is not support!"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } num_instructions = builder->size(); diff --git a/paddle/cinn/frontend/decomposer/broadcast.cc b/paddle/cinn/frontend/decomposer/broadcast.cc index ece85caccc7dad..014a29f40e42a5 100644 --- a/paddle/cinn/frontend/decomposer/broadcast.cc +++ b/paddle/cinn/frontend/decomposer/broadcast.cc @@ -129,8 +129,9 @@ void elementwise_add_grad(const Instruction& instr, auto dy = instr->outputs[1]; int axis = 
instr.GetAttrs("axis"); if (axis < 0 && dx->shape.size() < dy->shape.size()) { - LOG(FATAL) << "Please make sure x'rank greater than or equal to y'rank " - "when axis = -1"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Please make sure x'rank greater than or equal to y'rank " + "when axis = -1")); } axis = axis >= 0 ? axis : dx->shape.size() - dy->shape.size(); auto* builder = context.builder(); diff --git a/paddle/cinn/frontend/decomposer/test_helper.h b/paddle/cinn/frontend/decomposer/test_helper.h index 4a7bb9b2f80918..072ca29151147a 100644 --- a/paddle/cinn/frontend/decomposer/test_helper.h +++ b/paddle/cinn/frontend/decomposer/test_helper.h @@ -89,8 +89,8 @@ void CopyFromVector(const std::vector& vec, #ifdef CINN_WITH_CUDA cudaMemcpy(data, vec.data(), numel * sizeof(T), cudaMemcpyHostToDevice); #else - LOG(FATAL) - << "NVGPU Target only support on flag CINN_WITH_CUDA ON! Please check."; + PADDLE_THROW(phi::errors::Fatal( + "NVGPU Target only support on flag CINN_WITH_CUDA ON! Please check.")); #endif } else { std::copy(vec.begin(), vec.end(), data); diff --git a/paddle/cinn/frontend/decomposer_registry.h b/paddle/cinn/frontend/decomposer_registry.h index a94708db631d53..27cecf54501b7b 100644 --- a/paddle/cinn/frontend/decomposer_registry.h +++ b/paddle/cinn/frontend/decomposer_registry.h @@ -38,18 +38,19 @@ class DecomposerContext { // Map the new var to the original var. void MapOutToOrigin(const Variable& new_var, const Variable& ori_var) const { if (new_var->shape != ori_var->shape) { - LOG(FATAL) - << "The output shape should be equal to the original. But received : " - << new_var->id << ".shape=[" << utils::Join(new_var->shape, ", ") - << "] and the original var " << ori_var->id << ".shape=[" - << utils::Join(ori_var->shape, ", ") << "]."; + std::stringstream ss; + ss << "The output shape should be equal to the original. But received : " + << new_var->id << ".shape=[" << utils::Join(new_var->shape, ", ") + << "] and the original var " << ori_var->id << ".shape=[" + << utils::Join(ori_var->shape, ", ") << "]."; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } if (new_var->type != ori_var->type) { - LOG(FATAL) - << "The output type should be equal to the original. But received : " - << new_var->id << ".type=" << new_var->type - << " and the original var " << ori_var->id - << ".type=" << ori_var->type; + std::stringstream ss; + ss << "The output type should be equal to the original. 
But received : " + << new_var->id << ".type=" << new_var->type << " and the original var " + << ori_var->id << ".type=" << ori_var->type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } (*var_map_)[new_var->id] = ori_var; } diff --git a/paddle/cinn/frontend/interpreter.cc b/paddle/cinn/frontend/interpreter.cc index 12964fb8e79adc..ff8c4280b754f7 100644 --- a/paddle/cinn/frontend/interpreter.cc +++ b/paddle/cinn/frontend/interpreter.cc @@ -97,9 +97,11 @@ hlir::framework::Tensor Interpreter::GetTensor(const std::string& name) { auto it = impl_->var_map_paddle_to_cinn_.find(name); if (it == impl_->var_map_paddle_to_cinn_.end()) { - LOG(FATAL) << "No variable called [" << name - << "] found in executor\nThe existing vars: " - << utils::Join(impl_->scope_->var_names(), ", "); + std::stringstream ss; + ss << "No variable called [" << name + << "] found in executor\nThe existing vars: " + << utils::Join(impl_->scope_->var_names(), ", "); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return impl_->scope_->GetTensor(it->second); } diff --git a/paddle/cinn/frontend/net_builder.cc b/paddle/cinn/frontend/net_builder.cc index b9f6135bdd5b5e..0388fb6e42e0c9 100644 --- a/paddle/cinn/frontend/net_builder.cc +++ b/paddle/cinn/frontend/net_builder.cc @@ -285,8 +285,9 @@ Variable NetBuilder::FillConstant(const std::vector& shape, } else if (type.is_bool()) { value = !cinn::runtime::CheckStringFlagFalse(str_value); } else { - LOG(FATAL) << "FillConstant only support int/float/bool, but here " - << dtype; + std::stringstream ss; + ss << "FillConstant only support int/float/bool, but here " << dtype; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } auto out = CustomInstr("fill_constant", {}, @@ -676,7 +677,9 @@ std::vector UpdatePool2dKernelSize(const std::vector& x_shape, height_axis = 1; width_axis = 2; } else { - LOG(FATAL) << "Unsupport data_format: " << data_format; + std::stringstream ss; + ss << "Unsupport data_format: " << data_format; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } if (global_pooling) { new_ksize[0] = x_shape[height_axis]; @@ -709,7 +712,9 @@ std::vector UpdatePool2dPaddings(const std::vector& paddings, height_axis = 1; width_axis = 2; } else { - LOG(FATAL) << "Unsupport data_format: " << data_format; + std::stringstream ss; + ss << "Unsupport data_format: " << data_format; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } // When padding_algorithm is VALID, set paddings to [0, 0, 0, 0]. 
// When padding_algorithm is SAME, the calculation formula of padding is as diff --git a/paddle/cinn/frontend/op_mapper_registry.cc b/paddle/cinn/frontend/op_mapper_registry.cc index 883ac8104d9ae5..702888ce62bd25 100644 --- a/paddle/cinn/frontend/op_mapper_registry.cc +++ b/paddle/cinn/frontend/op_mapper_registry.cc @@ -83,7 +83,9 @@ Variable OpMapperContext::GetVar(const std::string& origin_name) const { return local_var; } - LOG(FATAL) << "No var called [" << origin_name << "] exists"; + std::stringstream ss; + ss << "No var called [" << origin_name << "] exists"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); return Variable(); } diff --git a/paddle/cinn/frontend/op_mappers/common_utils.h b/paddle/cinn/frontend/op_mappers/common_utils.h index 61e9dc2cda93f4..58202c991c4c02 100644 --- a/paddle/cinn/frontend/op_mappers/common_utils.h +++ b/paddle/cinn/frontend/op_mappers/common_utils.h @@ -62,10 +62,11 @@ inline T GetAttrOrDefault(const paddle::cpp::OpDesc& op_desc, << " here we will return a empty vector."; \ return {}; \ } else { \ - LOG(FATAL) << "Op \"" << op_desc.Type() << "\"'s attribute \"" \ - << name << "\" should be " << #ATTR_TYPE \ - << "S. But here " << static_cast(attr_type) \ - << " Please Check!"; \ + std::stringstream ss; \ + ss << "Op \"" << op_desc.Type() << "\"'s attribute \"" << name \ + << "\" should be " << #ATTR_TYPE << "S. But here " \ + << static_cast(attr_type) << " Please Check!"; \ + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); \ } \ } \ } \ @@ -94,8 +95,10 @@ inline bool GetAttrOrDefault(const paddle::cpp::OpDesc& op_desc, case AttrType::LONG: return static_cast(op_desc.GetAttr(name)); default: - LOG(FATAL) << "Op " << op_desc.Type() << "'s attribute " << name - << " should be BOOLEAN. Please Check!"; + std::stringstream ss; + ss << "Op " << op_desc.Type() << "'s attribute " << name + << " should be BOOLEAN. Please Check!"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } return default_value; @@ -114,8 +117,10 @@ inline int64_t GetAttrOrDefault(const paddle::cpp::OpDesc& op_desc, case AttrType::INT: return static_cast(op_desc.GetAttr(name)); default: - LOG(FATAL) << "Op " << op_desc.Type() << "'s attribute " << name - << " should be LONG. Please Check!"; + std::stringstream ss; + ss << "Op " << op_desc.Type() << "'s attribute " << name + << " should be LONG. Please Check!"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } return default_value; @@ -150,8 +155,10 @@ inline std::vector GetAttrOrDefault( return {}; } default: - LOG(FATAL) << "Op " << op_desc.Type() << "'s attribute " << name - << " should be LONGS. Please Check!"; + std::stringstream ss; + ss << "Op " << op_desc.Type() << "'s attribute " << name + << " should be LONGS. Please Check!"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } return default_value; diff --git a/paddle/cinn/frontend/op_mappers/paddle/concat.cc b/paddle/cinn/frontend/op_mappers/paddle/concat.cc index 6904cb85f6c6a7..d7181f3ac1a60b 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/concat.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/concat.cc @@ -63,8 +63,9 @@ void StackOpMapper(const paddle::cpp::OpDesc& op_desc, CHECK_EQ(op_desc.Output("Y").size(), 1UL); out_name = op_desc.Output("Y").front(); } else { - LOG(FATAL) << "The output argument name of [stack] should be 'Out' or 'Y', " - "but here cannot found! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "The output argument name of [stack] should be 'Out' or 'Y', " + "but here cannot found! 
Please check.")); } cinn::utils::ShapeType input_shape(ctx.GetVar(x_names.front())->shape); diff --git a/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc b/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc index 792ae1e922904d..63f9316fc99907 100644 --- a/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc +++ b/paddle/cinn/frontend/op_mappers/paddle/elementwise.cc @@ -225,8 +225,9 @@ void PowOpMapper(const paddle::cpp::OpDesc& op_desc, cinn::UniqName(x_name + "_factor"), cinn::common::Type2Str(x->type)); } else { - LOG(FATAL) << "Cannot found [FactorTensor] input or [factor] attribute in " - "paddle.pow! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "Cannot found [FactorTensor] input or [factor] attribute in " + "paddle.pow! Please check.")); } VLOG(4) << out_name << " = pow(" << x_name << ", " << y.value()->id << ")"; diff --git a/paddle/cinn/frontend/op_mappers/science/transform.cc b/paddle/cinn/frontend/op_mappers/science/transform.cc index 412ec1ddf8ce14..fa23c354061f05 100644 --- a/paddle/cinn/frontend/op_mappers/science/transform.cc +++ b/paddle/cinn/frontend/op_mappers/science/transform.cc @@ -91,11 +91,13 @@ void SplitOpMapper(const paddle::cpp::OpDesc& op_desc, } else if (sec == -1 && !has_neg) { has_neg = true; } else if (sec == 0) { - LOG(FATAL) << "The attribute 'num_or_sections' of split should not has " - "0 ! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "The attribute 'num_or_sections' of split should not has " + "0 ! Please check.")); } else { - LOG(FATAL) << "The attribute 'num_or_sections' of split can only have " - "at most one '-1' ! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "The attribute 'num_or_sections' of split can only have " + "at most one '-1' ! Please check.")); } } CHECK(!has_neg && sec_sum == x_shape[axis]) diff --git a/paddle/cinn/frontend/optimize.cc b/paddle/cinn/frontend/optimize.cc index bc3d1388cf368f..3440d3f2b6f4fa 100644 --- a/paddle/cinn/frontend/optimize.cc +++ b/paddle/cinn/frontend/optimize.cc @@ -172,8 +172,9 @@ std::shared_ptr Optimize( enable_fusion = true; } } else { - LOG(FATAL) << "Pass " << pass - << " unsupported in CINN! Please check.\n"; + std::stringstream ss; + ss << "Pass " << pass << " unsupported in CINN! 
Please check.\n"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } diff --git a/paddle/cinn/frontend/paddle/compatible_pb.cc b/paddle/cinn/frontend/paddle/compatible_pb.cc index 68ad3ae514ac52..711e78889a9b06 100644 --- a/paddle/cinn/frontend/paddle/compatible_pb.cc +++ b/paddle/cinn/frontend/paddle/compatible_pb.cc @@ -128,7 +128,9 @@ void OpAttrsAnyToCpp(const OpDescType &any_desc, cpp::OpDesc *cpp_desc) { break; } default: - LOG(FATAL) << "Unsupported attr type found " << static_cast(type); + std::stringstream ss; + ss << "Unsupported attr type found " << static_cast(type); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } }; @@ -157,7 +159,9 @@ void OpAttrsCppToAny(const cpp::OpDesc &cpp_desc, OpDescType *any_desc) { IMPL_ONE(LONG, int64_t); IMPL_ONE(LONGS, std::vector); default: - LOG(FATAL) << "Unsupported attr type found: " << static_cast(type); + std::stringstream ss; + ss << "Unsupported attr type found: " << static_cast(type); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } }; #undef IMPL_ONE diff --git a/paddle/cinn/frontend/paddle/model_parser.cc b/paddle/cinn/frontend/paddle/model_parser.cc index c54c772d803fe7..086cf11fe34b58 100644 --- a/paddle/cinn/frontend/paddle/model_parser.cc +++ b/paddle/cinn/frontend/paddle/model_parser.cc @@ -42,7 +42,9 @@ int SizeOfType(framework_proto::VarType::Type type) { DO(INT64, int64_t); #undef DO default: - LOG(FATAL) << "unknown data type " << type; + std::stringstream ss; + ss << "unknown data type " << type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return -1; } @@ -90,14 +92,17 @@ void TensorFromStream(std::istream &is, SET_TENSOR(INT64, int64_t, Int(64)); #undef SET_TENSOR default: - LOG(FATAL) << "unknown type " << desc.data_type(); + std::stringstream ss; + ss << "unknown type " << desc.data_type(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } // tensor->set_persistable(true); is.read(static_cast(buf), size); } else if (target.arch == Target::Arch::NVGPU) { #ifdef CINN_WITH_CUDA if (desc.data_type() != Type::VarType_Type_FP32) - LOG(FATAL) << "[CUDA] The type is not fp32!!"; + PADDLE_THROW( + phi::errors::InvalidArgument("[CUDA] The type is not fp32!!")); auto *data = tensor->mutable_data(target); tensor->set_type(Float(32)); std::vector temp(tensor->shape().numel()); @@ -108,7 +113,8 @@ void TensorFromStream(std::istream &is, tensor->shape().numel() * sizeof(float), cudaMemcpyHostToDevice)); #else - LOG(FATAL) << "To use CUDA backends, you need to set WITH_CUDA ON!"; + PADDLE_THROW(phi::errors::Fatal( + "To use CUDA backends, you need to set WITH_CUDA ON!")); #endif } else { CINN_NOT_IMPLEMENTED @@ -281,7 +287,7 @@ void LoadModelPb(const std::string &model_dir, target); break; default: - LOG(FATAL) << "unknown weight type"; + PADDLE_THROW(phi::errors::InvalidArgument("unknown weight type")); } } } diff --git a/paddle/cinn/frontend/paddle/pb/op_desc.h b/paddle/cinn/frontend/paddle/pb/op_desc.h index 82e1477270fa45..222bdda4da2b23 100644 --- a/paddle/cinn/frontend/paddle/pb/op_desc.h +++ b/paddle/cinn/frontend/paddle/pb/op_desc.h @@ -17,6 +17,7 @@ #include "paddle/cinn/frontend/paddle/cpp/op_desc.h" #include "paddle/cinn/frontend/paddle/framework.pb.h" +#include "paddle/common/enforce.h" namespace cinn::frontend::paddle::pb { @@ -106,7 +107,7 @@ class OpDesc : public cpp::OpDescAPI { DEF_ONE(BLOCKS); DEF_ONE(LONGS); default: - LOG(FATAL) << "Unknown attribute type"; + PADDLE_THROW(phi::errors::InvalidArgument("Unknown attribute type")); return static_cast(-1); } #undef DEF_ONE 
diff --git a/paddle/cinn/frontend/paddle/pb/var_desc.cc b/paddle/cinn/frontend/paddle/pb/var_desc.cc index efee4f211d6629..c6069daa1f67d2 100644 --- a/paddle/cinn/frontend/paddle/pb/var_desc.cc +++ b/paddle/cinn/frontend/paddle/pb/var_desc.cc @@ -15,9 +15,9 @@ #include "paddle/cinn/frontend/paddle/pb/var_desc.h" #include - #include "paddle/cinn/frontend/paddle/cpp/desc_api.h" #include "paddle/cinn/frontend/paddle/framework.pb.h" +#include "paddle/common/enforce.h" namespace cinn::frontend::paddle::pb { @@ -39,7 +39,7 @@ cpp::VarDescAPI::Type VarDesc::GetType() const { GET_TYPE_CASE_ITEM(PLACE_LIST); GET_TYPE_CASE_ITEM(READER); default: - LOG(FATAL) << "Unknown var type"; + PADDLE_THROW(phi::errors::InvalidArgument("Unknown var type")); return VarDescAPI::Type(); } #undef GET_TYPE_CASE_ITEM @@ -62,7 +62,7 @@ void VarDesc::SetType(VarDescAPI::Type type) { SET_TYPE_CASE_ITEM(PLACE_LIST); SET_TYPE_CASE_ITEM(READER); default: - LOG(FATAL) << "Unknown var type"; + PADDLE_THROW(phi::errors::InvalidArgument("Unknown var type")); } #undef SET_TYPE_CASE_ITEM } @@ -83,9 +83,11 @@ void VarDesc::SetTensorDescNum(size_t num) { return; } break; default: - LOG(FATAL) << "Setting 'sub_tensor_number' is not supported by the type " - "of var %s." - << this->Name(); + std::stringstream ss; + ss << "Setting 'sub_tensor_number' is not supported by the type " + "of var %s." + << this->Name(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } @@ -95,9 +97,11 @@ size_t VarDesc::GetTensorDescNum() const { return desc_->type().reader().lod_tensor_size(); break; default: - LOG(FATAL) << "Getting 'sub_tensor_number' is not supported by the type " - "of var %s." - << this->Name(); + std::stringstream ss; + ss << "Getting 'sub_tensor_number' is not supported by the type " + "of var %s." + << this->Name(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return 0; } @@ -151,7 +155,9 @@ void VarDesc::SetDataType(VarDescAPI::VarDataType data_type) { SET_DATA_TYPE_CASE_ITEM(FP32); SET_DATA_TYPE_CASE_ITEM(FP64); default: - LOG(FATAL) << "Unknown var type: " << static_cast(data_type); + std::stringstream ss; + ss << "Unknown var type: " << static_cast(data_type); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } #undef SET_DATA_TYPE_CASE_ITEM } @@ -200,7 +206,9 @@ cpp::VarDescAPI::VarDataType VarDesc::GetDataType() const { GET_DATA_TYPE_CASE_ITEM(FP32); GET_DATA_TYPE_CASE_ITEM(FP64); default: - LOG(FATAL) << "Unknown var type: " << static_cast(type); + std::stringstream ss; + ss << "Unknown var type: " << static_cast(type); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); return VarDescAPI::Type(); } #undef GET_DATA_TYPE_CASE_ITEM @@ -225,9 +233,10 @@ void VarDesc::SetLoDLevel(int32_t lod_level) { desc_->mutable_type()->mutable_tensor_array()->set_lod_level(lod_level); break; default: - LOG(FATAL) - << "Setting 'lod_level' is not supported by the type of var %s." - << this->Name(); + std::stringstream ss; + ss << "Setting 'lod_level' is not supported by the type of var %s." + << this->Name(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } @@ -249,9 +258,10 @@ void VarDesc::SetLoDLevels(const std::vector &multiple_lod_level) { } } break; default: - LOG(FATAL) - << "Setting 'lod_levels' is not supported by the type of var %s." - << this->Name(); + std::stringstream ss; + ss << "Setting 'lod_levels' is not supported by the type of var %s." 
+ << this->Name(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } @@ -262,9 +272,10 @@ int32_t VarDesc::GetLoDLevel() const { case framework_proto::VarType::LOD_TENSOR_ARRAY: return desc_->type().tensor_array().lod_level(); default: - LOG(FATAL) - << "Getting 'lod_level' is not supported by the type of var %s." - << this->Name(); + std::stringstream ss; + ss << "Getting 'lod_level' is not supported by the type of var %s." + << this->Name(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return 0; } @@ -280,9 +291,10 @@ std::vector VarDesc::GetLoDLevels() const { return res; break; default: - LOG(FATAL) - << "Getting 'lod_levels' is not supported by the type of var %s." - << this->Name(); + std::stringstream ss; + ss << "Getting 'lod_levels' is not supported by the type of var %s." + << this->Name(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return std::vector(); } @@ -298,9 +310,10 @@ const framework_proto::VarType::TensorDesc &VarDesc::tensor_desc() const { case framework_proto::VarType::LOD_TENSOR_ARRAY: return desc_->type().tensor_array().tensor(); default: - LOG(FATAL) - << "Getting 'tensor_desc' is not supported by the type of var %s." - << this->Name(); + std::stringstream ss; + ss << "Getting 'tensor_desc' is not supported by the type of var %s." + << this->Name(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return framework_proto::VarDesc().type().lod_tensor().tensor(); } @@ -317,10 +330,11 @@ std::vector VarDesc::tensor_descs() } return res; default: - LOG(FATAL) - << "Getting 'tensor_descs' is not supported by the type of var " - "%s." - << this->Name(); + std::stringstream ss; + ss << "Getting 'tensor_descs' is not supported by the type of var " + "%s." + << this->Name(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return std::vector(); } @@ -336,10 +350,12 @@ framework_proto::VarType::TensorDesc *VarDesc::mutable_tensor_desc() { case framework_proto::VarType::LOD_TENSOR_ARRAY: return desc_->mutable_type()->mutable_tensor_array()->mutable_tensor(); default: - LOG(FATAL) << "Getting 'mutable_tensor_desc' is not supported by the " - "type of var " - "%s." - << this->Name(); + std::stringstream ss; + ss << "Getting 'mutable_tensor_desc' is not supported by the " + "type of var " + "%s." + << this->Name(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return nullptr; } @@ -358,10 +374,11 @@ VarDesc::mutable_tensor_descs() { } return res; default: - LOG(FATAL) - << "Getting 'tensor_descs' is not supported by the type of var " - "%s." - << this->Name(); + std::stringstream ss; + ss << "Getting 'tensor_descs' is not supported by the type of var " + "%s." + << this->Name(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return std::vector(); } diff --git a/paddle/cinn/frontend/paddle_model_convertor_test.cc b/paddle/cinn/frontend/paddle_model_convertor_test.cc index 30364c05e417e7..5e69cdef80cc2d 100644 --- a/paddle/cinn/frontend/paddle_model_convertor_test.cc +++ b/paddle/cinn/frontend/paddle_model_convertor_test.cc @@ -84,7 +84,8 @@ void RunProgram(const Target& target, Program* prog) { } else if (inputs[i]->type.is_bool()) { RandomInput(target, tensor, 0, inputs[i]->shape[0]); } else { - LOG(FATAL) << "Only support float/int/bool! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "Only support float/int/bool! 
Please check.")); } } diff --git a/paddle/cinn/frontend/paddle_model_to_program.cc b/paddle/cinn/frontend/paddle_model_to_program.cc index 52c91216dd9011..7249c35f19d26c 100644 --- a/paddle/cinn/frontend/paddle_model_to_program.cc +++ b/paddle/cinn/frontend/paddle_model_to_program.cc @@ -104,7 +104,8 @@ void PaddleModelToProgram::AddOpMapper_scale() { if (op_desc.HasAttr("bias")) { // the old model format bias = op_desc.GetAttr("bias"); } else { - LOG(FATAL) << "Didn't find [bias] attr in Scale operator!!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Didn't find [bias] attr in Scale operator!!")); } absl::flat_hash_map attrs; auto out = net_builder_->Scale(x, scale, bias); @@ -243,7 +244,9 @@ void PaddleModelToProgram::AddOpMapper_fill_constant() { DO(INT32, int); #undef DO default: - LOG(FATAL) << "unknown data type " << dtype; + std::stringstream ss; + ss << "unknown data type " << dtype; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } AddVar(TransValidVarName(out_name), out); var_model_to_program_map_[out_name] = out->id; @@ -622,7 +625,9 @@ void PaddleModelToProgram::AddOp(const paddle::cpp::OpDesc& op_desc) { return; } // feed op's output is a input of the model - LOG(FATAL) << "Not supported op [" << op_desc.Type() << "] found"; + std::stringstream ss; + ss << "Not supported op [" << op_desc.Type() << "] found"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } void PaddleModelToProgram::TransposeVar(const std::string& name) { @@ -658,7 +663,8 @@ void PaddleModelToProgram::TransposeVar(const std::string& name) { cudaMemcpyHostToDevice)); #endif #else - LOG(FATAL) << "To use CUDA backends, you need to set WITH_CUDA ON!"; + PADDLE_THROW(phi::errors::Fatal( + "To use CUDA backends, you need to set WITH_CUDA ON!")); #endif } else { CINN_NOT_IMPLEMENTED @@ -674,7 +680,9 @@ void PaddleModelToProgram::TransposeVar(const std::string& name) { var->type = Float(32); AddVar(name, var, true); } else { - LOG(FATAL) << "No var called [" << name << "] exists"; + std::stringstream ss; + ss << "No var called [" << name << "] exists"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } @@ -707,13 +715,16 @@ void PaddleModelToProgram::ReverseHWVar(const std::string& name) { tensor->shape().numel() * sizeof(float), cudaMemcpyHostToDevice)); #else - LOG(FATAL) << "To use CUDA backends, you need to set WITH_CUDA ON!"; + PADDLE_THROW(phi::errors::Fatal( + "To use CUDA backends, you need to set WITH_CUDA ON!")); #endif } else { CINN_NOT_IMPLEMENTED } } else { - LOG(FATAL) << "No var called [" << name << "] exists"; + std::stringstream ss; + ss << "No var called [" << name << "] exists"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } @@ -736,7 +747,9 @@ Variable PaddleModelToProgram::GetVar(const std::string& name) { return var; } - LOG(FATAL) << "No var called [" << name << "] exists"; + std::stringstream ss; + ss << "No var called [" << name << "] exists"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); return Variable(); } diff --git a/paddle/cinn/frontend/pass/fill_constant_rewriter.cc b/paddle/cinn/frontend/pass/fill_constant_rewriter.cc index 81b331042096e8..f1a8a9db01e297 100644 --- a/paddle/cinn/frontend/pass/fill_constant_rewriter.cc +++ b/paddle/cinn/frontend/pass/fill_constant_rewriter.cc @@ -37,7 +37,8 @@ namespace pass { else if (absl::holds_alternative(OLD_VALUE)) \ NEW_VALUE = FUNC(absl::get(OLD_VALUE)); \ else \ - LOG(FATAL) << "fill_constant Only support float32/float64/int32/int64"; + PADDLE_THROW(phi::errors::InvalidArgument( \ + 
"fill_constant Only support float32/float64/int32/int64")); #define MATH_FUNC_REWRITER(op_name) \ { \ diff --git a/paddle/cinn/frontend/pass/transpose_folding_input.cc b/paddle/cinn/frontend/pass/transpose_folding_input.cc index 3c50ce3f2d6c93..1353848ff8985d 100644 --- a/paddle/cinn/frontend/pass/transpose_folding_input.cc +++ b/paddle/cinn/frontend/pass/transpose_folding_input.cc @@ -111,7 +111,8 @@ class TransposeFoldingInputPass : public TransposeFoldingBase { : false; dot->SetAttr("trans_b", static_cast(trans_b ^ true)); } else { - LOG(FATAL) << "The matmul should only have two inputs."; + PADDLE_THROW(phi::errors::InvalidArgument( + "The matmul should only have two inputs.")); } // shape has changed, the ignore op should update shape diff --git a/paddle/cinn/frontend/var_type_utils.h b/paddle/cinn/frontend/var_type_utils.h index 85a70ee4f53a99..fa539b1085f866 100644 --- a/paddle/cinn/frontend/var_type_utils.h +++ b/paddle/cinn/frontend/var_type_utils.h @@ -83,9 +83,10 @@ inline cinn::common::Type CppVarType2CommonType( // so here need convert back to unkown type. SET_TYPE_CASE_ITEM(RAW, Type) default: - LOG(FATAL) << "Unknown VarDesc type: " - << var_type_names_[static_cast(type)] << "(" - << static_cast(type) << ")"; + std::stringstream ss; + ss << "Unknown VarDesc type: " << var_type_names_[static_cast(type)] + << "(" << static_cast(type) << ")"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } #undef SET_DATA_TYPE_CASE_ITEM return cinn::common::Type(); diff --git a/paddle/cinn/hlir/dialect/operator/transforms/group_merge/convert_static_dim_to_dynamic_pass.cc b/paddle/cinn/hlir/dialect/operator/transforms/group_merge/convert_static_dim_to_dynamic_pass.cc index aa71ebb2954588..e67cb5aacabfa8 100644 --- a/paddle/cinn/hlir/dialect/operator/transforms/group_merge/convert_static_dim_to_dynamic_pass.cc +++ b/paddle/cinn/hlir/dialect/operator/transforms/group_merge/convert_static_dim_to_dynamic_pass.cc @@ -18,6 +18,7 @@ #include "paddle/cinn/hlir/dialect/operator/ir/op_dialect.h" #include "paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.h" #include "paddle/cinn/runtime/flags.h" +#include "paddle/common/enforce.h" #include "paddle/common/flags.h" #include "paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.h" #include "paddle/pir/include/core/builtin_type.h" @@ -379,7 +380,7 @@ struct StaticDimToDynamicConverter { symbol::TensorShapeOrDataDimExprs(old)}; } } - LOG(FATAL) << "Dead code"; + PADDLE_THROW(phi::errors::Fatal("Dead code")); } template diff --git a/paddle/cinn/hlir/dialect/operator/transforms/group_merge/group_with_group_merge_pass.cc b/paddle/cinn/hlir/dialect/operator/transforms/group_merge/group_with_group_merge_pass.cc index 5c3e9a9670ced6..d08219e13d862d 100644 --- a/paddle/cinn/hlir/dialect/operator/transforms/group_merge/group_with_group_merge_pass.cc +++ b/paddle/cinn/hlir/dialect/operator/transforms/group_merge/group_with_group_merge_pass.cc @@ -30,6 +30,7 @@ #include "paddle/common/flags.h" #include "paddle/cinn/common/is_reachable_predicator.h" +#include "paddle/common/enforce.h" PD_DECLARE_bool(enhance_vertical_fusion_with_recompute); @@ -1296,7 +1297,7 @@ class GeneralFusionMergePassHelper { } } if (is_ring) { - LOG(FATAL) << "Exists Ring, Please Check!"; + PADDLE_THROW(phi::errors::Fatal("Exists Ring, Please Check!")); } } } diff --git a/paddle/cinn/hlir/dialect/operator/transforms/split_generate_shape_into_shape_ops_pass.cc b/paddle/cinn/hlir/dialect/operator/transforms/split_generate_shape_into_shape_ops_pass.cc index 6a1fb2b7cb2e36..19e7f5060eb961 
100644 --- a/paddle/cinn/hlir/dialect/operator/transforms/split_generate_shape_into_shape_ops_pass.cc +++ b/paddle/cinn/hlir/dialect/operator/transforms/split_generate_shape_into_shape_ops_pass.cc @@ -19,6 +19,7 @@ #include "paddle/cinn/hlir/dialect/operator/ir/manual_op.h" #include "paddle/cinn/hlir/framework/pir/utils.h" #include "paddle/common/ddim.h" +#include "paddle/common/enforce.h" #include "paddle/fluid/pir/dialect/operator/ir/manual_op.h" #include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" #include "paddle/fluid/pir/dialect/operator/ir/op_type.h" @@ -128,14 +129,16 @@ struct CachedDimExprToValueConverter { pir::Value ConvertToValueImpl( const symbol::Negative& dim_expr) { - LOG(FATAL) << "Dead code. This logical should handled by " - "ConvertToValueImpl(symbol::Add)"; + PADDLE_THROW( + phi::errors::Fatal("Dead code. This logical should handled by " + "ConvertToValueImpl(symbol::Add)")); } pir::Value ConvertToValueImpl( const symbol::Reciprocal& dim_expr) { - LOG(FATAL) << "Dead code. This logical should handled by " - "ConvertToValueImpl(symbol::Mul)"; + PADDLE_THROW( + phi::errors::Fatal("Dead code. This logical should handled by " + "ConvertToValueImpl(symbol::Mul)")); } pir::Value ConvertToValueImpl(const symbol::Add& dim_expr) { diff --git a/paddle/cinn/hlir/framework/graph_compiler.cc b/paddle/cinn/hlir/framework/graph_compiler.cc index 4ed9ff14d217b5..1cbe88f9d98c54 100644 --- a/paddle/cinn/hlir/framework/graph_compiler.cc +++ b/paddle/cinn/hlir/framework/graph_compiler.cc @@ -422,7 +422,8 @@ std::vector GetFuncFromImpl( } else if (funcs.size() == expr_pack.size()) { funcs_after_schedule = funcs; } else { - LOG(FATAL) << "The number of funcs should not less than expr_pack's"; + PADDLE_THROW(phi::errors::InvalidArgument( + "The number of funcs should not less than expr_pack's")); } CHECK_EQ(funcs_after_schedule.size(), expr_pack.size()); std::vector res; diff --git a/paddle/cinn/hlir/framework/instruction_test.cc b/paddle/cinn/hlir/framework/instruction_test.cc index f665c628b5a0a6..e7952a4ca160c2 100644 --- a/paddle/cinn/hlir/framework/instruction_test.cc +++ b/paddle/cinn/hlir/framework/instruction_test.cc @@ -267,7 +267,7 @@ class TestInstruction : public Instruction { args_[18], stream_); } else { - LOG(FATAL) << "Unkown Conv Type!"; + PADDLE_THROW(phi::errors::InvalidArgument("Unkown Conv Type!")); } CUDA_CALL(cudaStreamSynchronize(stream_)); } diff --git a/paddle/cinn/hlir/framework/op_lowering_impl.cc b/paddle/cinn/hlir/framework/op_lowering_impl.cc index a9bb46c8a4f26e..b11ae5cdf89d4b 100644 --- a/paddle/cinn/hlir/framework/op_lowering_impl.cc +++ b/paddle/cinn/hlir/framework/op_lowering_impl.cc @@ -74,7 +74,8 @@ std::vector OpLowererImpl::Lower(const GroupPtr& group, apply_pass, &OpLowererImpl::ReduceScheduleDetermineFunction); case framework::kOutFusible: - LOG(FATAL) << "Group Pattern Kind kOutFusible Is Not Implemented!"; + PADDLE_THROW(phi::errors::Unimplemented( + "Group Pattern Kind kOutFusible Is Not Implemented!")); case framework::kNonFusible: return LowerGroup(group, apply_op_schedule, @@ -82,7 +83,8 @@ std::vector OpLowererImpl::Lower(const GroupPtr& group, apply_pass, &OpLowererImpl::NonFusibleScheduleDetermineFunction); default: - LOG(FATAL) << "Group Pattern Kind Is Unknown!"; + PADDLE_THROW( + phi::errors::InvalidArgument("Group Pattern Kind Is Unknown!")); } } diff --git a/paddle/cinn/hlir/framework/op_lowering_util.cc b/paddle/cinn/hlir/framework/op_lowering_util.cc index ed9e29d7ac8d6d..aa3935d1671975 100644 --- 
a/paddle/cinn/hlir/framework/op_lowering_util.cc +++ b/paddle/cinn/hlir/framework/op_lowering_util.cc @@ -86,7 +86,9 @@ ir::Tensor GetTensor( return lang::Placeholder(node_data->id(), shape_dict.at(node_data->id())); } else { - LOG(FATAL) << "Unsupport dtype: " << dtype; + std::stringstream ss; + ss << "Unsupport dtype: " << dtype; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } @@ -739,8 +741,8 @@ void LoopAssignReduceWithLast(ir::IRSchedule& ir_sch, // NOLINT } lane *= inshape[axes[index]]; if (index == 0 && lane <= max_num_threads) { - LOG(FATAL) - << "Error! lane is less equal than max_num_threads, Please check!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Error! lane is less equal than max_num_threads, Please check!")); } if (lane >= max_num_threads / 2) { if (lane <= max_num_threads) { @@ -1181,7 +1183,7 @@ void LoopAssignReduce( // copy loop info form rloops. copy_loop_info(nloops, rloops); } else { - LOG(FATAL) << "Error! Unkown Reduce Type!"; + PADDLE_THROW(phi::errors::InvalidArgument("Error! Unkown Reduce Type!")); } } } @@ -1398,7 +1400,8 @@ void MergeReduceToReduce( n_loops.size() - 1); } } else { - LOG(FATAL) << "not support this type fusion!"; + PADDLE_THROW( + phi::errors::InvalidArgument("not support this type fusion!")); } } } else { @@ -1502,7 +1505,8 @@ void MergeReduceToReduce( ir_sch.SimpleComputeAt(block, loops.back()); } } else { - LOG(FATAL) << "Error! Unkown Reduce Type, Please Check!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Error! Unkown Reduce Type, Please Check!")); } } } diff --git a/paddle/cinn/hlir/framework/pir/op_lowering_impl.cc b/paddle/cinn/hlir/framework/pir/op_lowering_impl.cc index 57105cbde87d98..d7f0ca6fdb7f9d 100644 --- a/paddle/cinn/hlir/framework/pir/op_lowering_impl.cc +++ b/paddle/cinn/hlir/framework/pir/op_lowering_impl.cc @@ -267,14 +267,16 @@ std::vector OpLowererImpl::Lower(const GroupPtr& group, apply_group_schedule, &OpLowererImpl::ReduceScheduleDetermineFunction); case framework::kOutFusible: - LOG(FATAL) << "Group Pattern Kind kOutFusible Is Not Implemented!"; + PADDLE_THROW(phi::errors::Unimplemented( + "Group Pattern Kind kOutFusible Is Not Implemented!")); case framework::kNonFusible: return LowerGroup(group, apply_op_schedule, apply_group_schedule, &OpLowererImpl::NonFusibleScheduleDetermineFunction); default: - LOG(FATAL) << "Group Pattern Kind Is Unknown!"; + PADDLE_THROW( + phi::errors::InvalidArgument("Group Pattern Kind Is Unknown!")); } } BucketLoweredFuncsWrapper OpLowererImpl::BucketLower(const GroupPtr& group, diff --git a/paddle/cinn/hlir/framework/pir/op_lowering_util.cc b/paddle/cinn/hlir/framework/pir/op_lowering_util.cc index d493f0a99b67d9..59f749a7999dfd 100644 --- a/paddle/cinn/hlir/framework/pir/op_lowering_util.cc +++ b/paddle/cinn/hlir/framework/pir/op_lowering_util.cc @@ -601,8 +601,8 @@ void LoopAssignReduceWithLast(ir::IRSchedule& ir_sch, // NOLINT } lane *= inshape[axes[index]]; if (index == 0 && lane <= max_num_threads) { - LOG(FATAL) - << "Error! lane is less equal than max_num_threads, Please check!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Error! 
lane is less equal than max_num_threads, Please check!")); } if (lane >= max_num_threads / 2) { if (lane <= max_num_threads) { @@ -1008,7 +1008,8 @@ void MergeReduceToReduce( n_loops.size() - 1); } } else { - LOG(FATAL) << "not support this type fusion!"; + PADDLE_THROW( + phi::errors::InvalidArgument("not support this type fusion!")); } } } else { @@ -1112,7 +1113,8 @@ void MergeReduceToReduce( ir_sch.SimpleComputeAt(block, loops.back()); } } else { - LOG(FATAL) << "Error! Unkown Reduce Type, Please Check!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Error! Unkown Reduce Type, Please Check!")); } } } @@ -1506,7 +1508,7 @@ void LoopAssignReduce( // copy loop info form rloops. copy_loop_info(nloops, rloops); } else { - LOG(FATAL) << "Error! Unkown Reduce Type!"; + PADDLE_THROW(phi::errors::InvalidArgument("Error! Unkown Reduce Type!")); } } } diff --git a/paddle/cinn/hlir/framework/pir/utils.cc b/paddle/cinn/hlir/framework/pir/utils.cc index 66e654a5369aff..8ee9350d773f17 100644 --- a/paddle/cinn/hlir/framework/pir/utils.cc +++ b/paddle/cinn/hlir/framework/pir/utils.cc @@ -23,6 +23,7 @@ #include "paddle/cinn/hlir/dialect/operator/ir/op_dialect.h" #include "paddle/cinn/hlir/framework/op.h" #include "paddle/cinn/hlir/framework/pir/op_mapper.h" +#include "paddle/common/enforce.h" #include "paddle/common/flags.h" #include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" #include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" @@ -471,15 +472,17 @@ static utils::Attribute ConvertArrayAttribute( element.dyn_cast<::pir::StrAttribute>().AsString()); } } else { - LOG(FATAL) - << "only support bool/int32/int64/float/double/string attribute in " - "ArrayAttribute"; + PADDLE_THROW(phi::errors::InvalidArgument( + "only support bool/int32/int64/float/double/string attribute in " + "ArrayAttribute")); } } } else if (src_attr.isa<::pir::shape::SymbolAttribute>()) { // do nothing for now } else { - LOG(FATAL) << "unknown Attribute: " << src_attr; + std::stringstream ss; + ss << "unknown Attribute: " << src_attr; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return dst_attr; } @@ -548,7 +551,9 @@ cinn::common::Type CompatibleInfo::ConvertIRType(::pir::Type type) { CASE_TYPE(IndexType, I32) CASE_TYPE(BoolType, UI1) - LOG(FATAL) << "unknown ir::Type " << type; + std::stringstream ss; + ss << "unknown ir::Type " << type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } #undef CASE_TYPE diff --git a/paddle/cinn/hlir/op/broadcast.cc b/paddle/cinn/hlir/op/broadcast.cc index 120113c4a159dc..28cc2da723af58 100644 --- a/paddle/cinn/hlir/op/broadcast.cc +++ b/paddle/cinn/hlir/op/broadcast.cc @@ -431,8 +431,9 @@ std::shared_ptr StrategyForBroadcastGrad( const std::vector &out_type, const std::vector> &output_shapes, const Target &target) { - LOG(FATAL) << "Gradient operator will be decomposed into several primitive " - "operators. Please Use Decomposer Program Pass."; + PADDLE_THROW(phi::errors::Fatal( + "Gradient operator will be decomposed into several primitive " + "operators. 
Please Use Decomposer Program Pass.")); } std::shared_ptr StrategyForIsClose( diff --git a/paddle/cinn/hlir/op/contrib/argmax.cc b/paddle/cinn/hlir/op/contrib/argmax.cc index 7de32179b52a0b..b3c6a647c4bc3f 100644 --- a/paddle/cinn/hlir/op/contrib/argmax.cc +++ b/paddle/cinn/hlir/op/contrib/argmax.cc @@ -106,7 +106,7 @@ std::shared_ptr StrategyForArgmax( if (attrs.attr_store.count("axis")) { axis = absl::get(attrs.attr_store.at("axis")); } else { - LOG(FATAL) << "reduce dimension is not set!"; + PADDLE_THROW(phi::errors::Fatal("reduce dimension is not set!")); } if (attrs.attr_store.count("keep_dim")) { keep_dims = absl::get(attrs.attr_store.at("keep_dim")); diff --git a/paddle/cinn/hlir/op/contrib/argmin.cc b/paddle/cinn/hlir/op/contrib/argmin.cc index 8f9d2ec9f45fd3..dff137f0d99526 100644 --- a/paddle/cinn/hlir/op/contrib/argmin.cc +++ b/paddle/cinn/hlir/op/contrib/argmin.cc @@ -105,7 +105,7 @@ std::shared_ptr StrategyForArgmin( if (attrs.attr_store.count("axis")) { axis = absl::get(attrs.attr_store.at("axis")); } else { - LOG(FATAL) << "reduce dimension is not set!"; + PADDLE_THROW(phi::errors::Fatal("reduce dimension is not set!")); } if (attrs.attr_store.count("keep_dim")) { keep_dims = absl::get(attrs.attr_store.at("keep_dim")); diff --git a/paddle/cinn/hlir/op/contrib/bitcast_convert.cc b/paddle/cinn/hlir/op/contrib/bitcast_convert.cc index dc8516b160bd24..4ddcb52f44922f 100644 --- a/paddle/cinn/hlir/op/contrib/bitcast_convert.cc +++ b/paddle/cinn/hlir/op/contrib/bitcast_convert.cc @@ -111,9 +111,10 @@ std::vector InferShapeForBitcastConvert( } else { if (output_shape.back().back() != (output_data_type.bits() / input_data_type.bits())) { - LOG(FATAL) << "The rightmost dimension of input must be equal to " - "sizeof(output_data_type)/sizeof(input_data_type) when " - "sizeof(output_data_type) > sizeof(input_data_type)"; + PADDLE_THROW(phi::errors::InvalidArgument( + "The rightmost dimension of input must be equal to " + "sizeof(output_data_type)/sizeof(input_data_type) when " + "sizeof(output_data_type) > sizeof(input_data_type)")); } output_shape.back().pop_back(); } diff --git a/paddle/cinn/hlir/op/contrib/resize.cc b/paddle/cinn/hlir/op/contrib/resize.cc index d74f4647878b00..91319ef7e5ac18 100644 --- a/paddle/cinn/hlir/op/contrib/resize.cc +++ b/paddle/cinn/hlir/op/contrib/resize.cc @@ -61,7 +61,8 @@ ir::Tensor Resize(const ir::Tensor &input, } else if (target.arch == cinn::common::Target::Arch::X86) { func_name.assign("cinn_host_resize_"); } else { - LOG(FATAL) << "Resize only supports X86 and NVGPU ! Please Check.\n"; + PADDLE_THROW(phi::errors::Fatal( + "Resize only supports X86 and NVGPU ! Please Check.\n")); } if (mode == "bilinear") { diff --git a/paddle/cinn/hlir/op/contrib/sort.cc b/paddle/cinn/hlir/op/contrib/sort.cc index 8adc618e352e61..49f50a13ab6c93 100644 --- a/paddle/cinn/hlir/op/contrib/sort.cc +++ b/paddle/cinn/hlir/op/contrib/sort.cc @@ -56,7 +56,8 @@ std::vector ArgSort(const ir::Tensor &A, } else if (target.arch == cinn::common::Target::Arch::X86) { find_func_name.assign("cinn_host_next_smallest_int32"); } else { - LOG(FATAL) << "ArgSort only supports X86 and NVGPU ! Please Check.\n"; + PADDLE_THROW(phi::errors::Fatal( + "ArgSort only supports X86 and NVGPU ! 
Please Check.\n")); } if (is_ascend) { index_func_name = diff --git a/paddle/cinn/hlir/op/custom_call.cc b/paddle/cinn/hlir/op/custom_call.cc index 91c3ee6db08986..9da4079b38fdfe 100644 --- a/paddle/cinn/hlir/op/custom_call.cc +++ b/paddle/cinn/hlir/op/custom_call.cc @@ -238,7 +238,7 @@ std::vector CustomCallArgsForCublas( << "The K dimension of mul shold be equal! Please check."; } } else { - LOG(FATAL) << "Unkown Matmul Setting!"; + PADDLE_THROW(phi::errors::InvalidArgument("Unkown Matmul Setting!")); } CHECK_EQ(a_shape.size(), 4); @@ -372,7 +372,7 @@ std::vector CustomCallArgsForBatchedCublas( << "The K dimension of mul shold be equal! Please check."; } } else { - LOG(FATAL) << "Unkown Matmul Setting!"; + PADDLE_THROW(phi::errors::InvalidArgument("Unkown Matmul Setting!")); } CHECK_EQ(a_shape.size(), 4); @@ -878,10 +878,12 @@ std::vector CustomCallArgsForMemset( void operator()(int64_t v) { *scalar_ = static_cast(v); } void operator()(bool v) { *scalar_ = v ? 0xFFFFFFFF : 0; } -#define EXPAND_MEMSET_TYPE_UNSUPPORT(TYPE) \ - void operator()(const TYPE &) { \ - LOG(FATAL) << "The type of \"value\" of memset custom_call not support: " \ - << #TYPE; \ +#define EXPAND_MEMSET_TYPE_UNSUPPORT(TYPE) \ + void operator()(const TYPE &) { \ + std::stringstream ss; \ + ss << "The type of \"value\" of memset custom_call not support: " \ + << #TYPE; \ + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); \ } EXPAND_MEMSET_TYPE_UNSUPPORT(std::string) diff --git a/paddle/cinn/hlir/op/elementwise.cc b/paddle/cinn/hlir/op/elementwise.cc index 5c9b61fb5230d8..9e6503cfbba4dc 100644 --- a/paddle/cinn/hlir/op/elementwise.cc +++ b/paddle/cinn/hlir/op/elementwise.cc @@ -337,22 +337,27 @@ Expr GetScalarExpr(const framework::NodeAttr::attr_t &attr) { void operator()(bool v) { scalar_ = Expr(v); } void operator()(const std::string &v) { scalar_ = Expr(v); } void operator()(const std::vector &) { - LOG(FATAL) << "wrong type std::vector"; + PADDLE_THROW(phi::errors::InvalidArgument("wrong type std::vector")); } void operator()(const std::vector &) { - LOG(FATAL) << "wrong type std::vector"; + PADDLE_THROW( + phi::errors::InvalidArgument("wrong type std::vector")); } void operator()(const std::vector &) { - LOG(FATAL) << "wrong type std::vector"; + PADDLE_THROW( + phi::errors::InvalidArgument("wrong type std::vector")); } void operator()(const std::vector &) { - LOG(FATAL) << "wrong type std::vector"; + PADDLE_THROW( + phi::errors::InvalidArgument("wrong type std::vector")); } void operator()(const std::vector &) { - LOG(FATAL) << "wrong type std::vector"; + PADDLE_THROW( + phi::errors::InvalidArgument("wrong type std::vector")); } void operator()(const std::vector &) { - LOG(FATAL) << "wrong type std::vector"; + PADDLE_THROW( + phi::errors::InvalidArgument("wrong type std::vector")); } }; absl::visit(Visitor{scalar}, attr); @@ -436,8 +441,9 @@ std::shared_ptr StrategyForSum( const std::vector &out_type, const std::vector> &output_shapes, const Target &target) { - LOG(FATAL) << "The operator will be decomposed into several primitive " - "operators. Please Use Decomposer Program Pass."; + PADDLE_THROW(phi::errors::Fatal( + "The operator will be decomposed into several primitive " + "operators. 
Please Use Decomposer Program Pass.")); } std::vector InferShapeForSum(const std::vector &inputs_shape, @@ -446,10 +452,11 @@ std::vector InferShapeForSum(const std::vector &inputs_shape, auto shape = inputs_shape[0]; for (size_t i = 1; i < inputs_shape.size(); ++i) { if (inputs_shape[i] != shape) { - LOG(FATAL) << "The input shapes must be the same. But received: the i-th(" - << i << ") input shape is " - << utils::Join(inputs_shape[i], ",") - << " and the first input shape is " << utils::Join(shape, ","); + std::stringstream ss; + ss << "The input shapes must be the same. But received: the i-th(" << i + << ") input shape is " << utils::Join(inputs_shape[i], ",") + << " and the first input shape is " << utils::Join(shape, ","); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } std::vector out_shape{shape}; @@ -463,9 +470,11 @@ std::vector InferDtypeForSum(const std::vector &inputs_type, auto type = inputs_type[0]; for (size_t i = 1; i < inputs_type.size(); ++i) { if (inputs_type[i] != type) { - LOG(FATAL) << "The input types must be the same. But received: the i-th(" - << i << ") input type is " << inputs_type[i] - << " and the first input type is " << type; + std::stringstream ss; + ss << "The input types must be the same. But received: the i-th(" << i + << ") input type is " << inputs_type[i] + << " and the first input type is " << type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } std::vector res{type}; @@ -656,7 +665,9 @@ std::shared_ptr StrategyForAssignValue( } EXPAND_ATTR_TYPE(EXPAND_VALUE_TO_TENSOR) else { // NOLINT - LOG(FATAL) << "Assign value not support the type " << out_type[0]; + std::stringstream ss; + ss << "Assign value not support the type " << out_type[0]; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } #undef EXPAND_VALUE_TO_TENSOR @@ -697,7 +708,8 @@ std::vector InferShapeForAssignValue( } EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_SHAPE) else { // NOLINT - LOG(FATAL) << "assign_value not support the type!"; + PADDLE_THROW( + phi::errors::InvalidArgument("assign_value not support the type!")); } #undef EXPAND_ATTR_TO_GET_SHAPE @@ -738,7 +750,8 @@ std::vector InferDtypeForAssignValue( } EXPAND_ATTR_TYPE(EXPAND_ATTR_TO_GET_DTYPE) else { // NOLINT - LOG(FATAL) << "assign_value not support the type!"; + PADDLE_THROW( + phi::errors::InvalidArgument("assign_value not support the type!")); } #undef EXPAND_ATTR_TO_GET_DTYPE } @@ -1085,9 +1098,12 @@ std::vector> InferShapeForReshape( } else if (output_shape[i] == -1 && flag_index == -1) { flag_index = i; } else if (output_shape[i] == -1) { - LOG(FATAL) << "More than one -1 in output_shape of op reshape."; + PADDLE_THROW(phi::errors::InvalidArgument( + "More than one -1 in output_shape of op reshape.")); } else { - LOG(FATAL) << "Unsupported output_shape " << output_shape[i]; + std::stringstream ss; + ss << "Unsupported output_shape " << output_shape[i]; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } if (flag_index >= 0) output_shape[flag_index] = tensor_size; diff --git a/paddle/cinn/hlir/op/nn.cc b/paddle/cinn/hlir/op/nn.cc index 60cbc1c89e2222..2b1ce342e08105 100644 --- a/paddle/cinn/hlir/op/nn.cc +++ b/paddle/cinn/hlir/op/nn.cc @@ -305,7 +305,8 @@ std::shared_ptr StrategyForConv2d( dilation[1], tensor_name); } else { - LOG(FATAL) << "Only support NCHW and NHWC data layout\n"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Only support NCHW and NHWC data layout\n")); } auto stages = CreateStages({A.as_tensor_ref(), B.as_tensor_ref()}); @@ -368,7 +369,9 @@ std::shared_ptr 
StrategyForConv2d( } else if (target.arch == Target::Arch::X86) { CINN_NOT_IMPLEMENTED } - LOG(FATAL) << "This target [" << target << "] is not supported yet."; + std::stringstream ss; + ss << "This target [" << target << "] is not supported yet."; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); }); auto strategy = std::make_shared(); @@ -713,8 +716,8 @@ std::shared_ptr StrategyForConv2dNCHWc( strategy->AddImpl( conv2d_compute, conv2d_schedule, "strategy.conv2d_NCHWc.x86", 1); } else { - LOG(FATAL) - << "conv2d_NCHWc op with dtype != float32 is not implemented yet!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "conv2d_NCHWc op with dtype != float32 is not implemented yet!")); } return strategy; } @@ -894,7 +897,8 @@ std::shared_ptr StrategyForDepthwiseConv2d( stride[1], tensor_name); } else { - LOG(FATAL) << "Only support NCHW and NHWC data layout\n"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Only support NCHW and NHWC data layout\n")); } auto stages = CreateStages({A.as_tensor_ref(), B.as_tensor_ref()}); @@ -1008,7 +1012,8 @@ std::vector InferShapeForDepthwiseConv2d( out_shape_w, inputs_shape[1][1] * inputs_shape[0][3]}}; } else { - LOG(FATAL) << "Only support NCHW and NHWC data layout\n"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Only support NCHW and NHWC data layout\n")); } return res; } @@ -1093,7 +1098,8 @@ std::shared_ptr StrategyForBatchNorm( "strategy.batchnorm.x86", 1); } else { - LOG(FATAL) << "BatchNorm op with dtype != float32 is not implemented yet!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "BatchNorm op with dtype != float32 is not implemented yet!")); } return strategy; } @@ -1303,7 +1309,9 @@ std::vector> InferShapeForPool1d( } else if (data_format == "NWC") { width_axis = 1; } else { - LOG(FATAL) << "unsupported data_format: " << data_format << std::endl; + std::stringstream ss; + ss << "unsupported data_format: " << data_format << std::endl; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } if (ceil_mode) { @@ -1406,8 +1414,8 @@ std::shared_ptr StrategyForPool2d( width_index = 3; data_format = "NCHW"; } else { - LOG(FATAL) - << "Only support 'NCHW' or 'NHWC' or 'AnyLayout' data_format.\n"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Only support 'NCHW' or 'NHWC' or 'AnyLayout' data_format.\n")); } kernel_size = {A_tensor->shape[height_index].as_int32(), A_tensor->shape[width_index].as_int32()}; @@ -2206,7 +2214,8 @@ std::vector InferShapeForBatchNormTrain( if (attrs.find("data_layout") != attrs.end()) { data_layout = absl::get(attrs.at("data_layout")); } else { - LOG(FATAL) << "data_layout is not found, please check!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "data_layout is not found, please check!")); } CHECK_EQ(inputs_shape[0].size(), 4) << "x dimension size is not required!"; @@ -2237,7 +2246,9 @@ std::vector InferShapeForBatchNormTrain( CHECK_EQ(inputs_shape[0][3], inputs_shape[4][0]) << "x and moving_variance dimension size is not equal!"; } else { - LOG(FATAL) << "data_layout " << data_layout << " is not support!"; + std::stringstream ss; + ss << "data_layout " << data_layout << " is not support!"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return {inputs_shape[0], @@ -2271,8 +2282,9 @@ std::shared_ptr StrategyForGradOp( const std::vector &out_type, const std::vector> &output_shapes, const Target &target) { - LOG(FATAL) << "Gradient operator will be decomposed into several primitive " - "operators. 
Please Use Decomposer Program Pass."; + PADDLE_THROW(phi::errors::Fatal( + "Gradient operator will be decomposed into several primitive " + "operators. Please Use Decomposer Program Pass.")); } // batch norm grad @@ -2285,7 +2297,8 @@ std::vector InferShapeForBatchNormGrad( if (attrs.find("data_layout") != attrs.end()) { data_layout = absl::get(attrs.at("data_layout")); } else { - LOG(FATAL) << "data_layout is not found, please check!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "data_layout is not found, please check!")); } CHECK_EQ(inputs_shape[0].size(), 4) << "dy dimension size is not required!"; @@ -2313,7 +2326,9 @@ std::vector InferShapeForBatchNormGrad( CHECK_EQ(inputs_shape[0][3], inputs_shape[4][0]) << "dy and moving_variance dimension size is not equal!"; } else { - LOG(FATAL) << "data_layout " << data_layout << " is not support!"; + std::stringstream ss; + ss << "data_layout " << data_layout << " is not support!"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return {inputs_shape[0], inputs_shape[2], inputs_shape[2]}; diff --git a/paddle/cinn/hlir/op/op_util.cc b/paddle/cinn/hlir/op/op_util.cc index cddbbba8cf14a6..b0976f22c38cbd 100644 --- a/paddle/cinn/hlir/op/op_util.cc +++ b/paddle/cinn/hlir/op/op_util.cc @@ -100,8 +100,9 @@ std::string GetExternFuncName(const cinn::common::Target& target, } else if (target.arch == cinn::common::Target::Arch::X86) { func_proto_name.append("host_"); } else { - LOG(FATAL) << func_name - << " only supports X86 and NVGPU! Please Check.\n"; + std::stringstream ss; + ss << func_name << " only supports X86 and NVGPU! Please Check.\n"; + PADDLE_THROW(phi::errors::Fatal(ss.str())); } } func_proto_name.append(func_name); @@ -138,8 +139,10 @@ std::string GetExternFuncName(const cinn::common::Target& target, } else if (type.is_uint(64)) { func_proto_name.append("uint64"); } else { - LOG(FATAL) << "Can not find type: " << type - << " for extern function. Please Check.\n"; + std::stringstream ss; + ss << "Can not find type: " << type + << " for extern function. 
Please Check.\n"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return func_proto_name; } diff --git a/paddle/cinn/hlir/op/op_util.h b/paddle/cinn/hlir/op/op_util.h index 5c946239c835c5..ee5ec1cad45312 100644 --- a/paddle/cinn/hlir/op/op_util.h +++ b/paddle/cinn/hlir/op/op_util.h @@ -128,7 +128,9 @@ std::vector ToPodVector(const std::vector &args) { shape_v.push_back(static_cast(e.as_double())); } } else { - LOG(FATAL) << "Not support " << type; + std::stringstream ss; + ss << "Not support " << type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return shape_v; } diff --git a/paddle/cinn/hlir/op/reduction.cc b/paddle/cinn/hlir/op/reduction.cc index a8fda43e0ceb5c..d5a378dc809e66 100644 --- a/paddle/cinn/hlir/op/reduction.cc +++ b/paddle/cinn/hlir/op/reduction.cc @@ -88,7 +88,7 @@ std::shared_ptr StrategyForReduce( CHECK_NE(reduce_axes[idx - 1], reduce_axes[idx]); } } else { - LOG(FATAL) << "reduce dimension is not set!"; + PADDLE_THROW(phi::errors::InvalidArgument("reduce dimension is not set!")); } bool keep_dim = false; @@ -270,7 +270,7 @@ std::shared_ptr StrategyForReduce( CINNValue(ir_sch.GetModule().GetExprs().at(0))}; *ret = CINNValuePack{res}; } else { - LOG(FATAL) << "Unkown Reduce Type!"; + PADDLE_THROW(phi::errors::InvalidArgument("Unkown Reduce Type!")); } } else { if (arg_pack.size() == 2) { @@ -304,7 +304,7 @@ std::shared_ptr StrategyForReduce( CINNValue(ir_sch.GetModule().GetExprs().at(0))}; *ret = CINNValuePack{res}; } else { - LOG(FATAL) << "Unkown Reduce Type!"; + PADDLE_THROW(phi::errors::InvalidArgument("Unkown Reduce Type!")); } } } else { @@ -352,7 +352,7 @@ std::shared_ptr StrategyForReduceSymbolic( CHECK_NE(reduce_axes[idx - 1], reduce_axes[idx]); } } else { - LOG(FATAL) << "reduce dimension is not set!"; + PADDLE_THROW(phi::errors::InvalidArgument("reduce dimension is not set!")); } bool keep_dim = false; diff --git a/paddle/cinn/hlir/op/transform.cc b/paddle/cinn/hlir/op/transform.cc index 729f3c84174233..21754487e7846f 100644 --- a/paddle/cinn/hlir/op/transform.cc +++ b/paddle/cinn/hlir/op/transform.cc @@ -289,9 +289,9 @@ std::vector> InferShapeForSplit( if (attrs.find("num_or_sections") != attrs.end()) { sections = absl::get>(attrs.at("num_or_sections")); } else { - LOG(FATAL) - << "The Split op doesn't find [num_or_sections] attribute! It it " - "a mandatory attribute ! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "The Split op doesn't find [num_or_sections] attribute! It it " + "a mandatory attribute ! Please check.")); } if (inputs_shape.empty()) { @@ -340,11 +340,13 @@ std::vector> InferShapeForSplit( neg_index = i; } else { if (sections[i] == 0) { - LOG(FATAL) << "The attribute 'num_or_sections' should not has 0 ! " - "Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "The attribute 'num_or_sections' should not has 0 ! " + "Please check.")); } else { - LOG(FATAL) << "The attribute 'num_or_sections' can only have at most " - "one '-1' ! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "The attribute 'num_or_sections' can only have at most " + "one '-1' ! Please check.")); } } } @@ -376,9 +378,9 @@ std::vector InferDtypeForSplit(const std::vector &inputs_type, if (attrs.find("num_or_sections") != attrs.end()) { sections = absl::get>(attrs.at("num_or_sections")); } else { - LOG(FATAL) - << "The Split op doesn't find [num_or_sections] attribute! It it " - "a mandatory attribute ! 
Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "The Split op doesn't find [num_or_sections] attribute! It it " + "a mandatory attribute ! Please check.")); } int output_size = sections.size(); @@ -402,9 +404,9 @@ std::vector> InferLayoutForSplit( sections = absl::get>(attrs.attr_store.at("num_or_sections")); } else { - LOG(FATAL) - << "The Split op doesn't find [num_or_sections] attribute! It it " - "a mandatory attribute ! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "The Split op doesn't find [num_or_sections] attribute! It it " + "a mandatory attribute ! Please check.")); } int output_size = sections.size(); @@ -926,7 +928,8 @@ std::shared_ptr StrategyForReverse( for (auto &e : axis) { if (e >= static_cast(output_shapes[0].size()) || e < -1 * static_cast(output_shapes[0].size())) { - LOG(FATAL) << "axis is not in [0, n_dim), Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "axis is not in [0, n_dim), Please check.")); } if (e < 0) { e += output_shapes[0].size(); @@ -973,7 +976,8 @@ std::vector InferShapeForReverse( for (auto &e : axis) { if (e >= static_cast(inputs_shape[0].size()) || e < -1 * static_cast(inputs_shape[0].size())) { - LOG(FATAL) << "axis is not in [-n_dim, n_dim), Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "axis is not in [-n_dim, n_dim), Please check.")); } if (e < 0) { e += inputs_shape[0].size(); @@ -993,7 +997,8 @@ std::vector> InferLayoutForReverse( for (auto &e : axis) { if (e >= static_cast(input_shapes[0].size()) || e < -1 * static_cast(input_shapes[0].size())) { - LOG(FATAL) << "axis is not in [-n_dim, n_dim), Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "axis is not in [-n_dim, n_dim), Please check.")); } } } @@ -1046,7 +1051,8 @@ std::shared_ptr StrategyForTranspose( << "output shape is not equal! Please check!\n"; } } else { - LOG(FATAL) << "axis is not be set! Please check."; + PADDLE_THROW( + phi::errors::InvalidArgument("axis is not be set! Please check.")); } framework::CINNCompute transpose_compute([=](lang::Args args, @@ -1173,7 +1179,8 @@ std::vector InferShapeForTranspose( } result.push_back(output_shape); } else { - LOG(FATAL) << "axis is not be set! Please check."; + PADDLE_THROW( + phi::errors::InvalidArgument("axis is not be set! Please check.")); } return result; } @@ -1198,7 +1205,8 @@ std::vector> InferLayoutForTranspose( } } } else { - LOG(FATAL) << "axis is not be set! Please check."; + PADDLE_THROW( + phi::errors::InvalidArgument("axis is not be set! Please check.")); } std::vector new_input_layouts = input_layouts; diff --git a/paddle/cinn/hlir/pass/alterlayout.cc b/paddle/cinn/hlir/pass/alterlayout.cc index 438a7e997d3f94..0a4b71e7964142 100644 --- a/paddle/cinn/hlir/pass/alterlayout.cc +++ b/paddle/cinn/hlir/pass/alterlayout.cc @@ -261,9 +261,10 @@ void AlterLayoutPass(Graph* graph) { } else if (input_shape.size() == 5) { ic = input_shape[1] * input_shape[4]; } else { - LOG(FATAL) - << "conv2d's input shape should be 4D/5D. Wrong input shape: " - << utils::Join(input_shape, ", "); + std::stringstream ss; + ss << "conv2d's input shape should be 4D/5D. Wrong input shape: " + << utils::Join(input_shape, ", "); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } if (weight_shape.size() == 4) { @@ -273,9 +274,10 @@ void AlterLayoutPass(Graph* graph) { oc = weight_shape[0] * weight_shape[5]; fc = weight_shape[1] * weight_shape[4]; } else { - LOG(FATAL) - << "conv2d's weight shape should be 4D/6D. 
Wrong weight shape: " - << utils::Join(weight_shape, ", "); + std::stringstream ss; + ss << "conv2d's weight shape should be 4D/6D. Wrong weight shape: " + << utils::Join(weight_shape, ", "); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } VLOG(3) << "oc: " << oc; VLOG(3) << "ic: " << ic; diff --git a/paddle/cinn/hlir/pass/fusion_merge_pass.cc b/paddle/cinn/hlir/pass/fusion_merge_pass.cc index 472cbd9a07e07e..83b8c8e50299c4 100644 --- a/paddle/cinn/hlir/pass/fusion_merge_pass.cc +++ b/paddle/cinn/hlir/pass/fusion_merge_pass.cc @@ -170,7 +170,7 @@ class FusionMergePassHelper : public FusionHelperBase { } } if (is_ring) { - LOG(FATAL) << "Exists Ring, Please Check!"; + PADDLE_THROW(phi::errors::Fatal("Exists Ring, Please Check!")); } } } diff --git a/paddle/cinn/hlir/pass/general_fusion_merge_pass.cc b/paddle/cinn/hlir/pass/general_fusion_merge_pass.cc index bf0ffd22653629..b9d553019a459c 100644 --- a/paddle/cinn/hlir/pass/general_fusion_merge_pass.cc +++ b/paddle/cinn/hlir/pass/general_fusion_merge_pass.cc @@ -212,7 +212,7 @@ class GeneralFusionMergePassHelper : public FusionHelperBase { } } if (is_ring) { - LOG(FATAL) << "Exists Ring, Please Check!"; + PADDLE_THROW(phi::errors::Fatal("Exists Ring, Please Check!")); } } } diff --git a/paddle/cinn/hlir/pass/opfusion.cc b/paddle/cinn/hlir/pass/opfusion.cc index b4e2eec247f21d..c8690c0625fbba 100644 --- a/paddle/cinn/hlir/pass/opfusion.cc +++ b/paddle/cinn/hlir/pass/opfusion.cc @@ -412,7 +412,8 @@ class GraphPartition { parent->master_node = child->master_node; if (child->pattern > framework::kBroadcast && parent->pattern > framework::kBroadcast) { - LOG(FATAL) << "can't fuse 2 groups both with complex pattern"; + PADDLE_THROW(phi::errors::InvalidArgument( + "can't fuse 2 groups both with complex pattern")); } else { parent->pattern = child->pattern > parent->pattern ? 
child->pattern : parent->pattern; diff --git a/paddle/cinn/hlir/pe/broadcast.cc b/paddle/cinn/hlir/pe/broadcast.cc index 23485461496691..fb47ed737fdf3b 100644 --- a/paddle/cinn/hlir/pe/broadcast.cc +++ b/paddle/cinn/hlir/pe/broadcast.cc @@ -146,9 +146,11 @@ void GetBroadcastShape(const std::vector& shape1, broadcast_flag1->emplace_back(true); broadcast_flag2->emplace_back(false); } else { - LOG(FATAL) << "Incompatible broadcast dims " << shape1_new[size1 - i] - << " and " << shape2_new[size2 - i] << " in: " << shape1_new - << " and " << shape2_new << std::endl; + std::stringstream ss; + ss << "Incompatible broadcast dims " << shape1_new[size1 - i] << " and " + << shape2_new[size2 - i] << " in: " << shape1_new << " and " + << shape2_new << std::endl; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } } @@ -364,8 +366,10 @@ Tensor BroadcastTo(const Tensor& A, } else if (a_shape_i == out_shape[axes[idx]]) { broadcast_indice.push_back(indice[axes[idx]]); } else { - LOG(FATAL) << "fail to broad cast input shape " << a_shape_i - << " to output shape " << out_shape[axes[idx]]; + std::stringstream ss; + ss << "fail to broad cast input shape " << a_shape_i + << " to output shape " << out_shape[axes[idx]]; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } return A(broadcast_indice); @@ -396,8 +400,10 @@ Tensor BroadcastTo(const Tensor& A, } else if (MathEqual(a_shape_i, out_shape[idx])) { broadcast_indice.push_back(indice[idx]); } else { - LOG(FATAL) << "fail to broad cast input shape " << a_shape_i - << " to output shape " << out_shape[idx]; + std::stringstream ss; + ss << "fail to broad cast input shape " << a_shape_i + << " to output shape " << out_shape[idx]; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } return A(broadcast_indice); diff --git a/paddle/cinn/hlir/pe/ir_schedule_pe.cc b/paddle/cinn/hlir/pe/ir_schedule_pe.cc index 71b52d12493e94..d224a5fd1e1ca1 100644 --- a/paddle/cinn/hlir/pe/ir_schedule_pe.cc +++ b/paddle/cinn/hlir/pe/ir_schedule_pe.cc @@ -784,7 +784,8 @@ void IRCudaScheduleBlockShuffleReduce(ir::IRSchedule &ir_sch, // NOLINT } return loop_var_count; } - LOG(FATAL) << "Can't find var in tensor indexes!"; + PADDLE_THROW( + phi::errors::InvalidArgument("Can't find var in tensor indexes!")); }; auto loop_var_count = get_loop_index(ir_sch.GetLoops(reduce_out->name).back(), ir_sch.GetBlock(reduce_out->name)); diff --git a/paddle/cinn/hlir/pe/map_expr_to_ir.cc b/paddle/cinn/hlir/pe/map_expr_to_ir.cc index 2f1e854672fd4e..e7a2de5150026c 100644 --- a/paddle/cinn/hlir/pe/map_expr_to_ir.cc +++ b/paddle/cinn/hlir/pe/map_expr_to_ir.cc @@ -158,8 +158,9 @@ class MapExprToIrTranslator { DoEach(expr); break; default: - LOG(FATAL) << "Visit node_type = " << expr.node_type() - << ", not supported!"; + std::stringstream ss; + ss << "Visit node_type = " << expr.node_type() << ", not supported!"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); break; } } @@ -220,7 +221,7 @@ class MapExprToIrTranslator { } else { return NoInlineTranslator::Call(internal_stmt); } - LOG(FATAL) << "Dead code"; + PADDLE_THROW(phi::errors::Fatal("Dead code")); } std::optional TranslateOpExprImpl( @@ -233,7 +234,8 @@ class MapExprToIrTranslator { std::vector TranslateTensorIndexImpl( const OpCall& op_call, const IterExprs4TensorT& IterExprs4Tensor) const { - LOG(FATAL) << "Dead code, no TensorIndexExpr for OpCall"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Dead code, no TensorIndexExpr for OpCall")); } std::vector TranslateTensorIndexImpl( @@ -381,7 +383,7 @@ class 
MapExprToIrTranslator { return (this->*make_store_rvalue_expr)( store_rvalue, op_expr_children, IterExprs4Tensor); } - LOG(FATAL) << "Dead code"; + PADDLE_THROW(phi::errors::Fatal("Dead code")); } std::optional TranslateOpCallImpl( @@ -685,13 +687,13 @@ class MapExprToIrTranslator { std::tuple GetForTypeAndInfoImpl(const Vectorize& loop_type, const LoopDescriptor& ld) const { - LOG(FATAL) << "Vectorize not supported yet"; + PADDLE_THROW(phi::errors::InvalidArgument("Vectorize not supported yet")); } std::tuple GetForTypeAndInfoImpl(const Unroll& loop_type, const LoopDescriptor& ld) const { - LOG(FATAL) << "Unroll not supported yet"; + PADDLE_THROW(phi::errors::InvalidArgument("Unroll not supported yet")); } std::tuple GetForTypeAndInfo( @@ -704,7 +706,7 @@ class MapExprToIrTranslator { ir::Expr Accumulate(const std::vector& ir_exprs) const { if (ir_exprs.size() == 0) { - LOG(FATAL) << "Dead code"; + PADDLE_THROW(phi::errors::Fatal("Dead code")); } else if (ir_exprs.size() == 1) { return ir_exprs.at(0); } else { @@ -714,12 +716,12 @@ class MapExprToIrTranslator { } return ret; } - LOG(FATAL) << "Dead code"; + PADDLE_THROW(phi::errors::Fatal("Dead code")); } ir::Expr Multiply(const std::vector& ir_exprs) const { if (ir_exprs.size() == 0) { - LOG(FATAL) << "Dead code"; + PADDLE_THROW(phi::errors::Fatal("Dead code")); } else if (ir_exprs.size() == 1) { return ir_exprs.at(0); } else { @@ -729,7 +731,7 @@ class MapExprToIrTranslator { } return ret; } - LOG(FATAL) << "Dead code"; + PADDLE_THROW(phi::errors::Fatal("Dead code")); } ir::Expr GetStride(const List& dims, int start) const { @@ -820,16 +822,16 @@ class MapExprToIrTranslator { } ir::Expr TranslateDimExprImpl(const ::symbol::Max& dim_expr) const { - LOG(FATAL) << "Not Supported yet"; + PADDLE_THROW(phi::errors::Unimplemented("Not supported yet")); } ir::Expr TranslateDimExprImpl(const ::symbol::Min& dim_expr) const { - LOG(FATAL) << "Not Supported yet"; + PADDLE_THROW(phi::errors::Unimplemented("Not supported yet")); } ir::Expr TranslateDimExprImpl( const ::symbol::Broadcast& dim_expr) const { - LOG(FATAL) << "Not Supported yet"; + PADDLE_THROW(phi::errors::Unimplemented("Not supported yet")); } ir::Expr TranslateDimExpr(const Value& value) const { @@ -859,7 +861,9 @@ class MapExprToIrTranslator { } else if (Match(value)) { return TranslateBI(value); } else { - LOG(FATAL) << "Not supported yet! " << ToTxtString(value); + std::stringstream ss; + ss << "Not supported yet! 
" << ToTxtString(value); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } diff --git a/paddle/cinn/hlir/pe/nn.cc b/paddle/cinn/hlir/pe/nn.cc index 9c10e1ad137c24..9e48b26ae93921 100644 --- a/paddle/cinn/hlir/pe/nn.cc +++ b/paddle/cinn/hlir/pe/nn.cc @@ -54,7 +54,9 @@ std::string Type2StrForNN(cinn::common::Type type) { } else if (type.is_float16()) { return "fp16"; } - LOG(FATAL) << "NN Not Support " << type; + std::stringstream ss; + ss << "NN Not Support " << type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); return ""; } @@ -1397,7 +1399,9 @@ std::vector Pool1d(const Tensor &tensor, } else if (data_format == "NWC") { width_axis = 1; } else { - LOG(FATAL) << "Unsupported data format: " << data_format << std::endl; + std::stringstream ss; + ss << "Unsupported data format: " << data_format << std::endl; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } CHECK_EQ(tensor->shape.size(), 3U) << "pool1d requires tensor's shape_size to be 3\n"; @@ -1459,7 +1463,7 @@ std::vector GlobalPool2d(const Tensor &tensor, UniqName(output_name)); return {ret, temp}; } else { - LOG(FATAL) << "unsupported pooling type."; + PADDLE_THROW(phi::errors::InvalidArgument("unsupported pooling type.")); } return {}; } @@ -1486,7 +1490,9 @@ std::vector Pool2d(const Tensor &tensor, height_axis = 2; width_axis = 3; } else { - LOG(FATAL) << "Unsupported data format: " << data_format << std::endl; + std::stringstream ss; + ss << "Unsupported data format: " << data_format << std::endl; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } CHECK(tensor->shape.size() == 4U || tensor->shape.size() == 5U) << "pool2d requires tensor's shape_size to be 4 or 5\n"; @@ -1524,7 +1530,9 @@ std::vector Pool3d(const Tensor &tensor, height_axis = 2; width_axis = 3; } else { - LOG(FATAL) << "Unsupported data format: " << data_format << std::endl; + std::stringstream ss; + ss << "Unsupported data format: " << data_format << std::endl; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } CHECK_EQ(tensor->shape.size(), 5U) << "pool1d requires tensor's shape_size to be 5\n"; @@ -1558,8 +1566,9 @@ Tensor DropoutInfer(const ir::Tensor &tensor, // fusion schedule. 
return Identity(tensor, output_name).front(); } else { - LOG(FATAL) << "dropout_implementation attr must be 'downgrade_in_infer' or " - "'upscale_in_train'\n"; + PADDLE_THROW(phi::errors::InvalidArgument( + "dropout_implementation attr must be 'downgrade_in_infer' or " + "'upscale_in_train'\n")); } } diff --git a/paddle/cinn/hlir/pe/reduction.cc b/paddle/cinn/hlir/pe/reduction.cc index 08e9641f9658ab..b831d1b5884729 100644 --- a/paddle/cinn/hlir/pe/reduction.cc +++ b/paddle/cinn/hlir/pe/reduction.cc @@ -90,7 +90,9 @@ std::string Type2StrForReduce(cinn::common::Type type) { } else if (type.is_bool()) { return ""; } - LOG(FATAL) << "Reduce Not Support " << type; + std::stringstream ss; + ss << "Reduce Not Support " << type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); return ""; } @@ -1118,7 +1120,9 @@ std::string CrossThreadReduceExternalFuncName(const ir::Expr& op, } else if (op.As()) { return "cinn_block_reduce_any_internal_shm"; } else { - LOG(FATAL) << "Reduce type: " << op << " Not supported yet!"; + std::stringstream ss; + ss << "Reduce type: " << op << " Not supported yet!"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return ""; } diff --git a/paddle/cinn/hlir/pe/schedule.cc b/paddle/cinn/hlir/pe/schedule.cc index aea041783114a4..3e4af70e1b1cca 100644 --- a/paddle/cinn/hlir/pe/schedule.cc +++ b/paddle/cinn/hlir/pe/schedule.cc @@ -47,8 +47,8 @@ ScheduleParam::ScheduleParam(cinn::common::Target::Arch arch) { break; } default: { - LOG(FATAL) - << "Schedule params must be initialized with target x86 or nvgpu."; + PADDLE_THROW(phi::errors::InvalidArgument( + "Schedule params must be initialized with target x86 or nvgpu.")); } } } @@ -2454,8 +2454,9 @@ void CudaScheduleConv2(poly::StageMap stages, } else if (stages[PR]->n_out_dims() == 19) { stages[PR]->Fuse({13, 14, 15, 16, 17, 18}); } else { - LOG(FATAL) << "PR number of output dims is wrong: " - << stages[PR]->n_out_dims(); + std::stringstream ss; + ss << "PR number of output dims is wrong: " << stages[PR]->n_out_dims(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } if (stages[KR]->n_out_dims() == 18) { @@ -2463,8 +2464,9 @@ void CudaScheduleConv2(poly::StageMap stages, } else if (stages[KR]->n_out_dims() == 19) { stages[KR]->Fuse({13, 14, 15, 16, 17, 18}); } else { - LOG(FATAL) << "KR number of output dims is wrong: " - << stages[KR]->n_out_dims(); + std::stringstream ss; + ss << "KR number of output dims is wrong: " << stages[KR]->n_out_dims(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } int thread_z = f_param[2]; int thread_x = x_param[2]; @@ -2768,8 +2770,11 @@ void CudaScheduleInjective(poly::Stage *stage, if (new_num_thread % 32 != 0) { new_num_thread = MaxFactorLessThan(prod_size, num_thread); } - if (new_num_thread == 1) - LOG(FATAL) << "prod_size out of range: " << prod_size; + if (new_num_thread == 1) { + std::stringstream ss; + ss << "prod_size out of range: " << prod_size; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); + } CHECK_GT(prod_size, new_num_thread); stage->Split(0, new_num_thread); diff --git a/paddle/cinn/hlir/pe/transform.cc b/paddle/cinn/hlir/pe/transform.cc index 2e78caca832060..b91a509b7a1f5f 100644 --- a/paddle/cinn/hlir/pe/transform.cc +++ b/paddle/cinn/hlir/pe/transform.cc @@ -1269,7 +1269,8 @@ ir::Tensor ScatterAssign(const ir::Tensor& input, } else if (target.arch == cinn::common::Target::Arch::X86) { extern_fun_name.assign("cinn_host_find_int"); } else { - LOG(FATAL) << "ScatterAssign only support X86 and NVGPU ! 
Please Check.\n"; + PADDLE_THROW(phi::errors::Fatal( + "ScatterAssign only support X86 and NVGPU ! Please Check.\n")); } auto pos_axis = axis; diff --git a/paddle/cinn/ir/group_schedule/st_shape_group_scheduler.cc b/paddle/cinn/ir/group_schedule/st_shape_group_scheduler.cc index bde8a7e609d547..1dc21ce8a31807 100644 --- a/paddle/cinn/ir/group_schedule/st_shape_group_scheduler.cc +++ b/paddle/cinn/ir/group_schedule/st_shape_group_scheduler.cc @@ -1017,8 +1017,9 @@ void StaticShapeGroupScheduler::AllocateStorage() { consumer_block_name)) { // TODO(BiynXu): Return error information to the front-end instead of // terminating the program. - LOG(FATAL) << "Fusion requires synchronization across blocks, but " - "currently we do not support it."; + PADDLE_THROW(phi::errors::InvalidArgument( + "Fusion requires synchronization across blocks, but " + "currently we do not support it.")); break; } else if (IsCrossThread(store_indice_value, load_indice_value, diff --git a/paddle/cinn/ir/group_schedule/tactic/arrange_storage_tactic.cc b/paddle/cinn/ir/group_schedule/tactic/arrange_storage_tactic.cc index 8484c0c62210e8..661ab9e624d94c 100644 --- a/paddle/cinn/ir/group_schedule/tactic/arrange_storage_tactic.cc +++ b/paddle/cinn/ir/group_schedule/tactic/arrange_storage_tactic.cc @@ -397,11 +397,12 @@ void ArrangeStorageTactic::Apply(ir::IRSchedule* sch, } else if (cross_type.value() == CudaAxisType::kCudaThread) { memory_type = ir::MemoryType::GPUShared; } else if (cross_type.value() == CudaAxisType::kCudaBlock) { - LOG(FATAL) << "Fusion requires synchronization across blocks, but " - "currently we do not support it."; + PADDLE_THROW(phi::errors::InvalidArgument( + "Fusion requires synchronization across blocks, but " + "currently we do not support it.")); break; } else { - LOG(FATAL) << "dead code"; + PADDLE_THROW(phi::errors::Fatal("Dead code")); } } diff --git a/paddle/cinn/ir/ir_analyzer/ir_analyzer.cc b/paddle/cinn/ir/ir_analyzer/ir_analyzer.cc index b75f12712853f2..9b2fba77e63aeb 100644 --- a/paddle/cinn/ir/ir_analyzer/ir_analyzer.cc +++ b/paddle/cinn/ir/ir_analyzer/ir_analyzer.cc @@ -34,8 +34,8 @@ #include "paddle/cinn/ir/schedule/schedule_desc.h" #include "paddle/cinn/ir/tensor.h" #include "paddle/cinn/ir/utils/ir_nodes_collector.h" -#include "paddle/cinn/utils/error.h" #include "paddle/cinn/utils/random_engine.h" +#include "paddle/common/enforce.h" namespace cinn { namespace ir { @@ -74,9 +74,12 @@ std::vector GetLoops(const std::vector& exprs, const Expr& block) { FindLoopsVisitor visitor(block); auto find_loops = visitor(&it_expr); if (!find_loops.empty()) { - if (!result.empty()) - LOG(FATAL) << "Find block with name: \n" - << block_name << " appeared in more than one AST!"; + if (!result.empty()) { + std::stringstream ss; + ss << "Find block with name: \n" + << block_name << " appeared in more than one AST!"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); + } result = find_loops; } } @@ -120,8 +123,10 @@ Expr GetBlock(const std::vector& exprs, const std::string& block_name) { return result; } } - LOG(FATAL) << "Didn't find a block with name " << block_name - << " in this ModuleExpr!"; + std::stringstream ss; + ss << "Didn't find a block with name " << block_name + << " in this ModuleExpr!"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } Expr GetRootBlock(const std::vector& exprs, const Expr& expr) { @@ -139,9 +144,9 @@ Expr GetRootBlock(const std::vector& exprs, const Expr& expr) { return it_expr.As()->stmts[0]; } } - LOG(FATAL) << "Didn't find expr \n" - << expr << "in 
StScheduleImpl:\n" - << exprs[0]; + std::stringstream ss; + ss << "Didn't find expr \n" << expr << "in StScheduleImpl:\n" << exprs[0]; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } DeviceAPI GetDeviceAPI(const std::vector& exprs) { @@ -208,9 +213,10 @@ Expr AddUnitLoop(const std::vector& exprs, const Expr& block) { visitor.target_->As()->body = loop; return loop; } else { - LOG(FATAL) << "Can't find block's parent!"; + PADDLE_THROW(phi::errors::InvalidArgument("Can't find block's parent!")); } - LOG(FATAL) << "Shouldn't reach code here in AddUnitLoop"; + PADDLE_THROW( + phi::errors::InvalidArgument("Shouldn't reach code here in AddUnitLoop")); return Expr{nullptr}; } diff --git a/paddle/cinn/ir/ir_base.cc b/paddle/cinn/ir/ir_base.cc index e4b1b2f95b1809..c1b0580d16562e 100644 --- a/paddle/cinn/ir/ir_base.cc +++ b/paddle/cinn/ir/ir_base.cc @@ -22,7 +22,7 @@ #include "paddle/cinn/ir/ir_visitor.h" #include "paddle/cinn/ir/module.h" #include "paddle/cinn/ir/tensor.h" -#include "paddle/cinn/utils/error.h" +#include "paddle/common/enforce.h" namespace cinn { namespace ir { @@ -51,7 +51,7 @@ std::ostream &operator<<(std::ostream &os, IrNodeTy type) { #undef __m default: - LOG(FATAL) << "unknown IrNodeTy found"; + PADDLE_THROW(phi::errors::InvalidArgument("unknown IrNodeTy found")); } return os; diff --git a/paddle/cinn/ir/ir_base.h b/paddle/cinn/ir/ir_base.h index 24a7c2271d1fd5..236e8afb67fe86 100644 --- a/paddle/cinn/ir/ir_base.h +++ b/paddle/cinn/ir/ir_base.h @@ -492,7 +492,7 @@ static std::ostream& operator<<(std::ostream& os, MemoryType t) { MEMORY_TYPE_FOR_ALL(__) default: - LOG(FATAL) << "Not supported memory type"; + PADDLE_THROW(phi::errors::InvalidArgument("Not supported memory type")); #undef __ } return os; @@ -500,7 +500,7 @@ static std::ostream& operator<<(std::ostream& os, MemoryType t) { template Expr ExprNode::Copy() const { - LOG(FATAL) << "Not Implemented"; + PADDLE_THROW(phi::errors::Unimplemented("Not Implemented")); return Expr(); } diff --git a/paddle/cinn/ir/ir_printer.cc b/paddle/cinn/ir/ir_printer.cc index 61b90ec6c78255..abd3515a8308a2 100644 --- a/paddle/cinn/ir/ir_printer.cc +++ b/paddle/cinn/ir/ir_printer.cc @@ -60,7 +60,9 @@ void IrPrinter::Visit(const IntImm *x) { str_ += "(int8_t)"; str_ += std::to_string(x->value); } else { - LOG(FATAL) << "Not support int type: " << x->type(); + std::stringstream ss; + ss << "Not support int type: " << x->type(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } void IrPrinter::Visit(const UIntImm *x) { @@ -82,7 +84,9 @@ void IrPrinter::Visit(const UIntImm *x) { str_ += "false"; } } else { - LOG(FATAL) << "Not support uint type: " << x->type(); + std::stringstream ss; + ss << "Not support uint type: " << x->type(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } void IrPrinter::Visit(const FloatImm *x) { @@ -119,7 +123,9 @@ void IrPrinter::Visit(const FloatImm *x) { ss << std::showpoint; ss << x->value; } else { - LOG(FATAL) << "Not support float type: " << x->type(); + std::stringstream ss; + ss << "Not support float type: " << x->type(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } str_ += ss.str(); } diff --git a/paddle/cinn/ir/ir_visitor.h b/paddle/cinn/ir/ir_visitor.h index 87705597a7b1b7..c5377401bbbb55 100644 --- a/paddle/cinn/ir/ir_visitor.h +++ b/paddle/cinn/ir/ir_visitor.h @@ -48,8 +48,10 @@ class IRVisitorRequireReImpl { NODETY_FORALL(__) default: - LOG(FATAL) << "not supported NodeTy, the expr->node_type() = " - << expr->node_type(); + std::stringstream ss; + ss << "not 
supported NodeTy, the expr->node_type() = " + << expr->node_type(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); #undef __ } return RetTy(); diff --git a/paddle/cinn/ir/layout.cc b/paddle/cinn/ir/layout.cc index f4e4585aa2145a..ba0f07d520916c 100644 --- a/paddle/cinn/ir/layout.cc +++ b/paddle/cinn/ir/layout.cc @@ -59,7 +59,9 @@ Layout::Layout(const std::string& name) { axes.push_back(ir::Var(factor, std::string(1, c))); factor = 0; } else { - LOG(FATAL) << "Invalid layout: " << name; + std::stringstream ss; + ss << "Invalid layout: " << name; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } name_ = name; diff --git a/paddle/cinn/ir/op/ir_operators.cc b/paddle/cinn/ir/op/ir_operators.cc index fcb0e19a6bb95a..d11a26685851f1 100644 --- a/paddle/cinn/ir/op/ir_operators.cc +++ b/paddle/cinn/ir/op/ir_operators.cc @@ -88,7 +88,9 @@ Expr operator|(Expr a, Expr b) { auto func_name = hlir::GetExternFuncName(target, t_a, "bitwise_or"); return lang::CallExtern(func_name, {a, b}, {{"vectorizable", false}}); } else { - LOG(FATAL) << "Unsupport arch: " << target.arch_str() << " for bitwise_or."; + std::stringstream ss; + ss << "Unsupport arch: " << target.arch_str() << " for bitwise_or."; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } @@ -111,8 +113,9 @@ Expr operator&(Expr a, Expr b) { auto func_name = hlir::GetExternFuncName(target, t_a, "bitwise_and"); return lang::CallExtern(func_name, {a, b}, {{"vectorizable", false}}); } else { - LOG(FATAL) << "Unsupport arch: " << target.arch_str() - << " for bitwise_and."; + std::stringstream ss; + ss << "Unsupport arch: " << target.arch_str() << " for bitwise_and."; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } @@ -135,8 +138,9 @@ Expr operator^(Expr a, Expr b) { auto func_name = hlir::GetExternFuncName(target, t_a, "bitwise_xor"); return lang::CallExtern(func_name, {a, b}, {{"vectorizable", false}}); } else { - LOG(FATAL) << "Unsupport arch: " << target.arch_str() - << " for bitwise_xor."; + std::stringstream ss; + ss << "Unsupport arch: " << target.arch_str() << " for bitwise_xor."; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } @@ -149,8 +153,9 @@ Expr operator~(Expr a) { auto func_name = hlir::GetExternFuncName(target, a->type(), "bitwise_not"); return lang::CallExtern(func_name, {a}, {{"vectorizable", false}}); } else { - LOG(FATAL) << "Unsupport arch: " << target.arch_str() - << " for bitwise_not."; + std::stringstream ss; + ss << "Unsupport arch: " << target.arch_str() << " for bitwise_not."; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } diff --git a/paddle/cinn/ir/schedule/impl/base.cc b/paddle/cinn/ir/schedule/impl/base.cc index 1640ee2b9c8490..24583a67374e7f 100644 --- a/paddle/cinn/ir/schedule/impl/base.cc +++ b/paddle/cinn/ir/schedule/impl/base.cc @@ -26,10 +26,11 @@ * @param err_msg_level A ScheduleErrorMessageLevel enum, level of error message * printing */ -#define CINN_IR_SCHEDULE_END(err_msg_level) \ - } \ - catch (const utils::ErrorHandler& err_handler) { \ - CINN_THROW(err_handler.FormatErrorMessage(err_msg_level)); \ +#define CINN_IR_SCHEDULE_END(err_msg_level) \ + } \ + catch (const utils::ErrorHandler& err_handler) { \ + PADDLE_THROW(phi::errors::InvalidArgument( \ + err_handler.FormatErrorMessage(err_msg_level))); \ } namespace cinn { @@ -662,11 +663,13 @@ void StScheduleImpl::CopyTransformAndLoopInfo(const Expr& block, } } - if (new_iter_values.empty()) - LOG(FATAL) << "Cannot CopyTransformAndLoopInfo since shape[0] of source " - "and target is not equal! 
" - << vars[0]->upper_bound << " v.s " - << vars_target[0]->upper_bound; + if (new_iter_values.empty()) { + std::stringstream ss; + ss << "Cannot CopyTransformAndLoopInfo since shape[0] of source " + "and target is not equal! " + << vars[0]->upper_bound << " v.s " << vars_target[0]->upper_bound; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); + } int changed_loop_num = new_iter_values.size(); std::set used_target_loop_vars; diff --git a/paddle/cinn/ir/schedule/ir_schedule.cc b/paddle/cinn/ir/schedule/ir_schedule.cc index 93a2f0344a1146..6143de1f7b4334 100644 --- a/paddle/cinn/ir/schedule/ir_schedule.cc +++ b/paddle/cinn/ir/schedule/ir_schedule.cc @@ -85,10 +85,11 @@ std::unique_ptr ScheduleBase::Make(ModuleExpr&& module_expr, * @param err_msg_level A ScheduleErrorMessageLevel enum, level of error message * printing */ -#define CINN_IR_SCHEDULE_END(err_msg_level) \ - } \ - catch (const utils::ErrorHandler& err_handler) { \ - CINN_THROW(err_handler.FormatErrorMessage(err_msg_level)); \ +#define CINN_IR_SCHEDULE_END(err_msg_level) \ + } \ + catch (const utils::ErrorHandler& err_handler) { \ + PADDLE_THROW( \ + phi::errors::Fatal(err_handler.FormatErrorMessage(err_msg_level))); \ } void BaseInliner::operator()(Expr* expr) { @@ -663,7 +664,9 @@ void IRSchedule::Annotate(const Expr& block, TRACE_ANNOTATE_ITEM(std::string, AnnotateStringAttr) #undef TRACE_ANNOTATE_ITEM - LOG(FATAL) << "Value of attribute:" << key << " input unsupported data type"; + std::stringstream ss; + ss << "Value of attribute:" << key << " input unsupported data type"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } void IRSchedule::Unannotate(Expr& block, const std::string& key) { diff --git a/paddle/cinn/ir/schedule/ir_schedule_util.cc b/paddle/cinn/ir/schedule/ir_schedule_util.cc index 62f036d3583d96..4b826ce7b125ab 100644 --- a/paddle/cinn/ir/schedule/ir_schedule_util.cc +++ b/paddle/cinn/ir/schedule/ir_schedule_util.cc @@ -113,7 +113,8 @@ void SetCudaAxisInfo(Expr* lowered_func) { info.set_grid_dim(bind_info.offset, range); } } else { - LOG(FATAL) << "The for loop's bind info should be gpu block or thread!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "The for loop's bind info should be gpu block or thread!")); } } return (x->As() && x->As()->bind_info().valid()); @@ -338,10 +339,11 @@ std::vector GetLoopsOfExpr(const Expr& expr, const Expr& root) { root, [&](const Expr* x) { return x->As() && Contains(*x, expr); }); std::vector result(loop_nodes.begin(), loop_nodes.end()); - if (result.empty()) - LOG(FATAL) << "Didn't find expr's : \n" - << expr << "\n loops in root : \n" - << root; + if (result.empty()) { + std::stringstream ss; + ss << "Didn't find expr's : \n" << expr << "\n loops in root : \n" << root; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); + } std::sort(result.begin(), result.end(), [&](Expr i, Expr j) { return (utils::GetStreamCnt(i).size() > utils::GetStreamCnt(j).size()); }); @@ -589,8 +591,8 @@ const std::set CollectLoopsToSet( CHECK(i.As()) << "loops should be For node! Please check."; auto inserted = for_loops.insert(i); if (!inserted.second) { - LOG(FATAL) - << "There should be no duplicate elements in loops! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "There should be no duplicate elements in loops! Please check.")); } } return for_loops; @@ -616,8 +618,9 @@ std::pair GetBoundaryOfReorderRange( // Then loop_i should be the new top if (visited.count(v_for)) { if (v_for != top) { - LOG(FATAL) << "Loops in GetBoundaryOfReorderRange is not a chain! 
" - "Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "Loops in GetBoundaryOfReorderRange is not a chain! " + "Please check.")); } top = loop_i; break; @@ -646,8 +649,8 @@ std::vector GetLoopsInRange(const Expr& top, const Expr& bottom) { for (auto loop_iter = top; loop_iter != bottom;) { Expr tmp = GetNextForLoop(loop_iter); if (!tmp.defined()) - LOG(FATAL) - << "Loops in GetLoopsInReorderRange is not a chain! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "Loops in GetLoopsInReorderRange is not a chain! Please check.")); chain.push_back(loop_iter); loop_iter = tmp; } diff --git a/paddle/cinn/ir/schedule/schedule_desc.cc b/paddle/cinn/ir/schedule/schedule_desc.cc index 74b9693c80b7ed..fbf2a268054e13 100644 --- a/paddle/cinn/ir/schedule/schedule_desc.cc +++ b/paddle/cinn/ir/schedule/schedule_desc.cc @@ -117,9 +117,11 @@ class PackedStepContext { try { return absl::get(attrs_.at(idx)); } catch (absl::bad_variant_access& ex) { - LOG(FATAL) << "Attribute cast error, idx:" << idx - << ", get type:" << typeid(AttrType).name() - << ", real index:" << attrs_.at(idx).index(); + std::stringstream ss; + ss << "Attribute cast error, idx:" << idx + << ", get type:" << typeid(AttrType).name() + << ", real index:" << attrs_.at(idx).index(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); throw ex; } } @@ -601,7 +603,9 @@ void AttrVariantToProto(const utils::Attribute& attr, SET_DESC_REPEATED_ITEM(10, std::vector, LONGS, longs); SET_DESC_REPEATED_ITEM(11, std::vector, DOUBLES, doubles); default: - LOG(FATAL) << "Invalid index:" << attr.index(); + std::stringstream ss; + ss << "Invalid index:" << attr.index(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } #undef SET_DESC_SINGLE_ITEM @@ -635,7 +639,9 @@ utils::Attribute AttrProtoToVariant(const proto::ScheduleDesc_Attr& attr) { PARSE_DESC_REPEATED_ITEM(LONGS, longs, std::vector); PARSE_DESC_REPEATED_ITEM(DOUBLES, doubles, std::vector); default: - LOG(FATAL) << "Invalid type:" << attr.DebugString(); + std::stringstream ss; + ss << "Invalid type:" << attr.DebugString(); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } #undef PARSE_DESC_SINGLE_ITEM diff --git a/paddle/cinn/ir/tensor.cc b/paddle/cinn/ir/tensor.cc index dc19d4661fbe45..6c5ba14efe6801 100644 --- a/paddle/cinn/ir/tensor.cc +++ b/paddle/cinn/ir/tensor.cc @@ -508,7 +508,9 @@ void _Tensor_::WithBuffer(const std::string &memory_type, } else if (memory_type == "global") { this->buffer->memory_type = MemoryType::Heap; } else { - LOG(FATAL) << "Not supported memory type " << memory_type; + std::stringstream ss; + ss << "Not supported memory type " << memory_type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } else { lang::Buffer buf(buf_type, buffer_name); @@ -522,7 +524,9 @@ void _Tensor_::WithBuffer(const std::string &memory_type, } else if (memory_type == "global") { buf->memory_type = MemoryType::Heap; } else { - LOG(FATAL) << "Not supported memory type " << memory_type; + std::stringstream ss; + ss << "Not supported memory type " << memory_type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } } diff --git a/paddle/cinn/ir/utils/ir_nodes_collector.cc b/paddle/cinn/ir/utils/ir_nodes_collector.cc index e4ebaca653bae9..fc36e87cbfc316 100644 --- a/paddle/cinn/ir/utils/ir_nodes_collector.cc +++ b/paddle/cinn/ir/utils/ir_nodes_collector.cc @@ -59,7 +59,7 @@ struct IrNodesCollector : public IRVisitorRequireReImpl { NODETY_FORALL(__) default: - LOG(FATAL) << "not supported NodeTy"; + 
PADDLE_THROW(phi::errors::InvalidArgument("not supported NodeTy")); #undef __ } } diff --git a/paddle/cinn/lang/builtin.cc b/paddle/cinn/lang/builtin.cc index b50a49096847b5..00197a2270a84f 100644 --- a/paddle/cinn/lang/builtin.cc +++ b/paddle/cinn/lang/builtin.cc @@ -219,7 +219,9 @@ Expr Abs(Expr e) { } return ir::Select::Make(e > Zero(e->type()), e, -e); } else { - LOG(FATAL) << "Abs Not support data type " << type; + std::stringstream ss; + ss << "Abs Not support data type " << type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return e; } @@ -235,7 +237,9 @@ Expr IsNan(Expr e) { } return CallExtern("isnan", {e}, {{"vectorizable", false}}); } else { - LOG(FATAL) << type << "is not supported for isnan op."; + std::stringstream ss; + ss << type << "is not supported for isnan op."; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); return e; } } @@ -251,7 +255,9 @@ Expr Infinity(const Type& type) { return make_const(type, std::numeric_limits::infinity()); } } - LOG(FATAL) << "Cannot decide infinity for type " << type; + std::stringstream ss; + ss << "Cannot decide infinity for type " << type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); return Expr(); } @@ -266,7 +272,9 @@ Expr IsInf(Expr e) { } return CallExtern("isinf", {e}, {{"vectorizable", false}}); } else { - LOG(FATAL) << type << "is not supported for isinf op."; + std::stringstream ss; + ss << type << "is not supported for isinf op."; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); return e; } } diff --git a/paddle/cinn/lang/lower_impl.cc b/paddle/cinn/lang/lower_impl.cc index 1b085c03e2240e..fecc10b7d3b0f0 100644 --- a/paddle/cinn/lang/lower_impl.cc +++ b/paddle/cinn/lang/lower_impl.cc @@ -586,7 +586,7 @@ std::vector LowerImpl::operator()() { for (auto& i : tensor_args_) { LOG(INFO) << i->name; } - LOG(FATAL) << "Fatal Error!"; + PADDLE_THROW(phi::errors::InvalidArgument("Fatal Error!")); } Reference(&arg)->buffer = tensor_map.at(arg->name)->buffer; } diff --git a/paddle/cinn/lang/lower_tensor_group.cc b/paddle/cinn/lang/lower_tensor_group.cc index 93453621e18393..c6b3ba5173565e 100644 --- a/paddle/cinn/lang/lower_tensor_group.cc +++ b/paddle/cinn/lang/lower_tensor_group.cc @@ -81,7 +81,7 @@ std::vector LowerTensorGroup::operator()() { for (auto& i : tensor_args_) { LOG(INFO) << i->name; } - LOG(FATAL) << "Fatal Error!"; + PADDLE_THROW(phi::errors::InvalidArgument("Fatal Error!")); } Reference(&arg)->buffer = tensor_map.at(arg->name)->buffer; } diff --git a/paddle/cinn/optim/eliminate_common_factor_of_local_index.cc b/paddle/cinn/optim/eliminate_common_factor_of_local_index.cc index 400bfb69b8208d..020c32b60845db 100644 --- a/paddle/cinn/optim/eliminate_common_factor_of_local_index.cc +++ b/paddle/cinn/optim/eliminate_common_factor_of_local_index.cc @@ -156,7 +156,7 @@ int ExtractNumberFromExpr(const ir::Expr& expr) { VLOG(6) << "Not supported for calculating gcd, expr = " << expr; return 1; } - LOG(FATAL) << "Dead code"; + PADDLE_THROW(phi::errors::Fatal("Dead code")); } int gcd(int a, int b) { diff --git a/paddle/cinn/optim/transform_gpu_forloop.cc b/paddle/cinn/optim/transform_gpu_forloop.cc index baf1f82c9bf8ce..4f8aa7b0e30b0c 100644 --- a/paddle/cinn/optim/transform_gpu_forloop.cc +++ b/paddle/cinn/optim/transform_gpu_forloop.cc @@ -261,7 +261,7 @@ class ReplaceLoopVarToGpu : public ir::IRMutator<> { ir::IRMutator<>::Visit(&for_ir->body, &for_ir->body); } void Visit(const ir::PolyFor *op, Expr *expr) override { - LOG(FATAL) << "Unkown PolyFor!"; + 
PADDLE_THROW(phi::errors::InvalidArgument("Unkown PolyFor!")); } }; diff --git a/paddle/cinn/optim/transform_polyfor_to_for.cc b/paddle/cinn/optim/transform_polyfor_to_for.cc index ff29bb0058801f..b9a4dfad69a230 100644 --- a/paddle/cinn/optim/transform_polyfor_to_for.cc +++ b/paddle/cinn/optim/transform_polyfor_to_for.cc @@ -109,7 +109,7 @@ struct PolyForWithSimpleConditionToForMutator : public ir::IRMutator { auto sub = lt->a().As(); node->condition = ir::LT::Make(sub->a(), sub->b()); } else { - LOG(FATAL) << "Unkown Type!"; + PADDLE_THROW(phi::errors::InvalidArgument("Unkown Type!")); } lt_n = node->condition.As(); diff --git a/paddle/cinn/poly/ast_gen.cc b/paddle/cinn/poly/ast_gen.cc index f71ec5fed9ed66..dad3f25fe1b4eb 100644 --- a/paddle/cinn/poly/ast_gen.cc +++ b/paddle/cinn/poly/ast_gen.cc @@ -359,8 +359,9 @@ void IslAstNodeToCinnExpr(const isl::ast_node& node, ir::Expr* expr) { // EatMark(node, expr); } break; default: - LOG(FATAL) << "Unexpected ISL node type " - << isl_ast_node_get_type(node.get()); + std::stringstream ss; + ss << "Unexpected ISL node type " << isl_ast_node_get_type(node.get()); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); break; } } @@ -566,7 +567,9 @@ void IslAstExprToCinnExpr(const isl::ast_expr& node, ir::Expr* expr) { *expr = ir::Select::Make(ops[0], ops[1], ops[2]); break; default: - LOG(FATAL) << "unsupported op " << op_type; + std::stringstream ss; + ss << "unsupported op " << op_type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } break; default: diff --git a/paddle/cinn/poly/poly_scheduler.cc b/paddle/cinn/poly/poly_scheduler.cc index 539be8221d8df5..7cfc7851a145a5 100644 --- a/paddle/cinn/poly/poly_scheduler.cc +++ b/paddle/cinn/poly/poly_scheduler.cc @@ -266,8 +266,9 @@ std::vector NaivePartitionGraph(cinn::common::Graph* graph) { auto* node0 = node; if (name2node.count(compute_at.stage->id()) == 0) { continue; - LOG(FATAL) << "Didn't find node with name " << compute_at.stage->id() - << " !"; + std::stringstream ss; + ss << "Didn't find node with name " << compute_at.stage->id() << " !"; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } auto* node1 = name2node[compute_at.stage->id()]; VLOG(3) << "a -> b: " << node0->id() << " -> " << node1->id(); diff --git a/paddle/cinn/pybind/framework.cc b/paddle/cinn/pybind/framework.cc index fde1f7dd8eba00..5122a61d9fc7bf 100644 --- a/paddle/cinn/pybind/framework.cc +++ b/paddle/cinn/pybind/framework.cc @@ -131,7 +131,8 @@ void BindFramework(pybind11::module *m) { t->shape().numel() * t->type().bytes(), cudaMemcpyDeviceToHost)); #else - LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!"; + PADDLE_THROW(phi::errors::Fatal("To use CUDA backends, " + "you need to set WITH_CUDA ON!")); #endif } else { CINN_NOT_IMPLEMENTED @@ -175,7 +176,8 @@ void BindFramework(pybind11::module *m) { self->shape().numel() * self->type().bytes(), cudaMemcpyDeviceToHost)); #else - LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!"; + PADDLE_THROW(phi::errors::Fatal("To use CUDA backends, " + "you need to set WITH_CUDA ON!")); #endif } else { CINN_NOT_IMPLEMENTED @@ -210,7 +212,8 @@ void BindFramework(pybind11::module *m) { self->shape().numel() * self->type().bytes(), cudaMemcpyHostToDevice)); #else - LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!"; + PADDLE_THROW(phi::errors::Fatal("To use CUDA backends, " + "you need to set WITH_CUDA ON!")); #endif } else { CINN_NOT_IMPLEMENTED diff --git a/paddle/cinn/pybind/frontend.cc b/paddle/cinn/pybind/frontend.cc 
index 05e814ce107f80..f7eaf01a59f075 100644 --- a/paddle/cinn/pybind/frontend.cc +++ b/paddle/cinn/pybind/frontend.cc @@ -229,7 +229,8 @@ void BindFrontend(pybind11::module *m) { in_tensor->shape().numel() * dtype.bytes(), cudaMemcpyHostToDevice)); #else - LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!"; + PADDLE_THROW(phi::errors::Fatal("To use CUDA backends, " + "you need to set WITH_CUDA ON!")); #endif } else if (target.arch == Target::Arch::X86) { memcpy(data, @@ -323,7 +324,8 @@ void BindFrontend(pybind11::module *m) { in_tensor->shape().numel() * sizeof(float), cudaMemcpyHostToDevice)); #else - LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!"; + PADDLE_THROW(phi::errors::Fatal("To use CUDA backends, " + "you need to set WITH_CUDA ON!")); #endif } else if (target.arch == Target::Arch::X86) { for (size_t j = 0; j < in_tensor->shape().numel(); j++) { @@ -373,7 +375,8 @@ void BindFrontend(pybind11::module *m) { in_tensor->shape().numel() * sizeof(float), cudaMemcpyHostToDevice)); #else - LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!"; + PADDLE_THROW(phi::errors::Fatal("To use CUDA backends, " + "you need to set WITH_CUDA ON!")); #endif } else if (target.arch == Target::Arch::X86) { for (size_t j = 0; j < in_tensor->shape().numel(); j++) { diff --git a/paddle/cinn/pybind/ir/ir.cc b/paddle/cinn/pybind/ir/ir.cc index 6118f7c8a5e695..d9f9bd5fcdf7f0 100644 --- a/paddle/cinn/pybind/ir/ir.cc +++ b/paddle/cinn/pybind/ir/ir.cc @@ -47,8 +47,8 @@ std::vector AxisMap(const std::string& kinds, } else if (c == 'R') { iter_var->is_reduce_axis = true; } else { - LOG(FATAL) - << "kind of axis setting error, must be R(Reduce) or S(Spatial)"; + PADDLE_THROW(phi::errors::InvalidArgument( + "kind of axis setting error, must be R(Reduce) or S(Spatial)")); } rets.push_back(SetScheduleBlockIterVar(iter_var, iter_expression[i])); } diff --git a/paddle/cinn/pybind/ir/ir_api.cc b/paddle/cinn/pybind/ir/ir_api.cc index efebf1206a8674..224bf87e09bfae 100644 --- a/paddle/cinn/pybind/ir/ir_api.cc +++ b/paddle/cinn/pybind/ir/ir_api.cc @@ -748,8 +748,9 @@ auto PackedFuncCall(lang::PackedFunc &self, py::args args) { // NOLINT } else if (py::isinstance(handle)) { cinn_args.Append(CINNValue(py::cast(handle))); } else { - LOG(FATAL) << "unsupported type: " - << std::string(py::str(handle.get_type())); + std::stringstream ss; + ss << "unsupported type: " << std::string(py::str(handle.get_type())); + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } } lang::RetValue ret_value; diff --git a/paddle/cinn/pybind/ir/ir_context.cc b/paddle/cinn/pybind/ir/ir_context.cc index 8b4d0a4cf1e1d3..14dad90d841b5f 100644 --- a/paddle/cinn/pybind/ir/ir_context.cc +++ b/paddle/cinn/pybind/ir/ir_context.cc @@ -59,10 +59,12 @@ void LowerFuncContextNode::ExitWithContext() { void IfContextNode::ExitWithContext() { IRContextNode::ExitWithContext(); if (!exprs.empty()) { - LOG(FATAL) << "Expr not be either in ThenBlock or ElseBlock in if"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Expr not be either in ThenBlock or ElseBlock in if")); } if (!true_case.defined()) { - LOG(FATAL) << "Expr not be defined in ThenBlock"; + PADDLE_THROW( + phi::errors::InvalidArgument("Expr not be defined in ThenBlock")); } LinkToParentContext(ir::IfThenElse::Make(condition, true_case, false_case)); } diff --git a/paddle/cinn/pybind/ir/ir_context.h b/paddle/cinn/pybind/ir/ir_context.h index 8cdf0ed85c0818..837d66e8c07606 100644 --- a/paddle/cinn/pybind/ir/ir_context.h +++ 
b/paddle/cinn/pybind/ir/ir_context.h @@ -21,7 +21,7 @@ #include "paddle/cinn/ir/ir.h" #include "paddle/cinn/ir/ir_base.h" #include "paddle/cinn/ir/lowered_func.h" -#include "paddle/cinn/utils/error.h" +#include "paddle/common/enforce.h" namespace cinn { namespace pybind { @@ -73,7 +73,7 @@ class IRContext { err_msg << "TypeConvertError: convert " << data_.get()->type_info() << " to " << TIRContextNode::__type_info__; - CINN_THROW(err_msg.str()); + PADDLE_THROW(phi::errors::InvalidArgument(err_msg.str())); } return ctx_node; } @@ -82,8 +82,10 @@ class IRContext { CHECK(data_.get()) << "IrContext holds null"; auto* ctx_node = data_.get()->safe_as(); if (!ctx_node) { - LOG(FATAL) << "TypeConvertError: convert " << data_.get()->type_info() - << " to " << TIRContextNode::__type_info__; + std::stringstream ss; + ss << "TypeConvertError: convert " << data_.get()->type_info() << " to " + << TIRContextNode::__type_info__; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return ctx_node; } @@ -235,8 +237,10 @@ void LinkToParentContext(ir::Expr); template IRContext IRBuilderNode::GetLastContext() const { if (!(contexts.back().As())) { - LOG(FATAL) << "TypeError: The last context is not " - << TIRContextNode::__type_info__; + std::stringstream ss; + ss << "TypeError: The last context is not " + << TIRContextNode::__type_info__; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return contexts.back(); } diff --git a/paddle/cinn/pybind/runtime.cc b/paddle/cinn/pybind/runtime.cc index 91db8af397ec29..0ef1ee542aa353 100644 --- a/paddle/cinn/pybind/runtime.cc +++ b/paddle/cinn/pybind/runtime.cc @@ -92,7 +92,8 @@ cinn_buffer_t *CreateBufferFromNumpy( buffer->memory, data.data(), data.nbytes(), cudaMemcpyHostToDevice)); return buffer; #else - LOG(FATAL) << "To use CUDA backends, you need to set WITH_CUDA ON!"; + PADDLE_THROW(phi::errors::Fatal( + "To use CUDA backends, you need to set WITH_CUDA ON!")); #endif } else { CINN_NOT_IMPLEMENTED @@ -108,7 +109,8 @@ void BufferCopyTo(const cinn_buffer_t &buffer, py::array array) { CUDA_CALL(cudaMemcpy( array_data, buffer.memory, array.nbytes(), cudaMemcpyDeviceToHost)); #else - LOG(FATAL) << "To use CUDA backends, you need to set WITH_CUDA ON!"; + PADDLE_THROW(phi::errors::Fatal( + "To use CUDA backends, you need to set WITH_CUDA ON!")); #endif } else { @@ -135,7 +137,7 @@ py::array BufferHostMemoryToNumpy(cinn_buffer_t &buffer) { // NOLINT } else if (buffer.type == cinn_bool_t()) { dt = py::dtype::of(); } else { - LOG(FATAL) << "Not supported type found"; + PADDLE_THROW(phi::errors::InvalidArgument("Not supported type found")); } py::array::ShapeContainer shape(buffer.dims, buffer.dims + buffer.dimensions); diff --git a/paddle/cinn/runtime/cpu/mkldnn_math.cc b/paddle/cinn/runtime/cpu/mkldnn_math.cc index b45ddedd2e890a..8468453fe20b30 100644 --- a/paddle/cinn/runtime/cpu/mkldnn_math.cc +++ b/paddle/cinn/runtime/cpu/mkldnn_math.cc @@ -50,7 +50,9 @@ void cinn_cpu_mkldnn_softmax_fp32(int batch, format_tag = tag::abcd; break; default: - LOG(FATAL) << "wrong dim: " << size; + std::stringstream ss; + ss << "wrong dim: " << size; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); break; } diff --git a/paddle/cinn/runtime/cpu/thread_backend.cc b/paddle/cinn/runtime/cpu/thread_backend.cc index 43804e33b1e60b..3878b49b9a314d 100644 --- a/paddle/cinn/runtime/cpu/thread_backend.cc +++ b/paddle/cinn/runtime/cpu/thread_backend.cc @@ -56,7 +56,8 @@ int cinn_backend_parallel_launch(FCINNParallelLambda flambda, (*flambda)(thread_num, num_task, datas); } #else 
- LOG(FATAL) << "CINN host parallel launch need OpenMP! Please check."; + PADDLE_THROW(phi::errors::Fatal( + "CINN host parallel launch need OpenMP! Please check.")); #endif // CINN_USE_OPENMP return 0; } diff --git a/paddle/cinn/runtime/cuda/cublas_util.h b/paddle/cinn/runtime/cuda/cublas_util.h index bdd21dafed544f..904678f2ce2e33 100644 --- a/paddle/cinn/runtime/cuda/cublas_util.h +++ b/paddle/cinn/runtime/cuda/cublas_util.h @@ -130,10 +130,12 @@ inline cublasStatus_t cublasGemm(cudaDataType_t dtype, CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP); #else - LOG(FATAL) << "cublasGemmEx with bfloat16 is not supported on cuda <= 11"; + PADDLE_THROW(phi::errors::Fatal( + "cublasGemmEx with bfloat16 is not supported on cuda <= 11")); #endif } - LOG(FATAL) << "Unsupported cublasGemm precision."; + PADDLE_THROW( + phi::errors::InvalidArgument("Unsupported cublasGemm precision.")); } inline cublasStatus_t cublasGemmStridedBatched(cudaDataType_t dtype, @@ -269,11 +271,13 @@ inline cublasStatus_t cublasGemmStridedBatched(cudaDataType_t dtype, CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP); #else - LOG(FATAL) << "cublasGemmStridedBatched with bfloat16 is not supported on " - "cuda <= 11"; + PADDLE_THROW(phi::errors::InvalidArgument( + "cublasGemmStridedBatched with bfloat16 is not supported on " + "cuda <= 11")); #endif } - LOG(FATAL) << "Unsupported cublasGemmStridedBatched precision."; + PADDLE_THROW(phi::errors::InvalidArgument( + "Unsupported cublasGemmStridedBatched precision.")); } inline cublasStatus_t cublasGemmBatched(cudaDataType_t dtype, @@ -390,11 +394,12 @@ inline cublasStatus_t cublasGemmBatched(cudaDataType_t dtype, CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP); #else - LOG(FATAL) - << "cublasGemmBatched with bfloat16 is not supported on cuda <= 11"; + PADDLE_THROW(phi::errors::Fatal( + "cublasGemmBatched with bfloat16 is not supported on cuda <= 11")); #endif } - LOG(FATAL) << "Unsupported cublasGemmBatched precision."; + PADDLE_THROW( + phi::errors::InvalidArgument("Unsupported cublasGemmBatched precision.")); } } // namespace cuda diff --git a/paddle/cinn/runtime/cuda/cuda_util.cc b/paddle/cinn/runtime/cuda/cuda_util.cc index 074c35f1ce9f93..cf7686d2de7af4 100644 --- a/paddle/cinn/runtime/cuda/cuda_util.cc +++ b/paddle/cinn/runtime/cuda/cuda_util.cc @@ -202,8 +202,10 @@ void cinn_call_cublas(void *v_args, } else if (is_bfloat16) { cuda_dtype = CUDA_R_16BF; } else { - LOG(FATAL) << "unsupported cublas data type: " - << static_cast(type_code) << ", bytes = " << bytes; + std::stringstream ss; + ss << "unsupported cublas data type: " << static_cast(type_code) + << ", bytes = " << bytes; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } if (a1 * a2 * b1 * b2 == 1) { @@ -424,8 +426,10 @@ void cinn_call_batched_cublas(void *v_args, } else if (is_bfloat16) { cuda_dtype = CUDA_R_16BF; } else { - LOG(FATAL) << "unsupported cublas data type: " - << static_cast(type_code) << ", bytes = " << bytes; + std::stringstream ss; + ss << "unsupported cublas data type: " << static_cast(type_code) + << ", bytes = " << bytes; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } int m = trans_o ? (trans_a ? a4 : a3) : (trans_b ? 
b3 : b4); @@ -630,7 +634,8 @@ cudnnDataType_t convert_to_cudnn_dtype(void *v_args, int num_args) { auto t = args[i].operator cinn_buffer_t *()->type.code; int b = args[0].operator cinn_buffer_t *()->type.bits; if (t != type_code || bits != b) { - LOG(FATAL) << "The types of all arguments need to be consistent."; + PADDLE_THROW(phi::errors::InvalidArgument( + "The types of all arguments need to be consistent.")); } } cudnnDataType_t data_type; @@ -645,8 +650,10 @@ cudnnDataType_t convert_to_cudnn_dtype(void *v_args, int num_args) { } else if (is_float && bits == 64) { data_type = CUDNN_DATA_DOUBLE; } else { - LOG(FATAL) << "unsupported cudnn data type: " << static_cast(type_code) - << ", bits = " << bits; + std::stringstream ss; + ss << "unsupported cudnn data type: " << static_cast(type_code) + << ", bits = " << bits; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return data_type; } @@ -660,8 +667,9 @@ cudnnDataType_t get_cudnn_compute_dtype(cudnnDataType_t data_type) { case CUDNN_DATA_DOUBLE: return CUDNN_DATA_DOUBLE; default: - LOG(FATAL) << "unsupported cudnn data type, only support " - "float16/bfloat16/float32/float64 now!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "unsupported cudnn data type, only support " + "float16/bfloat16/float32/float64 now!")); } return CUDNN_DATA_FLOAT; } @@ -673,7 +681,8 @@ std::string debug_cudnn_tensor_format(cudnnTensorFormat_t tensor_format) { case CUDNN_TENSOR_NHWC: return "NHWC"; default: - LOG(FATAL) << "Only support NCHW and NHWC data layout\n"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Only support NCHW and NHWC data layout\n")); } return ""; } @@ -689,7 +698,8 @@ std::string debug_cudnn_tensor_dtype(cudnnDataType_t tensor_dtype) { case CUDNN_DATA_DOUBLE: return "float64"; default: - LOG(FATAL) << "Only support float16/bfloat16/float32/float64 now!"; + PADDLE_THROW(phi::errors::InvalidArgument( + "Only support float16/bfloat16/float32/float64 now!")); } return ""; } @@ -705,7 +715,8 @@ std::string debug_cudnn_pool_mode(cudnnPoolingMode_t pool_mode) { case CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING: return "avg_exclude_padding"; default: - LOG(FATAL) << "Pool only support max and avg now!"; + PADDLE_THROW( + phi::errors::InvalidArgument("Pool only support max and avg now!")); } return ""; } @@ -2076,8 +2087,8 @@ void cinn_call_gaussian_random( double *ptr = reinterpret_cast(output->memory); CURAND_CALL(curandGenerateNormalDouble(generator, ptr, numel, mean, std)); } else { - LOG(FATAL) - << "gaussian_random only support float32 and float64! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "gaussian_random only support float32 and float64! Please check.")); } } @@ -2105,8 +2116,8 @@ void cinn_call_uniform_random( double *ptr = reinterpret_cast(output->memory); CURAND_CALL(curandGenerateUniformDouble(generator, ptr, numel)); } else { - LOG(FATAL) - << "uniform_random only support float32 and float64! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "uniform_random only support float32 and float64! Please check.")); } } @@ -2129,7 +2140,8 @@ void cinn_call_randint(void *v_args, int num_args, int seed, void *stream) { unsigned int *ptr = reinterpret_cast(output->memory); CURAND_CALL(curandGenerate(generator, ptr, numel)); } else { - LOG(FATAL) << "randint only support int32! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "randint only support int32! 
Please check.")); } } @@ -2152,21 +2164,25 @@ cudnnDataType_t convert_to_cudnn_dtype(cinn_buffer_t *input) { } else if (is_float && bits == 64) { data_type = CUDNN_DATA_DOUBLE; } else { - LOG(FATAL) << "unsupported cudnn data type: " << static_cast(type_code) - << ", bits = " << bits; + std::stringstream ss; + ss << "unsupported cudnn data type: " << static_cast(type_code) + << ", bits = " << bits; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); } return data_type; } } // namespace -#define GetAttrValue(attr_map, key_name, default_value) \ - int key_name = 0; \ - if (attr_map.count(#key_name) != 0) { \ - key_name = attr_map.find(#key_name)->second; \ - } else if (default_value >= 0) { \ - key_name = default_value; \ - } else { \ - LOG(FATAL) << #key_name << " is not exist in attr_map!"; \ +#define GetAttrValue(attr_map, key_name, default_value) \ + int key_name = 0; \ + if (attr_map.count(#key_name) != 0) { \ + key_name = attr_map.find(#key_name)->second; \ + } else if (default_value >= 0) { \ + key_name = default_value; \ + } else { \ + std::stringstream ss; \ + ss << #key_name << " is not exist in attr_map!"; \ + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); \ } void cinn_gpu_cudnn_conv2d(const absl::flat_hash_map &attr, diff --git a/paddle/cinn/runtime/custom_function.cc b/paddle/cinn/runtime/custom_function.cc index 08fe5c1bd7f351..05baa6fd548366 100644 --- a/paddle/cinn/runtime/custom_function.cc +++ b/paddle/cinn/runtime/custom_function.cc @@ -80,9 +80,9 @@ void AssertTrueMsgTool::InitFlagInfo() { // string type parameter flag_values_[flag_arg[0]] = std::stof(flag_arg[1]); } else { - LOG(FATAL) - << "The FLAGS_cinn_check_fusion_accuracy_pass only support parameter " - "\"only_warning/rtol/atol/equal_nan\" now"; + PADDLE_THROW(phi::errors::InvalidArgument( + "The FLAGS_cinn_check_fusion_accuracy_pass only support parameter " + "\"only_warning/rtol/atol/equal_nan\" now")); } } @@ -111,8 +111,8 @@ bool MemcpyToHost(void* dst, cudaStreamSynchronize(cuda_stream); return true; #else - LOG(FATAL) - << "NVGPU Target only support on flag CINN_WITH_CUDA ON! Please check."; + PADDLE_THROW(phi::errors::Fatal( + "NVGPU Target only support on flag CINN_WITH_CUDA ON! Please check.")); return false; #endif } @@ -120,9 +120,11 @@ bool MemcpyToHost(void* dst, memcpy(dst, src, bytes); return true; } - LOG(FATAL) << "MemcpyToHost Only support cpu or nvgpu -> cpu, but here the " - "input target is " - << input_target << "! Please check."; + std::stringstream ss; + ss << "MemcpyToHost Only support cpu or nvgpu -> cpu, but here the " + "input target is " + << input_target << "! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); return false; } @@ -147,14 +149,17 @@ bool MemcpyToDevice(void* dst, static_cast(stream)); return true; } else { - LOG(FATAL) << "MemcpyToDevice only support cpu or nvgpu -> nvgpu, but here " - "the input target is " - << input_target << "! Please check."; + std::stringstream ss; + ss << "MemcpyToDevice only support cpu or nvgpu -> nvgpu, but here " + "the input target is " + << input_target << "! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); return false; } #else - LOG(FATAL) << "MemcpyToDevice only support nvgpu, and NVGPU Target only " - "support when flag CINN_WITH_CUDA ON! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "MemcpyToDevice only support nvgpu, and NVGPU Target only " + "support when flag CINN_WITH_CUDA ON! 
Please check.")); return false; #endif } @@ -187,7 +192,7 @@ void CheckAssertTrue(const bool* x, if (only_warning) { LOG(WARNING) << error_info; } else { - LOG(FATAL) << error_info; + PADDLE_THROW(phi::errors::InvalidArgument(error_info)); } } else { VLOG(1) << "[AssertTrue] Check succeed!\n" diff --git a/paddle/cinn/runtime/custom_function_test.cc b/paddle/cinn/runtime/custom_function_test.cc index b2dc09b1862f07..350e7c85fb16a3 100644 --- a/paddle/cinn/runtime/custom_function_test.cc +++ b/paddle/cinn/runtime/custom_function_test.cc @@ -59,12 +59,15 @@ class CinnBufferAllocHelper { #ifdef CINN_WITH_CUDA cudaMalloc(&buffer_->memory, buffer_->num_elements() * sizeof(T)); #else - LOG(FATAL) << "NVGPU Target only support on flag CINN_WITH_CUDA ON! " - "Please check."; + PADDLE_THROW(phi::errors::Fatal( + "NVGPU Target only support on flag CINN_WITH_CUDA ON! " + "Please check.")); #endif } else { - LOG(FATAL) << "Only support nvgpu and cpu, but here " << target - << "! Please check."; + std::stringstream ss; + ss << "Only support nvgpu and cpu, but here " << target + << "! Please check."; + PADDLE_THROW(phi::errors::Fatal(ss.str())); } return reinterpret_cast(buffer_->memory); @@ -73,7 +76,7 @@ class CinnBufferAllocHelper { template const T* data() { if (target_ == cinn::common::UnkTarget()) { - LOG(FATAL) << "No memory had alloced! Please check."; + PADDLE_THROW(phi::errors::Fatal("No memory had alloced! Please check.")); } return reinterpret_cast(buffer_->memory); } @@ -88,12 +91,15 @@ class CinnBufferAllocHelper { #ifdef CINN_WITH_CUDA cudaFree(buffer_->memory); #else - LOG(FATAL) << "NVGPU Target only support on flag CINN_WITH_CUDA ON! " - "Please check."; + PADDLE_THROW(phi::errors::Fatal( + "NVGPU Target only support on flag CINN_WITH_CUDA ON! " + "Please check.")); #endif } else { - LOG(FATAL) << "Only support nvgpu and cpu, but here " << target_ - << "! Please check."; + std::stringstream ss; + ss << "Only support nvgpu and cpu, but here " << target_ + << "! Please check."; + PADDLE_THROW(phi::errors::Fatal(ss.str())); } delete buffer_; } @@ -121,8 +127,8 @@ void SetInputValue(T* input, #ifdef CINN_WITH_CUDA cudaMemcpy(input, input_h, num * sizeof(T), cudaMemcpyHostToDevice); #else - LOG(FATAL) - << "NVGPU Target only support on flag CINN_WITH_CUDA ON! Please check."; + PADDLE_THROW(phi::errors::Fatal( + "NVGPU Target only support on flag CINN_WITH_CUDA ON! Please check.")); #endif } } @@ -233,8 +239,8 @@ TEST(CustomCallGaussianRandom, test_target_nvgpu) { VLOG(6) << output_data[i]; } #else - LOG(FATAL) - << "NVGPU Target only support on flag CINN_WITH_CUDA ON! Please check."; + PADDLE_THROW(phi::errors::Fatal( + "NVGPU Target only support on flag CINN_WITH_CUDA ON! Please check.")); #endif } } @@ -269,8 +275,8 @@ TEST(CustomCallUniformRandom, test_target_nvgpu) { VLOG(6) << output_data[i]; } #else - LOG(FATAL) - << "NVGPU Target only support on flag CINN_WITH_CUDA ON! Please check."; + PADDLE_THROW(phi::errors::Fatal( + "NVGPU Target only support on flag CINN_WITH_CUDA ON! 
Please check.")); #endif } } diff --git a/paddle/cinn/runtime/flags.cc b/paddle/cinn/runtime/flags.cc index 349f94895bbb57..27ebc4fd25b21a 100644 --- a/paddle/cinn/runtime/flags.cc +++ b/paddle/cinn/runtime/flags.cc @@ -22,6 +22,7 @@ #include #include "paddle/cinn/common/target.h" +#include "paddle/common/enforce.h" #include "paddle/common/flags.h" #ifdef CINN_WITH_CUDNN @@ -294,7 +295,8 @@ bool GetCinnCudnnDeterministic() { #ifdef CINN_WITH_CUDNN return FLAGS_cinn_cudnn_deterministic; #else - LOG(FATAL) << "CINN is compiled without cuDNN, this api is invalid!"; + PADDLE_THROW(phi::errors::Fatal( + "CINN is compiled without cuDNN, this api is invalid!")); return false; #endif } @@ -341,8 +343,9 @@ cinn::common::Target CurrentTarget::target_ = cinn::common::DefaultTarget(); void CurrentTarget::SetCurrentTarget(const cinn::common::Target& target) { if (!IsCompiledWithCUDA() && target.arch == cinn::common::Target::Arch::NVGPU) { - LOG(FATAL) << "Current CINN version does not support NVGPU, please try to " - "recompile with -DWITH_CUDA."; + PADDLE_THROW(phi::errors::Fatal( + "Current CINN version does not support NVGPU, please try to " + "recompile with -DWITH_CUDA.")); } else { target_ = target; } diff --git a/paddle/cinn/runtime/intrinsic.cc b/paddle/cinn/runtime/intrinsic.cc index eb68cb5637cf3d..6bf5ac17c506e3 100644 --- a/paddle/cinn/runtime/intrinsic.cc +++ b/paddle/cinn/runtime/intrinsic.cc @@ -51,7 +51,9 @@ cinn_type_t ToRuntimeType(Type type) { SET_TYPE_CASE_ITEM(Float16().PointerOf, cinn_type_of); SET_TYPE_CASE_ITEM(BFloat16().PointerOf, cinn_type_of); - LOG(FATAL) << "Not supported type " << type; + std::stringstream ss; + ss << "Not supported type " << type; + PADDLE_THROW(phi::errors::InvalidArgument(ss.str())); return cinn_unk_t(); #undef SET_TYPE_CASE_ITEM } diff --git a/paddle/cinn/utils/error.h b/paddle/cinn/utils/error.h index 7b5af324d7081a..c64b32017e4b5c 100644 --- a/paddle/cinn/utils/error.h +++ b/paddle/cinn/utils/error.h @@ -113,14 +113,15 @@ struct EnforceNotMet : public std::exception { std::string err_str_; }; -#define CINN_THROW(...) \ - do { \ - try { \ - throw utils::enforce::EnforceNotMet(__VA_ARGS__, __FILE__, __LINE__); \ - } catch (const std::exception& e) { \ - std::cout << e.what() << std::endl; \ - throw; \ - } \ +#define CINN_THROW(...) 
\ + do { \ + try { \ + throw cinn::utils::enforce::EnforceNotMet( \ + __VA_ARGS__, __FILE__, __LINE__); \ + } catch (const std::exception& e) { \ + std::cout << e.what() << std::endl; \ + throw; \ + } \ } while (0) } // namespace enforce diff --git a/paddle/cinn/utils/event.cc b/paddle/cinn/utils/event.cc index ca06ae73c67666..7ec7769c992307 100644 --- a/paddle/cinn/utils/event.cc +++ b/paddle/cinn/utils/event.cc @@ -15,9 +15,9 @@ #include "paddle/cinn/utils/event.h" #include // for GLog - #include +#include "paddle/common/enforce.h" namespace cinn { namespace utils { inline std::string EventTypeToString(const EventType &type) { @@ -43,7 +43,7 @@ inline std::string EventTypeToString(const EventType &type) { case EventType::kInstruction: return "Instruction"; default: - LOG(FATAL) << "Unknown event type"; + PADDLE_THROW(phi::errors::InvalidArgument("Unknown event type")); } } diff --git a/paddle/cinn/utils/multi_threading.cc b/paddle/cinn/utils/multi_threading.cc index d4031431d0e343..2614db268fc50a 100644 --- a/paddle/cinn/utils/multi_threading.cc +++ b/paddle/cinn/utils/multi_threading.cc @@ -20,8 +20,8 @@ #include #include #include - #include "paddle/cinn/utils/string.h" +#include "paddle/common/enforce.h" namespace cinn { namespace utils { @@ -86,7 +86,9 @@ void parallel_run(const WorkerFuncType& fn, VLOG(4) << "Thread-" << tid << " process " << counter << " tasks."; } } catch (const std::exception& e) { - LOG(FATAL) << "parallel_run incurs error: " << e.what(); + std::stringstream ss; + ss << "parallel_run incurs error: " << e.what(); + PADDLE_THROW(phi::errors::Fatal(ss.str())); } // join threads diff --git a/paddle/cinn/utils/string.cc b/paddle/cinn/utils/string.cc index 5e6560551c068a..51813f2fcaf482 100644 --- a/paddle/cinn/utils/string.cc +++ b/paddle/cinn/utils/string.cc @@ -20,6 +20,7 @@ #include #include "glog/logging.h" +#include "paddle/common/enforce.h" namespace cinn { namespace utils { @@ -174,7 +175,8 @@ std::string Attribute2String(const utils::Attribute &attr) { } ss << "[" + cinn::utils::Join(attrs, ", ") + "]"; } else { - LOG(FATAL) << "Unkown attribute data type! Please check."; + PADDLE_THROW(phi::errors::InvalidArgument( + "Unkown attribute data type! Please check.")); } return ss.str(); }
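Every hunk in this patch follows one mechanical pattern: a LOG(FATAL) that streamed values into its message is rewritten as an explicit std::stringstream followed by PADDLE_THROW with one of phi::errors::Fatal, phi::errors::InvalidArgument, or phi::errors::Unimplemented, while messages that are plain string literals are passed to PADDLE_THROW directly. The snippet below is a minimal illustrative sketch of that convention, not part of the patch itself; it assumes "paddle/common/enforce.h" provides PADDLE_THROW and phi::errors as used throughout these diffs, and CheckMemoryType is a hypothetical stand-in for call sites such as _Tensor_::WithBuffer.

#include <sstream>
#include <string>

#include "paddle/common/enforce.h"  // assumed: provides PADDLE_THROW and phi::errors

// Hypothetical helper mirroring the post-patch error-reporting style.
void CheckMemoryType(const std::string& memory_type) {
  if (memory_type == "shared" || memory_type == "local" ||
      memory_type == "global") {
    return;  // supported memory types
  }
  // When the message interpolates values, build it with std::stringstream
  // first, exactly as the hunks above do.
  std::stringstream ss;
  ss << "Not supported memory type " << memory_type;
  PADDLE_THROW(phi::errors::InvalidArgument(ss.str()));
}

// When the message is a plain literal, the string is passed directly:
//   PADDLE_THROW(phi::errors::Fatal("Dead code"));

Since PADDLE_THROW raises a catchable C++ exception instead of aborting the process, the trailing statements that several hunks leave after the throw (return e;, return false;, throw ex;) remain unreachable, just as they were behind LOG(FATAL).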