From e9746007cc5b0e27132f53ba1f71fb25bbffebc3 Mon Sep 17 00:00:00 2001
From: co63oc
Date: Tue, 13 Feb 2024 14:51:34 +0800
Subject: [PATCH] Fix typos: rename utils::Startswith/Endswith to
 StartsWith/EndsWith

Rename the misspelled string helpers utils::Startswith and utils::Endswith
to utils::StartsWith and utils::EndsWith, and update all call sites and the
unit tests accordingly. Pure rename, no behavior change.

---
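Reviewer aside (placed below the "---" cut, so git am keeps it out of the
commit message): the rename is purely mechanical. For quick reference, here
is a minimal standalone sketch of the two helpers' behavior, mirroring the
implementation in paddle/cinn/utils/string.cc and the expectations in
string_test.cc. The trailing "return false;" of EndsWith falls outside the
hunk context shown below, and main() exists only for illustration:

    #include <algorithm>
    #include <cassert>
    #include <string>

    // Prefix test: true iff str occurs at position 0 of x. On a mismatch,
    // find() keeps scanning the rest of x; an equivalent check such as
    // x.compare(0, str.size(), str) == 0 stops at the first difference.
    bool StartsWith(const std::string &x, const std::string &str) {
      return x.find(str) == 0;
    }

    // Suffix test: compare the last str.size() characters of x in reverse.
    bool EndsWith(const std::string &x, const std::string &str) {
      if (x.length() >= str.length()) {
        return std::equal(str.rbegin(), str.rend(), x.rbegin());
      }
      return false;
    }

    int main() {
      const std::string a = "a__p";
      assert(StartsWith(a, "a_"));      // proper prefix
      assert(!StartsWith(a, "a___p"));  // longer than a, cannot match
      assert(EndsWith(a, "__p"));       // proper suffix
      assert(!EndsWith(a, "_x"));
      return 0;
    }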
 paddle/cinn/ast_gen_ius/tensor_group.cc       |  2 +-
 paddle/cinn/backends/codegen_c.cc             |  2 +-
 paddle/cinn/backends/codegen_cuda_dev.cc      |  6 +++---
 paddle/cinn/backends/llvm/codegen_llvm.cc     |  2 +-
 paddle/cinn/common/cas.cc                     |  4 ++--
 paddle/cinn/hlir/framework/instruction.h      |  2 +-
 paddle/cinn/ir/buffer.cc                      |  4 ++--
 paddle/cinn/ir/lowered_func.cc                |  2 +-
 paddle/cinn/lang/lower.cc                     |  8 ++++----
 paddle/cinn/optim/compute_inline_expand.cc    | 14 ++++++-------
 .../optim/replace_const_param_to_integer.cc   |  2 +-
 paddle/cinn/optim/resize_buffer.cc            |  4 ++--
 .../cinn/poly/domain_add_unit_loop_mutator.cc |  8 ++++----
 paddle/cinn/poly/isl_utils.cc                 |  2 +-
 paddle/cinn/poly/stage.cc                     | 12 +++++------
 paddle/cinn/utils/string.cc                   |  6 +++---
 paddle/cinn/utils/string.h                    |  4 ++--
 paddle/cinn/utils/string_test.cc              | 20 +++++++++----------
 18 files changed, 52 insertions(+), 52 deletions(-)

diff --git a/paddle/cinn/ast_gen_ius/tensor_group.cc b/paddle/cinn/ast_gen_ius/tensor_group.cc
index d94d83429eb073..2ccb84f3248ba0 100644
--- a/paddle/cinn/ast_gen_ius/tensor_group.cc
+++ b/paddle/cinn/ast_gen_ius/tensor_group.cc
@@ -244,7 +244,7 @@ TensorGroup ConvertStageMapToTensorGroup(const poly::StageMap& stage_map) {
     if (iter->second->has_expression()) {
       const std::string& tensor_name = iter->first;
       stage_tensors.push_back(ir::Tensor(iter->second->tensor()));
-      if (utils::Endswith(tensor_name, "_reshape")) {
+      if (utils::EndsWith(tensor_name, "_reshape")) {
         reshape_tensors.insert(ir::Tensor(iter->second->tensor()));
       }
     }
diff --git a/paddle/cinn/backends/codegen_c.cc b/paddle/cinn/backends/codegen_c.cc
index 40374258f59f29..ca80bcdddd0c01 100644
--- a/paddle/cinn/backends/codegen_c.cc
+++ b/paddle/cinn/backends/codegen_c.cc
@@ -120,7 +120,7 @@ std::string CodeGenC::GetTypeName(Type type) {
     auto customized_name = type.customized_type();
     // get name of a cuda built-in vector type, it is started with a
     // 'CudaVectorType::' prefix
-    if (utils::Startswith(
+    if (utils::StartsWith(
             customized_name,
             cinn::common::customized_type::kcuda_builtin_vector_t)) {
       customized_name.erase(
diff --git a/paddle/cinn/backends/codegen_cuda_dev.cc b/paddle/cinn/backends/codegen_cuda_dev.cc
index ade74e46b71ab7..31d4625a3c6cca 100644
--- a/paddle/cinn/backends/codegen_cuda_dev.cc
+++ b/paddle/cinn/backends/codegen_cuda_dev.cc
@@ -183,8 +183,8 @@ void CodeGenCUDA_Dev::Visit(const ir::Free *op) {
 }
 
 void CodeGenCUDA_Dev::Visit(const ir::_Var_ *op) {
-  if (utils::Startswith(op->name, "threadIdx") ||
-      utils::Startswith(op->name, "blockIdx")) {
+  if (utils::StartsWith(op->name, "threadIdx") ||
+      utils::StartsWith(op->name, "blockIdx")) {
     str_ += "(int)";
     str_ += op->name;
   } else {
@@ -409,7 +409,7 @@ void CodeGenCUDA_Dev::Visit(const ir::Let *op) {
   // identify vectorized tensors by checking their dtypes are customized_type
   // with customized_type::kcuda_builtin_vector_t prefix, and save their names
   if (op->type().is_customized() &&
-      utils::Startswith(
+      utils::StartsWith(
           op->type().customized_type(),
           cinn::common::customized_type::kcuda_builtin_vector_t)) {
     str_ += GetTypeRepr(op->type());
diff --git a/paddle/cinn/backends/llvm/codegen_llvm.cc b/paddle/cinn/backends/llvm/codegen_llvm.cc
index e554eca8795a43..6147940075d8ab 100644
--- a/paddle/cinn/backends/llvm/codegen_llvm.cc
+++ b/paddle/cinn/backends/llvm/codegen_llvm.cc
@@ -1395,7 +1395,7 @@ void CodeGenLLVM::InitTarget(const Target &target) {
 }
 
 bool LLVM_WillVarLowerAsPointer(const std::string &var_name) {
-  return var_name == "_args" || utils::Endswith(var_name, "__ptr");
+  return var_name == "_args" || utils::EndsWith(var_name, "__ptr");
 }
 
 void CodeGenLLVM::AddTbaaMetadata(llvm::Instruction *inst,
diff --git a/paddle/cinn/common/cas.cc b/paddle/cinn/common/cas.cc
index c7391591aa37c3..fa12b293327632 100644
--- a/paddle/cinn/common/cas.cc
+++ b/paddle/cinn/common/cas.cc
@@ -1155,8 +1155,8 @@ inline bool IsVarNonnegative(
 // Return if the var is binded with thread or block in cuda(which implies it is
 // non-negative).
 inline bool IsVarBinded(const std::string& var_name) {
-  return utils::Startswith(var_name, "threadIdx") ||
-         utils::Startswith(var_name, "blockIdx");
+  return utils::StartsWith(var_name, "threadIdx") ||
+         utils::StartsWith(var_name, "blockIdx");
 }
 
 /**
diff --git a/paddle/cinn/hlir/framework/instruction.h b/paddle/cinn/hlir/framework/instruction.h
index 4e1a92e4b1c46a..b1241677c0f818 100644
--- a/paddle/cinn/hlir/framework/instruction.h
+++ b/paddle/cinn/hlir/framework/instruction.h
@@ -103,7 +103,7 @@ class Instruction {
     int flag = -1;
     void* stream = nullptr;
    for (int idx = 0; idx < 4; idx++) {
-      if (utils::Startswith(out_args_[idx][0], "kernel_pack")) {
+      if (utils::StartsWith(out_args_[idx][0], "kernel_pack")) {
        VLOG(3) << "PreRun " << idx << "-th function of fn_:" << fn_names_[idx];
        flag = idx;
        auto& pod_args = args_cached_[idx];
diff --git a/paddle/cinn/ir/buffer.cc b/paddle/cinn/ir/buffer.cc
index 69bf16bea297e7..15719c2751d127 100644
--- a/paddle/cinn/ir/buffer.cc
+++ b/paddle/cinn/ir/buffer.cc
@@ -27,7 +27,7 @@ namespace ir {
 
 std::string TensorGetBufferName(const _Tensor_ *tensor) {
   CHECK(!tensor->name.empty());
-  CHECK(!utils::Startswith(tensor->name, "_"))
+  CHECK(!utils::StartsWith(tensor->name, "_"))
       << "the name with prefix _ is not allowed for tensor. Current tensor's "
         "name is: "
      << tensor->name;
@@ -35,7 +35,7 @@ std::string TensorGetBufferName(const _Tensor_ *tensor) {
 }
 
 std::string BufferGetTensorName(const _Buffer_ *buffer) {
   CHECK(!buffer->name.empty());
-  CHECK(utils::Startswith(buffer->name, "_"))
+  CHECK(utils::StartsWith(buffer->name, "_"))
       << "buffer's name should start with _";
   return buffer->name.substr(1);
 }
diff --git a/paddle/cinn/ir/lowered_func.cc b/paddle/cinn/ir/lowered_func.cc
index 652d64f21e2449..f662fb1cfac8d6 100644
--- a/paddle/cinn/ir/lowered_func.cc
+++ b/paddle/cinn/ir/lowered_func.cc
@@ -184,7 +184,7 @@ std::vector<Expr> _LoweredFunc_::PrepareCreateTempBufferExprs() const {
 std::vector<Expr> _LoweredFunc_::CudaPrepareAllocTempBufferExprs() const {
   std::vector<Expr> alloc_output_buffer_exprs;
   for (auto temp_buf : temp_bufs) {
-    if (utils::Startswith(temp_buf->name, "_")) {
+    if (utils::StartsWith(temp_buf->name, "_")) {
       temp_buf->name = temp_buf->name.substr(1);
     }
     if (!temp_buf->shape.empty() && temp_buf->type() != Void()) {
diff --git a/paddle/cinn/lang/lower.cc b/paddle/cinn/lang/lower.cc
index 162580c07b91c0..ac94803a2128a1 100644
--- a/paddle/cinn/lang/lower.cc
+++ b/paddle/cinn/lang/lower.cc
@@ -114,7 +114,7 @@ std::vector<ir::Buffer> GetTempBuffers(
     return x->as_tensor() && x->as_tensor()->buffer.defined() &&
            ((!buffer_arg_names.count(x->as_tensor()->buffer->name) &&
              !tensor_arg_names.count(x->as_tensor()->name)) ||
-            utils::Endswith(x->as_tensor()->buffer->name, "temp_buffer"));
+            utils::EndsWith(x->as_tensor()->buffer->name, "temp_buffer"));
   });
   for (auto& e : all_temp_tensors) {
     auto buffer_name = e.as_tensor()->buffer->name;
@@ -158,7 +158,7 @@ std::vector<ir::Buffer> GetTempBuffers(const std::vector<Tensor>& tensor_args,
            (!tensor_group.Contain(x->as_tensor()->name) ||
             ((!buffer_arg_names.count(x->as_tensor()->buffer->name) &&
               !tensor_arg_names.count(x->as_tensor()->name)) ||
-             utils::Endswith(x->as_tensor()->buffer->name, "temp_buffer")));
+             utils::EndsWith(x->as_tensor()->buffer->name, "temp_buffer")));
   });
   for (auto& e : all_temp_tensors) {
     auto buffer_name = e.as_tensor()->buffer->name;
@@ -202,7 +202,7 @@ std::vector<ir::Buffer> GetTempBuffers(const std::vector<Tensor>& tensor_args,
            !stage_map[x->as_tensor()]->inlined()) &&
           ((!buffer_arg_names.count(x->as_tensor()->buffer->name) &&
            !tensor_arg_names.count(x->as_tensor()->name)) ||
-           utils::Endswith(x->as_tensor()->buffer->name, "temp_buffer"));
+           utils::EndsWith(x->as_tensor()->buffer->name, "temp_buffer"));
   });
   for (auto& e : all_temp_tensors) {
     auto buffer_name = e.as_tensor()->buffer->name;
@@ -250,7 +250,7 @@ std::vector<ir::Buffer> GetTempBuffers(const std::vector<ir::Argument>& args,
   ir::ir_utils::CollectIRNodesWithoutTensor(body, [&](const Expr* x) {
     return x->as_tensor() && x->as_tensor()->buffer.defined() &&
            (!buffer_arg_names.count(x->as_tensor()->buffer->name) ||
-            utils::Endswith(x->as_tensor()->buffer->name, "temp_buffer"));
+            utils::EndsWith(x->as_tensor()->buffer->name, "temp_buffer"));
   });
   for (auto& e : all_temp_tensors) {
     auto buffer_name = e.as_tensor()->buffer->name;
diff --git a/paddle/cinn/optim/compute_inline_expand.cc b/paddle/cinn/optim/compute_inline_expand.cc
index 7f42a3500ee760..576d438280e34a 100644
--- a/paddle/cinn/optim/compute_inline_expand.cc
+++ b/paddle/cinn/optim/compute_inline_expand.cc
@@ -59,15 +59,15 @@ struct TensorInlineExpandMutator : public ir::IRMutator<> {
 
   void Visit(const ir::_Var_ *expr, Expr *op) override {
     if (inline_code && temp_buffer) {
-      if (utils::Startswith(expr->name, "blockIdx") ||
-          (utils::Startswith(expr->name, "threadIdx") && memory_local)) {
+      if (utils::StartsWith(expr->name, "blockIdx") ||
+          (utils::StartsWith(expr->name, "threadIdx") && memory_local)) {
         *op = ir::Expr(0);
       }
     }
   }
 
   void Visit(const ir::_Tensor_ *op, Expr *expr) override {
-    if (inline_code && utils::Endswith(op->name, "_write_cache") &&
+    if (inline_code && utils::EndsWith(op->name, "_write_cache") &&
         (*all_tensor_map_).at(op->name)->buffer->memory_type ==
             ir::MemoryType::Heap) {
       auto no_cache_name = op->name.substr(0, op->name.size() - 12);
@@ -101,7 +101,7 @@ struct TensorInlineExpandMutator : public ir::IRMutator<> {
     } else if (inline_code && tensor->buffer.defined()) {
       bool is_heap = (*all_tensor_map_).at(tensor->name)->buffer->memory_type ==
                      ir::MemoryType::Heap;
-      if (utils::Endswith(tensor->buffer->name, "_write_cache") && is_heap) {
+      if (utils::EndsWith(tensor->buffer->name, "_write_cache") && is_heap) {
        // temp fix: cache_write will change the tensor to the cache tensor
        // wrongly
        auto no_cache_name =
@@ -120,9 +120,9 @@ struct TensorInlineExpandMutator : public ir::IRMutator<> {
           }
         }
       }
-    } else if (utils::Endswith(tensor->buffer->name, "_write_cache") ||
-               utils::Endswith(tensor->buffer->name, "_read_cache") ||
-               utils::Endswith(tensor->buffer->name, "_temp_buffer")) {
+    } else if (utils::EndsWith(tensor->buffer->name, "_write_cache") ||
+               utils::EndsWith(tensor->buffer->name, "_read_cache") ||
+               utils::EndsWith(tensor->buffer->name, "_temp_buffer")) {
 #ifdef CINN_WITH_CUDA
       auto axis_names = stages_[tensor]->axis_names();
       auto compute_ats = stages_[tensor]->GetComputeAts();
diff --git a/paddle/cinn/optim/replace_const_param_to_integer.cc b/paddle/cinn/optim/replace_const_param_to_integer.cc
index ad72439a1631a0..9d3740ad65f4e4 100644
--- a/paddle/cinn/optim/replace_const_param_to_integer.cc
+++ b/paddle/cinn/optim/replace_const_param_to_integer.cc
@@ -26,7 +26,7 @@ struct Mutator : public ir::IRMutator<> {
   using ir::IRMutator<>::Visit;
 
   void Visit(const ir::_Var_* op, Expr* expr) override {
-    if (utils::Startswith(op->name, poly::kIslParamConstPrefix)) {
+    if (utils::StartsWith(op->name, poly::kIslParamConstPrefix)) {
       std::string value = op->name.substr(strlen(poly::kIslParamConstPrefix));
       *expr = Expr(std::stoi(value));
     }
diff --git a/paddle/cinn/optim/resize_buffer.cc b/paddle/cinn/optim/resize_buffer.cc
index f36eef0704946d..c725d9d0c3c01f 100644
--- a/paddle/cinn/optim/resize_buffer.cc
+++ b/paddle/cinn/optim/resize_buffer.cc
@@ -40,8 +40,8 @@ class AnalyzeLoopVarRange : public ir::IRMutator<> {
       std::stringstream oss;
       oss << less_than_ir->a();
       std::string var_name = oss.str();
-      if (utils::Startswith(var_name, "blockIdx") ||
-          utils::Startswith(var_name, "threadIdx")) {
+      if (utils::StartsWith(var_name, "blockIdx") ||
+          utils::StartsWith(var_name, "threadIdx")) {
        var_name_to_extent_[var_name] = less_than_ir->b();
       }
     }
diff --git a/paddle/cinn/poly/domain_add_unit_loop_mutator.cc b/paddle/cinn/poly/domain_add_unit_loop_mutator.cc
index d526012e7862d7..e9628e79ed8265 100644
--- a/paddle/cinn/poly/domain_add_unit_loop_mutator.cc
+++ b/paddle/cinn/poly/domain_add_unit_loop_mutator.cc
@@ -46,7 +46,7 @@ void DomainAddUnitLoopMutator::Visit(const ir::For* op, Expr* expr) {
   if (parent_for_.size() < dim_names_.size()) {
     std::string check_name = dim_names_[parent_for_.size()];
     std::tuple<int, int, int> t = dim_min_max_[parent_for_.size()];
-    if (!utils::Startswith(node->loop_var->name, check_name) &&
+    if (!utils::StartsWith(node->loop_var->name, check_name) &&
         (std::get<2>(t) - std::get<1>(t) == 0)) {
       ir::Expr unit_loop = ir::For::Make(ir::Var(check_name),
                                          ir::Expr(0),
@@ -96,7 +96,7 @@ void DomainAddUnitLoopMutator::Visit(const ir::PolyFor* op, Expr* expr) {
   if (parent_poly_for_.size() < dim_names_.size()) {
     std::string check_name = dim_names_[parent_poly_for_.size()];
     std::tuple<int, int, int> t = dim_min_max_[parent_poly_for_.size()];
-    if (!utils::Startswith(node->iterator->name, check_name) &&
+    if (!utils::StartsWith(node->iterator->name, check_name) &&
         (std::get<2>(t) - std::get<1>(t) == 0)) {
       ir::Expr unit_loop =
           ir::PolyFor::Make(ir::Var(check_name),
@@ -153,7 +153,7 @@ void DomainAddUnitLoopMutator::MutateAfterVisit(ir::Expr* expr) {
     std::tuple<int, int, int> t = dim_min_max_[i];
     if (longest_loop_[i].As<ir::For>()) {
       const ir::For* node = longest_loop_[i].As<ir::For>();
-      if (utils::Startswith(node->loop_var->name, dim_names_[i]) &&
+      if (utils::StartsWith(node->loop_var->name, dim_names_[i]) &&
          node->min.is_constant() && node->min.as_int32() == std::get<1>(t) &&
          node->extent.is_constant() &&
          node->extent.as_int32() == std::get<2>(t)) {
@@ -164,7 +164,7 @@ void DomainAddUnitLoopMutator::MutateAfterVisit(ir::Expr* expr) {
       }
     } else if (longest_loop_[i].As<ir::PolyFor>()) {
       const ir::PolyFor* node = longest_loop_[i].As<ir::PolyFor>();
-      if (utils::Startswith(node->iterator->name, dim_names_[i]) &&
+      if (utils::StartsWith(node->iterator->name, dim_names_[i]) &&
          node->init.is_constant() && node->init.as_int32() == std::get<1>(t) &&
          node->condition ==
              ir::LE::Make(ir::Var(dim_names_[i]), ir::Expr(std::get<2>(t)))) {
diff --git a/paddle/cinn/poly/isl_utils.cc b/paddle/cinn/poly/isl_utils.cc
index 34f1dd21fac2f1..066f7b65fda6ce 100644
--- a/paddle/cinn/poly/isl_utils.cc
+++ b/paddle/cinn/poly/isl_utils.cc
@@ -522,7 +522,7 @@ std::vector<std::string> GetRelatedInputAxies(
   auto transformed_domain = temp_set.apply(x);
   for (auto &i : dim_out_names) {
     out_set.insert(i);
-    if (utils::Endswith(i, "_inner") || utils::Endswith(i, "_outer")) {
+    if (utils::EndsWith(i, "_inner") || utils::EndsWith(i, "_outer")) {
       out_set_without_suffix.insert(utils::RemoveSuffix(i));
     }
   }
diff --git a/paddle/cinn/poly/stage.cc b/paddle/cinn/poly/stage.cc
index e04c178805ae47..896e793d246901 100644
--- a/paddle/cinn/poly/stage.cc
+++ b/paddle/cinn/poly/stage.cc
@@ -751,8 +751,8 @@ void Stage::ComputeAt2(Stage *other, int level) {
   other->CtrlDepend(ir::Tensor(tensor()));
   if (this->tensor()->buffer.defined()) {
     std::string t_name = this->tensor()->buffer->name;
-    if (utils::Endswith(t_name, "_read_cache") ||
-        utils::Endswith(t_name, "_write_cache")) {
+    if (utils::EndsWith(t_name, "_read_cache") ||
+        utils::EndsWith(t_name, "_write_cache")) {
       EditTempTensor(other, level);
     }
   }
@@ -776,8 +776,8 @@ void Stage::ComputeAt3(Stage *other, int level) {
   other->CtrlDepend(ir::Tensor(tensor()));
   if (this->tensor()->buffer.defined()) {
     std::string t_name = this->tensor()->buffer->name;
-    if (utils::Endswith(t_name, "_read_cache") ||
-        utils::Endswith(t_name, "_write_cache")) {
+    if (utils::EndsWith(t_name, "_read_cache") ||
+        utils::EndsWith(t_name, "_write_cache")) {
       EditTempTensor(other, level);
     }
   }
@@ -788,8 +788,8 @@ void Stage::SimpleComputeAt(Stage *other, int level) {
   other->CtrlDepend(ir::Tensor(tensor()));
   if (this->tensor()->buffer.defined()) {
     std::string t_name = this->tensor()->buffer->name;
-    if (utils::Endswith(t_name, "_read_cache") ||
-        utils::Endswith(t_name, "_write_cache")) {
+    if (utils::EndsWith(t_name, "_read_cache") ||
+        utils::EndsWith(t_name, "_write_cache")) {
       EditTempTensor(other, level);
     }
   }
diff --git a/paddle/cinn/utils/string.cc b/paddle/cinn/utils/string.cc
index c802cedfa5c2eb..5e6560551c068a 100644
--- a/paddle/cinn/utils/string.cc
+++ b/paddle/cinn/utils/string.cc
@@ -46,7 +46,7 @@ std::string StringFormat(const std::string &fmt_str, ...) {
 
 std::string RemoveSuffix(const std::string &name) {
   std::string res = name;
-  while (Endswith(res, "_outer") || Endswith(res, "_inner")) {
+  while (EndsWith(res, "_outer") || EndsWith(res, "_inner")) {
     res = res.substr(0, res.size() - 6);
   }
   return res;
@@ -68,10 +68,10 @@ std::string Uppercase(const std::string &x) {
   return res;
 }
 
-bool Startswith(const std::string &x, const std::string &str) {
+bool StartsWith(const std::string &x, const std::string &str) {
   return x.find(str) == 0;
 }
 
-bool Endswith(const std::string &x, const std::string &str) {
+bool EndsWith(const std::string &x, const std::string &str) {
   if (x.length() >= str.length()) {
     return std::equal(str.rbegin(), str.rend(), x.rbegin());
   }
diff --git a/paddle/cinn/utils/string.h b/paddle/cinn/utils/string.h
index dbe876f4b6c6b1..900e1a6a2ed570 100644
--- a/paddle/cinn/utils/string.h
+++ b/paddle/cinn/utils/string.h
@@ -75,10 +75,10 @@ bool IsPrefix(const char& c);
 bool IsSuffix(const char& c);
 
 //! Tell if a string \p x start with \p str.
-bool Startswith(const std::string& x, const std::string& str);
+bool StartsWith(const std::string& x, const std::string& str);
 
 //! Tell if a string \p x ends with \p str.
-bool Endswith(const std::string& x, const std::string& str);
+bool EndsWith(const std::string& x, const std::string& str);
 
 template <typename T>
 std::string GetStreamCnt(const T& x) {
diff --git a/paddle/cinn/utils/string_test.cc b/paddle/cinn/utils/string_test.cc
index 5925052f9ece56..1a479e11d632b1 100644
--- a/paddle/cinn/utils/string_test.cc
+++ b/paddle/cinn/utils/string_test.cc
@@ -19,19 +19,19 @@
 namespace cinn {
 namespace utils {
 
-TEST(string, Endswith) {
+TEST(string, EndsWith) {
   std::string a = "a__p";
-  ASSERT_TRUE(Endswith(a, "__p"));
-  ASSERT_FALSE(Endswith(a, "_x"));
-  ASSERT_TRUE(Endswith(a, "a__p"));
-  ASSERT_FALSE(Endswith(a, "a___p"));
+  ASSERT_TRUE(EndsWith(a, "__p"));
+  ASSERT_FALSE(EndsWith(a, "_x"));
+  ASSERT_TRUE(EndsWith(a, "a__p"));
+  ASSERT_FALSE(EndsWith(a, "a___p"));
 }
 
-TEST(string, Startswith) {
+TEST(string, StartsWith) {
   std::string a = "a__p";
-  ASSERT_TRUE(Startswith(a, "a_"));
-  ASSERT_TRUE(Startswith(a, "a__"));
-  ASSERT_TRUE(Startswith(a, "a__p"));
-  ASSERT_FALSE(Startswith(a, "a___p"));
+  ASSERT_TRUE(StartsWith(a, "a_"));
+  ASSERT_TRUE(StartsWith(a, "a__"));
+  ASSERT_TRUE(StartsWith(a, "a__p"));
+  ASSERT_FALSE(StartsWith(a, "a___p"));
 }
 
 }  // namespace utils