2 changes: 1 addition & 1 deletion paddle/cinn/ast_gen_ius/tensor_group.cc
@@ -244,7 +244,7 @@ TensorGroup ConvertStageMapToTensorGroup(const poly::StageMap& stage_map) {
 if (iter->second->has_expression()) {
 const std::string& tensor_name = iter->first;
 stage_tensors.push_back(ir::Tensor(iter->second->tensor()));
-if (utils::Endswith(tensor_name, "_reshape")) {
+if (utils::EndsWith(tensor_name, "_reshape")) {
 reshape_tensors.insert(ir::Tensor(iter->second->tensor()));
 }
 }
2 changes: 1 addition & 1 deletion paddle/cinn/backends/codegen_c.cc
@@ -120,7 +120,7 @@ std::string CodeGenC::GetTypeName(Type type) {
 auto customized_name = type.customized_type();
 // get name of a cuda built-in vector type, it is started with a
 // 'CudaVectorType::' prefix
-if (utils::Startswith(
+if (utils::StartsWith(
 customized_name,
 cinn::common::customized_type::kcuda_builtin_vector_t)) {
 customized_name.erase(
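(Aside: the hunk above shows a detect-then-strip pattern: test a type name for a known prefix with `StartsWith`, then `erase` the prefix before using the remainder. Below is a minimal hedged sketch of that pattern. The prefix value comes from the comment in the hunk ('CudaVectorType::'); the `erase` arguments and the `main` harness are assumptions for illustration, since the diff truncates the real call.)

```cpp
// Hedged sketch of the prefix detect-then-strip pattern in CodeGenC::GetTypeName.
// The erase arguments below are assumed for illustration; the real constant is
// cinn::common::customized_type::kcuda_builtin_vector_t.
#include <cassert>
#include <string>

bool StartsWith(const std::string &x, const std::string &str) {
  return x.find(str) == 0;  // true only when str matches at index 0
}

int main() {
  const std::string kPrefix = "CudaVectorType::";  // per the hunk's comment
  std::string customized_name = "CudaVectorType::float4";
  if (StartsWith(customized_name, kPrefix)) {
    customized_name.erase(0, kPrefix.size());  // strip the prefix (assumed args)
  }
  assert(customized_name == "float4");
  return 0;
}
```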
6 changes: 3 additions & 3 deletions paddle/cinn/backends/codegen_cuda_dev.cc
@@ -183,8 +183,8 @@ void CodeGenCUDA_Dev::Visit(const ir::Free *op) {
 }

 void CodeGenCUDA_Dev::Visit(const ir::_Var_ *op) {
-if (utils::Startswith(op->name, "threadIdx") ||
-utils::Startswith(op->name, "blockIdx")) {
+if (utils::StartsWith(op->name, "threadIdx") ||
+utils::StartsWith(op->name, "blockIdx")) {
 str_ += "(int)";
 str_ += op->name;
 } else {
@@ -409,7 +409,7 @@ void CodeGenCUDA_Dev::Visit(const ir::Let *op) {
 // identify vectorized tensors by checking their dtypes are customized_type
 // with customized_type::kcuda_builtin_vector_t prefix, and save their names
 if (op->type().is_customized() &&
-utils::Startswith(
+utils::StartsWith(
 op->type().customized_type(),
 cinn::common::customized_type::kcuda_builtin_vector_t)) {
 str_ += GetTypeRepr(op->type());
2 changes: 1 addition & 1 deletion paddle/cinn/backends/llvm/codegen_llvm.cc
@@ -1395,7 +1395,7 @@ void CodeGenLLVM::InitTarget(const Target &target) {
 }

 bool LLVM_WillVarLowerAsPointer(const std::string &var_name) {
-return var_name == "_args" || utils::Endswith(var_name, "__ptr");
+return var_name == "_args" || utils::EndsWith(var_name, "__ptr");
 }

 void CodeGenLLVM::AddTbaaMetadata(llvm::Instruction *inst,
4 changes: 2 additions & 2 deletions paddle/cinn/common/cas.cc
@@ -1155,8 +1155,8 @@ inline bool IsVarNonnegative(
 // Return if the var is binded with thread or block in cuda(which implies it is
 // non-negative).
 inline bool IsVarBinded(const std::string& var_name) {
-return utils::Startswith(var_name, "threadIdx") ||
-utils::Startswith(var_name, "blockIdx");
+return utils::StartsWith(var_name, "threadIdx") ||
+utils::StartsWith(var_name, "blockIdx");
 }

 /**
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/framework/instruction.h
@@ -103,7 +103,7 @@ class Instruction {
 int flag = -1;
 void* stream = nullptr;
 for (int idx = 0; idx < 4; idx++) {
-if (utils::Startswith(out_args_[idx][0], "kernel_pack")) {
+if (utils::StartsWith(out_args_[idx][0], "kernel_pack")) {
 VLOG(3) << "PreRun " << idx << "-th function of fn_:" << fn_names_[idx];
 flag = idx;
 auto& pod_args = args_cached_[idx];
4 changes: 2 additions & 2 deletions paddle/cinn/ir/buffer.cc
@@ -27,15 +27,15 @@ namespace ir {

 std::string TensorGetBufferName(const _Tensor_ *tensor) {
 CHECK(!tensor->name.empty());
-CHECK(!utils::Startswith(tensor->name, "_"))
+CHECK(!utils::StartsWith(tensor->name, "_"))
 << "the name with prefix _ is not allowed for tensor. Current tensor's "
 "name is: "
 << tensor->name;
 return "_" + tensor->name;
 }
 std::string BufferGetTensorName(const _Buffer_ *buffer) {
 CHECK(!buffer->name.empty());
-CHECK(utils::Startswith(buffer->name, "_"))
+CHECK(utils::StartsWith(buffer->name, "_"))
 << "buffer's name should start with _";
 return buffer->name.substr(1);
 }
2 changes: 1 addition & 1 deletion paddle/cinn/ir/lowered_func.cc
@@ -184,7 +184,7 @@ std::vector<Expr> _LoweredFunc_::PrepareCreateTempBufferExprs() const {
 std::vector<Expr> _LoweredFunc_::CudaPrepareAllocTempBufferExprs() const {
 std::vector<Expr> alloc_output_buffer_exprs;
 for (auto temp_buf : temp_bufs) {
-if (utils::Startswith(temp_buf->name, "_")) {
+if (utils::StartsWith(temp_buf->name, "_")) {
 temp_buf->name = temp_buf->name.substr(1);
 }
 if (!temp_buf->shape.empty() && temp_buf->type() != Void()) {
8 changes: 4 additions & 4 deletions paddle/cinn/lang/lower.cc
@@ -114,7 +114,7 @@ std::vector<ir::Buffer> GetTempBuffers(
 return x->as_tensor() && x->as_tensor()->buffer.defined() &&
 ((!buffer_arg_names.count(x->as_tensor()->buffer->name) &&
 !tensor_arg_names.count(x->as_tensor()->name)) ||
-utils::Endswith(x->as_tensor()->buffer->name, "temp_buffer"));
+utils::EndsWith(x->as_tensor()->buffer->name, "temp_buffer"));
 });
 for (auto& e : all_temp_tensors) {
 auto buffer_name = e.as_tensor()->buffer->name;
@@ -158,7 +158,7 @@ std::vector<ir::Buffer> GetTempBuffers(const std::vector<Tensor>& tensor_args,
 (!tensor_group.Contain(x->as_tensor()->name) ||
 ((!buffer_arg_names.count(x->as_tensor()->buffer->name) &&
 !tensor_arg_names.count(x->as_tensor()->name)) ||
-utils::Endswith(x->as_tensor()->buffer->name, "temp_buffer")));
+utils::EndsWith(x->as_tensor()->buffer->name, "temp_buffer")));
 });
 for (auto& e : all_temp_tensors) {
 auto buffer_name = e.as_tensor()->buffer->name;
@@ -202,7 +202,7 @@ std::vector<ir::Buffer> GetTempBuffers(const std::vector<Tensor>& tensor_args,
 !stage_map[x->as_tensor()]->inlined()) &&
 ((!buffer_arg_names.count(x->as_tensor()->buffer->name) &&
 !tensor_arg_names.count(x->as_tensor()->name)) ||
-utils::Endswith(x->as_tensor()->buffer->name, "temp_buffer"));
+utils::EndsWith(x->as_tensor()->buffer->name, "temp_buffer"));
 });
 for (auto& e : all_temp_tensors) {
 auto buffer_name = e.as_tensor()->buffer->name;
@@ -250,7 +250,7 @@ std::vector<ir::Buffer> GetTempBuffers(const std::vector<ir::Argument>& args,
 ir::ir_utils::CollectIRNodesWithoutTensor(body, [&](const Expr* x) {
 return x->as_tensor() && x->as_tensor()->buffer.defined() &&
 (!buffer_arg_names.count(x->as_tensor()->buffer->name) ||
-utils::Endswith(x->as_tensor()->buffer->name, "temp_buffer"));
+utils::EndsWith(x->as_tensor()->buffer->name, "temp_buffer"));
 });
 for (auto& e : all_temp_tensors) {
 auto buffer_name = e.as_tensor()->buffer->name;
14 changes: 7 additions & 7 deletions paddle/cinn/optim/compute_inline_expand.cc
@@ -59,15 +59,15 @@ struct TensorInlineExpandMutator : public ir::IRMutator<> {

 void Visit(const ir::_Var_ *expr, Expr *op) override {
 if (inline_code && temp_buffer) {
-if (utils::Startswith(expr->name, "blockIdx") ||
-(utils::Startswith(expr->name, "threadIdx") && memory_local)) {
+if (utils::StartsWith(expr->name, "blockIdx") ||
+(utils::StartsWith(expr->name, "threadIdx") && memory_local)) {
 *op = ir::Expr(0);
 }
 }
 }

 void Visit(const ir::_Tensor_ *op, Expr *expr) override {
-if (inline_code && utils::Endswith(op->name, "_write_cache") &&
+if (inline_code && utils::EndsWith(op->name, "_write_cache") &&
 (*all_tensor_map_).at(op->name)->buffer->memory_type ==
 ir::MemoryType::Heap) {
 auto no_cache_name = op->name.substr(0, op->name.size() - 12);
@@ -101,7 +101,7 @@ struct TensorInlineExpandMutator : public ir::IRMutator<> {
 } else if (inline_code && tensor->buffer.defined()) {
 bool is_heap = (*all_tensor_map_).at(tensor->name)->buffer->memory_type ==
 ir::MemoryType::Heap;
-if (utils::Endswith(tensor->buffer->name, "_write_cache") && is_heap) {
+if (utils::EndsWith(tensor->buffer->name, "_write_cache") && is_heap) {
 // temp fix: cache_write will change the tensor to the cache tensor
 // wrongly
 auto no_cache_name =
@@ -120,9 +120,9 @@ struct TensorInlineExpandMutator : public ir::IRMutator<> {
 }
 }
 }
-} else if (utils::Endswith(tensor->buffer->name, "_write_cache") ||
-utils::Endswith(tensor->buffer->name, "_read_cache") ||
-utils::Endswith(tensor->buffer->name, "_temp_buffer")) {
+} else if (utils::EndsWith(tensor->buffer->name, "_write_cache") ||
+utils::EndsWith(tensor->buffer->name, "_read_cache") ||
+utils::EndsWith(tensor->buffer->name, "_temp_buffer")) {
 #ifdef CINN_WITH_CUDA
 auto axis_names = stages_[tensor]->axis_names();
 auto compute_ats = stages_[tensor]->GetComputeAts();
2 changes: 1 addition & 1 deletion paddle/cinn/optim/replace_const_param_to_integer.cc
@@ -26,7 +26,7 @@ struct Mutator : public ir::IRMutator<> {
 using ir::IRMutator<>::Visit;

 void Visit(const ir::_Var_* op, Expr* expr) override {
-if (utils::Startswith(op->name, poly::kIslParamConstPrefix)) {
+if (utils::StartsWith(op->name, poly::kIslParamConstPrefix)) {
 std::string value = op->name.substr(strlen(poly::kIslParamConstPrefix));
 *expr = Expr(std::stoi(value));
 }
4 changes: 2 additions & 2 deletions paddle/cinn/optim/resize_buffer.cc
@@ -40,8 +40,8 @@ class AnalyzeLoopVarRange : public ir::IRMutator<> {
 std::stringstream oss;
 oss << less_than_ir->a();
 std::string var_name = oss.str();
-if (utils::Startswith(var_name, "blockIdx") ||
-utils::Startswith(var_name, "threadIdx")) {
+if (utils::StartsWith(var_name, "blockIdx") ||
+utils::StartsWith(var_name, "threadIdx")) {
 var_name_to_extent_[var_name] = less_than_ir->b();
 }
 }
8 changes: 4 additions & 4 deletions paddle/cinn/poly/domain_add_unit_loop_mutator.cc
@@ -46,7 +46,7 @@ void DomainAddUnitLoopMutator::Visit(const ir::For* op, Expr* expr) {
 if (parent_for_.size() < dim_names_.size()) {
 std::string check_name = dim_names_[parent_for_.size()];
 std::tuple<int, int, int> t = dim_min_max_[parent_for_.size()];
-if (!utils::Startswith(node->loop_var->name, check_name) &&
+if (!utils::StartsWith(node->loop_var->name, check_name) &&
 (std::get<2>(t) - std::get<1>(t) == 0)) {
 ir::Expr unit_loop = ir::For::Make(ir::Var(check_name),
 ir::Expr(0),
@@ -96,7 +96,7 @@ void DomainAddUnitLoopMutator::Visit(const ir::PolyFor* op, Expr* expr) {
 if (parent_poly_for_.size() < dim_names_.size()) {
 std::string check_name = dim_names_[parent_poly_for_.size()];
 std::tuple<int, int, int> t = dim_min_max_[parent_poly_for_.size()];
-if (!utils::Startswith(node->iterator->name, check_name) &&
+if (!utils::StartsWith(node->iterator->name, check_name) &&
 (std::get<2>(t) - std::get<1>(t) == 0)) {
 ir::Expr unit_loop =
 ir::PolyFor::Make(ir::Var(check_name),
@@ -153,7 +153,7 @@ void DomainAddUnitLoopMutator::MutateAfterVisit(ir::Expr* expr) {
 std::tuple<int, int, int> t = dim_min_max_[i];
 if (longest_loop_[i].As<ir::For>()) {
 const ir::For* node = longest_loop_[i].As<ir::For>();
-if (utils::Startswith(node->loop_var->name, dim_names_[i]) &&
+if (utils::StartsWith(node->loop_var->name, dim_names_[i]) &&
 node->min.is_constant() && node->min.as_int32() == std::get<1>(t) &&
 node->extent.is_constant() &&
 node->extent.as_int32() == std::get<2>(t)) {
@@ -164,7 +164,7 @@
 }
 } else if (longest_loop_[i].As<ir::PolyFor>()) {
 const ir::PolyFor* node = longest_loop_[i].As<ir::PolyFor>();
-if (utils::Startswith(node->iterator->name, dim_names_[i]) &&
+if (utils::StartsWith(node->iterator->name, dim_names_[i]) &&
 node->init.is_constant() && node->init.as_int32() == std::get<1>(t) &&
 node->condition ==
 ir::LE::Make(ir::Var(dim_names_[i]), ir::Expr(std::get<2>(t)))) {
2 changes: 1 addition & 1 deletion paddle/cinn/poly/isl_utils.cc
@@ -522,7 +522,7 @@ std::vector<std::string> GetRelatedInputAxies(
 auto transformed_domain = temp_set.apply(x);
 for (auto &i : dim_out_names) {
 out_set.insert(i);
-if (utils::Endswith(i, "_inner") || utils::Endswith(i, "_outer")) {
+if (utils::EndsWith(i, "_inner") || utils::EndsWith(i, "_outer")) {
 out_set_without_suffix.insert(utils::RemoveSuffix(i));
 }
 }
12 changes: 6 additions & 6 deletions paddle/cinn/poly/stage.cc
@@ -751,8 +751,8 @@ void Stage::ComputeAt2(Stage *other, int level) {
 other->CtrlDepend(ir::Tensor(tensor()));
 if (this->tensor()->buffer.defined()) {
 std::string t_name = this->tensor()->buffer->name;
-if (utils::Endswith(t_name, "_read_cache") ||
-utils::Endswith(t_name, "_write_cache")) {
+if (utils::EndsWith(t_name, "_read_cache") ||
+utils::EndsWith(t_name, "_write_cache")) {
 EditTempTensor(other, level);
 }
 }
@@ -776,8 +776,8 @@ void Stage::ComputeAt3(Stage *other, int level) {
 other->CtrlDepend(ir::Tensor(tensor()));
 if (this->tensor()->buffer.defined()) {
 std::string t_name = this->tensor()->buffer->name;
-if (utils::Endswith(t_name, "_read_cache") ||
-utils::Endswith(t_name, "_write_cache")) {
+if (utils::EndsWith(t_name, "_read_cache") ||
+utils::EndsWith(t_name, "_write_cache")) {
 EditTempTensor(other, level);
 }
 }
@@ -788,8 +788,8 @@ void Stage::SimpleComputeAt(Stage *other, int level) {
 other->CtrlDepend(ir::Tensor(tensor()));
 if (this->tensor()->buffer.defined()) {
 std::string t_name = this->tensor()->buffer->name;
-if (utils::Endswith(t_name, "_read_cache") ||
-utils::Endswith(t_name, "_write_cache")) {
+if (utils::EndsWith(t_name, "_read_cache") ||
+utils::EndsWith(t_name, "_write_cache")) {
 EditTempTensor(other, level);
 }
 }
6 changes: 3 additions & 3 deletions paddle/cinn/utils/string.cc
@@ -46,7 +46,7 @@ std::string StringFormat(const std::string &fmt_str, ...) {

 std::string RemoveSuffix(const std::string &name) {
 std::string res = name;
-while (Endswith(res, "_outer") || Endswith(res, "_inner")) {
+while (EndsWith(res, "_outer") || EndsWith(res, "_inner")) {
 res = res.substr(0, res.size() - 6);
 }
 return res;
@@ -68,10 +68,10 @@ std::string Uppercase(const std::string &x) {
 return res;
 }

-bool Startswith(const std::string &x, const std::string &str) {
+bool StartsWith(const std::string &x, const std::string &str) {
 return x.find(str) == 0;
 }
-bool Endswith(const std::string &x, const std::string &str) {
+bool EndsWith(const std::string &x, const std::string &str) {
 if (x.length() >= str.length()) {
 return std::equal(str.rbegin(), str.rend(), x.rbegin());
 }
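(For reference: the renamed helpers keep their original behavior. `StartsWith` reports whether `str` occurs at index 0 of `x`, and `EndsWith` compares suffixes back to front. The standalone sketch below mirrors the implementations in the hunk above; the `main` harness and its sample strings are illustrative only, not part of this PR.)

```cpp
// Standalone sketch of the renamed helpers from paddle/cinn/utils/string.cc.
// The main() harness is illustrative, not part of the PR.
#include <algorithm>
#include <cassert>
#include <string>

bool StartsWith(const std::string &x, const std::string &str) {
  // True only when str occurs at index 0 of x. Note that on a mismatch,
  // find() keeps searching later positions before giving up;
  // x.compare(0, str.size(), str) == 0 would stop at the first difference.
  return x.find(str) == 0;
}

bool EndsWith(const std::string &x, const std::string &str) {
  if (x.length() >= str.length()) {
    // Compare str against the tail of x, walking both back to front.
    return std::equal(str.rbegin(), str.rend(), x.rbegin());
  }
  return false;
}

int main() {
  assert(StartsWith("threadIdx.x", "threadIdx"));
  assert(!StartsWith("x_threadIdx", "threadIdx"));  // must match at index 0
  assert(EndsWith("var__ptr", "__ptr"));
  assert(!EndsWith("ptr", "__ptr"));  // x shorter than the suffix
  return 0;
}
```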
4 changes: 2 additions & 2 deletions paddle/cinn/utils/string.h
@@ -75,10 +75,10 @@ bool IsPrefix(const char& c);
 bool IsSuffix(const char& c);

 //! Tell if a string \p x start with \p str.
-bool Startswith(const std::string& x, const std::string& str);
+bool StartsWith(const std::string& x, const std::string& str);

 //! Tell if a string \p x ends with \p str.
-bool Endswith(const std::string& x, const std::string& str);
+bool EndsWith(const std::string& x, const std::string& str);

 template <typename T>
 std::string GetStreamCnt(const T& x) {
20 changes: 10 additions & 10 deletions paddle/cinn/utils/string_test.cc
@@ -19,19 +19,19 @@
 namespace cinn {
 namespace utils {

-TEST(string, Endswith) {
+TEST(string, EndsWith) {
 std::string a = "a__p";
-ASSERT_TRUE(Endswith(a, "__p"));
-ASSERT_FALSE(Endswith(a, "_x"));
-ASSERT_TRUE(Endswith(a, "a__p"));
-ASSERT_FALSE(Endswith(a, "a___p"));
+ASSERT_TRUE(EndsWith(a, "__p"));
+ASSERT_FALSE(EndsWith(a, "_x"));
+ASSERT_TRUE(EndsWith(a, "a__p"));
+ASSERT_FALSE(EndsWith(a, "a___p"));
 }
-TEST(string, Startswith) {
+TEST(string, StartsWith) {
 std::string a = "a__p";
-ASSERT_TRUE(Startswith(a, "a_"));
-ASSERT_TRUE(Startswith(a, "a__"));
-ASSERT_TRUE(Startswith(a, "a__p"));
-ASSERT_FALSE(Startswith(a, "a___p"));
+ASSERT_TRUE(StartsWith(a, "a_"));
+ASSERT_TRUE(StartsWith(a, "a__"));
+ASSERT_TRUE(StartsWith(a, "a__p"));
+ASSERT_FALSE(StartsWith(a, "a___p"));
 }

 }  // namespace utils