2 changes: 1 addition & 1 deletion paddle/cinn/hlir/framework/pir/utils.cc
@@ -406,7 +406,7 @@ OpPatternKind CompatibleInfo::OpKind(const ::pir::Operation& op) {
  auto kind = op_pattern_dict[cinn_op];
  if (kind == hlir::framework::kBroadcast) {
    // As binary op was defined as broadcast, actually it should be
-   // element-wise. See fusion_hepler_base.h for detail.
+   // element-wise. See fusion_helper_base.h for detail.
    if (op_name != "broadcast_to") {
      kind = hlir::framework::kElementWise;
    }
2 changes: 1 addition & 1 deletion paddle/cinn/runtime/cuda/cuda_module.cc
@@ -79,7 +79,7 @@ void CUDAModule::LaunchKernel(int device_id,

CUfunction CUDAModule::GetFunction(int device_id,
const std::string& func_name) {
VLOG(5) << "GetFuncion : " << func_name << " with device_id : " << device_id;
VLOG(5) << "GetFunction : " << func_name << " with device_id : " << device_id;
cinn::utils::RecordEvent record_run("cuLaunchKernel",
cinn::utils::EventType::kOrdinary);
if (!module_per_card_[device_id]) {
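A quick usage note for the corrected name (editor's sketch, not part of this PR): only the GetFunction signature comes from the hunk above; the module instance and the kernel name are hypothetical.

    // Hedged sketch: "module" is a CUDAModule built elsewhere and the
    // kernel name is invented for illustration.
    CUfunction fn = module.GetFunction(/*device_id=*/0, "fn_elementwise_add");
    // The returned CUfunction is what later feeds the driver-API launch
    // (cuLaunchKernel) that the RecordEvent label above refers to.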
6 changes: 3 additions & 3 deletions paddle/cinn/runtime/flags.cc
@@ -97,7 +97,7 @@ PD_DEFINE_bool(cinn_enable_map_expr_dynamic_shape,

PD_DEFINE_bool(cinn_enable_map_expr_index_detail,
BoolFromEnv("FLAGS_cinn_enable_map_expr_index_detail", false),
"It controls whether to display datail tensor index");
"It controls whether to display detail tensor index");

PD_DEFINE_bool(
cinn_use_custom_call,
@@ -116,7 +116,7 @@ PD_DEFINE_string(cinn_check_fusion_accuracy_pass,

PD_DEFINE_bool(cinn_use_cuda_vectorize,
BoolFromEnv("FLAGS_cinn_use_cuda_vectorize", false),
"Whether use cuda vectroize on schedule config");
"Whether use cuda vectorize on schedule config");

PD_DEFINE_bool(use_reduce_split_pass,
BoolFromEnv("FLAGS_use_reduce_split_pass", false),
@@ -141,7 +141,7 @@ PD_DEFINE_bool(
BoolFromEnv("FLAGS_cinn_nvrtc_cubin_with_fmad", true),
"Whether nvrtc enables fmad when compile to cubin. This flag only works "
"when FLAGS_nvrtc_compile_to_cubin=true. Fmad is the cuda speed up "
"technique which contract fp mulitplication and addition/subtraction into "
"technique which contract fp multiplication and addition/subtraction into "
"multiply-add operation. It may result in different fp precision.");

// FLAGS for performance analysis and accuracy debug
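Background on the fmad wording fixed above (editor's illustration, not PR code): a fused multiply-add evaluates a * b + c with a single rounding, while the unfused form rounds the product and the sum separately, so results can differ in the last bits; that is the precision caveat in the flag description. A minimal standalone sketch, assuming a host compiler run with -ffp-contract=off so it does not fuse the "separate" expression itself (nvrtc's fmad option plays the analogous role for device code):

    // Standalone illustration, not PR code.
    #include <cmath>
    #include <cstdio>
    int main() {
      double a = 1.0 + std::ldexp(1.0, -27);    // 1 + 2^-27
      double c = -(1.0 + std::ldexp(1.0, -26)); // minus the rounded value of a*a
      std::printf("separate: %.17g\n", a * a + c);         // 0: product rounded first
      std::printf("fused:    %.17g\n", std::fma(a, a, c)); // 2^-54: exact product used
      return 0;
    }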
2 changes: 1 addition & 1 deletion paddle/cinn/runtime/intrinsic.h
@@ -127,7 +127,7 @@ static const char* parallel_launch = "cinn_backend_parallel_launch";
} // namespace intrinsic

/**
- * Call an intrnsic function.
+ * Call an intrinsic function.
* @param type Return type of the function.
* @param fn_name Name of the function.
* @param args The arguments for the function.
32 changes: 16 additions & 16 deletions paddle/common/flags.cc
@@ -582,7 +582,7 @@ PHI_DEFINE_EXPORTED_uint64(
"specified by FLAGS_reallocate_gpu_memory_in_mb until the gpu has "
"no memory left for the additional trunk. Note: if you set this "
"flag, the memory size set by "
"FLAGS_fraction_of_gpu_memory_to_use will be overrided by this "
"FLAGS_fraction_of_gpu_memory_to_use will be overridden by this "
"flag. If you don't set this flag, PaddlePaddle will use "
"FLAGS_fraction_of_gpu_memory_to_use to allocate gpu memory");

@@ -670,7 +670,7 @@ PHI_DEFINE_EXPORTED_bool(use_mkldnn, false, "Use MKLDNN to run");
* Value Range: int, default=2
* Example:
* Note: Used to debug. Determine the call stack to print when error or
- * exeception happens.
+ * exception happens.
* If FLAGS_call_stack_level == 0, only the error message summary will be shown.
* If FLAGS_call_stack_level == 1, the python stack and error message summary
* will be shown.
@@ -686,7 +686,7 @@ static const int32_t kDefaultCallStackLevel = 1;
PHI_DEFINE_EXPORTED_int32(
call_stack_level,
kDefaultCallStackLevel,
"Determine the call stack to print when error or exeception happens."
"Determine the call stack to print when error or exception happens."
// TODO(zhiqiu): implement logic of FLAGS_call_stack_level==0
// "If FLAGS_call_stack_level == 0, only the error message summary will be "
// "shown. "
@@ -852,13 +852,13 @@ PHI_DEFINE_EXPORTED_bool(
* Since Version: 2.2.0
* Value Range: bool, default=false
* Example:
- * Note: Control whether load graph node and edge with multi threads parallely
+ * Note: Control whether load graph node and edge with multi threads parallelly
* If it is not set, load graph data with one thread
*/
PHI_DEFINE_EXPORTED_bool(graph_load_in_parallel,
false,
"It controls whether load graph node and edge with "
"mutli threads parallely.");
"multi threads parallelly.");

/**
* Distributed related FLAG
@@ -878,7 +878,7 @@ PHI_DEFINE_EXPORTED_bool(enable_neighbor_list_use_uva,
* Since Version: 2.5.0
* Value Range: double, default=1.0
* Example:
- * Note: Control whether load graph node and edge with multi threads parallely
+ * Note: Control whether load graph node and edge with multi threads parallelly
* If it is not set, load graph data with one thread
*/
PHI_DEFINE_EXPORTED_double(graph_neighbor_size_percent,
@@ -891,13 +891,13 @@ PHI_DEFINE_EXPORTED_double(graph_neighbor_size_percent,
* Since Version: 2.2.0
* Value Range: bool, default=false
* Example:
- * Note: Control whether load graph node and edge with multi threads parallely
+ * Note: Control whether load graph node and edge with multi threads parallelly
* If it is not set, load graph data with one thread
*/
PHI_DEFINE_EXPORTED_bool(graph_metapath_split_opt,
false,
"It controls whether load graph node and edge with "
"mutli threads parallely.");
"multi threads parallelly.");

/**
* Distributed related FLAG
@@ -1393,7 +1393,7 @@ PHI_DEFINE_EXPORTED_bool(enable_pir_with_pt_in_dy2st,
* Since Version: 2.6.0
* Value Range: bool, default=false
* Example:
- * Note: If Ture, New IR API will be used in Python
+ * Note: If True, New IR API will be used in Python
*/
PHI_DEFINE_EXPORTED_bool(enable_pir_api, false, "Enable new IR API in Python");

@@ -1403,7 +1403,7 @@ PHI_DEFINE_EXPORTED_bool(enable_pir_api, false, "Enable new IR API in Python");
* Since Version: 2.6.0
* Value Range: bool, default=false
* Example:
- * Note: If Ture, executor will use new IR and run in beta version by for trace
+ * Note: If True, executor will use new IR and run in beta version by for trace
* version.
*/
PHI_DEFINE_EXPORTED_bool(enable_pir_in_executor_trace_run,
@@ -1416,7 +1416,7 @@ PHI_DEFINE_EXPORTED_bool(enable_pir_in_executor_trace_run,
* Since Version: 2.6.0
* Value Range: bool, default=true
* Example:
- * Note: If Ture, will apply inplace pass to new IR.
+ * Note: If True, will apply inplace pass to new IR.
*/
PHI_DEFINE_EXPORTED_bool(pir_apply_inplace_pass,
true,
Expand All @@ -1428,7 +1428,7 @@ PHI_DEFINE_EXPORTED_string(
"",
"It controls the ir inplace kernel subset do not use.");
/**
- * Specify the directory of saving PIR sugraph from @to_static
+ * Specify the directory of saving PIR subgraph from @to_static
* Name: pir_subgraph_saving_dir
* Since Version: 2.6.0
* Value Range: str, default=""
@@ -1438,7 +1438,7 @@ PHI_DEFINE_EXPORTED_string(
PHI_DEFINE_EXPORTED_string(
pir_subgraph_saving_dir,
"",
"Specify the directory of saving PIR sugraph from @to_static.");
"Specify the directory of saving PIR subgraph from @to_static.");

PHI_DEFINE_EXPORTED_bool(enable_record_memory, false, "Enable memory recorder");

@@ -1464,7 +1464,7 @@ PHI_DEFINE_EXPORTED_int32(
PHI_DEFINE_EXPORTED_bool(print_ir, false, "Whether print ir debug str.");
PHI_DEFINE_EXPORTED_bool(prim_skip_dynamic,
false,
"Whether to skip decomping op with dynamic shape.");
"Whether to skip decomposing op with dynamic shape.");
PHI_DEFINE_EXPORTED_bool(prim_check_ops,
false,
"Whether to check the decomposed program, to ensure "
@@ -1514,7 +1514,7 @@ PHI_DEFINE_EXPORTED_bool(
PHI_DEFINE_EXPORTED_int64(alloc_fill_value,
-1,
"Whether to fill fixed value after allocation. "
"This is usefull for debugging.");
"This is useful for debugging.");

/**
* Apply shape optimization pass to new IR FLAG
@@ -1577,7 +1577,7 @@ PHI_DEFINE_EXPORTED_string(lapack_dir,
* Since Version: 3.0.0
* Value Range: bool, default=false
* Example:
- * Note: If Ture, will apply check_infer_symbolic pass.
+ * Note: If True, will apply check_infer_symbolic pass.
*/
PHI_DEFINE_EXPORTED_bool(
check_infer_symbolic,