diff --git a/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.h b/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.h
index fac9b49e886cb3..2c29a3eb7badbb 100644
--- a/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.h
+++ b/paddle/fluid/framework/ir/embedding_eltwise_layernorm_fuse_pass.h
@@ -120,8 +120,8 @@ struct SkipLayerNorm : public PatternBase {
 // (word, weights_0) lookup_table -> word_emb
 // (pos, weights_1) lookup_table -> pos_emb
 // (sent, weights_2) lookup_table -> sent_emb
-// (word_emb, pos_emb) elementweise_add -> elementwise_out_0
-// (elemtwise_out_0, sent_emb) elementweise_add -> elementwise_out_1
+// (word_emb, pos_emb) elementwise_add -> elementwise_out_0
+// (elementwise_out_0, sent_emb) elementwise_add -> elementwise_out_1
 // (elementwise_out_1, scale, bias) layer_norm -> layer_norm_out
 //
 // and then convert the corresponding subgraph to:
diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc
index aa043803e5d2a3..968bba9241361c 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.cc
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -4384,7 +4384,7 @@ PDNode *patterns::ReverseRollPattern::operator()(PDNode *in) {
   }
   auto reshape2_50_op =
       pattern->NewNode(reshape2_50_op_repr())->assert_is_op("reshape2");
-  auto reshape2_50_out = pattern->NewNode(reshaep2_50_out_repr())
+  auto reshape2_50_out = pattern->NewNode(reshape2_50_out_repr())
                              ->assert_is_op_output("reshape2", "Out")
                              ->AsOutput();
   reshape2_00_op->LinksFrom({in});
diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h
index de8a29a6787abb..0836ebe5948706 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.h
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.h
@@ -2245,7 +2245,7 @@ struct ReverseRollPattern : public PatternBase {
   PATTERN_DECL_NODE(roll_40_op);
   PATTERN_DECL_NODE(roll_40_out);
   PATTERN_DECL_NODE(reshape2_50_op);
-  PATTERN_DECL_NODE(reshaep2_50_out);
+  PATTERN_DECL_NODE(reshape2_50_out);
 };
 
 // pattern for merge_layernorm
diff --git a/paddle/fluid/framework/ir/is_test_pass.cc b/paddle/fluid/framework/ir/is_test_pass.cc
index 38137d18db6bff..338e8227228d4f 100644
--- a/paddle/fluid/framework/ir/is_test_pass.cc
+++ b/paddle/fluid/framework/ir/is_test_pass.cc
@@ -23,7 +23,7 @@ namespace ir {
 class Graph;
 
 void IsTestPass::ApplyImpl(ir::Graph* graph) const {
-  VLOG(3) << "Sets is_test attrbiute to true and if it is missing, inserts it "
+  VLOG(3) << "Sets is_test attribute to true and if it is missing, inserts it "
              "for activations and pooling.";
   auto op_list = {"pool2d",     "sigmoid",     "logsigmoid",
                   "softshrink", "exp",         "brelu",
diff --git a/paddle/fluid/framework/ir/op_compat_sensible_pass.cc b/paddle/fluid/framework/ir/op_compat_sensible_pass.cc
index 37175c70e4b668..895a4f88c3c440 100644
--- a/paddle/fluid/framework/ir/op_compat_sensible_pass.cc
+++ b/paddle/fluid/framework/ir/op_compat_sensible_pass.cc
@@ -167,7 +167,7 @@ AttrCompat& OpCompat::AddAttr(const std::string& attr_name) {
       attr_compats_.find(attr_name),
       attr_compats_.end(),
       platform::errors::InvalidArgument(
-          "The attrubute compat with the same name has been added"));
+          "The attribute compat with the same name has been added"));
   attr_compats_.emplace(attr_name, AttrCompat(attr_name, this));
   return attr_compats_.at(attr_name);
 }
diff --git a/paddle/fluid/framework/ir/op_compat_sensible_pass.h b/paddle/fluid/framework/ir/op_compat_sensible_pass.h
index ea4e09c6d09762..4c941e169b89e7 100644
--- a/paddle/fluid/framework/ir/op_compat_sensible_pass.h
+++ b/paddle/fluid/framework/ir/op_compat_sensible_pass.h
@@ -49,7 +49,7 @@ class AttrCompat {
   //! Assert the attribute is an integer in the `candidates` domain.
   AttrCompat& IsIntIn(const std::set<int>& candidates);
 
-  // @{ Number-releated methods
+  // @{ Number-related methods
   //! Assert the attribute is a number and > `v`.
   template <typename T>
   AttrCompat& IsNumGT(T v);
@@ -162,11 +162,11 @@ class OpCompat {
  *      void AddOpCompat(OpCompat&& judger);
  *
  * Most of the Passes are used for fusing ops, so we define a method for such
- * scenerios.
+ * scenarios.
  *      void AccessSubgraph(const GraphPatternDetector::subgraph_t& subgraph,
  *                          Graph* g);
  *   It will check the Op compatibility automatically.
- * For other scenirios, one should call `IsCompat` by himself.
+ * For other scenarios, one should call `IsCompat` by himself.
  *
  * A FC fuse pass example:
  *   class FcFusePass : public OpCompatSensiblePass {
@@ -177,7 +177,7 @@ class OpCompat {
  *         .AddInput("Input").IsTensor().End()
 *         .AddAttr("in_num_col_dims").IsNumGE(1);
 *       AddOpCompat(OpCompat("Add")). ...;
- *       // There are multiple activation implemention.
+ *       // There are multiple activation implementations.
 *       AddOpCompat(OpCompat("Tanh")). ...;
 *       AddOpCompat(OpCompat("Sigmoid")). ...;
 *     }
diff --git a/paddle/fluid/framework/ir/preln_embedding_eltwise_layernorm_fuse_pass.h b/paddle/fluid/framework/ir/preln_embedding_eltwise_layernorm_fuse_pass.h
index 7ca60901ebd6c9..f4d89c3b444779 100644
--- a/paddle/fluid/framework/ir/preln_embedding_eltwise_layernorm_fuse_pass.h
+++ b/paddle/fluid/framework/ir/preln_embedding_eltwise_layernorm_fuse_pass.h
@@ -131,7 +131,7 @@ struct PrelnSkipLayerNorm : public PatternBase {
 // and then convert the corresponding subgraph to:
 //
 // (word, pos, sent, weights_0, weights_1, weights_2,
-//  scale, baias) Prelnembedding_eltwise_layernorm -> layer_norm_out +
+//  scale, bias) Prelnembedding_eltwise_layernorm -> layer_norm_out +
 //                                                    elementwise_add_out
 //
 //
diff --git a/paddle/fluid/framework/ir/reverse_roll_fuse_pass.cc b/paddle/fluid/framework/ir/reverse_roll_fuse_pass.cc
index 764c1a62faf74d..ff3dbf11762ff5 100644
--- a/paddle/fluid/framework/ir/reverse_roll_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/reverse_roll_fuse_pass.cc
@@ -31,7 +31,7 @@
   GET_IR_NODE(reshape2_30_op);  \
   GET_IR_NODE(reshape2_30_out); \
   GET_IR_NODE(reshape2_50_op);  \
-  GET_IR_NODE(reshaep2_50_out);
+  GET_IR_NODE(reshape2_50_out);
 
 namespace paddle {
 namespace framework {
@@ -168,7 +168,7 @@ int ReverseRollFusePass::ApplyPattern(ir::Graph* graph, bool with_roll) const {
     OpDesc reverse_roll_desc(reshape2_00_op->Op()->Block());
     reverse_roll_desc.SetType("reverse_roll");
     reverse_roll_desc.SetInput("X", {subgraph.at(x)->Name()});
-    reverse_roll_desc.SetOutput("Out", {reshaep2_50_out->Name()});
+    reverse_roll_desc.SetOutput("Out", {reshape2_50_out->Name()});
     reverse_roll_desc.SetAttr("window_number", window_number);
     reverse_roll_desc.SetAttr("window_size", window_size);
     reverse_roll_desc.SetAttr("window_len", window_len);
@@ -176,7 +176,7 @@ int ReverseRollFusePass::ApplyPattern(ir::Graph* graph, bool with_roll) const {
     reverse_roll_desc.SetAttr("input_resolution", input_resolution);
     auto reverse_roll_node = graph->CreateOpNode(&reverse_roll_desc);
     IR_NODE_LINK_TO(subgraph.at(x), reverse_roll_node);
-    IR_NODE_LINK_TO(reverse_roll_node, reshaep2_50_out);
+    IR_NODE_LINK_TO(reverse_roll_node, reshape2_50_out);
     GraphSafeRemoveNodes(graph, del_node_set);
     ++fuse_count;
   };
diff --git a/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
index 3674f463e74aaf..7626c1e9142f94 100644
--- a/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
+++ b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
@@ -174,7 +174,7 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const {
       }
     }
 
-    // shuffle_channel dosen't change shape
+    // shuffle_channel doesn't change shape
     if ((reshape2_shape[0] != -1) && (x_shape1[0] != reshape2_shape[0])) {
       return;
     }
diff --git a/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc b/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc
index 366f2682a7a643..93a1008838558b 100644
--- a/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc
+++ b/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc
@@ -25,8 +25,8 @@ namespace framework {
 namespace ir {
 
 /*
- * This pass is to simplify the Grpah, it may contains:
- * - replace comlicated op with basic op
+ * This pass is to simplify the Graph, it may contain:
+ * - replace complicated op with basic op
  * - remove some unnecessary op
  *
  * In the current implementation, it supports:
diff --git a/paddle/fluid/framework/ir/subgraph_detector.cc b/paddle/fluid/framework/ir/subgraph_detector.cc
index fca02e23ad1d59..1cfe740fc55d23 100644
--- a/paddle/fluid/framework/ir/subgraph_detector.cc
+++ b/paddle/fluid/framework/ir/subgraph_detector.cc
@@ -289,7 +289,7 @@ std::vector<std::vector<Node *>> SubgraphDetector::ExtractSubGraphs() {
     node_map[n->id()] = n;
   }
 
-  // create breif node map
+  // create brief node map
  for (auto &itr : brief_node_map) {
     for (Node *node : itr.second->node->inputs) {
       if (!valid_node_ids.count(node->id())) {
diff --git a/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc b/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc
index 7865f39ffda83d..9885979a749abc 100644
--- a/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc
+++ b/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc
@@ -44,7 +44,7 @@ namespace ir {
 // Put transfer_layout after op_node
 // transfer_info is for case when we need know this transfer_layout info,
 // nchw_nhwc or nhwc_nchw
-void TransferLayoutElimPass::PutTranferlayoutAfterOp(
+void TransferLayoutElimPass::PutTransferlayoutAfterOp(
     Node *op_node, ir::Graph *graph, std::string *transfer_info) const {
   std::unordered_set<const Node *> remove_nodes;
   // Ensure op_node has only one output!
@@ -200,9 +200,9 @@ bool TransferLayoutElimPass::AllInputIsTransferlayout(
 //   |     |     |
 //  op0   op1   op2
 
-void TransferLayoutElimPass::ElimTwoTranferlayout(Node *op_node,
-                                                  ir::Graph *graph,
-                                                  bool *modify) const {
+void TransferLayoutElimPass::ElimTwoTransferlayout(Node *op_node,
+                                                   ir::Graph *graph,
+                                                   bool *modify) const {
   std::unordered_set<const Node *> remove_nodes;
   auto var1 = op_node->inputs[0];
   auto transfer_layout0 = var1->inputs[0];
@@ -293,7 +293,7 @@ void TransferLayoutElimPass::ApplyImpl(ir::Graph *graph) const {
     if (AllInputIsTransferlayout(op_node)) {
       if (is_concat_like_op) {
         std::string transfer_info;
-        PutTranferlayoutAfterOp(op_node, graph, &transfer_info);
+        PutTransferlayoutAfterOp(op_node, graph, &transfer_info);
         int axis = op_node->Op()->GetAttrIfExists<int>("axis");
         int modify_axis = axis;
         if (transfer_info == "nhwc_nchw") {
@@ -319,7 +319,7 @@ void TransferLayoutElimPass::ApplyImpl(ir::Graph *graph) const {
         break;
       }
       if (is_pool_like_op) {
-        PutTranferlayoutAfterOp(op_node, graph, nullptr);
+        PutTransferlayoutAfterOp(op_node, graph, nullptr);
         op_node->Op()->SetAttr(
             "data_format",
             transfer_format(
@@ -329,13 +329,13 @@ void TransferLayoutElimPass::ApplyImpl(ir::Graph *graph) const {
         break;
       }
       if (is_act_like_op) {
-        PutTranferlayoutAfterOp(op_node, graph, nullptr);
+        PutTransferlayoutAfterOp(op_node, graph, nullptr);
         modify = true;
         move_down_count++;
         break;
       }
       if (is_elim_op) {
-        ElimTwoTranferlayout(op_node, graph, &modify);
+        ElimTwoTransferlayout(op_node, graph, &modify);
         elim_count++;
         break;
       }
diff --git a/paddle/fluid/framework/ir/transfer_layout_elim_pass.h b/paddle/fluid/framework/ir/transfer_layout_elim_pass.h
index 0788f2f1ce128f..d2a401507eb87d 100644
--- a/paddle/fluid/framework/ir/transfer_layout_elim_pass.h
+++ b/paddle/fluid/framework/ir/transfer_layout_elim_pass.h
@@ -29,12 +29,12 @@ class TransferLayoutElimPass : public FusePassBase {
  protected:
   void ApplyImpl(ir::Graph *graph) const override;
   bool AllInputIsTransferlayout(const Node *op_node) const;
-  void PutTranferlayoutAfterOp(Node *op_node,
-                               ir::Graph *graph,
-                               std::string *transfer_info) const;
-  void ElimTwoTranferlayout(Node *op_node,
-                            ir::Graph *graph,
-                            bool *modify) const;
+  void PutTransferlayoutAfterOp(Node *op_node,
+                                ir::Graph *graph,
+                                std::string *transfer_info) const;
+  void ElimTwoTransferlayout(Node *op_node,
+                             ir::Graph *graph,
+                             bool *modify) const;
 };
 
 }  // namespace ir
diff --git a/paddle/fluid/framework/ir/xpu/generate_sequence_xpu_fuse_pass.cc b/paddle/fluid/framework/ir/xpu/generate_sequence_xpu_fuse_pass.cc
index 5be359c0cd1c0c..0b8c1391867ef9 100644
--- a/paddle/fluid/framework/ir/xpu/generate_sequence_xpu_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/xpu/generate_sequence_xpu_fuse_pass.cc
@@ -116,7 +116,7 @@ Origin subgraph:
       |    cumsum
       |      |
        \    /
-   elemetwise_sub
+   elementwise_sub
 
 Fused subgraph:
       generate_sequence_xpu