24 changes: 12 additions & 12 deletions paddle/fluid/framework/ir/fused_attention_pass.cc
@@ -267,9 +267,9 @@ PDNode* FusedAttentionPattern::operator()(PDNode* x,

PDNode* mp_allreduce_out_node{nullptr};
if (use_mp) {
- mp_allreduce_out_node = pattern->NewNode(mp_allreudce_sum_out_repr())
+ mp_allreduce_out_node = pattern->NewNode(mp_allreduce_sum_out_repr())
->assert_is_op_output("mp_allreduce_sum");
- auto* mp_allreduce_node = pattern->NewNode(mp_allreudce_sum_op_repr())
+ auto* mp_allreduce_node = pattern->NewNode(mp_allreduce_sum_op_repr())
->assert_is_op("mp_allreduce_sum");
out_linear_ele_add_out_node->assert_is_op_input("mp_allreduce_sum");
mp_allreduce_node->LinksFrom({out_linear_ele_add_out_node})
@@ -460,9 +460,9 @@ PDNode* FusedAttentionGradPattern::operator()(PDNode* x,

PDNode* mp_c_identity_out_node{nullptr};
if (use_mp) {
- mp_c_identity_out_node = pattern->NewNode(mp_allreudce_sum_grad_out_repr())
+ mp_c_identity_out_node = pattern->NewNode(mp_allreduce_sum_grad_out_repr())
->assert_is_op_output("c_identity", "Out");
- auto* mp_c_identity_node = pattern->NewNode(mp_allreudce_sum_grad_op_repr())
+ auto* mp_c_identity_node = pattern->NewNode(mp_allreduce_sum_grad_op_repr())
->assert_is_op("c_identity");
out_linear_dropout_grad_out_node->assert_is_op_input("c_identity");
mp_c_identity_node->LinksFrom({out_linear_dropout_grad_out_node})
@@ -989,13 +989,13 @@ ir::Graph* FusedAttentionsPass::ForwardHandlerHelper(
if (use_mp) {
GET_IR_NODE_FROM_SUBGRAPH(
c_identity_op_node, c_identity_op, fused_attention_pattern);
- GET_IR_NODE_FROM_SUBGRAPH(mp_allreudce_sum_op_node,
- mp_allreudce_sum_op,
+ GET_IR_NODE_FROM_SUBGRAPH(mp_allreduce_sum_op_node,
+ mp_allreduce_sum_op,
fused_attention_pattern);
remove_nodes.insert(c_identity_op_node);
- remove_nodes.insert(mp_allreudce_sum_op_node);
+ remove_nodes.insert(mp_allreduce_sum_op_node);
ring_id = PADDLE_GET_CONST(
- int, mp_allreudce_sum_op_node->Op()->GetAttr("ring_id"));
+ int, mp_allreduce_sum_op_node->Op()->GetAttr("ring_id"));
}

std::string cache_anchor_name = fuse_qkv_matmul_w_node->Var()->Name();
@@ -1367,16 +1367,16 @@ ir::Graph* FusedAttentionsPass::BackwardHandlerHelper(

int ring_id = -1;
if (use_mp) {
- GET_IR_NODE_FROM_SUBGRAPH(mp_allreudce_sum_grad_op_node,
- mp_allreudce_sum_grad_op,
+ GET_IR_NODE_FROM_SUBGRAPH(mp_allreduce_sum_grad_op_node,
+ mp_allreduce_sum_grad_op,
fused_attention_grad_pattern);
GET_IR_NODE_FROM_SUBGRAPH(c_identity_grad_op_node,
c_identity_grad_op,
fused_attention_grad_pattern);
- remove_nodes.insert(mp_allreudce_sum_grad_op_node);
+ remove_nodes.insert(mp_allreduce_sum_grad_op_node);
remove_nodes.insert(c_identity_grad_op_node);
ring_id = PADDLE_GET_CONST(
- int, mp_allreudce_sum_grad_op_node->Op()->GetAttr("ring_id"));
+ int, mp_allreduce_sum_grad_op_node->Op()->GetAttr("ring_id"));
}

OpDesc fused_attention_grad_op_desc(
10 changes: 5 additions & 5 deletions paddle/fluid/framework/ir/fused_attention_pass.h
@@ -117,9 +117,9 @@ struct FusedAttentionPattern : public PatternBase {
PATTERN_DECL_NODE(out_linear_ele_add_bias);
PATTERN_DECL_NODE(out_linear_ele_add_out);

- // allreudce for mp
- PATTERN_DECL_NODE(mp_allreudce_sum_op);
- PATTERN_DECL_NODE(mp_allreudce_sum_out);
+ // allreduce for mp
+ PATTERN_DECL_NODE(mp_allreduce_sum_op);
+ PATTERN_DECL_NODE(mp_allreduce_sum_out);

PATTERN_DECL_NODE(out_linear_dropout_op);
PATTERN_DECL_NODE(out_linear_dropout_out);
@@ -174,8 +174,8 @@ struct FusedAttentionGradPattern : public PatternBase {
PATTERN_DECL_NODE(out_linear_dropout_grad_out);

// c_identity for mp
- PATTERN_DECL_NODE(mp_allreudce_sum_grad_op); // c_identity
- PATTERN_DECL_NODE(mp_allreudce_sum_grad_out);
+ PATTERN_DECL_NODE(mp_allreduce_sum_grad_op); // c_identity
+ PATTERN_DECL_NODE(mp_allreduce_sum_grad_out);

PATTERN_DECL_NODE(out_linear_ele_add_grad_op);
PATTERN_DECL_NODE(out_linear_ele_add_grad_x);
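Why the same misspelling has to be fixed in the header and the .cc file together: the pattern macros splice the declared token into a generated accessor name and into a string key, so the declaration and every use site must agree exactly. The following is a simplified, hypothetical sketch of that token-pasting idea; it is not Paddle's actual PATTERN_DECL_NODE / GET_IR_NODE_FROM_SUBGRAPH implementation.

// Simplified sketch of how a declared pattern-node token ends up in both a
// generated accessor name and a string key. Illustrative only.
#include <iostream>
#include <map>
#include <string>

struct ToyPattern {
  std::map<std::string, int> nodes;  // node name -> node id

  // The macro pastes the token into the method names (name##_repr, name##_node)
  // and into the string literal (#name), so a typo in the declaration must be
  // repeated at every call site or the code no longer compiles.
#define TOY_PATTERN_DECL_NODE(name)                           \
  std::string name##_repr() const { return #name; }           \
  int name##_node() const { return nodes.at(name##_repr()); }

  TOY_PATTERN_DECL_NODE(mp_allreduce_sum_op)
  TOY_PATTERN_DECL_NODE(mp_allreduce_sum_out)
};

int main() {
  ToyPattern p;
  p.nodes[p.mp_allreduce_sum_op_repr()] = 1;
  p.nodes[p.mp_allreduce_sum_out_repr()] = 2;
  // Renaming the declaration (e.g. fixing "allreudce") forces every
  // *_repr()/*_node() use site to be renamed in the same commit.
  std::cout << p.mp_allreduce_sum_op_node() << "\n";  // prints 1
  return 0;
}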
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/reducer.cc
@@ -572,7 +572,7 @@ void Reducer::TraverseBackwardGraph(
}

// After each batch is calculated, the counter of each group(group.pending_)
- // and allreudce sequence counter(next_group_) will be cleaned up again.
+ // and allreduce sequence counter(next_group_) will be cleaned up again.
void Reducer::PrepareForBackward(
const std::vector<std::shared_ptr<imperative::VarBase>> &outputs) {
VLOG(3) << "after forward, then reset count for backward.";
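For context on the comment fixed above: each parameter group keeps a count of gradients still pending in the current iteration, and a cursor records which group is all-reduced next; both are re-initialized before every backward pass. A minimal toy sketch of that bookkeeping, with made-up names; this is not Paddle's Reducer.

// Toy sketch of per-group pending counters and a next-group cursor that are
// reset before each backward pass. Illustrative only.
#include <cstddef>
#include <iostream>
#include <vector>

struct ToyGroup {
  std::size_t size;     // number of gradients in this group
  std::size_t pending;  // gradients still waiting in the current iteration
};

struct ToyReducer {
  std::vector<ToyGroup> groups;
  std::size_t next_group = 0;  // which group gets all-reduced next

  // Analogue of the "cleaned up again" comment: reset both counters per batch.
  void PrepareForBackward() {
    next_group = 0;
    for (auto& g : groups) g.pending = g.size;
  }

  // Called once per produced gradient; when a group drains, it would be
  // all-reduced and the cursor advances to the next group.
  void MarkGradientReady(std::size_t group_idx) {
    if (--groups[group_idx].pending == 0) {
      std::cout << "all-reduce group " << group_idx << "\n";
      ++next_group;
    }
  }
};

int main() {
  ToyReducer r;
  r.groups = {{2, 0}, {1, 0}};
  r.PrepareForBackward();  // pending becomes {2, 1}, next_group becomes 0
  r.MarkGradientReady(0);
  r.MarkGradientReady(0);  // group 0 drains
  r.MarkGradientReady(1);  // group 1 drains
  return 0;
}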
@@ -488,7 +488,7 @@ def get_data_parallel_group(dist_ctx, op, act_grad_names, rank):

def sync_and_scale_gradients(dist_ctx, op, groups, allreduce_var_names):
"""
- insert the allreudce and scale ops for gradients of model
+ insert the allreduce and scale ops for gradients of model
parameters for operator in data parallelism.

Args:
@@ -608,7 +608,7 @@ def gradient_synchronization(
dist_ctx, op, act_grad_names, out_grad_names, rank
):
"""
- conduct the allreudce and scaling for gradients of model
+ conduct the allreduce and scaling for gradients of model
parameters for operator in parallelism train.

Args:
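Both docstrings fixed above describe the same data-parallel pattern: each rank's gradient is summed across ranks with an allreduce and then scaled by 1 / dp_degree so the synchronized value is the average. Below is a self-contained toy simulation of that semantics; it uses no Paddle APIs and simulates ranks as plain buffers.

// Toy, single-process simulation of "allreduce then scale" gradient
// synchronization for data parallelism. Illustrative only; real code would
// use a collective library rather than a loop over rank buffers.
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  const int dp_degree = 4;  // number of data-parallel ranks (assumed value)
  // One gradient buffer per rank for the same parameter, produced from
  // different micro-batches.
  std::vector<std::vector<double>> grads = {
      {1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}, {7.0, 8.0}};

  // "allreduce_sum": every rank ends up with the element-wise sum.
  std::vector<double> synced(grads[0].size(), 0.0);
  for (const auto& g : grads)
    for (std::size_t i = 0; i < g.size(); ++i) synced[i] += g[i];

  // "scale": multiply by 1 / dp_degree so the synchronized gradient is the
  // mean; this matches the 1.0 / scale factor the sharding test checks later.
  const double loss_scale = 1.0 / dp_degree;
  for (double& v : synced) v *= loss_scale;

  for (double v : synced) std::cout << v << " ";  // prints: 4 5
  std::cout << "\n";
  return 0;
}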
@@ -359,7 +359,7 @@ def __init__(
assert dst != -1
else:
raise ValueError(
"The act should be allreudce for dp or reduce for sharding."
"The act should be allreduce for dp or reduce for sharding."
)
self._dst = dst

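On the error message fixed above: under plain data parallelism every rank needs the synchronized gradient, so an allreduce is used, while under sharding only the destination rank that owns and updates the parameter needs it, so a reduce to that rank suffices. A toy contrast of the two collectives, again without any real communication:

// Toy contrast of allreduce (dp) versus reduce-to-dst (sharding).
// Illustrative only; ranks are simulated as indices into a vector of buffers.
#include <iostream>
#include <vector>

// Data parallelism: sum the per-rank values and give the result to every rank.
void toy_allreduce(std::vector<double>* per_rank) {
  double sum = 0.0;
  for (double v : *per_rank) sum += v;
  for (double& v : *per_rank) v = sum;
}

// Sharding: sum the per-rank values but deliver the result only to rank `dst`,
// the rank that owns this parameter shard; other ranks keep their local value.
void toy_reduce(std::vector<double>* per_rank, int dst) {
  double sum = 0.0;
  for (double v : *per_rank) sum += v;
  (*per_rank)[dst] = sum;
}

int main() {
  std::vector<double> a = {1.0, 2.0, 3.0, 4.0};
  toy_allreduce(&a);  // every rank now holds 10
  std::vector<double> b = {1.0, 2.0, 3.0, 4.0};
  toy_reduce(&b, 2);  // only rank 2 holds 10; the rest are unchanged
  std::cout << a[0] << " " << b[2] << " " << b[0] << "\n";  // prints: 10 10 1
  return 0;
}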
@@ -880,7 +880,7 @@ def test_sharding_hybrid_dp(self):
loss_scale = 1.0 / scale
self.assertAlmostEqual(float(op.attr('value')), loss_scale)

- # check program (allreudce)
+ # check program (allreduce)
ops = [op.type for op in main_prog_ops]
self.assertEqual(
ops,