Skip to content

Commit c7c62a9

Browse files
committed
update unit tests
1 parent 946ee54 commit c7c62a9

File tree

4 files changed

+38
-6
lines changed

4 files changed

+38
-6
lines changed

python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -165,7 +165,7 @@ def _insert_allreduce_ops_for_gm(self, gm_block):
165165
block = self.main_program.global_block()
166166

167167
first_optimize_op_idx = None
168-
for i, op in reversed(enumerate(list(gm_block.ops))):
168+
for i, op in reversed(list(enumerate(gm_block.ops))):
169169
if is_backward_op(op) and first_optimize_op_idx is None:
170170
first_optimize_op_idx = i + 1
171171
break

python/paddle/fluid/tests/unittests/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -778,7 +778,7 @@ endif()
778778
if (WITH_DISTRIBUTE AND NOT APPLE)
779779
if(WITH_GPU OR WITH_ROCM)
780780
set_tests_properties(test_c_comm_init_op PROPERTIES TIMEOUT 120)
781-
set_tests_properties(test_dist_mnist_gradient_merge PROPERTIES TIMEOUT 120)
781+
set_tests_properties(test_dist_mnist_gradient_merge PROPERTIES TIMEOUT 160)
782782
endif()
783783
endif()
784784

python/paddle/fluid/tests/unittests/dist_mnist_gradient_merge_raw_optimizer.py

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15+
import os
1516
import paddle
1617
import paddle.nn as nn
1718
import paddle.fluid as fluid
@@ -44,9 +45,10 @@ def get_model(self, batch_size=2, single_device=False):
4445
strategy.build_strategy = build_strategy
4546

4647
strategy.gradient_merge = True
48+
avg = os.environ['enable_gm_avg'] == "True"
4749
strategy.gradient_merge_configs = {
4850
"k_steps": 2,
49-
"avg": False,
51+
"avg": avg,
5052
}
5153
strategy.without_graph_optimization = True
5254

@@ -65,9 +67,25 @@ def get_model(self, batch_size=2, single_device=False):
6567
optimizer,
6668
k_steps=strategy.gradient_merge_configs["k_steps"],
6769
avg=strategy.gradient_merge_configs["avg"])
70+
world_size = 1
6871
else:
6972
optimizer = fleet.distributed_optimizer(optimizer)
73+
world_size = fleet.world_size()
7074
optimizer.minimize(cost)
75+
if world_size > 1:
76+
assert paddle.static.default_main_program().num_blocks == 2
77+
gm_block = paddle.static.default_main_program().block(1)
78+
start_allreduce_idx = None
79+
for i, op in enumerate(gm_block.ops):
80+
if op.type == "c_allreduce_sum":
81+
start_allreduce_idx = i
82+
break
83+
# the magic number 1 below means skip the c_sync_calc_stream op
84+
if avg:
85+
assert start_allreduce_idx > 1
86+
else:
87+
assert start_allreduce_idx == 1
88+
7189
train_reader = paddle.batch(
7290
paddle.dataset.mnist.test(), batch_size=batch_size)
7391
test_reader = paddle.batch(

python/paddle/fluid/tests/unittests/test_dist_mnist_gradient_merge.py

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -52,21 +52,35 @@ def test_dist_train(self):
5252
log_name=flag_name + "_no_fuse")
5353

5454

55-
class TestDistMnistGradMergeRawOptimizer(TestDistBase):
55+
class TestDistMnistGradMergeRawOptimizerBase(TestDistBase):
5656
def _setup_config(self):
5757
self._use_reader_alloc = False
5858
self._nccl2_mode = True
5959
self._use_fleet_api = True
6060
self._use_fleet_api_20 = True
6161

62+
def enable_avg(self):
63+
return False
64+
6265
def test_dist_train(self):
6366
if fluid.core.is_compiled_with_cuda():
67+
avg = str(self.enable_avg())
68+
log_name = flag_name + "_raw_optimizer_gm_avg_" + avg
6469
self.check_with_place(
6570
"dist_mnist_gradient_merge_raw_optimizer.py",
6671
delta=1e-5,
6772
check_error_log=True,
68-
log_name=flag_name + "_raw_optimizer",
69-
need_envs={'FLAGS_apply_pass_to_program': '1'})
73+
log_name=log_name,
74+
need_envs={
75+
'FLAGS_apply_pass_to_program': '1',
76+
'enable_gm_avg': avg,
77+
})
78+
79+
80+
class TestDistMnistGradMergeRawOptimizerAvg(
81+
TestDistMnistGradMergeRawOptimizerBase):
82+
def enable_avg(self):
83+
return True
7084

7185

7286
if __name__ == "__main__":

0 commit comments

Comments (0)