
Commit b881477

Fluid clean (#48841)
* add index sample fp16 support
* remove fluid APIs in distributed_strategy.py and role_maker.py
* Revert "remove fluid APIs in distributed_strategy.py and role_maker.py"

  This reverts commit 223bbee.
* remove fluid APIs in distributed_strategy.py and role_maker.py
* remove index sample op changes
* remove fluid APIs under fleet.base
* remove fluid APIs under fleet.layers.mpu
* remove fluid APIs under fleet.meta_optimizers
* fix fluid error
* fix util_factory.py
* reset fluid.io.load_inference_model API
* remove dygraph.parallel.prepare_context
* remove fluid.dygraph.StaticModelRunner API
* remove split_lod_tensor merge_lod_tensor
* remove unittests
1 parent: 1e0f873 · commit: b881477

13 files changed: +4 −1282 lines

python/paddle/fluid/dygraph/__init__.py

Lines changed: 0 additions & 3 deletions
@@ -36,9 +36,6 @@
 from . import learning_rate_scheduler
 from .learning_rate_scheduler import *
 
-from . import static_runner
-from .static_runner import StaticModelRunner
-
 from . import amp
 from .amp import *

python/paddle/fluid/dygraph/parallel.py

Lines changed: 1 addition & 41 deletions
@@ -37,51 +37,11 @@
     in_dygraph_mode,
 )
 
-__all__ = ["prepare_context", "ParallelEnv", "DataParallel"]
+__all__ = ["ParallelEnv", "DataParallel"]
 
 ParallelStrategy = core.ParallelStrategy
 
 
-@deprecated(since="2.0.0", update_to="paddle.distributed.init_parallel_env")
-def prepare_context(strategy=None):
-    '''
-    :api_attr: imperative
-    '''
-    if strategy is None:
-        strategy = ParallelStrategy()
-        strategy.nranks = Env().nranks
-        strategy.local_rank = Env().local_rank
-        strategy.trainer_endpoints = Env().trainer_endpoints
-        strategy.current_endpoint = Env().current_endpoint
-    if strategy.nranks < 2:
-        return
-    assert (
-        framework._non_static_mode() is True
-    ), "dygraph.prepare_context should be used with dygraph mode."
-    place = framework._current_expected_place()
-    assert (
-        place is not None
-    ), "dygraph.prepare_context should be used in fluid.dygraph.guard(place) guard."
-    if not parallel_helper._is_parallel_ctx_initialized():
-        if isinstance(place, core.CUDAPlace):
-            parallel_helper._set_parallel_ctx(
-                core.NCCLParallelContext(strategy, place)
-            )
-        elif isinstance(place, core.XPUPlace):
-            parallel_helper._set_parallel_ctx(
-                core.BKCLParallelContext(strategy, place)
-            )
-        elif isinstance(place, core.NPUPlace):
-            parallel_helper._set_parallel_ctx(
-                core.HCCLParallelContext(strategy, place)
-            )
-        else:
-            # TODO(Yancey1989): add Gloo Parallel Context to support CPU parallel computation
-            assert "Only support CUDAPlace or XPUPlace or NPUPlace for now."
-        parallel_helper._init_parallel_ctx()
-    return strategy
-
-
 class ParallelEnv:
     """
     .. note::
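
Migration note: the removed prepare_context() carried
@deprecated(since="2.0.0", update_to="paddle.distributed.init_parallel_env"),
so callers should move to that API. A minimal sketch of the replacement
pattern follows; the toy model and the spawn launcher are illustrative,
not part of this diff.

    import paddle
    import paddle.distributed as dist

    def train():
        # Replaces dygraph.parallel.prepare_context(); reads rank/endpoint
        # info from the launcher-provided environment.
        dist.init_parallel_env()
        # DataParallel no longer needs an explicit ParallelStrategy.
        model = paddle.DataParallel(paddle.nn.Linear(8, 8))
        opt = paddle.optimizer.SGD(parameters=model.parameters())
        loss = model(paddle.randn([4, 8])).mean()
        loss.backward()
        opt.step()
        opt.clear_grad()

    if __name__ == '__main__':
        dist.spawn(train, nprocs=2)  # assumes two visible devices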

python/paddle/fluid/dygraph/static_runner.py

Lines changed: 0 additions & 37 deletions
This file was deleted.
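
Migration note: with StaticModelRunner gone, the usual way to load a saved
static-graph model for imperative use is paddle.jit.load. A hedged sketch
(the path is a placeholder, and the exact loading options depend on how the
model was exported):

    import paddle

    # Assumes a model exported with paddle.jit.save or the older
    # save_inference_model format; '/path/to/model' is hypothetical.
    model = paddle.jit.load('/path/to/model')
    model.train()  # the loaded layer supports fine-tuning in dygraph
    out = model(paddle.randn([1, 784]))  # input shape is illustrative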

python/paddle/fluid/layers/control_flow.py

Lines changed: 0 additions & 124 deletions
@@ -145,130 +145,6 @@ def select_input(inputs, mask):
     return out
 
 
-def split_lod_tensor(input, mask, level=0):
-    """
-    This function takes in an input that contains the complete lod information,
-    and takes in a mask which is used to mask certain parts of the input.
-    The output is the true branch and the false branch with the mask applied to
-    the input at a certain level in the tensor. Mainly used in IfElse to split
-    data into two parts.
-
-    Args:
-        input(Variable|tuple|list|None): The input tensor that contains complete
-            lod information needed to construct the output.
-        mask(Variable|list): A bool column vector which masks the input.
-        level(int): The specific lod level to split.
-
-    Returns:
-        tuple(Variable, Variable):
-        The true branch of tensor as per the mask applied to input.
-
-        The false branch of tensor as per the mask applied to input.
-
-    Examples:
-        .. code-block:: python
-
-          import paddle.fluid as fluid
-          x = fluid.layers.data(name='x', shape=[1])
-          x.persistable = True
-
-          y = fluid.layers.data(name='y', shape=[1])
-          y.persistable = True
-
-          out_true, out_false = fluid.layers.split_lod_tensor(
-                input=x, mask=y, level=level)
-
-    """
-    check_type(
-        input,
-        'input',
-        (Variable, list, tuple, type(None)),
-        'fluid.layers.split_lod_tensor',
-    )
-    check_type(mask, 'mask', (Variable, list), 'fluid.layers.split_lod_tensor')
-    check_type(level, 'level', int, 'fluid.layers.split_lod_tensor')
-    helper = LayerHelper('split_lod_tensor', **locals())
-    out_true = helper.create_variable_for_type_inference(dtype=input.dtype)
-    out_false = helper.create_variable_for_type_inference(dtype=input.dtype)
-    helper.append_op(
-        type='split_lod_tensor',
-        inputs={
-            'X': input,
-            'Mask': mask,
-        },
-        outputs={'OutTrue': out_true, 'OutFalse': out_false},
-        attrs={'level': level},
-    )
-    return out_true, out_false
-
-
-def merge_lod_tensor(in_true, in_false, x, mask, level=0):
-    """
-    **merge_lod_tensor**
-
-    This function takes in an input :math:`x`, the True branch, the False
-    branch and a binary :math:`mask`. Using this information, this function
-    merges the True and False branches of the tensor into a single tensor as
-    output at a certain lod level indicated by :math:`level`. Used in IfElse
-    to merge the output if True block and False Block.
-
-    Args:
-        in_true(Variable|tuple|list|None): The True branch to be merged.
-        in_false(Variable|tuple|list|None): The False branch to be merged.
-        x(Variable|tuple|list|None): The input tensor that contains complete
-            lod information needed to construct the output.
-        mask(Variable|list): A bool column vector which masks the input.
-        level(int): The specific lod level to merge.
-
-    Returns:
-        Variable: The merged output tensor.
-
-    Examples:
-        .. code-block:: python
-
-          import paddle.fluid as fluid
-          x = layers.data(
-              name='x', shape=[1], dtype='float32', stop_gradient=False)
-          y = layers.data(
-              name='y', shape=[1], dtype='bool', stop_gradient=False)
-
-          level = 0
-
-          out_true, out_false = layers.split_lod_tensor(
-              input=x, mask=y, level=level)
-          out = layers.merge_lod_tensor(
-              in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
-    """
-    helper = LayerHelper('merge_lod_tensor', **locals())
-    check_type(
-        x,
-        'x',
-        (Variable, list, tuple, type(None)),
-        'fluid.layers.merge_lod_tensor',
-    )
-    check_type(mask, 'mask', (Variable, list), 'fluid.layers.merge_lod_tensor')
-    check_type(
-        in_true,
-        'in_true',
-        (Variable, list, tuple, type(None)),
-        'fluid.layers.merge_lod_tensor',
-    )
-    check_type(
-        in_false,
-        'in_false',
-        (Variable, list, tuple, type(None)),
-        'fluid.layers.merge_lod_tensor',
-    )
-    out = helper.create_variable_for_type_inference(dtype=in_true.dtype)
-    helper.append_op(
-        type='merge_lod_tensor',
-        inputs={'X': x, 'Mask': mask, 'InTrue': in_true, 'InFalse': in_false},
-        outputs={'Out': out},
-        attrs={'level': level},
-    )
-    return out
-
-
 @static_only
 def Print(
     input,
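
Note: for dense (non-LoD) tensors at level=0, the removed pair amounts to a
row-wise select and its inverse scatter. The sketch below mimics that dense
behavior with current paddle ops; split_by_mask/merge_by_mask are
hypothetical helper names, and LoD bookkeeping is deliberately not
reproduced.

    import paddle

    def split_by_mask(x, mask):
        # Rows where mask is True -> first output (like OutTrue),
        # remaining rows -> second output (like OutFalse).
        mask = mask.reshape([-1]).astype('bool')
        idx_true = paddle.nonzero(mask).reshape([-1])
        idx_false = paddle.nonzero(paddle.logical_not(mask)).reshape([-1])
        return paddle.gather(x, idx_true), paddle.gather(x, idx_false)

    def merge_by_mask(in_true, in_false, mask):
        # Inverse: scatter both branches back to their original rows.
        mask = mask.reshape([-1]).astype('bool')
        idx_true = paddle.nonzero(mask).reshape([-1])
        idx_false = paddle.nonzero(paddle.logical_not(mask)).reshape([-1])
        out = paddle.zeros([mask.shape[0]] + in_true.shape[1:],
                           dtype=in_true.dtype)
        out = paddle.scatter(out, idx_true, in_true)
        return paddle.scatter(out, idx_false, in_false)

    x = paddle.arange(8, dtype='float32').reshape([4, 2])
    m = paddle.to_tensor([True, False, True, False])
    out_true, out_false = split_by_mask(x, m)
    assert paddle.allclose(merge_by_mask(out_true, out_false, m), x)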

python/paddle/fluid/tests/unittests/CMakeLists.txt

Lines changed: 0 additions & 11 deletions
@@ -449,8 +449,6 @@ list(REMOVE_ITEM TEST_OPS test_basic_lstm_unit_op)
 list(REMOVE_ITEM TEST_OPS test_fuse_all_reduce_pass)
 list(REMOVE_ITEM TEST_OPS test_fuse_bn_act_pass)
 list(REMOVE_ITEM TEST_OPS test_fuse_bn_add_act_pass)
-list(REMOVE_ITEM TEST_OPS test_imperative_static_runner_mnist)
-list(REMOVE_ITEM TEST_OPS test_imperative_static_runner_while)
 
 # disable this unittest temporarily
 list(REMOVE_ITEM TEST_OPS test_imperative_data_loader_exception)
@@ -569,12 +567,6 @@ py_test_modules(
 py_test_modules(test_install_check MODULES test_install_check ENVS
                 FLAGS_cudnn_deterministic=1)
 set_tests_properties(test_install_check PROPERTIES LABELS "RUN_TYPE=DIST")
-py_test_modules(
-  test_imperative_static_runner_mnist MODULES
-  test_imperative_static_runner_mnist ENVS FLAGS_cudnn_deterministic=1)
-py_test_modules(
-  test_imperative_static_runner_while MODULES
-  test_imperative_static_runner_while ENVS FLAGS_cudnn_deterministic=1)
 
 if((WITH_GPU) AND (CUDA_VERSION GREATER_EQUAL 11.6))
   py_test_modules(test_fused_gemm_epilogue_op MODULES
@@ -1084,7 +1076,6 @@ set_tests_properties(test_svd_op PROPERTIES TIMEOUT 80)
 set_tests_properties(test_einsum_op PROPERTIES TIMEOUT 120)
 set_tests_properties(test_qr_op PROPERTIES TIMEOUT 60)
 set_tests_properties(test_trilinear_interp_v2_op PROPERTIES TIMEOUT 120)
-set_tests_properties(test_imperative_static_runner_mnist PROPERTIES TIMEOUT 120)
 set_tests_properties(test_masked_select_op PROPERTIES TIMEOUT 120)
 set_tests_properties(test_sigmoid_cross_entropy_with_logits_op
                      PROPERTIES TIMEOUT 120)
@@ -1167,8 +1158,6 @@ if(APPLE)
                        PROPERTIES TIMEOUT 300)
   set_tests_properties(test_multiclass_nms_op PROPERTIES TIMEOUT 300)
   set_tests_properties(test_weight_decay PROPERTIES TIMEOUT 300)
-  set_tests_properties(test_imperative_static_runner_mnist PROPERTIES TIMEOUT
-                       300)
 endif()
 
 if((WITH_ROCM OR WITH_GPU) AND NOT WIN32)

python/paddle/fluid/tests/unittests/test_dist_base.py

Lines changed: 0 additions & 1 deletion
@@ -676,7 +676,6 @@ def run_trainer(self, args):
                 type(self).__name__,
                 "begin to prepare context in dygraph with nccl2",
             )
-            dygraph.parallel.prepare_context(strategy)
             if not args.find_unused_parameters:
                 model = dygraph.parallel.DataParallel(
                     model, strategy, find_unused_parameters=False

python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py

Lines changed: 3 additions & 2 deletions
@@ -19,6 +19,7 @@
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.dygraph as dygraph
+from paddle.distributed import init_parallel_env
 from paddle.nn import Linear
 
 
@@ -38,9 +39,9 @@ def forward(self, inputs):
 class TestDataParallelStateDict(unittest.TestCase):
     def test_data_parallel_state_dict(self):
         with fluid.dygraph.guard():
-            strategy = dygraph.parallel.prepare_context()
+            init_parallel_env()
             mlp = MLP()
-            parallel_mlp = dygraph.parallel.DataParallel(mlp, strategy)
+            parallel_mlp = dygraph.parallel.DataParallel(mlp)
 
             single_state = mlp.state_dict()
             parallel_state = parallel_mlp.state_dict()
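
Migration note: init_parallel_env() is called purely for its side effects
(the old prepare_context() returned a strategy object), and DataParallel now
wraps the layer alone. A condensed sketch of the updated flow, assuming the
rest of the test compares the two state dicts:

    with fluid.dygraph.guard():
        init_parallel_env()  # no return value to thread through
        mlp = MLP()
        parallel_mlp = dygraph.parallel.DataParallel(mlp)
        # Wrapping must leave parameter names untouched, so both state
        # dicts should expose the same keys.
        assert set(mlp.state_dict()) == set(parallel_mlp.state_dict())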
