
Commit bde2d6c

reverse device.py all list;
fix some flake8 errors
1 parent ef6a421 commit bde2d6c

8 files changed, 23 insertions(+), 36 deletions(-)

In short: the device helpers ('set_device', 'get_device', 'XPUPlace', 'get_cudnn_version', and the is_compiled_with_* checks) leave paddle.__all__ and return to paddle/device.py's own __all__ list, while the remaining hunks fix flake8 reports: E711/E712 comparisons, F401/F821 noqa markers, F841 unused locals, and redundant backslash line continuations.

python/paddle/__init__.py

Lines changed: 2 additions & 13 deletions
@@ -21,8 +21,7 @@
 import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
 )
 
-import paddle.batch
-batch = batch.batch
+from .batch import batch # noqa: F401
 from .fluid import monkey_patch_variable
 from .fluid.dygraph import monkey_patch_math_varbase
 monkey_patch_variable()

@@ -135,7 +134,6 @@
 from .tensor.manipulation import squeeze_ # noqa: F401
 from .tensor.manipulation import stack # noqa: F401
 from .tensor.manipulation import strided_slice # noqa: F401
-from .tensor.manipulation import transpose # noqa: F401
 from .tensor.manipulation import unique # noqa: F401
 from .tensor.manipulation import unsqueeze # noqa: F401
 from .tensor.manipulation import unsqueeze_ # noqa: F401

@@ -191,7 +189,6 @@
 from .tensor.math import multiply # noqa: F401
 from .tensor.math import add # noqa: F401
 from .tensor.math import subtract # noqa: F401
-from .tensor.math import atan # noqa: F401
 from .tensor.math import logsumexp # noqa: F401
 from .tensor.math import inverse # noqa: F401
 from .tensor.math import log1p # noqa: F401

@@ -280,7 +277,7 @@
 from .tensor.random import check_shape # noqa: F401
 disable_static()
 
-__all__ = [ #noqa
+__all__ = [ # noqa
     'dtype',
     'uint8',
     'int8',

@@ -322,7 +319,6 @@
     'cos',
     'tan',
     'mean',
-    'XPUPlace',
     'mv',
     'in_dynamic_mode',
     'min',

@@ -359,7 +355,6 @@
     'to_tensor',
     'gather_nd',
     'isinf',
-    'set_device',
     'uniform',
     'floor_divide',
     'remainder',

@@ -383,8 +378,6 @@
     'rand',
     'less_equal',
     'triu',
-    'is_compiled_with_cuda',
-    'is_compiled_with_rocm',
     'sin',
     'dist',
     'unbind',

@@ -413,8 +406,6 @@
     'bernoulli',
     'summary',
     'sinh',
-    'is_compiled_with_xpu',
-    'is_compiled_with_npu',
     'round',
     'DataParallel',
     'argmin',

@@ -436,7 +427,6 @@
     'not_equal',
     'sum',
     'tile',
-    'get_device',
     'greater_equal',
     'isfinite',
     'create_parameter',

@@ -469,7 +459,6 @@
     'scatter_nd',
     'set_default_dtype',
     'expand_as',
-    'get_cudnn_version',
     'stack',
     'sqrt',
     'cholesky',
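
The names dropped from paddle.__all__ above are exactly the ones the device.py hunk below restores to paddle.device.__all__, so only wildcard imports change. A minimal sketch of the resulting behavior, assuming a CPU-only build of paddle at this commit (the attribute imports themselves are untouched by the diff):

import paddle

# `from paddle import *` honors paddle.__all__, which no longer lists
# 'set_device', 'get_device', 'XPUPlace', 'get_cudnn_version', or the
# is_compiled_with_* checks; the device submodule still exports them.
paddle.device.set_device('cpu')
print(paddle.device.get_device())             # e.g. 'cpu'
print(paddle.device.is_compiled_with_cuda())  # False on a CPU-only build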

python/paddle/batch.py

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ def batch_reader():
             if len(b) == batch_size:
                 yield b
                 b = []
-        if drop_last == False and len(b) != 0:
+        if drop_last is False and len(b) != 0:
             yield b
 
     # Batch size check
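
For context, the E712 fix above lands inside paddle.batch's reader decorator. A minimal reconstruction from the visible hunk (the scaffolding outside the shown lines is an assumption):

def batch(reader, batch_size, drop_last=False):
    # Groups items yielded by `reader` into lists of `batch_size`.
    def batch_reader():
        b = []
        for item in reader():
            b.append(item)
            if len(b) == batch_size:
                yield b
                b = []
        # flake8 E712: compare with `is False`, not `== False`
        if drop_last is False and len(b) != 0:
            yield b
    return batch_reader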

python/paddle/compat.py

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@
 
 if six.PY2:
     int_type = int
-    long_type = long
+    long_type = long # noqa: F821
 else:
     int_type = int
     long_type = int
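
The noqa is needed because flake8 itself runs on Python 3, where the `long` builtin no longer exists, so it reports F821 (undefined name) even though that branch only executes under Python 2. A self-contained sketch of the same pattern:

import six

if six.PY2:
    long_type = long  # noqa: F821 -- `long` is defined only on Python 2
else:
    long_type = int   # Python 3 ints are unbounded, so int suffices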

python/paddle/device.py

Lines changed: 17 additions & 9 deletions
@@ -21,7 +21,17 @@
 from paddle.fluid.framework import is_compiled_with_cuda # noqa: F401
 from paddle.fluid.framework import is_compiled_with_rocm # noqa: F401
 
-__all__ = []
+
+__all__ = [ # npqa
+    'get_cudnn_version',
+    'set_device',
+    'get_device',
+    'XPUPlace',
+    'is_compiled_with_xpu',
+    'is_compiled_with_cuda',
+    'is_compiled_with_rocm',
+    'is_compiled_with_npu'
+]
 
 _cudnn_version = None
 

@@ -113,15 +123,13 @@ def _convert_to_place(device):
         place = core.CPUPlace()
     elif lower_device == 'gpu':
         if not core.is_compiled_with_cuda():
-            raise ValueError(
-                "The device should not be 'gpu', " \
-                "since PaddlePaddle is not compiled with CUDA")
+            raise ValueError("The device should not be 'gpu', "
+                             "since PaddlePaddle is not compiled with CUDA")
         place = core.CUDAPlace(ParallelEnv().dev_id)
     elif lower_device == 'xpu':
         if not core.is_compiled_with_xpu():
-            raise ValueError(
-                "The device should not be 'xpu', " \
-                "since PaddlePaddle is not compiled with XPU")
+            raise ValueError("The device should not be 'xpu', "
+                             "since PaddlePaddle is not compiled with XPU")
         selected_xpus = os.getenv("FLAGS_selected_xpus", "0").split(",")
         device_id = int(selected_xpus[0])
         place = core.XPUPlace(device_id)

@@ -135,7 +143,7 @@ def _convert_to_place(device):
         if avaliable_gpu_device:
             if not core.is_compiled_with_cuda():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with CUDA".format(avaliable_gpu_device))
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]

@@ -144,7 +152,7 @@ def _convert_to_place(device):
         if avaliable_xpu_device:
             if not core.is_compiled_with_xpu():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with XPU".format(avaliable_xpu_device))
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
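
Besides restoring __all__, the message hunks above drop redundant explicit line continuations: inside parentheses Python continues lines implicitly and concatenates adjacent string literals, so the trailing backslashes did nothing. A small demonstration (variable names are illustrative):

# Both forms build the identical single-line message.
explicit = "The device should not be 'gpu', " \
           "since PaddlePaddle is not compiled with CUDA"
implicit = ("The device should not be 'gpu', "
            "since PaddlePaddle is not compiled with CUDA")
assert explicit == implicit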

python/paddle/distributed/parallel.py

Lines changed: 0 additions & 1 deletion
@@ -150,7 +150,6 @@ def _check_var_exists(var_name):
     init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
     if init_gloo:
         ep_rank_0 = parallel_env.trainer_endpoints[0].split(":")
-        ep_rank = parallel_env.trainer_endpoints[parallel_env.rank].split(":")
         manager = Manager()
         # glboal dict to store status
         http_server_d = manager.dict()

python/paddle/incubate/optimizer/lookahead.py

Lines changed: 0 additions & 3 deletions
@@ -282,9 +282,6 @@ def minimize(self,
         """
         assert isinstance(loss, Variable), "The loss should be an Tensor."
 
-        parameter_list = parameters if parameters \
-            else self._parameter_list
-
         # Apply inner optimizer to the main_program
         optimize_ops, params_grads = self.inner_optimizer.minimize(
             loss,
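
This deletion, like the ep_rank line removed from parallel.py above, is a flake8 F841 fix: a local assigned but never read. A tiny illustration of what the checker flags:

def demo():
    used = 1
    unused = 2  # flake8 reports F841 here; deleting it changes nothing
    return used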

python/paddle/incubate/optimizer/modelaverage.py

Lines changed: 2 additions & 5 deletions
@@ -415,7 +415,6 @@ def apply(self, executor=None, need_restore=True):
                                                        param)
                 old_num_accumulates = self._get_accumulator(
                     'old_num_accumulates', param)
-                num_updates = self._get_accumulator('num_updates', param)
                 sum_1 = self._get_accumulator('sum_1', param)
                 sum_2 = self._get_accumulator('sum_2', param)
                 sum_3 = self._get_accumulator('sum_3', param)

@@ -506,17 +505,15 @@ def _add_average_apply_op(self, block, param):
             self._get_accumulator('num_accumulates', param))
         old_num_accumulates = block._clone_variable(
             self._get_accumulator('old_num_accumulates', param))
-        num_updates = block._clone_variable(
-            self._get_accumulator('num_updates', param))
         # backup param value to grad
         layers.assign(input=param, output=grad)
         # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
         tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
         sum = layers.sum(x=[sum_1, sum_2, sum_3])
         tmp = layers.cast(
-            x=tmp, dtype='float32' if self._dtype == None else self._dtype)
+            x=tmp, dtype='float32' if self._dtype is None else self._dtype)
         sum = layers.cast(
-            x=sum, dtype='float32' if self._dtype == None else self._dtype)
+            x=sum, dtype='float32' if self._dtype is None else self._dtype)
         layers.ops._elementwise_div(x=sum, y=tmp, out=param)
 
     def _add_average_restore_op(self, block, param):
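
The `== None` to `is None` change is flake8 E711: None is a singleton, so the idiomatic test is identity, and `is None` cannot be fooled by a class overriding __eq__. A minimal sketch of the pattern being fixed:

dtype = None
target = 'float32' if dtype is None else dtype
assert target == 'float32'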

python/paddle/tensor/__init__.py

Lines changed: 0 additions & 3 deletions
@@ -25,7 +25,6 @@
 from .creation import zeros # noqa: F401
 from .creation import zeros_like # noqa: F401
 from .creation import arange # noqa: F401
-from .creation import eye # noqa: F401
 from .creation import full # noqa: F401
 from .creation import full_like # noqa: F401
 from .creation import triu # noqa: F401

@@ -82,7 +81,6 @@
 from .manipulation import squeeze_ # noqa: F401
 from .manipulation import stack # noqa: F401
 from .manipulation import strided_slice # noqa: F401
-from .manipulation import transpose # noqa: F401
 from .manipulation import unique # noqa: F401
 from .manipulation import unsqueeze # noqa: F401
 from .manipulation import unsqueeze_ # noqa: F401

@@ -143,7 +141,6 @@
 from .math import add_ # noqa: F401
 from .math import subtract # noqa: F401
 from .math import subtract_ # noqa: F401
-from .math import atan # noqa: F401
 from .math import logsumexp # noqa: F401
 from .math import inverse # noqa: F401
 from .math import log2 # noqa: F401
