
Commit 4d59d9c

[CodeStyle] black -> ruff format migration - part 30 (#74744)
1 parent 9c90d4a commit 4d59d9c


44 files changed: +557 -546 lines (large commit; only a subset of the diff is reproduced below)
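
Nearly every hunk below is the same mechanical change. black formats an overlong assert by wrapping the condition in parentheses and letting the message trail the closing parenthesis; ruff format instead keeps the condition on the assert line and wraps the long message in parentheses. A minimal sketch of the two styles (illustrative names, not taken from the diff; both asserts pass):

    optimizer = object()  # stand-in truthy value so both asserts pass

    # black: condition wrapped in parentheses, message after the close
    assert (
        optimizer
    ), "model not ready, please call `model.prepare()` first"

    # ruff format: condition stays inline, long message is parenthesized
    assert optimizer, (
        "model not ready, please call `model.prepare()` first"
    )

The two forms are semantically identical; only the wrapping changes, which is why the migration can be split into mechanical parts like this one.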

.pre-commit-config.yaml
Lines changed: 2 additions & 2 deletions

@@ -85,7 +85,7 @@ repos:
           | python/paddle/distributed/[g-z].+
-          # | python/paddle/[e-i].+
+          | python/paddle/[e-i].+
           # | python/paddle/j.+
@@ -141,7 +141,7 @@ repos:
           # | python/paddle/distributed/[g-z].+
-          | python/paddle/[e-i].+
+          # | python/paddle/[e-i].+
           | python/paddle/j.+
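
The two hunks are complementary: the pattern `| python/paddle/[e-i].+` is uncommented in the ruff format file filter and commented out in the black filter, handing the matching directories over to ruff format. A quick hypothetical check of what the character class covers (paths chosen for illustration only):

    import re

    pattern = re.compile(r"python/paddle/[e-i].+")

    for path in [
        "python/paddle/framework/io.py",      # 'f' is in [e-i]: matched
        "python/paddle/hapi/model.py",        # 'h' is in [e-i]: matched
        "python/paddle/incubate/asp/asp.py",  # 'i' is in [e-i]: matched
        "python/paddle/jit/api.py",           # 'j' is outside [e-i]: not matched
    ]:
        print(path, bool(pattern.match(path)))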

python/paddle/framework/io.py
Lines changed: 3 additions & 3 deletions

@@ -235,9 +235,9 @@ def _load_state_dict_from_save_inference_model(model_path, config):
                 structured_name = extra_var_info[var_name].get(
                     'structured_name', None
                 )
-                assert (
-                    structured_name is not None
-                ), f"Cannot find saved variable ({var_name})'s structured name in saved model."
+                assert structured_name is not None, (
+                    f"Cannot find saved variable ({var_name})'s structured name in saved model."
+                )
                 structured_para_dict[structured_name] = load_param_dict[
                     var_name
                 ]

python/paddle/hapi/model.py
Lines changed: 48 additions & 46 deletions

@@ -368,13 +368,13 @@ def mode(self, value):
         self.model.mode = value

     def train_batch(self, inputs, labels=None, update=True):
-        assert (
-            self.model._optimizer
-        ), "model not ready, please call `model.prepare()` first"
+        assert self.model._optimizer, (
+            "model not ready, please call `model.prepare()` first"
+        )
         self.mode = 'train'
-        assert (
-            update is True
-        ), "Does not support `update == False` in static graph mode by now."
+        assert update is True, (
+            "Does not support `update == False` in static graph mode by now."
+        )
         return self._run(inputs, labels)

     def eval_batch(self, inputs, labels=None):
@@ -500,16 +500,16 @@ def _load_optimizer(self, state, executor):
             # However, dygraph wouldn't save it.
             if var.name not in state:
                 continue
-            assert (
-                var.name in converted_state
-            ), f"variable [{var.name}] is not in optimizer state file"
+            assert var.name in converted_state, (
+                f"variable [{var.name}] is not in optimizer state file"
+            )
             self._set_var(var.name, converted_state[var.name])

     def _run(self, inputs, labels=None):
         compiled_prog = self._compiled_progs.get(self.mode, None)
-        assert (
-            compiled_prog
-        ), "Model is not ready, please call `model.prepare()` first"
+        assert compiled_prog, (
+            "Model is not ready, please call `model.prepare()` first"
+        )

         inputs = to_list(inputs)
         if labels is not None:
@@ -689,9 +689,9 @@ def _make_program(self, mode):
         }

     def _initialize(self, prog, mode):
-        assert (
-            self.model._place is not None
-        ), "device is not set, please call `model.prepare()` first"
+        assert self.model._place is not None, (
+            "device is not set, please call `model.prepare()` first"
+        )

         place = self.model._place
@@ -756,13 +756,13 @@ def mode(self, value):
         self.model.mode = value

     def train_batch(self, inputs, labels=None, update=True):
-        assert (
-            self.model._optimizer
-        ), "model not ready, please call `model.prepare()` first"
+        assert self.model._optimizer, (
+            "model not ready, please call `model.prepare()` first"
+        )
         self.mode = 'train'
-        assert (
-            update is True
-        ), "Does not support `update == False` in static graph mode by now."
+        assert update is True, (
+            "Does not support `update == False` in static graph mode by now."
+        )
         return self._run(inputs, labels)

     def eval_batch(self, inputs, labels=None):
@@ -919,9 +919,9 @@ def _load_optimizer(self, state, executor):
                 converted_state.pop(dy_state_name)
             )

-            assert (
-                var.name in converted_state
-            ), f"variable [{var.name}] is not in optimizer state file"
+            assert var.name in converted_state, (
+                f"variable [{var.name}] is not in optimizer state file"
+            )
             self._set_var(var, converted_state[var.name])

     def _set_var(self, var, ndarray):
@@ -940,9 +940,9 @@ def _set_var(self, var, ndarray):

     def _run(self, inputs, labels=None):
         compiled_prog = self._compiled_progs.get(self.mode, None)
-        assert (
-            compiled_prog
-        ), "Model is not ready, please call `model.prepare()` first"
+        assert compiled_prog, (
+            "Model is not ready, please call `model.prepare()` first"
+        )

         inputs = to_list(inputs)
         if labels is not None:
@@ -1141,9 +1141,9 @@ def _compile_and_initialize(self, prog, mode):
         if compiled_prog is not None:
             return compiled_prog

-        assert (
-            self.model._place is not None
-        ), "device is not set, please call `model.prepare()` first"
+        assert self.model._place is not None, (
+            "device is not set, please call `model.prepare()` first"
+        )

         place = self.model._place
@@ -1234,9 +1234,9 @@ def mode(self, value):

     # TODO multi device in dygraph mode not implemented at present time
     def train_batch(self, inputs, labels=None, update=True):
-        assert (
-            self.model._optimizer
-        ), "model not ready, please call `model.prepare()` first"
+        assert self.model._optimizer, (
+            "model not ready, please call `model.prepare()` first"
+        )
         self.model.network.train()
         self.mode = 'train'
         inputs = to_list(inputs)
@@ -2031,7 +2031,9 @@ def _check_pure_fp16_configs():
             assert isinstance(
                 self._optimizer._grad_clip,
                 (paddle.nn.ClipGradByGlobalNorm, paddle.nn.ClipGradByNorm),
-            ), "Only ClipGradByNorm and ClipGradByGlobalNorm are supported in amp training with level=O2 currently."
+            ), (
+                "Only ClipGradByNorm and ClipGradByGlobalNorm are supported in amp training with level=O2 currently."
+            )

         self._adapter._amp_custom_lists = {}
         self._adapter._amp_configs = {}
@@ -2188,9 +2190,9 @@ def prepare(

         metrics = metrics or []
         for metric in to_list(metrics):
-            assert isinstance(
-                metric, Metric
-            ), f"{metric.__class__.__name__} is not sub class of Metric"
+            assert isinstance(metric, Metric), (
+                f"{metric.__class__.__name__} is not sub class of Metric"
+            )
         self._metrics = to_list(metrics)
         self._prepare_amp(amp_configs)
@@ -2353,9 +2355,9 @@ def fit(
         if isinstance(batch_size, (tuple, list)) and all(
             isinstance(x, int) for x in batch_size
         ):
-            assert (
-                len(batch_size) == 2
-            ), "batch_size length error, expected train_batch_size and eval_batch_size."
+            assert len(batch_size) == 2, (
+                "batch_size length error, expected train_batch_size and eval_batch_size."
+            )
             train_batch_size, eval_batch_size = batch_size
         elif isinstance(batch_size, int):
             train_batch_size, eval_batch_size = batch_size, batch_size
@@ -2748,9 +2750,9 @@ def _save_inference_model(self, path: str) -> None:
         params_filename = file_prefix + INFER_PARAMS_SUFFIX

         prog = self._adapter._progs.get('test', None)
-        assert (
-            prog
-        ), "Model is not ready, please call `model.prepare()` first"
+        assert prog, (
+            "Model is not ready, please call `model.prepare()` first"
+        )

         if in_pir_mode():
             infer_prog = prog
@@ -2914,9 +2916,9 @@ def summary(
         {'total_params': 61610, 'trainable_params': 61610}

         """
-        assert (
-            input_size is not None or self._inputs is not None
-        ), "'input_size' or 'self._input' must be set"
+        assert input_size is not None or self._inputs is not None, (
+            "'input_size' or 'self._input' must be set"
+        )
         if input_size is not None:
             _input_size = input_size
         else:
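
One hunk above stands apart: in @@ -2031 the asserted expression is itself a multi-line isinstance(...) call, so ruff format leaves the condition as black wrote it and only moves the overlong message into its own parenthesized block. A sketch of that shape with local stand-in classes (not Paddle's real API objects):

    class ClipGradByGlobalNorm: ...
    class ClipGradByNorm: ...

    grad_clip = ClipGradByNorm()  # stand-in object for the sketch

    assert isinstance(
        grad_clip,
        (ClipGradByGlobalNorm, ClipGradByNorm),
    ), (
        "Only ClipGradByNorm and ClipGradByGlobalNorm are supported here."
    )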

python/paddle/hapi/model_summary.py
Lines changed: 3 additions & 3 deletions

@@ -348,10 +348,10 @@ def summary(
         for item in input_size:
             if isinstance(item, int):
                 item = (item,)
-            assert isinstance(
-                item, (tuple, InputSpec)
-            ), f'When input_size is list, \
+            assert isinstance(item, (tuple, InputSpec)), (
+                f'When input_size is list, \
 expect item in input_size is a tuple or InputSpec, but got {type(item)}'
+            )

             if isinstance(item, InputSpec):
                 _input_size.append(tuple(item.shape))
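
The trailing backslash inside the f-string above is a line continuation within the string literal, so the message spans two source lines but contains no newline at runtime; the hunk keeps it intact rather than re-wrapping the string. A minimal demonstration with a hypothetical shortened message:

    # the trailing backslash continues the string literal, so the
    # runtime value contains no newline
    msg = f'When input_size is list, \
expect item to be a tuple'
    assert msg == 'When input_size is list, expect item to be a tuple'
    assert '\n' not in msg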

python/paddle/incubate/asp/asp.py
Lines changed: 12 additions & 10 deletions

@@ -464,9 +464,9 @@ def prune_model(
         'mask_2d_greedy': MaskAlgo.MASK_2D_GREEDY,
         'mask_2d_best': MaskAlgo.MASK_2D_BEST,
     }
-    assert (
-        mask_algo in MaskAlgo_mapping
-    ), 'The "mask_algo" should be one of ["mask_1d", "mask_2d_greedy", "mask_2d_best"]'
+    assert mask_algo in MaskAlgo_mapping, (
+        'The "mask_algo" should be one of ["mask_1d", "mask_2d_greedy", "mask_2d_best"]'
+    )

     prune_func = None
     if isinstance(model, paddle.nn.Layer):
@@ -685,9 +685,9 @@ def prune_model_by_layer(
         target_program = None
         for param in layer.parameters():
             target_program = param.block.program
-        assert (
-            target_program is not None
-        ), 'Cannot get paddle.static.Program from Paddle.nn.Layer.'
+        assert target_program is not None, (
+            'Cannot get paddle.static.Program from Paddle.nn.Layer.'
+        )
         return ASPHelper.prune_model_by_program(
             place,
             target_program,
@@ -795,7 +795,9 @@ def _is_supported_layer(
         return False

     @classmethod
-    def _get_prune_func_by_name(cls, param_name: str) -> Callable[
+    def _get_prune_func_by_name(
+        cls, param_name: str
+    ) -> Callable[
         [npt.NDArray[Any], int, int, MaskAlgo, str],
         tuple[npt.NDArray[Any], npt.NDArray[Any]],
     ]:
@@ -1036,9 +1038,9 @@ def set_state_dict(self, state_dict: dict[str, Tensor]) -> None:
         )
         for param_name, var in asp_info.mask_vars.items():
             param_mask_name = ASPHelper._get_mask_name(param_name)
-            assert (
-                param_mask_name in state_dict
-            ), f"The {param_mask_name} is not found."
+            assert param_mask_name in state_dict, (
+                f"The {param_mask_name} is not found."
+            )
             var.set_value(state_dict[param_mask_name])
             asp_info.update_masks(param_name, var.numpy())
         return self._optimizer.set_state_dict(state_dict)
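
Besides the assert rewrites, the @@ -795 hunk reflows a def whose return annotation is a long Callable[...]: black split inside the annotation, while ruff format breaks the parameter list out first. A sketch of the resulting shape with simplified annotations (the real method is a classmethod and uses npt.NDArray):

    from collections.abc import Callable
    from typing import Any

    # ruff format moves the parameters onto their own lines before it
    # considers splitting the return annotation
    def get_prune_func_by_name(
        param_name: str,
    ) -> Callable[[Any, int, int], tuple[Any, Any]]:
        raise NotImplementedError("sketch only")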

python/paddle/incubate/asp/utils.py
Lines changed: 3 additions & 3 deletions

@@ -74,9 +74,9 @@ def get_checking_method(mask_algo: MaskAlgo) -> CheckMethod:
     >>> print(CheckMethod.get_checking_method(MaskAlgo.MASK_2D_BEST))
     CheckMethod.CHECK_2D
     """
-    assert isinstance(
-        mask_algo, MaskAlgo
-    ), "mask_algo should be MaskAlgo type"
+    assert isinstance(mask_algo, MaskAlgo), (
+        "mask_algo should be MaskAlgo type"
+    )
     if mask_algo == MaskAlgo.MASK_1D:
         return CheckMethod.CHECK_1D
     else:
