4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -85,7 +85,7 @@ repos:

# | python/paddle/distributed/[g-z].+

-# | python/paddle/[e-i].+
+| python/paddle/[e-i].+

# | python/paddle/j.+

@@ -141,7 +141,7 @@ repos:

| python/paddle/distributed/[g-z].+

-| python/paddle/[e-i].+
+# | python/paddle/[e-i].+

| python/paddle/j.+

6 changes: 3 additions & 3 deletions python/paddle/framework/io.py
@@ -235,9 +235,9 @@ def _load_state_dict_from_save_inference_model(model_path, config):
structured_name = extra_var_info[var_name].get(
'structured_name', None
)
-assert (
-structured_name is not None
-), f"Cannot find saved variable ({var_name})'s structured name in saved model."
+assert structured_name is not None, (
+f"Cannot find saved variable ({var_name})'s structured name in saved model."
+)
structured_para_dict[structured_name] = load_param_dict[
var_name
]
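The io.py hunk above shows the pattern that every Python hunk in this diff applies: the assert condition moves onto the `assert` line and only the failure message is wrapped in parentheses, presumably the output of the formatter whose path coverage is toggled in the `.pre-commit-config.yaml` hunks above. A minimal runnable sketch of the two shapes, using hypothetical names rather than the Paddle code:

```python
# Sketch of the assert reformatting applied throughout this diff.
# `value` and `key` are hypothetical stand-ins, not Paddle identifiers.


def check_old(value, key):
    # Old wrapping: the condition is parenthesized and split across lines,
    # and the message trails the closing parenthesis.
    assert (
        value is not None
    ), f"Cannot find value for key ({key})."


def check_new(value, key):
    # New wrapping: the condition stays on the `assert` line and only the
    # message is parenthesized.
    assert value is not None, (
        f"Cannot find value for key ({key})."
    )


check_old(1, "a")
check_new(1, "a")
```

Both forms are equivalent at runtime; only the line wrapping differs.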
94 changes: 48 additions & 46 deletions python/paddle/hapi/model.py
@@ -368,13 +368,13 @@ def mode(self, value):
self.model.mode = value

def train_batch(self, inputs, labels=None, update=True):
-assert (
-self.model._optimizer
-), "model not ready, please call `model.prepare()` first"
+assert self.model._optimizer, (
+"model not ready, please call `model.prepare()` first"
+)
self.mode = 'train'
-assert (
-update is True
-), "Does not support `update == False` in static graph mode by now."
+assert update is True, (
+"Does not support `update == False` in static graph mode by now."
+)
return self._run(inputs, labels)

def eval_batch(self, inputs, labels=None):
@@ -500,16 +500,16 @@ def _load_optimizer(self, state, executor):
# However, dygraph wouldn't save it.
if var.name not in state:
continue
-assert (
-var.name in converted_state
-), f"variable [{var.name}] is not in optimizer state file"
+assert var.name in converted_state, (
+f"variable [{var.name}] is not in optimizer state file"
+)
self._set_var(var.name, converted_state[var.name])

def _run(self, inputs, labels=None):
compiled_prog = self._compiled_progs.get(self.mode, None)
-assert (
-compiled_prog
-), "Model is not ready, please call `model.prepare()` first"
+assert compiled_prog, (
+"Model is not ready, please call `model.prepare()` first"
+)

inputs = to_list(inputs)
if labels is not None:
@@ -689,9 +689,9 @@ def _make_program(self, mode):
}

def _initialize(self, prog, mode):
-assert (
-self.model._place is not None
-), "device is not set, please call `model.prepare()` first"
+assert self.model._place is not None, (
+"device is not set, please call `model.prepare()` first"
+)

place = self.model._place

@@ -756,13 +756,13 @@ def mode(self, value):
self.model.mode = value

def train_batch(self, inputs, labels=None, update=True):
-assert (
-self.model._optimizer
-), "model not ready, please call `model.prepare()` first"
+assert self.model._optimizer, (
+"model not ready, please call `model.prepare()` first"
+)
self.mode = 'train'
-assert (
-update is True
-), "Does not support `update == False` in static graph mode by now."
+assert update is True, (
+"Does not support `update == False` in static graph mode by now."
+)
return self._run(inputs, labels)

def eval_batch(self, inputs, labels=None):
@@ -919,9 +919,9 @@ def _load_optimizer(self, state, executor):
converted_state.pop(dy_state_name)
)

-assert (
-var.name in converted_state
-), f"variable [{var.name}] is not in optimizer state file"
+assert var.name in converted_state, (
+f"variable [{var.name}] is not in optimizer state file"
+)
self._set_var(var, converted_state[var.name])

def _set_var(self, var, ndarray):
@@ -940,9 +940,9 @@ def _set_var(self, var, ndarray):

def _run(self, inputs, labels=None):
compiled_prog = self._compiled_progs.get(self.mode, None)
-assert (
-compiled_prog
-), "Model is not ready, please call `model.prepare()` first"
+assert compiled_prog, (
+"Model is not ready, please call `model.prepare()` first"
+)

inputs = to_list(inputs)
if labels is not None:
@@ -1141,9 +1141,9 @@ def _compile_and_initialize(self, prog, mode):
if compiled_prog is not None:
return compiled_prog

-assert (
-self.model._place is not None
-), "device is not set, please call `model.prepare()` first"
+assert self.model._place is not None, (
+"device is not set, please call `model.prepare()` first"
+)

place = self.model._place

@@ -1234,9 +1234,9 @@ def mode(self, value):

# TODO multi device in dygraph mode not implemented at present time
def train_batch(self, inputs, labels=None, update=True):
-assert (
-self.model._optimizer
-), "model not ready, please call `model.prepare()` first"
+assert self.model._optimizer, (
+"model not ready, please call `model.prepare()` first"
+)
self.model.network.train()
self.mode = 'train'
inputs = to_list(inputs)
@@ -2031,7 +2031,9 @@ def _check_pure_fp16_configs():
assert isinstance(
self._optimizer._grad_clip,
(paddle.nn.ClipGradByGlobalNorm, paddle.nn.ClipGradByNorm),
), "Only ClipGradByNorm and ClipGradByGlobalNorm are supported in amp training with level=O2 currently."
), (
"Only ClipGradByNorm and ClipGradByGlobalNorm are supported in amp training with level=O2 currently."
)

self._adapter._amp_custom_lists = {}
self._adapter._amp_configs = {}
@@ -2188,9 +2190,9 @@ def prepare(

metrics = metrics or []
for metric in to_list(metrics):
-assert isinstance(
-metric, Metric
-), f"{metric.__class__.__name__} is not sub class of Metric"
+assert isinstance(metric, Metric), (
+f"{metric.__class__.__name__} is not sub class of Metric"
+)
self._metrics = to_list(metrics)
self._prepare_amp(amp_configs)

@@ -2353,9 +2355,9 @@ def fit(
if isinstance(batch_size, (tuple, list)) and all(
isinstance(x, int) for x in batch_size
):
-assert (
-len(batch_size) == 2
-), "batch_size length error, expected train_batch_size and eval_batch_size."
+assert len(batch_size) == 2, (
+"batch_size length error, expected train_batch_size and eval_batch_size."
+)
train_batch_size, eval_batch_size = batch_size
elif isinstance(batch_size, int):
train_batch_size, eval_batch_size = batch_size, batch_size
@@ -2748,9 +2750,9 @@ def _save_inference_model(self, path: str) -> None:
params_filename = file_prefix + INFER_PARAMS_SUFFIX

prog = self._adapter._progs.get('test', None)
-assert (
-prog
-), "Model is not ready, please call `model.prepare()` first"
+assert prog, (
+"Model is not ready, please call `model.prepare()` first"
+)

if in_pir_mode():
infer_prog = prog
@@ -2914,9 +2916,9 @@ def summary(
{'total_params': 61610, 'trainable_params': 61610}

"""
-assert (
-input_size is not None or self._inputs is not None
-), "'input_size' or 'self._input' must be set"
+assert input_size is not None or self._inputs is not None, (
+"'input_size' or 'self._input' must be set"
+)
if input_size is not None:
_input_size = input_size
else:
6 changes: 3 additions & 3 deletions python/paddle/hapi/model_summary.py
@@ -348,10 +348,10 @@ def summary(
for item in input_size:
if isinstance(item, int):
item = (item,)
-assert isinstance(
-item, (tuple, InputSpec)
-), f'When input_size is list, \
+assert isinstance(item, (tuple, InputSpec)), (
+f'When input_size is list, \
expect item in input_size is a tuple or InputSpec, but got {type(item)}'
+)

if isinstance(item, InputSpec):
_input_size.append(tuple(item.shape))
22 changes: 12 additions & 10 deletions python/paddle/incubate/asp/asp.py
@@ -464,9 +464,9 @@ def prune_model(
'mask_2d_greedy': MaskAlgo.MASK_2D_GREEDY,
'mask_2d_best': MaskAlgo.MASK_2D_BEST,
}
-assert (
-mask_algo in MaskAlgo_mapping
-), 'The "mask_algo" should be one of ["mask_1d", "mask_2d_greedy", "mask_2d_best"]'
+assert mask_algo in MaskAlgo_mapping, (
+'The "mask_algo" should be one of ["mask_1d", "mask_2d_greedy", "mask_2d_best"]'
+)

prune_func = None
if isinstance(model, paddle.nn.Layer):
@@ -685,9 +685,9 @@ def prune_model_by_layer(
target_program = None
for param in layer.parameters():
target_program = param.block.program
-assert (
-target_program is not None
-), 'Cannot get paddle.static.Program from Paddle.nn.Layer.'
+assert target_program is not None, (
+'Cannot get paddle.static.Program from Paddle.nn.Layer.'
+)
return ASPHelper.prune_model_by_program(
place,
target_program,
@@ -795,7 +795,9 @@ def _is_supported_layer(
return False

@classmethod
-def _get_prune_func_by_name(cls, param_name: str) -> Callable[
+def _get_prune_func_by_name(
+cls, param_name: str
+) -> Callable[
[npt.NDArray[Any], int, int, MaskAlgo, str],
tuple[npt.NDArray[Any], npt.NDArray[Any]],
]:
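The hunk just above is the only change in the Python files that is not an assert rewrite: the same formatter pass also reflows an over-long signature so the parameter list breaks before the long `Callable[...]` return annotation. A simplified, self-contained sketch of the two wrappings (the function name and types are placeholders, not the Paddle API):

```python
# Sketch of the signature reflow shown in the asp.py hunk above; names and
# types here are simplified placeholders.
from collections.abc import Callable


def get_prune_func_old(name: str) -> Callable[
    [bytes, int, int, str],
    tuple[bytes, bytes],
]:
    # Old wrapping: parameters stay on the `def` line; only the return
    # annotation is split across lines.
    ...


def get_prune_func_new(
    name: str,
) -> Callable[
    [bytes, int, int, str],
    tuple[bytes, bytes],
]:
    # New wrapping: the parameter list is split first, then the return
    # annotation continues across lines.
    ...
```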
@@ -1036,9 +1038,9 @@ def set_state_dict(self, state_dict: dict[str, Tensor]) -> None:
)
for param_name, var in asp_info.mask_vars.items():
param_mask_name = ASPHelper._get_mask_name(param_name)
-assert (
-param_mask_name in state_dict
-), f"The {param_mask_name} is not found."
+assert param_mask_name in state_dict, (
+f"The {param_mask_name} is not found."
+)
var.set_value(state_dict[param_mask_name])
asp_info.update_masks(param_name, var.numpy())
return self._optimizer.set_state_dict(state_dict)
6 changes: 3 additions & 3 deletions python/paddle/incubate/asp/utils.py
@@ -74,9 +74,9 @@ def get_checking_method(mask_algo: MaskAlgo) -> CheckMethod:
>>> print(CheckMethod.get_checking_method(MaskAlgo.MASK_2D_BEST))
CheckMethod.CHECK_2D
"""
-assert isinstance(
-mask_algo, MaskAlgo
-), "mask_algo should be MaskAlgo type"
+assert isinstance(mask_algo, MaskAlgo), (
+"mask_algo should be MaskAlgo type"
+)
if mask_algo == MaskAlgo.MASK_1D:
return CheckMethod.CHECK_1D
else: