4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -73,7 +73,7 @@ repos:

# | paddle/.+

# | python/paddle/[a-c].+
| python/paddle/[a-c].+

# | python/paddle/de.+

@@ -129,7 +129,7 @@ repos:

| paddle/.+

| python/paddle/[a-c].+
# | python/paddle/[a-c].+

| python/paddle/de.+

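For context, a pattern line such as | python/paddle/[a-c].+ normally sits inside a hook's verbose files regex in .pre-commit-config.yaml, where whitespace is ignored and # turns a branch into a comment, so commenting or uncommenting one line toggles which paths the hook covers. A minimal Python sketch of that behavior (the pattern below is an illustration, not the repo's actual regex):

import re

# Simplified stand-in for the (?x) verbose file-filter regex used by the hook;
# the real pattern contains more branches than shown here.
files_pattern = re.compile(
    r"""(?x)^(
        paddle/.+
        | python/paddle/[a-c].+   # the branch toggled in this diff
    )$"""
)

print(bool(files_pattern.match("python/paddle/amp/auto_cast.py")))    # True: covered once the branch is active
print(bool(files_pattern.match("python/paddle/distributed/foo.py")))  # False: 'd' falls outside [a-c]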
6 changes: 3 additions & 3 deletions python/paddle/amp/accuracy_compare.py
@@ -149,9 +149,9 @@ def __init__(
if fp32_tensor_info is not None and fp16_tensor_info is not None:
# Check whether the op name and data are equal
assert fp32_tensor_info.op_type == fp16_tensor_info.op_type
assert (
fp32_tensor_info.numel == fp16_tensor_info.numel
), f"Error:\n\tFP32 Tensor Info:{fp32_tensor_info}\n\tFP16 Tensor Info:{fp16_tensor_info}"
assert fp32_tensor_info.numel == fp16_tensor_info.numel, (
f"Error:\n\tFP32 Tensor Info:{fp32_tensor_info}\n\tFP16 Tensor Info:{fp16_tensor_info}"
)
# Fp16 divided by fp32
self.fp32_div_fp16_max_value = self._div(
self.fp16_max_value, self.fp32_max_value
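Most of the remaining hunks repeat one mechanical change: the assert message moves into parentheses instead of the condition being wrapped. A standalone sketch with illustrative names (not taken from the diff):

expected_numel, actual_numel = 8, 8

# Old layout: the condition is parenthesized and the message trails the closing paren.
assert (
    expected_numel == actual_numel
), f"numel mismatch: {expected_numel} vs {actual_numel}"

# New layout: the condition stays on the assert line and the long message is parenthesized.
assert expected_numel == actual_numel, (
    f"numel mismatch: {expected_numel} vs {actual_numel}"
)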
6 changes: 3 additions & 3 deletions python/paddle/amp/auto_cast.py
@@ -512,9 +512,9 @@ def amp_guard(
paddle.float32
>>> # doctest: -SKIP
"""
assert (
in_dynamic_or_pir_mode()
), "We only support 'amp_guard' in dynamic or pir mode."
assert in_dynamic_or_pir_mode(), (
"We only support 'amp_guard' in dynamic or pir mode."
)

amp_state = locals()
global _g_amp_state_
20 changes: 5 additions & 15 deletions python/paddle/apy/matmul_pass/matmul_variadic_tpl.py
@@ -116,18 +116,14 @@ def compile(
)

def get_kernel_arg_runtime_getters(self):
all_kernel_arg_id_and_unique_names = (
self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
)
all_kernel_arg_id_and_unique_names = self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
return ap.map(
lambda pair: pair[0].runtime_getter,
all_kernel_arg_id_and_unique_names,
)

def get_kernel_arg_types(self):
all_kernel_arg_id_and_unique_names = (
self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
)
all_kernel_arg_id_and_unique_names = self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
return ap.map(
lambda pair: pair[0].type, all_kernel_arg_id_and_unique_names
)
@@ -151,9 +147,7 @@ def declare_epilogue_arguments_field(pair):
f"{type_name} {field_name}" if for_declare else f"{field_name}"
)

all_kernel_arg_id_and_names = (
self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
)
all_kernel_arg_id_and_names = self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
return ", ".join(
ap.map(
declare_epilogue_arguments_field, all_kernel_arg_id_and_names
@@ -171,9 +165,7 @@ def declare_epilogue_arguments_field(pair):
type_name = self.dtype2type_name[dtype]
return f"{type_name} {field_name};"

generated_kernel_arg_id_and_names = (
self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items()
)
generated_kernel_arg_id_and_names = self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items()
return f"\n{indent}".join(
ap.map(
declare_epilogue_arguments_field,
@@ -190,9 +182,7 @@ def declare_epilogue_arguments_assign(pair):
)
return f"{param_obj_name}.{field_name} = {var_name};"

generated_kernel_arg_id_and_names = (
self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items()
)
generated_kernel_arg_id_and_names = self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items()
return f"\n{indent}".join(
ap.map(
declare_epilogue_arguments_assign,
6 changes: 3 additions & 3 deletions python/paddle/apy/matmul_pass/op_index_translator_util.py
@@ -160,9 +160,9 @@ def get_dim_var_name(i):
offset_expr = " + ".join(
ap.map(lambda elts: " * ".join(elts), var_name_and_dims_list)
)
assert (
len(self.output_properties[0].symbolic_shape) == 1
), "len(self.output_properties[0]) should be 1"
assert len(self.output_properties[0].symbolic_shape) == 1, (
"len(self.output_properties[0]) should be 1"
)
return [
index_code_gen_value_util.IndexCodeGenValue([f"({offset_expr})"])
]
6 changes: 3 additions & 3 deletions python/paddle/audio/datasets/esc50.py
@@ -179,9 +179,9 @@ def __init__(
archive: dict[str, str] | None = None,
**kwargs: Any,
) -> None:
assert split in range(
1, 6
), f'The selected split should be integer, and 1 <= split <= 5, but got {split}'
assert split in range(1, 6), (
f'The selected split should be integer, and 1 <= split <= 5, but got {split}'
)
if archive is not None:
self.archive = archive
files, labels = self._get_data(mode, split)
12 changes: 6 additions & 6 deletions python/paddle/audio/datasets/tess.py
@@ -106,12 +106,12 @@ def __init__(
archive: dict[str, str] | None = None,
**kwargs: Any,
) -> None:
assert isinstance(n_folds, int) and (
n_folds >= 1
), f'the n_folds should be integer and n_folds >= 1, but got {n_folds}'
assert split in range(
1, n_folds + 1
), f'The selected split should be integer and should be 1 <= split <= {n_folds}, but got {split}'
assert isinstance(n_folds, int) and (n_folds >= 1), (
f'the n_folds should be integer and n_folds >= 1, but got {n_folds}'
)
assert split in range(1, n_folds + 1), (
f'The selected split should be integer and should be 1 <= split <= {n_folds}, but got {split}'
)
if archive is not None:
self.archive = archive
files, labels = self._get_data(mode, n_folds, split)
10 changes: 4 additions & 6 deletions python/paddle/audio/features/layers.py
@@ -410,9 +410,9 @@ def __init__(
dtype: str = 'float32',
) -> None:
super().__init__()
assert (
n_mfcc <= n_mels
), f'n_mfcc cannot be larger than n_mels: {n_mfcc} vs {n_mels}'
assert n_mfcc <= n_mels, (
f'n_mfcc cannot be larger than n_mels: {n_mfcc} vs {n_mels}'
)
self._log_melspectrogram = LogMelSpectrogram(
sr=sr,
n_fft=n_fft,
@@ -446,7 +446,5 @@ def forward(self, x: Tensor) -> Tensor:
log_mel_feature = self._log_melspectrogram(x)
mfcc = paddle.matmul(
log_mel_feature.transpose((0, 2, 1)), self.dct_matrix
).transpose(
(0, 2, 1)
) # (B, n_mels, L)
).transpose((0, 2, 1)) # (B, n_mels, L)
return mfcc
30 changes: 15 additions & 15 deletions python/paddle/autograd/backward_mode.py
@@ -102,38 +102,38 @@ def check_tensors(
if isinstance(in_out_list, (list, tuple)):
assert len(in_out_list) > 0, f"{name} cannot be empty"
for each_var in in_out_list:
assert isinstance(
each_var, paddle.Tensor
), f"Elements of {name} must be paddle.Tensor"
assert isinstance(each_var, paddle.Tensor), (
f"Elements of {name} must be paddle.Tensor"
)
return in_out_list
else:
assert isinstance(
in_out_list, paddle.Tensor
), f"{name} must be Tensor or list of Tensor"
assert isinstance(in_out_list, paddle.Tensor), (
f"{name} must be Tensor or list of Tensor"
)
return [in_out_list]

tensors = check_tensors(tensors, "tensors")

assert len(tensors) == len(
set(tensors)
), "The argument 'tensors' of paddle.autograd.backward contains duplicate paddle.Tensor object."
assert len(tensors) == len(set(tensors)), (
"The argument 'tensors' of paddle.autograd.backward contains duplicate paddle.Tensor object."
)

if grad_tensors is not None:
if not isinstance(grad_tensors, (list, tuple)):
grad_tensors = [grad_tensors]

for each_tensor in grad_tensors:
if each_tensor is not None:
assert isinstance(
each_tensor, paddle.Tensor
), "The argument 'grad_tensors' of paddle.autograd.backward is invalid, it can be 'None', 'paddle.Tensor' or 'list[None/paddle.Tensor]'."
assert isinstance(each_tensor, paddle.Tensor), (
"The argument 'grad_tensors' of paddle.autograd.backward is invalid, it can be 'None', 'paddle.Tensor' or 'list[None/paddle.Tensor]'."
)
else:
grad_tensors = []

if len(grad_tensors) > 0:
assert len(tensors) == len(
grad_tensors
), "The length of grad_tensors must be equal to tensors"
assert len(tensors) == len(grad_tensors), (
"The length of grad_tensors must be equal to tensors"
)

assert isinstance(retain_graph, bool), "retain_graph must be True or False"

4 changes: 3 additions & 1 deletion python/paddle/autograd/backward_utils.py
@@ -652,7 +652,9 @@ def argument_to_value(while_op):

assert len(while_op.as_while_op().block_arguments()) + 1 == len(
while_op.operands_source()
), "while op's block_arguments size + 1 should same to while op's operands_source size"
), (
"while op's block_arguments size + 1 should same to while op's operands_source size"
)
arg_to_value_map = ValueDict()
value_to_arg_map = ValueDict()
for arg, value in zip(
6 changes: 3 additions & 3 deletions python/paddle/autograd/ir_backward.py
@@ -585,9 +585,9 @@ def update_input_grad_map(op, input_grads, all_inputs):
i += 1

def update_if_double_grad_input_grad_map(input_grads, all_inputs):
assert len(input_grads) == len(
all_inputs
), "input_grads should same to all_inputs"
assert len(input_grads) == len(all_inputs), (
"input_grads should same to all_inputs"
)
for input, input_grad in zip(all_inputs, input_grads):
if isinstance(input_grad, list):
state.value_to_valuegrad[input].append(input_grad)
10 changes: 4 additions & 6 deletions python/paddle/base/backward.py
@@ -1775,9 +1775,9 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
if block.desc.has_var_recursive(grad_var_name.encode()):
# meet invalid sum variables, remove the invalid operand.
new_inputs.append(grad_var_name)
assert (
len(new_inputs) > 0
), "After remove invalid variables, sum op have no inputs."
assert len(new_inputs) > 0, (
"After remove invalid variables, sum op have no inputs."
)
op_desc.set_input("X", new_inputs)

new_vars = set()
@@ -2105,9 +2105,7 @@ def append_backward(
loss, parameter_list, no_grad_set
)

grad_op_id_to_fwd_op = (
{}
) # for cuda graph usage, recording the mapping between grad op original id to fwd op
grad_op_id_to_fwd_op = {} # for cuda graph usage, recording the mapping between grad op original id to fwd op

check_type(
loss, 'loss', framework.Variable, 'paddle.static.append_backward'
30 changes: 15 additions & 15 deletions python/paddle/base/compiler.py
@@ -205,9 +205,9 @@ def _with_inference_optimize(self, config):
Returns:
self
"""
assert (
not self._is_inference
), "Already compiled with inference, cannot be recompiled."
assert not self._is_inference, (
"Already compiled with inference, cannot be recompiled."
)

assert any(
[
@@ -238,9 +238,9 @@ def _compile_data_parallel(self, places, use_device, scope=None):
assert scope is not None, ""
self._local_scopes = []

assert isinstance(
places, (list, tuple)
), f"Currently, The places type can only be list or tuple, but the input type is {type(places)}."
assert isinstance(places, (list, tuple)), (
f"Currently, The places type can only be list or tuple, but the input type is {type(places)}."
)

if self._build_strategy is None:
self._build_strategy = BuildStrategy()
@@ -255,9 +255,9 @@ def _compile_data_parallel(self, places, use_device, scope=None):
):
tps = self._program._trainers_endpoints

assert self._build_strategy.num_trainers == len(
tps
), "The trainer numbers is not equal to endpoint numbers."
assert self._build_strategy.num_trainers == len(tps), (
"The trainer numbers is not equal to endpoint numbers."
)
self._build_strategy.trainers_endpoints = tps

if self._program:
@@ -270,9 +270,9 @@ def _compile_data_parallel(self, places, use_device, scope=None):
)

if self._program is not None and self._program._enable_dgc:
assert (
self._build_strategy.num_trainers * len(places) > 1
), "DGC is not available for single card training."
assert self._build_strategy.num_trainers * len(places) > 1, (
"DGC is not available for single card training."
)
assert (
self._build_strategy.reduce_strategy
== BuildStrategy.ReduceStrategy.AllReduce
@@ -363,9 +363,9 @@ def _get_places(self, place, place_list):
has_set_place = place_list is not None
if has_set_place:
for p in place_list:
assert (
p._type() == place._type()
), "Place type not match. You may set wrong type of places."
assert p._type() == place._type(), (
"Place type not match. You may set wrong type of places."
)
else:
if isinstance(place, core.CUDAPlace):
place_list = cuda_places()
24 changes: 12 additions & 12 deletions python/paddle/base/core.py
@@ -552,36 +552,36 @@ def _set_prim_backward_blacklist(*args):


def _set_prim_backward_enabled(value: bool, print_flag: bool = False):
assert isinstance(
value, bool
), f"value should be bool, but got {type(value)}"
assert isinstance(value, bool), (
f"value should be bool, but got {type(value)}"
)
__set_bwd_prim_enabled(value)
if _prim_return_log() or print_flag:
print("backward prim enabled: ", bool(_is_bwd_prim_enabled()))


def _set_prim_forward_enabled(value: bool, print_flag: bool = False):
assert isinstance(
value, bool
), f"value should be bool, but got {type(value)}"
assert isinstance(value, bool), (
f"value should be bool, but got {type(value)}"
)
__set_fwd_prim_enabled(value)
if _prim_return_log() or print_flag:
print("forward prim enabled: ", bool(_is_fwd_prim_enabled()))


def set_prim_eager_enabled(value: bool, print_flag: bool = False):
assert isinstance(
value, bool
), f"value should be bool, but got {type(value)}"
assert isinstance(value, bool), (
f"value should be bool, but got {type(value)}"
)
__set_eager_prim_enabled(value)
if _prim_return_log() or print_flag:
print("eager prim enabled: ", bool(_is_eager_prim_enabled()))


def _set_prim_all_enabled(value: bool, print_flag: bool = False):
assert isinstance(
value, bool
), f"value should be bool, but got {type(value)}"
assert isinstance(value, bool), (
f"value should be bool, but got {type(value)}"
)
__set_all_prim_enabled(value)
if _prim_return_log() or print_flag:
print(