
Commit 253e28b

gouzil and SigureMo authored

[CodeStyle] black -> ruff format migration - part 24 (#74709)

---------

Co-authored-by: SigureMo <[email protected]>

1 parent a412420   commit 253e28b

28 files changed (+341 / -342 lines)
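Most of the hunks below are the same mechanical change: ruff format breaks long assert statements differently from black. The sketch below is illustrative only (value and limit are placeholder names, not identifiers from this commit). black parenthesizes the asserted condition, while ruff format keeps the condition on the assert line and parenthesizes the message instead:

    # black style: the condition is wrapped when the statement is too long
    assert (
        value <= limit
    ), f"value cannot be larger than limit: {value} vs {limit}"

    # ruff format style: the condition stays inline, the message is wrapped
    assert value <= limit, (
        f"value cannot be larger than limit: {value} vs {limit}"
    )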

.pre-commit-config.yaml

Lines changed: 2 additions & 2 deletions
@@ -73,7 +73,7 @@ repos:
                 # | paddle/.+
-                # | python/paddle/[a-c].+
+                | python/paddle/[a-c].+
                 # | python/paddle/de.+

@@ -129,7 +129,7 @@ repos:
                 | paddle/.+
-                | python/paddle/[a-c].+
+                # | python/paddle/[a-c].+
                 | python/paddle/de.+
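These two hunks hand the python/paddle/[a-c].+ pattern from one formatter hook to the other: the pattern is uncommented in the first files block and commented out in the second, presumably moving those directories from the black hook to the ruff format hook as part of this migration series.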

python/paddle/amp/accuracy_compare.py

Lines changed: 3 additions & 3 deletions
@@ -149,9 +149,9 @@ def __init__(
         if fp32_tensor_info is not None and fp16_tensor_info is not None:
             # Check whether the op name and data are equal
             assert fp32_tensor_info.op_type == fp16_tensor_info.op_type
-            assert (
-                fp32_tensor_info.numel == fp16_tensor_info.numel
-            ), f"Error:\n\tFP32 Tensor Info:{fp32_tensor_info}\n\tFP16 Tensor Info:{fp16_tensor_info}"
+            assert fp32_tensor_info.numel == fp16_tensor_info.numel, (
+                f"Error:\n\tFP32 Tensor Info:{fp32_tensor_info}\n\tFP16 Tensor Info:{fp16_tensor_info}"
+            )
             # Fp16 divided by fp32
             self.fp32_div_fp16_max_value = self._div(
                 self.fp16_max_value, self.fp32_max_value

python/paddle/amp/auto_cast.py

Lines changed: 3 additions & 3 deletions
@@ -512,9 +512,9 @@ def amp_guard(
             paddle.float32
             >>> # doctest: -SKIP
     """
-    assert (
-        in_dynamic_or_pir_mode()
-    ), "We only support 'amp_guard' in dynamic or pir mode."
+    assert in_dynamic_or_pir_mode(), (
+        "We only support 'amp_guard' in dynamic or pir mode."
+    )

     amp_state = locals()
     global _g_amp_state_

python/paddle/apy/matmul_pass/matmul_variadic_tpl.py

Lines changed: 5 additions & 15 deletions
@@ -116,18 +116,14 @@ def compile(
         )

     def get_kernel_arg_runtime_getters(self):
-        all_kernel_arg_id_and_unique_names = (
-            self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
-        )
+        all_kernel_arg_id_and_unique_names = self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
         return ap.map(
             lambda pair: pair[0].runtime_getter,
             all_kernel_arg_id_and_unique_names,
         )

     def get_kernel_arg_types(self):
-        all_kernel_arg_id_and_unique_names = (
-            self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
-        )
+        all_kernel_arg_id_and_unique_names = self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
         return ap.map(
             lambda pair: pair[0].type, all_kernel_arg_id_and_unique_names
         )

@@ -151,9 +147,7 @@ def declare_epilogue_arguments_field(pair):
                 f"{type_name} {field_name}" if for_declare else f"{field_name}"
             )

-        all_kernel_arg_id_and_names = (
-            self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
-        )
+        all_kernel_arg_id_and_names = self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
         return ", ".join(
             ap.map(
                 declare_epilogue_arguments_field, all_kernel_arg_id_and_names

@@ -171,9 +165,7 @@ def declare_epilogue_arguments_field(pair):
             type_name = self.dtype2type_name[dtype]
             return f"{type_name} {field_name};"

-        generated_kernel_arg_id_and_names = (
-            self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items()
-        )
+        generated_kernel_arg_id_and_names = self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items()
         return f"\n{indent}".join(
             ap.map(
                 declare_epilogue_arguments_field,

@@ -190,9 +182,7 @@ def declare_epilogue_arguments_assign(pair):
             )
             return f"{param_obj_name}.{field_name} = {var_name};"

-        generated_kernel_arg_id_and_names = (
-            self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items()
-        )
+        generated_kernel_arg_id_and_names = self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items()
         return f"\n{indent}".join(
             ap.map(
                 declare_epilogue_arguments_assign,
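The matmul_pass hunks show a second recurring difference: assignments whose right-hand side is a single long attribute/call chain lose black's wrapping parentheses and are collapsed back onto one line. A rough illustration with placeholder names (registry and mapping are made up, not identifiers from this file):

    # black style: the long right-hand side is wrapped in parentheses
    mapping = (
        registry.all_kernel_arg_id2unique_name.items()
    )

    # ruff format output in this diff: the same assignment kept on one line
    mapping = registry.all_kernel_arg_id2unique_name.items()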

python/paddle/apy/matmul_pass/op_index_translator_util.py

Lines changed: 3 additions & 3 deletions
@@ -160,9 +160,9 @@ def get_dim_var_name(i):
         offset_expr = " + ".join(
             ap.map(lambda elts: " * ".join(elts), var_name_and_dims_list)
         )
-        assert (
-            len(self.output_properties[0].symbolic_shape) == 1
-        ), "len(self.output_properties[0]) should be 1"
+        assert len(self.output_properties[0].symbolic_shape) == 1, (
+            "len(self.output_properties[0]) should be 1"
+        )
         return [
             index_code_gen_value_util.IndexCodeGenValue([f"({offset_expr})"])
         ]

python/paddle/audio/datasets/esc50.py

Lines changed: 3 additions & 3 deletions
@@ -179,9 +179,9 @@ def __init__(
         archive: dict[str, str] | None = None,
         **kwargs: Any,
     ) -> None:
-        assert split in range(
-            1, 6
-        ), f'The selected split should be integer, and 1 <= split <= 5, but got {split}'
+        assert split in range(1, 6), (
+            f'The selected split should be integer, and 1 <= split <= 5, but got {split}'
+        )
         if archive is not None:
             self.archive = archive
         files, labels = self._get_data(mode, split)

python/paddle/audio/datasets/tess.py

Lines changed: 6 additions & 6 deletions
@@ -106,12 +106,12 @@ def __init__(
         archive: dict[str, str] | None = None,
         **kwargs: Any,
     ) -> None:
-        assert isinstance(n_folds, int) and (
-            n_folds >= 1
-        ), f'the n_folds should be integer and n_folds >= 1, but got {n_folds}'
-        assert split in range(
-            1, n_folds + 1
-        ), f'The selected split should be integer and should be 1 <= split <= {n_folds}, but got {split}'
+        assert isinstance(n_folds, int) and (n_folds >= 1), (
+            f'the n_folds should be integer and n_folds >= 1, but got {n_folds}'
+        )
+        assert split in range(1, n_folds + 1), (
+            f'The selected split should be integer and should be 1 <= split <= {n_folds}, but got {split}'
+        )
         if archive is not None:
             self.archive = archive
         files, labels = self._get_data(mode, n_folds, split)

python/paddle/audio/features/layers.py

Lines changed: 4 additions & 6 deletions
@@ -410,9 +410,9 @@ def __init__(
         dtype: str = 'float32',
     ) -> None:
         super().__init__()
-        assert (
-            n_mfcc <= n_mels
-        ), f'n_mfcc cannot be larger than n_mels: {n_mfcc} vs {n_mels}'
+        assert n_mfcc <= n_mels, (
+            f'n_mfcc cannot be larger than n_mels: {n_mfcc} vs {n_mels}'
+        )
         self._log_melspectrogram = LogMelSpectrogram(
             sr=sr,
             n_fft=n_fft,

@@ -446,7 +446,5 @@ def forward(self, x: Tensor) -> Tensor:
         log_mel_feature = self._log_melspectrogram(x)
         mfcc = paddle.matmul(
             log_mel_feature.transpose((0, 2, 1)), self.dct_matrix
-        ).transpose(
-            (0, 2, 1)
-        )  # (B, n_mels, L)
+        ).transpose((0, 2, 1))  # (B, n_mels, L)
         return mfcc

python/paddle/autograd/backward_mode.py

Lines changed: 15 additions & 15 deletions
@@ -102,38 +102,38 @@ def check_tensors(
         if isinstance(in_out_list, (list, tuple)):
             assert len(in_out_list) > 0, f"{name} cannot be empty"
             for each_var in in_out_list:
-                assert isinstance(
-                    each_var, paddle.Tensor
-                ), f"Elements of {name} must be paddle.Tensor"
+                assert isinstance(each_var, paddle.Tensor), (
+                    f"Elements of {name} must be paddle.Tensor"
+                )
             return in_out_list
         else:
-            assert isinstance(
-                in_out_list, paddle.Tensor
-            ), f"{name} must be Tensor or list of Tensor"
+            assert isinstance(in_out_list, paddle.Tensor), (
+                f"{name} must be Tensor or list of Tensor"
+            )
             return [in_out_list]

     tensors = check_tensors(tensors, "tensors")

-    assert len(tensors) == len(
-        set(tensors)
-    ), "The argument 'tensors' of paddle.autograd.backward contains duplicate paddle.Tensor object."
+    assert len(tensors) == len(set(tensors)), (
+        "The argument 'tensors' of paddle.autograd.backward contains duplicate paddle.Tensor object."
+    )

     if grad_tensors is not None:
         if not isinstance(grad_tensors, (list, tuple)):
             grad_tensors = [grad_tensors]

         for each_tensor in grad_tensors:
             if each_tensor is not None:
-                assert isinstance(
-                    each_tensor, paddle.Tensor
-                ), "The argument 'grad_tensors' of paddle.autograd.backward is invalid, it can be 'None', 'paddle.Tensor' or 'list[None/paddle.Tensor]'."
+                assert isinstance(each_tensor, paddle.Tensor), (
+                    "The argument 'grad_tensors' of paddle.autograd.backward is invalid, it can be 'None', 'paddle.Tensor' or 'list[None/paddle.Tensor]'."
+                )
     else:
         grad_tensors = []

     if len(grad_tensors) > 0:
-        assert len(tensors) == len(
-            grad_tensors
-        ), "The length of grad_tensors must be equal to tensors"
+        assert len(tensors) == len(grad_tensors), (
+            "The length of grad_tensors must be equal to tensors"
+        )

     assert isinstance(retain_graph, bool), "retain_graph must be True or False"

python/paddle/autograd/backward_utils.py

Lines changed: 3 additions & 1 deletion
@@ -652,7 +652,9 @@ def argument_to_value(while_op):

     assert len(while_op.as_while_op().block_arguments()) + 1 == len(
         while_op.operands_source()
-    ), "while op's block_arguments size + 1 should same to while op's operands_source size"
+    ), (
+        "while op's block_arguments size + 1 should same to while op's operands_source size"
+    )
     arg_to_value_map = ValueDict()
     value_to_arg_map = ValueDict()
     for arg, value in zip(
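This last hunk shows the remaining variant: when the asserted condition itself already spans multiple lines, only the message changes form, moving from a string appended after the closing parenthesis of the condition into its own parenthesized block. A minimal illustration with placeholder names (items and sources are made up, not identifiers from this file):

    # before: the message trails the closing parenthesis of the condition
    assert len(items) + 1 == len(
        sources
    ), "items size + 1 should match sources size"

    # after: ruff format wraps the message in its own parentheses
    assert len(items) + 1 == len(
        sources
    ), (
        "items size + 1 should match sources size"
    )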
