diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 64aa9927963414..443066ee963bfc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -73,7 +73,7 @@ repos:
               #  | paddle/.+
-              #  | python/paddle/[a-c].+
+              | python/paddle/[a-c].+
               #  | python/paddle/de.+
@@ -129,7 +129,7 @@ repos:
               | paddle/.+
-              | python/paddle/[a-c].+
+              #  | python/paddle/[a-c].+
               | python/paddle/de.+
diff --git a/python/paddle/amp/accuracy_compare.py b/python/paddle/amp/accuracy_compare.py
index f3f5e2564e3edf..15d82aa24883e6 100644
--- a/python/paddle/amp/accuracy_compare.py
+++ b/python/paddle/amp/accuracy_compare.py
@@ -149,9 +149,9 @@ def __init__(
         if fp32_tensor_info is not None and fp16_tensor_info is not None:
             # Check whether the op name and data are equal
             assert fp32_tensor_info.op_type == fp16_tensor_info.op_type
-            assert (
-                fp32_tensor_info.numel == fp16_tensor_info.numel
-            ), f"Error:\n\tFP32 Tensor Info:{fp32_tensor_info}\n\tFP16 Tensor Info:{fp16_tensor_info}"
+            assert fp32_tensor_info.numel == fp16_tensor_info.numel, (
+                f"Error:\n\tFP32 Tensor Info:{fp32_tensor_info}\n\tFP16 Tensor Info:{fp16_tensor_info}"
+            )
             # Fp16 divided by fp32
             self.fp32_div_fp16_max_value = self._div(
                 self.fp16_max_value, self.fp32_max_value
diff --git a/python/paddle/amp/auto_cast.py b/python/paddle/amp/auto_cast.py
index 5517881ff1dd9f..6cf9c4fee2a176 100644
--- a/python/paddle/amp/auto_cast.py
+++ b/python/paddle/amp/auto_cast.py
@@ -512,9 +512,9 @@ def amp_guard(
             paddle.float32
             >>> # doctest: -SKIP
     """
-    assert (
-        in_dynamic_or_pir_mode()
-    ), "We only support 'amp_guard' in dynamic or pir mode."
+    assert in_dynamic_or_pir_mode(), (
+        "We only support 'amp_guard' in dynamic or pir mode."
+    )
     amp_state = locals()
     global _g_amp_state_
diff --git a/python/paddle/apy/matmul_pass/matmul_variadic_tpl.py b/python/paddle/apy/matmul_pass/matmul_variadic_tpl.py
index 7f91e8ae242427..bab8686068c6b2 100644
--- a/python/paddle/apy/matmul_pass/matmul_variadic_tpl.py
+++ b/python/paddle/apy/matmul_pass/matmul_variadic_tpl.py
@@ -116,18 +116,14 @@ def compile(
         )
     def get_kernel_arg_runtime_getters(self):
-        all_kernel_arg_id_and_unique_names = (
-            self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
-        )
+        all_kernel_arg_id_and_unique_names = self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
         return ap.map(
             lambda pair: pair[0].runtime_getter,
             all_kernel_arg_id_and_unique_names,
         )
     def get_kernel_arg_types(self):
-        all_kernel_arg_id_and_unique_names = (
-            self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
-        )
+        all_kernel_arg_id_and_unique_names = self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
         return ap.map(
             lambda pair: pair[0].type, all_kernel_arg_id_and_unique_names
         )
@@ -151,9 +147,7 @@ def declare_epilogue_arguments_field(pair):
                 f"{type_name} {field_name}" if for_declare else f"{field_name}"
             )
-        all_kernel_arg_id_and_names = (
-            self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
-        )
+        all_kernel_arg_id_and_names = self.mut_kernel_arg_id_registry.all_kernel_arg_id2unique_name.items()
         return ", ".join(
             ap.map(
                 declare_epilogue_arguments_field, all_kernel_arg_id_and_names
             )
         )
@@ -171,9 +165,7 @@ def declare_epilogue_arguments_field(pair):
             type_name = self.dtype2type_name[dtype]
             return f"{type_name} {field_name};"
-        generated_kernel_arg_id_and_names = (
-            self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items()
-        )
+        generated_kernel_arg_id_and_names =
self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items() return f"\n{indent}".join( ap.map( declare_epilogue_arguments_field, @@ -190,9 +182,7 @@ def declare_epilogue_arguments_assign(pair): ) return f"{param_obj_name}.{field_name} = {var_name};" - generated_kernel_arg_id_and_names = ( - self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items() - ) + generated_kernel_arg_id_and_names = self.mut_kernel_arg_id_registry.generated_kernel_arg_id2unique_name.items() return f"\n{indent}".join( ap.map( declare_epilogue_arguments_assign, diff --git a/python/paddle/apy/matmul_pass/op_index_translator_util.py b/python/paddle/apy/matmul_pass/op_index_translator_util.py index 5aab66f06fb31c..8dce3bb6f5c35f 100644 --- a/python/paddle/apy/matmul_pass/op_index_translator_util.py +++ b/python/paddle/apy/matmul_pass/op_index_translator_util.py @@ -160,9 +160,9 @@ def get_dim_var_name(i): offset_expr = " + ".join( ap.map(lambda elts: " * ".join(elts), var_name_and_dims_list) ) - assert ( - len(self.output_properties[0].symbolic_shape) == 1 - ), "len(self.output_properties[0]) should be 1" + assert len(self.output_properties[0].symbolic_shape) == 1, ( + "len(self.output_properties[0]) should be 1" + ) return [ index_code_gen_value_util.IndexCodeGenValue([f"({offset_expr})"]) ] diff --git a/python/paddle/audio/datasets/esc50.py b/python/paddle/audio/datasets/esc50.py index 9980ad5895f888..46dbcda4fd6599 100644 --- a/python/paddle/audio/datasets/esc50.py +++ b/python/paddle/audio/datasets/esc50.py @@ -179,9 +179,9 @@ def __init__( archive: dict[str, str] | None = None, **kwargs: Any, ) -> None: - assert split in range( - 1, 6 - ), f'The selected split should be integer, and 1 <= split <= 5, but got {split}' + assert split in range(1, 6), ( + f'The selected split should be integer, and 1 <= split <= 5, but got {split}' + ) if archive is not None: self.archive = archive files, labels = self._get_data(mode, split) diff --git a/python/paddle/audio/datasets/tess.py b/python/paddle/audio/datasets/tess.py index def08bff92abcc..a3cff87cb1ada1 100644 --- a/python/paddle/audio/datasets/tess.py +++ b/python/paddle/audio/datasets/tess.py @@ -106,12 +106,12 @@ def __init__( archive: dict[str, str] | None = None, **kwargs: Any, ) -> None: - assert isinstance(n_folds, int) and ( - n_folds >= 1 - ), f'the n_folds should be integer and n_folds >= 1, but got {n_folds}' - assert split in range( - 1, n_folds + 1 - ), f'The selected split should be integer and should be 1 <= split <= {n_folds}, but got {split}' + assert isinstance(n_folds, int) and (n_folds >= 1), ( + f'the n_folds should be integer and n_folds >= 1, but got {n_folds}' + ) + assert split in range(1, n_folds + 1), ( + f'The selected split should be integer and should be 1 <= split <= {n_folds}, but got {split}' + ) if archive is not None: self.archive = archive files, labels = self._get_data(mode, n_folds, split) diff --git a/python/paddle/audio/features/layers.py b/python/paddle/audio/features/layers.py index cbd09e4498a121..25bf66112f7d84 100644 --- a/python/paddle/audio/features/layers.py +++ b/python/paddle/audio/features/layers.py @@ -410,9 +410,9 @@ def __init__( dtype: str = 'float32', ) -> None: super().__init__() - assert ( - n_mfcc <= n_mels - ), f'n_mfcc cannot be larger than n_mels: {n_mfcc} vs {n_mels}' + assert n_mfcc <= n_mels, ( + f'n_mfcc cannot be larger than n_mels: {n_mfcc} vs {n_mels}' + ) self._log_melspectrogram = LogMelSpectrogram( sr=sr, n_fft=n_fft, @@ -446,7 +446,5 @@ def forward(self, x: Tensor) -> 
Tensor: log_mel_feature = self._log_melspectrogram(x) mfcc = paddle.matmul( log_mel_feature.transpose((0, 2, 1)), self.dct_matrix - ).transpose( - (0, 2, 1) - ) # (B, n_mels, L) + ).transpose((0, 2, 1)) # (B, n_mels, L) return mfcc diff --git a/python/paddle/autograd/backward_mode.py b/python/paddle/autograd/backward_mode.py index fbeb073d9282e0..7d872f20ffa3f8 100644 --- a/python/paddle/autograd/backward_mode.py +++ b/python/paddle/autograd/backward_mode.py @@ -102,21 +102,21 @@ def check_tensors( if isinstance(in_out_list, (list, tuple)): assert len(in_out_list) > 0, f"{name} cannot be empty" for each_var in in_out_list: - assert isinstance( - each_var, paddle.Tensor - ), f"Elements of {name} must be paddle.Tensor" + assert isinstance(each_var, paddle.Tensor), ( + f"Elements of {name} must be paddle.Tensor" + ) return in_out_list else: - assert isinstance( - in_out_list, paddle.Tensor - ), f"{name} must be Tensor or list of Tensor" + assert isinstance(in_out_list, paddle.Tensor), ( + f"{name} must be Tensor or list of Tensor" + ) return [in_out_list] tensors = check_tensors(tensors, "tensors") - assert len(tensors) == len( - set(tensors) - ), "The argument 'tensors' of paddle.autograd.backward contains duplicate paddle.Tensor object." + assert len(tensors) == len(set(tensors)), ( + "The argument 'tensors' of paddle.autograd.backward contains duplicate paddle.Tensor object." + ) if grad_tensors is not None: if not isinstance(grad_tensors, (list, tuple)): @@ -124,16 +124,16 @@ def check_tensors( for each_tensor in grad_tensors: if each_tensor is not None: - assert isinstance( - each_tensor, paddle.Tensor - ), "The argument 'grad_tensors' of paddle.autograd.backward is invalid, it can be 'None', 'paddle.Tensor' or 'list[None/paddle.Tensor]'." + assert isinstance(each_tensor, paddle.Tensor), ( + "The argument 'grad_tensors' of paddle.autograd.backward is invalid, it can be 'None', 'paddle.Tensor' or 'list[None/paddle.Tensor]'." 
+ ) else: grad_tensors = [] if len(grad_tensors) > 0: - assert len(tensors) == len( - grad_tensors - ), "The length of grad_tensors must be equal to tensors" + assert len(tensors) == len(grad_tensors), ( + "The length of grad_tensors must be equal to tensors" + ) assert isinstance(retain_graph, bool), "retain_graph must be True or False" diff --git a/python/paddle/autograd/backward_utils.py b/python/paddle/autograd/backward_utils.py index 3af103cb22ae24..a89b2dfd7068cb 100644 --- a/python/paddle/autograd/backward_utils.py +++ b/python/paddle/autograd/backward_utils.py @@ -652,7 +652,9 @@ def argument_to_value(while_op): assert len(while_op.as_while_op().block_arguments()) + 1 == len( while_op.operands_source() - ), "while op's block_arguments size + 1 should same to while op's operands_source size" + ), ( + "while op's block_arguments size + 1 should same to while op's operands_source size" + ) arg_to_value_map = ValueDict() value_to_arg_map = ValueDict() for arg, value in zip( diff --git a/python/paddle/autograd/ir_backward.py b/python/paddle/autograd/ir_backward.py index 52679332966888..b0aef4c8dcd2a6 100644 --- a/python/paddle/autograd/ir_backward.py +++ b/python/paddle/autograd/ir_backward.py @@ -585,9 +585,9 @@ def update_input_grad_map(op, input_grads, all_inputs): i += 1 def update_if_double_grad_input_grad_map(input_grads, all_inputs): - assert len(input_grads) == len( - all_inputs - ), "input_grads should same to all_inputs" + assert len(input_grads) == len(all_inputs), ( + "input_grads should same to all_inputs" + ) for input, input_grad in zip(all_inputs, input_grads): if isinstance(input_grad, list): state.value_to_valuegrad[input].append(input_grad) diff --git a/python/paddle/base/backward.py b/python/paddle/base/backward.py index 473a161702cefb..9b696fd1fc99f2 100755 --- a/python/paddle/base/backward.py +++ b/python/paddle/base/backward.py @@ -1775,9 +1775,9 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): if block.desc.has_var_recursive(grad_var_name.encode()): # meet invalid sum variables, remove the invalid operand. new_inputs.append(grad_var_name) - assert ( - len(new_inputs) > 0 - ), "After remove invalid variables, sum op have no inputs." + assert len(new_inputs) > 0, ( + "After remove invalid variables, sum op have no inputs." + ) op_desc.set_input("X", new_inputs) new_vars = set() @@ -2105,9 +2105,7 @@ def append_backward( loss, parameter_list, no_grad_set ) - grad_op_id_to_fwd_op = ( - {} - ) # for cuda graph usage, recording the mapping between grad op original id to fwd op + grad_op_id_to_fwd_op = {} # for cuda graph usage, recording the mapping between grad op original id to fwd op check_type( loss, 'loss', framework.Variable, 'paddle.static.append_backward' diff --git a/python/paddle/base/compiler.py b/python/paddle/base/compiler.py index 359060464acae1..60ba8fc80ce8cc 100644 --- a/python/paddle/base/compiler.py +++ b/python/paddle/base/compiler.py @@ -205,9 +205,9 @@ def _with_inference_optimize(self, config): Returns: self """ - assert ( - not self._is_inference - ), "Already compiled with inference, cannot be recompiled." + assert not self._is_inference, ( + "Already compiled with inference, cannot be recompiled." + ) assert any( [ @@ -238,9 +238,9 @@ def _compile_data_parallel(self, places, use_device, scope=None): assert scope is not None, "" self._local_scopes = [] - assert isinstance( - places, (list, tuple) - ), f"Currently, The places type can only be list or tuple, but the input type is {type(places)}." 
+ assert isinstance(places, (list, tuple)), ( + f"Currently, The places type can only be list or tuple, but the input type is {type(places)}." + ) if self._build_strategy is None: self._build_strategy = BuildStrategy() @@ -255,9 +255,9 @@ def _compile_data_parallel(self, places, use_device, scope=None): ): tps = self._program._trainers_endpoints - assert self._build_strategy.num_trainers == len( - tps - ), "The trainer numbers is not equal to endpoint numbers." + assert self._build_strategy.num_trainers == len(tps), ( + "The trainer numbers is not equal to endpoint numbers." + ) self._build_strategy.trainers_endpoints = tps if self._program: @@ -270,9 +270,9 @@ def _compile_data_parallel(self, places, use_device, scope=None): ) if self._program is not None and self._program._enable_dgc: - assert ( - self._build_strategy.num_trainers * len(places) > 1 - ), "DGC is not available for single card training." + assert self._build_strategy.num_trainers * len(places) > 1, ( + "DGC is not available for single card training." + ) assert ( self._build_strategy.reduce_strategy == BuildStrategy.ReduceStrategy.AllReduce @@ -363,9 +363,9 @@ def _get_places(self, place, place_list): has_set_place = place_list is not None if has_set_place: for p in place_list: - assert ( - p._type() == place._type() - ), "Place type not match. You may set wrong type of places." + assert p._type() == place._type(), ( + "Place type not match. You may set wrong type of places." + ) else: if isinstance(place, core.CUDAPlace): place_list = cuda_places() diff --git a/python/paddle/base/core.py b/python/paddle/base/core.py index dc434c2337f96b..f0bd0b089c2839 100644 --- a/python/paddle/base/core.py +++ b/python/paddle/base/core.py @@ -552,36 +552,36 @@ def _set_prim_backward_blacklist(*args): def _set_prim_backward_enabled(value: bool, print_flag: bool = False): - assert isinstance( - value, bool - ), f"value should be bool, but got {type(value)}" + assert isinstance(value, bool), ( + f"value should be bool, but got {type(value)}" + ) __set_bwd_prim_enabled(value) if _prim_return_log() or print_flag: print("backward prim enabled: ", bool(_is_bwd_prim_enabled())) def _set_prim_forward_enabled(value: bool, print_flag: bool = False): - assert isinstance( - value, bool - ), f"value should be bool, but got {type(value)}" + assert isinstance(value, bool), ( + f"value should be bool, but got {type(value)}" + ) __set_fwd_prim_enabled(value) if _prim_return_log() or print_flag: print("forward prim enabled: ", bool(_is_fwd_prim_enabled())) def set_prim_eager_enabled(value: bool, print_flag: bool = False): - assert isinstance( - value, bool - ), f"value should be bool, but got {type(value)}" + assert isinstance(value, bool), ( + f"value should be bool, but got {type(value)}" + ) __set_eager_prim_enabled(value) if _prim_return_log() or print_flag: print("eager prim enabled: ", bool(_is_eager_prim_enabled())) def _set_prim_all_enabled(value: bool, print_flag: bool = False): - assert isinstance( - value, bool - ), f"value should be bool, but got {type(value)}" + assert isinstance(value, bool), ( + f"value should be bool, but got {type(value)}" + ) __set_all_prim_enabled(value) if _prim_return_log() or print_flag: print( diff --git a/python/paddle/base/dygraph/base.py b/python/paddle/base/dygraph/base.py index 1d2ff80247e640..354d089847e826 100644 --- a/python/paddle/base/dygraph/base.py +++ b/python/paddle/base/dygraph/base.py @@ -831,14 +831,14 @@ def check_in_out(in_out_list, name): if isinstance(in_out_list, (list, tuple)): assert 
len(in_out_list) > 0, f"{name} cannot be empty" for each_var in in_out_list: - assert isinstance( - each_var, core.eager.Tensor - ), f"Elements of {name} must be Tensor" + assert isinstance(each_var, core.eager.Tensor), ( + f"Elements of {name} must be Tensor" + ) return in_out_list else: - assert isinstance( - in_out_list, core.eager.Tensor - ), f"{name} must be Tensor or list of Tensor" + assert isinstance(in_out_list, core.eager.Tensor), ( + f"{name} must be Tensor or list of Tensor" + ) return [in_out_list] outputs = check_in_out(outputs, 'outputs') @@ -850,16 +850,16 @@ def check_in_out(in_out_list, name): for each_var in grad_outputs: if each_var is not None: - assert isinstance( - each_var, core.eager.Tensor - ), "grad_outputs must be None, a Variable or a list containing None or Variables" + assert isinstance(each_var, core.eager.Tensor), ( + "grad_outputs must be None, a Variable or a list containing None or Variables" + ) else: grad_outputs = [] if len(grad_outputs) > 0: - assert len(grad_outputs) == len( - outputs - ), "The length of grad_outputs must be equal to outputs" + assert len(grad_outputs) == len(outputs), ( + "The length of grad_outputs must be equal to outputs" + ) if no_grad_vars is None: no_grad_vars = [] @@ -868,9 +868,9 @@ def check_in_out(in_out_list, name): elif isinstance(no_grad_vars, (list, tuple, set)): no_grad_vars = list(no_grad_vars) for var in no_grad_vars: - assert isinstance( - var, core.eager.Tensor - ), "no_grad_vars can only contains Tensor" + assert isinstance(var, core.eager.Tensor), ( + "no_grad_vars can only contains Tensor" + ) else: raise AssertionError( "no_grad_vars must be None, Tensor or list/tuple/set of Tensors" @@ -881,9 +881,9 @@ def check_in_out(in_out_list, name): if retain_graph is None: retain_graph = create_graph - assert isinstance( - retain_graph, bool - ), "retain_graph must be None, True or False" + assert isinstance(retain_graph, bool), ( + "retain_graph must be None, True or False" + ) assert isinstance(allow_unused, bool), "allow_unused must be True or False" diff --git a/python/paddle/base/dygraph/math_op_patch.py b/python/paddle/base/dygraph/math_op_patch.py index 86239b0835bf3a..2da6c8d7dbf8da 100644 --- a/python/paddle/base/dygraph/math_op_patch.py +++ b/python/paddle/base/dygraph/math_op_patch.py @@ -190,9 +190,9 @@ def _abs_(var: Tensor) -> Tensor: def _complex_(var: Tensor) -> complex: numel = np.prod(var.shape) - assert ( - numel == 1 - ), "only one element variable can be converted to complex." + assert numel == 1, ( + "only one element variable can be converted to complex." + ) assert var._is_initialized(), "variable's tensor is not initialized" if not var.is_complex(): var = var.astype('complex64') @@ -200,9 +200,9 @@ def _complex_(var: Tensor) -> complex: def _float_(var: Tensor) -> float: numel = np.prod(var.shape) - assert ( - numel == 1 - ), "only one element variable can be converted to float." + assert numel == 1, ( + "only one element variable can be converted to float." + ) assert var._is_initialized(), "variable's tensor is not initialized" if ( var.dtype == core.VarDesc.VarType.BF16 @@ -244,9 +244,9 @@ def _len_(var: Tensor) -> int: def _index_(var: Tensor) -> int: numel = np.prod(var.shape) - assert ( - numel == 1 - ), "only one element variable can be converted to python index." + assert numel == 1, ( + "only one element variable can be converted to python index." 
+ ) assert var._is_initialized(), "variable's tensor is not initialized" if ( var.dtype == core.VarDesc.VarType.BF16 diff --git a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py index b61d751a0f7090..b70aef0771eb28 100644 --- a/python/paddle/base/dygraph/tensor_patch_methods.py +++ b/python/paddle/base/dygraph/tensor_patch_methods.py @@ -206,26 +206,26 @@ def set_value( """ if id(self) == id(value): return - assert isinstance( - value, (np.ndarray, paddle.Tensor, dict, str) - ), "Variable set_value function, arguments type only support Variable, numpy, Tensor, dict, string." + assert isinstance(value, (np.ndarray, paddle.Tensor, dict, str)), ( + "Variable set_value function, arguments type only support Variable, numpy, Tensor, dict, string." + ) if self.is_dist(): - assert isinstance( - value, (np.ndarray, paddle.Tensor) - ), "For set_value function of dist tensor, arguments type only support numpy or Tensor." + assert isinstance(value, (np.ndarray, paddle.Tensor)), ( + "For set_value function of dist tensor, arguments type only support numpy or Tensor." + ) if isinstance(value, (dict, str)): - assert len(self) == len( - value - ), f"Variable length not match, Variable [ {self.name} ] need tensor with length {len(self)} but load set tensor with length {len(value)}" + assert len(self) == len(value), ( + f"Variable length not match, Variable [ {self.name} ] need tensor with length {len(self)} but load set tensor with length {len(value)}" + ) if isinstance(value, dict): self.value().set_vocab(value) else: self.value().set_string_list(value) else: - assert self.shape == list( - value.shape - ), f"Variable Shape not match, Variable [ {self.name} ] need tensor with shape {self.shape} but load set tensor with shape {value.shape}" + assert self.shape == list(value.shape), ( + f"Variable Shape not match, Variable [ {self.name} ] need tensor with shape {self.shape} but load set tensor with shape {value.shape}" + ) if isinstance(value, paddle.Tensor): dtype = value.dtype @@ -234,9 +234,9 @@ def set_value( else: dtype = convert_np_dtype_to_dtype_(value.dtype) - assert ( - self.dtype == dtype - ), f"Variable dtype not match, Variable [ {self.name} ] need tensor with dtype {self.dtype} but load tensor with dtype {dtype}" + assert self.dtype == dtype, ( + f"Variable dtype not match, Variable [ {self.name} ] need tensor with dtype {self.dtype} but load tensor with dtype {dtype}" + ) # NOTE(wuweilong): self could be Tensor, the subsequent behavior are defined in different files # if self is Tensor, method value() return self that defined in this file, get_tensor() defined in eager_method.cc @@ -248,9 +248,14 @@ def set_value( ) # TODO: support reshard later - assert value.process_mesh == self.value().process_mesh or check_placements_equal( - value.placements, self.value().placements - ), f"process_mesh:{value.process_mesh} != {self.value().process_mesh} or placements:{value.placements} != {self.value().placements} not match" + assert ( + value.process_mesh == self.value().process_mesh + or check_placements_equal( + value.placements, self.value().placements + ) + ), ( + f"process_mesh:{value.process_mesh} != {self.value().process_mesh} or placements:{value.placements} != {self.value().placements} not match" + ) else: # calling set method bound for DistTensor value = paddle.distributed.shard_tensor( @@ -344,13 +349,13 @@ def backward( ) record_event.begin() if grad_tensor is not None: - assert isinstance( - grad_tensor, core.eager.Tensor - ), "The type 
of grad_tensor must be paddle.Tensor" + assert isinstance(grad_tensor, core.eager.Tensor), ( + "The type of grad_tensor must be paddle.Tensor" + ) - assert ( - grad_tensor.shape == self.shape - ), f"Tensor shape not match, Tensor of grad_tensor [ {grad_tensor.name} ] with shape {grad_tensor.shape} mismatch Tensor [ {self.name} ] with shape {self.shape}" + assert grad_tensor.shape == self.shape, ( + f"Tensor shape not match, Tensor of grad_tensor [ {grad_tensor.name} ] with shape {grad_tensor.shape} mismatch Tensor [ {self.name} ] with shape {self.shape}" + ) if grad_tensor is None: grad_tensor = [] @@ -643,9 +648,9 @@ def get_device_id(place: PlaceLike): if blocking is None: blocking = True else: - assert isinstance( - blocking, bool - ), "blocking value error, must be the True, False or None" + assert isinstance(blocking, bool), ( + "blocking value error, must be the True, False or None" + ) def transform(t, device, dtype, blocking): if device is None: @@ -996,9 +1001,9 @@ def block(self): def __nonzero__(self: Tensor) -> bool: # np.prod([]) -> np.float64, so use int numel = int(np.prod(self.shape)) - assert ( - numel == 1 - ), "When Variable is used as the condition of if/while , Variable can only contain one element." + assert numel == 1, ( + "When Variable is used as the condition of if/while , Variable can only contain one element." + ) # resolve the error issue in scenario of pipeline parallel # where some devices do not have this data, return True or False does not affect # the execution result in those devices, so currently we return False diff --git a/python/paddle/base/executor.py b/python/paddle/base/executor.py index 576e6d8783a7e5..4bcbf3979170f0 100755 --- a/python/paddle/base/executor.py +++ b/python/paddle/base/executor.py @@ -518,9 +518,9 @@ def _add_feed_fetch_ops( global_block, fetch_list, fetch_var_name, fetch_op ): for i, var in enumerate(fetch_list): - assert isinstance( - var, (Variable, str) - ), f"Wrong type for fetch_list[{i}]: {type(var)}" + assert isinstance(var, (Variable, str)), ( + f"Wrong type for fetch_list[{i}]: {type(var)}" + ) global_block.append_op( type=fetch_op, inputs={'X': [var]}, @@ -544,9 +544,9 @@ def _add_pir_fetch_ops(program, fetch_list, fetch_var_name): if need_fetch_info: with paddle.static.program_guard(program): for i, fetch_input in enumerate(need_fetch_info): - assert isinstance( - fetch_input, Value - ), f"Wrong type for fetch_list[{i}]: {type(fetch_input)}" + assert isinstance(fetch_input, Value), ( + f"Wrong type for fetch_list[{i}]: {type(fetch_input)}" + ) if is_startup_program: fetch_input = paddle._pir_ops.parameter(fetch_input.name) out = paddle._pir_ops.fetch( @@ -720,9 +720,9 @@ def _as_lodtensor(data, place, dtype=None): """ # NOTE(zhiqiu): convert python builtin, like float, int, and list, to numpy ndarray if not isinstance(data, np.ndarray): - assert ( - dtype is not None - ), 'The dtype should be given when feed data is not np.ndarray' + assert dtype is not None, ( + 'The dtype should be given when feed data is not np.ndarray' + ) dtype = convert_dtype(dtype) if np.isscalar(data): data = np.array(data).astype(dtype) @@ -2058,9 +2058,9 @@ def _run_impl( if hasattr(program, 'lr_scheduler'): from paddle.optimizer.lr import LRScheduler - assert isinstance( - program.lr_scheduler, LRScheduler - ), "must be LRScheduler" + assert isinstance(program.lr_scheduler, LRScheduler), ( + "must be LRScheduler" + ) lr_scheduler = program.lr_scheduler lr_value = lr_scheduler() lr_var = program.global_block().vars[lr_scheduler._var_name] @@ 
-2113,9 +2113,9 @@ def _run_impl( acp._auto_checkpoint(self, program) program._compile(scope, self.place) - assert ( - program._is_inference - ), f"Program must have _is_inference = True, but get {program._is_inference}" + assert program._is_inference, ( + f"Program must have _is_inference = True, but get {program._is_inference}" + ) return self._run_inference(program._executor, feed) def _run_pir_impl( @@ -2187,9 +2187,9 @@ def _run_pir_impl( if hasattr(program, 'lr_scheduler'): from paddle.optimizer.lr import LRScheduler - assert isinstance( - program.lr_scheduler, LRScheduler - ), "must be LRScheduler" + assert isinstance(program.lr_scheduler, LRScheduler), ( + "must be LRScheduler" + ) lr_scheduler = program.lr_scheduler lr_value = lr_scheduler() @@ -2822,9 +2822,9 @@ def _add_fetch_ops( global_block, fetch_list, fetch_var_name, fetch_op ): for i, var in enumerate(fetch_list): - assert isinstance( - var, (Variable, str) - ), f"Wrong type for fetch_list[{i}]: {type(var)}" + assert isinstance(var, (Variable, str)), ( + f"Wrong type for fetch_list[{i}]: {type(var)}" + ) global_block.append_op( type=fetch_op, inputs={'X': [var]}, diff --git a/python/paddle/base/framework.py b/python/paddle/base/framework.py index ac5ffbdf1b69ff..f3372b52310c63 100644 --- a/python/paddle/base/framework.py +++ b/python/paddle/base/framework.py @@ -706,9 +706,9 @@ def _dygraph_not_support_( func: Callable[_InputT, _RetT], ) -> Callable[_InputT, _RetT]: def __impl__(*args: _InputT.args, **kwargs: _InputT.kwargs) -> _RetT: - assert ( - not in_dygraph_mode() - ), f"We don't support {func.__name__} in dynamic graph mode" + assert not in_dygraph_mode(), ( + f"We don't support {func.__name__} in dynamic graph mode" + ) return func(*args, **kwargs) return __impl__ @@ -716,9 +716,9 @@ def __impl__(*args: _InputT.args, **kwargs: _InputT.kwargs) -> _RetT: def _dygraph_only_(func: Callable[_InputT, _RetT]) -> Callable[_InputT, _RetT]: def __impl__(*args: _InputT.args, **kwargs: _InputT.kwargs) -> _RetT: - assert ( - in_dygraph_mode() - ), f"We only support '{func.__name__}()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." + assert in_dygraph_mode(), ( + f"We only support '{func.__name__}()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." + ) return func(*args, **kwargs) return __impl__ @@ -730,9 +730,9 @@ def _non_static_only_( def __impl__(*args: _InputT.args, **kwargs: _InputT.kwargs) -> _RetT: from .dygraph.base import in_to_static_mode - assert ( - in_dygraph_mode() or in_to_static_mode() - ), f"We only support '{func.__name__}()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." + assert in_dygraph_mode() or in_to_static_mode(), ( + f"We only support '{func.__name__}()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." + ) return func(*args, **kwargs) return __impl__ @@ -740,9 +740,9 @@ def __impl__(*args: _InputT.args, **kwargs: _InputT.kwargs) -> _RetT: def _static_only_(func: Callable[_InputT, _RetT]) -> Callable[_InputT, _RetT]: def __impl__(*args: _InputT.args, **kwargs: _InputT.kwargs) -> _RetT: - assert ( - not in_dygraph_mode() - ), f"In PaddlePaddle 2.x, we turn on dynamic graph mode by default, and '{func.__name__}()' is only supported in static graph mode. So if you want to use this api, please call 'paddle.enable_static()' before this api to enter static graph mode." 
+ assert not in_dygraph_mode(), ( + f"In PaddlePaddle 2.x, we turn on dynamic graph mode by default, and '{func.__name__}()' is only supported in static graph mode. So if you want to use this api, please call 'paddle.enable_static()' before this api to enter static graph mode." + ) return func(*args, **kwargs) return __impl__ @@ -1890,7 +1890,9 @@ def detach(self): assert ( self.type == core.VarDesc.VarType.SELECTED_ROWS or self.type == core.VarDesc.VarType.DENSE_TENSOR - ), "only support a variable with SELECTED_ROWS or DENSE_TENSOR to be detached" + ), ( + "only support a variable with SELECTED_ROWS or DENSE_TENSOR to be detached" + ) with unique_name.guard(self.block.program._name_generator): output = self.block.create_var( @@ -3120,9 +3122,9 @@ def instance(cls): return cls._instance def __init__(self): - assert not hasattr( - self.__class__, "_instance" - ), "Please use `instance()` to get OpProtoHolder object!" + assert not hasattr(self.__class__, "_instance"), ( + "Please use `instance()` to get OpProtoHolder object!" + ) op_protos = get_all_op_protos() self.op_proto_map = {} for proto in op_protos: @@ -3362,9 +3364,9 @@ def find_name(var_list, name): if inputs is not None: for in_proto in proto.inputs: found = find_name(inputs, in_proto.name) - assert ( - found or in_proto.dispensable - ), f"Input {in_proto.name} not found" + assert found or in_proto.dispensable, ( + f"Input {in_proto.name} not found" + ) if found: in_args = inputs[in_proto.name] if not isinstance(in_args, (list, tuple)): @@ -3555,9 +3557,9 @@ def _to_readable_code(self, skip_op_callstack=True): ... outputs={"Out": [var]}) >>> print(new_op._to_readable_code()) """ - assert isinstance( - skip_op_callstack, bool - ), f"skip_op_callstack parameter's type is error, expect bool, received {type(skip_op_callstack)}" + assert isinstance(skip_op_callstack, bool), ( + f"skip_op_callstack parameter's type is error, expect bool, received {type(skip_op_callstack)}" + ) outputs_str = "{" for i in range(0, len(self.output_names)): outputs_str += f"{self.output_names[i]}=" @@ -3939,9 +3941,9 @@ def _var_attr(self, name): Variable: the Variable attribute. """ attr_type = self.desc.attr_type(name, True) - assert ( - attr_type == core.AttrType.VAR - ), f"Required type attr({name}) is Variable, but received {attr_type}" + assert attr_type == core.AttrType.VAR, ( + f"Required type attr({name}) is Variable, but received {attr_type}" + ) attr_var_name = self.desc.attr(name, True).name() return self.block._var_recursive(attr_var_name) @@ -3956,9 +3958,9 @@ def _vars_attr(self, name): Variables: the Variables attribute. """ attr_type = self.desc.attr_type(name, True) - assert ( - attr_type == core.AttrType.VARS - ), f"Required type attr({name}) is list[Variable], but received {attr_type}" + assert attr_type == core.AttrType.VARS, ( + f"Required type attr({name}) is list[Variable], but received {attr_type}" + ) attr_vars = [ self.block._var_recursive(var.name()) for var in self.desc.attr(name, True) @@ -4350,9 +4352,9 @@ def _to_readable_code(self, skip_op_callstack=True): ... 
outputs={"Out": [new_var]}) >>> print(cur_block._to_readable_code()) """ - assert isinstance( - skip_op_callstack, bool - ), f"skip_op_callstack parameter's type is error, expect bool, received {type(skip_op_callstack)}" + assert isinstance(skip_op_callstack, bool), ( + f"skip_op_callstack parameter's type is error, expect bool, received {type(skip_op_callstack)}" + ) block_str = f"{{ // block_idx:{self.idx} parent_idx:{self.parent_idx} forward_idx:{self.forward_block_idx} backward_idx:{self.backward_block_idx}\n" for var in list(self.vars.values()): block_str += f" {var._to_readable_code()}\n" @@ -5086,9 +5088,9 @@ def __init__(self, node): Args: node(core.Node): C++ Node. """ - assert isinstance( - node, core.Node - ), "node must be the instance of core.Node." + assert isinstance(node, core.Node), ( + "node must be the instance of core.Node." + ) self.node = node def name(self): @@ -5264,9 +5266,9 @@ def __init__(self, node): Args: node(core.Node): C++ Node. """ - assert ( - isinstance(node, core.Node) and node.is_var() - ), "node must be the instance of core.Node and it must be a variable node." + assert isinstance(node, core.Node) and node.is_var(), ( + "node must be the instance of core.Node and it must be a variable node." + ) super().__init__(node) self.node = node @@ -5277,9 +5279,9 @@ def set_shape(self, shape): Args: shape(list): shape to be set. """ - assert ( - self.node.var() is not None - ), "The node variable description can not be None." + assert self.node.var() is not None, ( + "The node variable description can not be None." + ) self.node.var().set_shape(shape) def persistable(self): @@ -5289,9 +5291,9 @@ def persistable(self): Returns: bool: indicate whether the variable is persistable. """ - assert ( - self.node.var() is not None - ), "The node variable description can not be None." + assert self.node.var() is not None, ( + "The node variable description can not be None." + ) return self.node.var().persistable() def type(self): @@ -5301,9 +5303,9 @@ def type(self): Returns: core.VarDesc.VarType: the variable type. """ - assert ( - self.node.var() is not None - ), "The node variable description can not be None." + assert self.node.var() is not None, ( + "The node variable description can not be None." + ) return self.node.var().type() def dtype(self): @@ -5313,9 +5315,9 @@ def dtype(self): Returns: core.VarDesc.VarType: the variable data type. """ - assert ( - self.node.var() is not None - ), "The node variable description can not be None." + assert self.node.var() is not None, ( + "The node variable description can not be None." + ) return self.node.var().dtype() def shape(self): @@ -5325,9 +5327,9 @@ def shape(self): Returns: list: the variable shape. """ - assert ( - self.node.var() is not None - ), "The node variable description can not be None." + assert self.node.var() is not None, ( + "The node variable description can not be None." + ) return self.node.var().shape() @property @@ -5363,9 +5365,9 @@ def __init__(self, node): Args: node(core.Node): C++ Node. """ - assert ( - isinstance(node, core.Node) and node.is_op() - ), "node must be the instance of core.Node and it must be a operator node." + assert isinstance(node, core.Node) and node.is_op(), ( + "node must be the instance of core.Node and it must be a operator node." + ) super().__init__(node) self.node = node @@ -5377,9 +5379,9 @@ def rename_input(self, old_input_name, new_input_name): old_input_name(str): the old input name. new_input_name(str): the new input name. 
""" - assert ( - self.node.op() is not None - ), "The node operator description can not be None." + assert self.node.op() is not None, ( + "The node operator description can not be None." + ) self.node.op()._rename_input(old_input_name, new_input_name) def rename_output(self, old_output_name, new_output_name): @@ -5390,9 +5392,9 @@ def rename_output(self, old_output_name, new_output_name): old_output_name(str): the old output name. new_output_name(str): the new output name. """ - assert ( - self.node.op() is not None - ), "The node operator description can not be None." + assert self.node.op() is not None, ( + "The node operator description can not be None." + ) self.node.op()._rename_output(old_output_name, new_output_name) def input(self, name): @@ -5405,9 +5407,9 @@ def input(self, name): Returns: list(str): the argument name list. """ - assert ( - self.node.op() is not None - ), "The node operator description can not be None." + assert self.node.op() is not None, ( + "The node operator description can not be None." + ) return self.node.op().input(name) def output(self, name): @@ -5420,9 +5422,9 @@ def output(self, name): Returns: list(str): the argument name list. """ - assert ( - self.node.op() is not None - ), "The node operator description can not be None." + assert self.node.op() is not None, ( + "The node operator description can not be None." + ) return self.node.op().output(name) def set_type(self, new_type): @@ -5432,9 +5434,9 @@ def set_type(self, new_type): Args: new_type(str): new operator type to be set. """ - assert ( - self.node.op() is not None - ), "The node operator description can not be None." + assert self.node.op() is not None, ( + "The node operator description can not be None." + ) return self.node.op().set_type(new_type) def set_attr(self, name, val): @@ -5451,9 +5453,9 @@ def _update_desc_attr(self, name, val): """ Update the value of the op desc's attribute by attribute's name. """ - assert ( - self.node.op() is not None - ), "The node operator description can not be None." + assert self.node.op() is not None, ( + "The node operator description can not be None." + ) desc = self.node.op() if isinstance(val, Variable): desc.set_var_attr(name, val.desc) @@ -5475,9 +5477,9 @@ def input_arg_names(self): Returns: list(str): input arguments' names of this op node. """ - assert ( - self.node.op() is not None - ), "The node operator description can not be None." + assert self.node.op() is not None, ( + "The node operator description can not be None." + ) return self.node.op().input_arg_names() def output_arg_names(self): @@ -5487,9 +5489,9 @@ def output_arg_names(self): Returns: list(str): output arguments' names of this op node. """ - assert ( - self.node.op() is not None - ), "The node operator description can not be None." + assert self.node.op() is not None, ( + "The node operator description can not be None." + ) return self.node.op().output_arg_names() @property @@ -5529,9 +5531,9 @@ def __init__(self, graph, for_test=False): graph(core.Graph): C++ Graph. for_test(bool): True for the test graph and false for the train graph. """ - assert isinstance( - graph, core.Graph - ), "graph must be the instance of core.Graph." + assert isinstance(graph, core.Graph), ( + "graph must be the instance of core.Graph." 
+ ) self.graph = graph self._for_test = for_test @@ -5719,7 +5721,9 @@ def update_input_link(self, old_input_node, new_input_node, op_node): old_input_node.node in self.graph.nodes() and new_input_node.node in self.graph.nodes() and op_node.node in self.graph.nodes() - ), "The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes." + ), ( + "The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes." + ) old_input_node.remove_output(op_node) op_node.remove_input(old_input_node) new_input_node.append_output(op_node) @@ -5739,7 +5743,9 @@ def update_output_link(self, old_output_node, new_output_node, op_node): old_output_node.node in self.graph.nodes() and new_output_node.node in self.graph.nodes() and op_node.node in self.graph.nodes() - ), "The three arguments(old_output_node &new_output_node &op_node) must be in the graph nodes." + ), ( + "The three arguments(old_output_node &new_output_node &op_node) must be in the graph nodes." + ) old_output_node.remove_input(op_node) op_node.remove_output(old_output_node) new_output_node.append_input(op_node) @@ -5754,12 +5760,12 @@ def link_to(self, node_in, node_out): node_in(IrNode): the input node. node_out(IrNode): the output node. """ - assert ( - node_in.node in self.graph.nodes() - ), f"node_in({node_in.node.name()}) must be in the graph nodes." - assert ( - node_out.node in self.graph.nodes() - ), f"node_out({node_out.node.name()}) must be in the graph nodes." + assert node_in.node in self.graph.nodes(), ( + f"node_in({node_in.node.name()}) must be in the graph nodes." + ) + assert node_out.node in self.graph.nodes(), ( + f"node_out({node_out.node.name()}) must be in the graph nodes." + ) node_in.append_output(node_out) node_out.append_input(node_in) @@ -5920,9 +5926,9 @@ def _find_node_by_name(self, nodes, node_name): for n in nodes: if n.name() == node_name: target_node = n - assert ( - target_node is not None - ), f"Cannot find the target node ({node_name})in the giving set." + assert target_node is not None, ( + f"Cannot find the target node ({node_name})in the giving set." + ) return target_node def _update_desc_attr(self, desc, name, val): @@ -6382,9 +6388,9 @@ def _to_readable_code(self, skip_op_callstack=True): ... outputs={"Out": [new_var]}) >>> print(cur_program._to_readable_code()) """ - assert isinstance( - skip_op_callstack, bool - ), f"skip_op_callstack parameter's type is error, expect bool, received {type(skip_op_callstack)}" + assert isinstance(skip_op_callstack, bool), ( + f"skip_op_callstack parameter's type is error, expect bool, received {type(skip_op_callstack)}" + ) program_str = "" for block in self.blocks: program_str += block._to_readable_code(skip_op_callstack) @@ -6423,12 +6429,12 @@ def to_string(self, throw_on_error, with_details=False): >>> print("program string without detail: {}".format(prog_string)) >>> print("program string with detail: {}".format(prog_string_with_details)) """ - assert isinstance( - throw_on_error, bool - ), f"The type of throw_on_error parameter is wrong, expected bool, but received {type(throw_on_error)}." - assert isinstance( - with_details, bool - ), f"The type of with_details parameter is wrong, expected bool, but received {type(with_details)}." + assert isinstance(throw_on_error, bool), ( + f"The type of throw_on_error parameter is wrong, expected bool, but received {type(throw_on_error)}." 
+ ) + assert isinstance(with_details, bool), ( + f"The type of with_details parameter is wrong, expected bool, but received {type(with_details)}." + ) if with_details: res_str = "" @@ -7814,9 +7820,9 @@ def set_init_func(self, obj): @dygraph_only def initialize(self): - assert ( - self._init_func is not None - ), "Required self._init_func is not None, but received None." + assert self._init_func is not None, ( + "Required self._init_func is not None, but received None." + ) self._init_func(self, None) # clear function handle to release resource self._init_func = None @@ -7838,9 +7844,9 @@ def _create_init_op(self, block): """ Call init_op_creator function to create initializer operation in block. """ - assert ( - self._init_op_creator is not None - ), "Required self._init_op_creator is not None, but received None." + assert self._init_op_creator is not None, ( + "Required self._init_op_creator is not None, but received None." + ) self._init_op_creator(self, block) def __str__(self): @@ -8249,12 +8255,12 @@ def _cuda_graph_guard(cuda_graph_attr=None): cuda_graph_attr(str|None): The cuda graph attr with the format of: cuda_graph_capture_mode;memory_pool_id;cuda_graph_id """ - assert ( - not in_dygraph_mode() - ), "cuda_graph_guard only works under static graph mode" - assert ( - core.is_compiled_with_cuda() - ), "cuda_graph_guard context can be only used when Paddle is compiled with cuda" + assert not in_dygraph_mode(), ( + "cuda_graph_guard only works under static graph mode" + ) + assert core.is_compiled_with_cuda(), ( + "cuda_graph_guard context can be only used when Paddle is compiled with cuda" + ) pre_mode = _switch_cuda_graph_mode(cuda_graph_attr) try: yield diff --git a/python/paddle/base/incubate/checkpoint/auto_checkpoint.py b/python/paddle/base/incubate/checkpoint/auto_checkpoint.py index 6fb4ef6074c5f9..b98f850cdd8ac8 100644 --- a/python/paddle/base/incubate/checkpoint/auto_checkpoint.py +++ b/python/paddle/base/incubate/checkpoint/auto_checkpoint.py @@ -62,9 +62,9 @@ def _get_logger(log_level, name="auto_checkpoint"): def _thread_checker(): - assert ( - current_thread().name == "MainThread" - ), "auto checkpoint must run under main thread" + assert current_thread().name == "MainThread", ( + "auto checkpoint must run under main thread" + ) class AutoCheckpointChecker: @@ -282,9 +282,9 @@ def __init__( self._save_checkpoint_inter = checkpoint_inter else: self._save_checkpoint_inter = self._checker.save_checkpoint_inter - assert ( - self._save_checkpoint_inter >= 0 - ), f"checkpoint inter:{self._save_checkpoint_inter} must >=0" + assert self._save_checkpoint_inter >= 0, ( + f"checkpoint inter:{self._save_checkpoint_inter} must >=0" + ) self._last_checkpoint_time = time.time() self._load_cp_nos = None @@ -446,9 +446,9 @@ def next(self): if self._max_epoch_num < 0: self._max_epoch_num = sys.maxint - assert ( - self._epoch_no >= -1 - ), f"self._epoch_no:{self._epoch_no} must >=-1" + assert self._epoch_no >= -1, ( + f"self._epoch_no:{self._epoch_no} must >=-1" + ) self._last_checkpoint_time = time.time() start = self._epoch_no + 1 @@ -669,9 +669,9 @@ def _auto_checkpoint(exe, prog): ) if g_train_epoch_range.restored_from == CONST_CHECKPOINT: - assert ( - key in exe_status - ), f"when restored key:{key} must be in train_epoch_range:{g_train_epoch_range}" + assert key in exe_status, ( + f"when restored key:{key} must be in train_epoch_range:{g_train_epoch_range}" + ) t = None if key in exe_status: diff --git a/python/paddle/base/incubate/checkpoint/checkpoint_saver.py 
b/python/paddle/base/incubate/checkpoint/checkpoint_saver.py index fc20b6300126aa..dc9d1bee8230f4 100644 --- a/python/paddle/base/incubate/checkpoint/checkpoint_saver.py +++ b/python/paddle/base/incubate/checkpoint/checkpoint_saver.py @@ -94,9 +94,9 @@ def save_checkpoint( if not local_fs.is_exist(cache_path): local_fs.mkdirs(cache_path) else: - assert local_fs.is_dir( - cache_path - ), f"cache path:{cache_path} must be a directory" + assert local_fs.is_dir(cache_path), ( + f"cache path:{cache_path} must be a directory" + ) saved_path = cache_path diff --git a/python/paddle/base/lod_tensor.py b/python/paddle/base/lod_tensor.py index edbd935670b3bf..8b4ddcdc8052d4 100644 --- a/python/paddle/base/lod_tensor.py +++ b/python/paddle/base/lod_tensor.py @@ -84,9 +84,9 @@ def create_lod_tensor(data, recursive_seq_lens, place): new_recursive_seq_lens.append(len(seq)) converter.feed(seq) - assert [ - new_recursive_seq_lens - ] == recursive_seq_lens, "data and recursive_seq_lens do not match" + assert [new_recursive_seq_lens] == recursive_seq_lens, ( + "data and recursive_seq_lens do not match" + ) arr = np.array(converter.data) diff --git a/python/paddle/base/reader.py b/python/paddle/base/reader.py index 21637a93ca9fa5..501046c3d3120a 100644 --- a/python/paddle/base/reader.py +++ b/python/paddle/base/reader.py @@ -655,9 +655,9 @@ def _reset(self): def __iter__(self): assert self.iterable, "DataLoader is not iterable" - assert ( - self._batch_reader is not None - ), "Data source of DataLoader has not set yet" + assert self._batch_reader is not None, ( + "Data source of DataLoader has not set yet" + ) self._init_iterable() self._start() @@ -797,9 +797,9 @@ def set_batch_generator(self, reader, places=None): if places is None: places = _current_expected_place() self._places = _convert_places(places) - assert ( - len(self._places) == 1 - ), "Number of places must be 1 in imperative mode" + assert len(self._places) == 1, ( + "Number of places must be 1 in imperative mode" + ) return self @@ -972,9 +972,9 @@ def iterable(self): def __iter__(self): assert self.iterable, "DataLoader is not iterable" - assert ( - self._tensor_reader is not None - ), "Data source of DataLoader has not set yet" + assert self._tensor_reader is not None, ( + "Data source of DataLoader has not set yet" + ) self._init_iterable() self._start() @@ -995,15 +995,15 @@ def __next__(self): raise def start(self): - assert ( - not self._iterable - ), "start() cannot be called when DataLoader is iterable" + assert not self._iterable, ( + "start() cannot be called when DataLoader is iterable" + ) self._start() def reset(self): - assert ( - not self._iterable - ), "reset() cannot be called when DataLoader is iterable" + assert not self._iterable, ( + "reset() cannot be called when DataLoader is iterable" + ) self._reset() def _start(self): @@ -1118,9 +1118,9 @@ def set_batch_generator(self, reader, places=None): places = _get_paddle_place(places) self._tensor_reader = reader if self._iterable: - assert ( - places is not None - ), "Places cannot be None when DataLoader is iterable" + assert places is not None, ( + "Places cannot be None when DataLoader is iterable" + ) self._places = _convert_places(places) else: if places is not None: @@ -1623,9 +1623,9 @@ def __init__(self, dataset, places, drop_last): assert isinstance( dataset, paddle.distributed.fleet.dataset.DatasetBase ), "dataset must be type of DatasetBase" - assert ( - not in_dygraph_mode() - ), "DatasetLoader is not supported in dygraph mode yet" + assert not in_dygraph_mode(), ( 
+ "DatasetLoader is not supported in dygraph mode yet" + ) if isinstance(places, (list, tuple)): places = _get_paddle_place_list(places) else: @@ -1633,9 +1633,9 @@ def __init__(self, dataset, places, drop_last): thread_num = len(places) - assert ( - len(dataset.filelist) >= thread_num - ), f"Filelist number of dataset {len(dataset.filelist)} must be not less than place number {thread_num}" + assert len(dataset.filelist) >= thread_num, ( + f"Filelist number of dataset {len(dataset.filelist)} must be not less than place number {thread_num}" + ) if dataset.thread_num != 0 and dataset.thread_num != thread_num: logging.warning( diff --git a/python/paddle/base/variable_index.py b/python/paddle/base/variable_index.py index 8e2767917dab2f..242850860a5671 100644 --- a/python/paddle/base/variable_index.py +++ b/python/paddle/base/variable_index.py @@ -147,9 +147,9 @@ def _setitem_for_tensor_array(var, item, value): from .framework import Variable - assert ( - not paddle.in_dynamic_mode() - ), "setitem for tensor_array must be called in static graph mode." + assert not paddle.in_dynamic_mode(), ( + "setitem for tensor_array must be called in static graph mode." + ) if isinstance(item, (Variable, paddle.pir.Value, int)): from paddle.jit.dy2static.convert_operators import to_static_variable from paddle.tensor import array_write diff --git a/python/paddle/cinn/auto_schedule/cost_model/xgb_cost_model.py b/python/paddle/cinn/auto_schedule/cost_model/xgb_cost_model.py index de8796bb7c18ba..b0538b4b0b5bfc 100644 --- a/python/paddle/cinn/auto_schedule/cost_model/xgb_cost_model.py +++ b/python/paddle/cinn/auto_schedule/cost_model/xgb_cost_model.py @@ -78,9 +78,9 @@ def save(self, path): Args: path(str): path to save """ - assert ( - self.booster is not None - ), "Calling save on a XgbCostModel not been trained" + assert self.booster is not None, ( + "Calling save on a XgbCostModel not been trained" + ) self.booster.save_model(path) def load(self, path): diff --git a/python/paddle/cinn/compiler/compute_code_generator.py b/python/paddle/cinn/compiler/compute_code_generator.py index 381290015b3c21..a25f6eb8d55bdc 100644 --- a/python/paddle/cinn/compiler/compute_code_generator.py +++ b/python/paddle/cinn/compiler/compute_code_generator.py @@ -167,10 +167,10 @@ def visit_Assign(self, node): ): return "no compute" - assert ( - len(node.targets) == 1 - ), "Unsupported targets is a \ + assert len(node.targets) == 1, ( + "Unsupported targets is a \ list of nodes, like 'a = b = c'" + ) lhs = node.targets[0] # 1 parse RHS diff --git a/python/paddle/cinn/compiler/expr_executor.py b/python/paddle/cinn/compiler/expr_executor.py index d22163883e9f9e..0ced8208e90c7e 100644 --- a/python/paddle/cinn/compiler/expr_executor.py +++ b/python/paddle/cinn/compiler/expr_executor.py @@ -111,9 +111,9 @@ def eval_UnaryOp(self, fields): return AST2CINN[type(fields["op"])].make(*args) def eval_Compare(self, fields): - assert ( - len(fields["ops"]) == 1 - ), "Only binary comparison symbols are supported. Expressions such as '1 <= a < 10' are not supported." + assert len(fields["ops"]) == 1, ( + "Only binary comparison symbols are supported. Expressions such as '1 <= a < 10' are not supported." 
+ ) args = [ self.exec_expr(fields["left"]), self.exec_expr(fields["comparators"][0]), diff --git a/python/paddle/cinn/compiler/schedule_code_generator.py b/python/paddle/cinn/compiler/schedule_code_generator.py index 52fb65e060b730..af73caee15aa81 100644 --- a/python/paddle/cinn/compiler/schedule_code_generator.py +++ b/python/paddle/cinn/compiler/schedule_code_generator.py @@ -57,9 +57,9 @@ def parse(self): return self.cinn_llir_func def visit_For(self, node): - assert isinstance( - node.target, ast.Name - ), "Current only support range() to make ForLoop" + assert isinstance(node.target, ast.Name), ( + "Current only support range() to make ForLoop" + ) with self.variable_table: self.loop_var_stack.append(node.target) self.generic_visit(node)