diff --git a/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc b/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc
index 8da1ef87814deb..5e2be03108294c 100644
--- a/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc
+++ b/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc
@@ -176,7 +176,7 @@ bool ComputeInterceptor::IsInputReady() {
       flag = flag && (ready_size_map.at(i) != 0);
     }
     if (flag) {
-      if (scope_id_to_finish_flag.empty()) {
+      if (scope_id_to_finish_flag.empty()) { // NOLINT
         cur_scope_id_ = i;
         return true;
       } else if (scope_id_to_finish_flag.find(i) !=
@@ -303,7 +303,7 @@ void ComputeInterceptor::RunOps() {
                           cur_scope_id_));
   }
 
-  if (!cores_.empty()) {
+  if (!cores_.empty()) { // NOLINT
     cores_[cur_scope_id_]->Run(/*feed_names=*/{}, /*need_fetch=*/false);
   } else {
     for (auto op : node_->ops()) {
diff --git a/paddle/fluid/distributed/fleet_executor/dist_model.cc b/paddle/fluid/distributed/fleet_executor/dist_model.cc
index a1fd38295319ed..4c19069b33705b 100644
--- a/paddle/fluid/distributed/fleet_executor/dist_model.cc
+++ b/paddle/fluid/distributed/fleet_executor/dist_model.cc
@@ -215,7 +215,7 @@ bool DistModel::Init() {
 }
 
 bool DistModel::PreparePlace() {
-  if (config_.place == "GPU") {
+  if (config_.place == "GPU") { // NOLINT
     place_ = paddle::platform::CUDAPlace(config_.device_id);
   } else if (config_.place == "CPU") {
     place_ = paddle::platform::CPUPlace();
diff --git a/paddle/fluid/eager/custom_operator/custom_operator_utils.cc b/paddle/fluid/eager/custom_operator/custom_operator_utils.cc
index b843e081c29bed..a9272053346a77 100644
--- a/paddle/fluid/eager/custom_operator/custom_operator_utils.cc
+++ b/paddle/fluid/eager/custom_operator/custom_operator_utils.cc
@@ -558,7 +558,7 @@ std::vector> RunInferShapeFn(
     out_dims = RunInferShapeFunc(
         ctx, infer_shape_func, inputs, outputs, inplace_map);
   } else {
-    if (is_forward) {
+    if (is_forward) { // NOLINT
       out_dims = RunDefaultInferShapeFunc(ctx, inputs, outputs, inplace_map);
     } else {
       out_dims =
@@ -592,7 +592,7 @@ std::vector> RunInferDtypeFn(
     out_dtypes = RunInferDtypeFunc(
        ctx, infer_dtype_func, inputs, outputs, inplace_map);
   } else {
-    if (is_forward) {
+    if (is_forward) { // NOLINT
      out_dtypes = RunDefaultInferDtypeFunc(ctx, inputs, outputs, inplace_map);
    } else {
      out_dtypes =
diff --git a/paddle/fluid/eager/grad_tensor_holder.cc b/paddle/fluid/eager/grad_tensor_holder.cc
index dac55f8f5462f1..47f41b5a4f93b7 100644
--- a/paddle/fluid/eager/grad_tensor_holder.cc
+++ b/paddle/fluid/eager/grad_tensor_holder.cc
@@ -79,7 +79,7 @@ void GradTensorHolder::CopyValueFromTensor(size_t slot_id,
     // Create new tensor->impl and fill it with 1.0
     if (t.defined()) {
       // Fill 1.0, use full to support complex, one_like don't support it.
-      if (t.is_dense_tensor()) {
+      if (t.is_dense_tensor()) { // NOLINT
         buffer_[slot_id][rank] =
             paddle::experimental::full(t.shape(), 1, t.dtype(), t.place());
       } else if (t.is_sparse_csr_tensor() || t.is_sparse_coo_tensor()) {
diff --git a/paddle/fluid/framework/data_feed.cc b/paddle/fluid/framework/data_feed.cc
index 470b4962ba34c0..2d1b9842e82a31 100644
--- a/paddle/fluid/framework/data_feed.cc
+++ b/paddle/fluid/framework/data_feed.cc
@@ -1813,7 +1813,7 @@ int PaddleBoxDataFeed::Next() {
     this->batch_size_ = index;
     VLOG(3) << "pv_batch_size_=" << this->batch_size_
             << ", thread_id=" << thread_id_;
-    if (this->batch_size_ != 0) {
+    if (this->batch_size_ != 0) { // NOLINT
       PutToFeedVec(pv_vec);
     } else {
       VLOG(3) << "finish reading, output_pv_channel_ size="
@@ -2113,7 +2113,7 @@ void SlotRecordInMemoryDataFeed::Init(const DataFeedDesc& data_feed_desc) {
   finish_init_ = true;
   input_type_ = data_feed_desc.input_type();
   size_t pos = pipe_command_.find(".so");
-  if (pos != std::string::npos) {
+  if (pos != std::string::npos) { // NOLINT
     pos = pipe_command_.rfind('|');
     if (pos == std::string::npos) {
       so_parser_name_ = pipe_command_;
@@ -2129,7 +2129,7 @@ void SlotRecordInMemoryDataFeed::Init(const DataFeedDesc& data_feed_desc) {
 #if defined(PADDLE_WITH_GPU_GRAPH) && defined(PADDLE_WITH_HETERPS)
   gpu_graph_data_generator_.SetConfig(data_feed_desc);
 #endif
-  if (gpu_graph_mode_) {
+  if (gpu_graph_mode_) { // NOLINT
     train_mode_ = true;
   } else {
     train_mode_ = data_feed_desc.graph_config().gpu_graph_training();
@@ -2780,7 +2780,7 @@ int SlotRecordInMemoryDataFeed::Next() {
     this->batch_size_ = batch.second;
     VLOG(3) << "batch_size_=" << this->batch_size_
             << ", thread_id=" << thread_id_;
-    if (this->batch_size_ != 0) {
+    if (this->batch_size_ != 0) { // NOLINT
       PutToFeedVec(&records_[batch.first], this->batch_size_);
     } else {
       VLOG(3) << "finish reading for heterps, batch size zero, thread_id="
diff --git a/paddle/fluid/framework/data_set.cc b/paddle/fluid/framework/data_set.cc
index f31cba2f1ecbbe..4c15c26359aeb5 100644
--- a/paddle/fluid/framework/data_set.cc
+++ b/paddle/fluid/framework/data_set.cc
@@ -966,7 +966,7 @@ void DatasetImpl::DynamicAdjustChannelNum(int channel_num,
     CHECK(output_channels_data_size == 0); // NOLINT
     cur_channel = 1;
   }
-  if (cur_channel == 0) {
+  if (cur_channel == 0) { // NOLINT
     origin_channels = &multi_output_channel_;
     other_channels = &multi_consume_channel_;
     origin_pv_channels = &multi_pv_output_;
@@ -1111,8 +1111,8 @@ void DatasetImpl::CreateReaders() {
     if (input_pv_channel_ != nullptr) {
       readers_[i]->SetInputPvChannel(input_pv_channel_.get());
     }
-    if (cur_channel_ == 0 &&
-        static_cast(channel_idx) < multi_output_channel_.size()) {
+    if (cur_channel_ == 0 && static_cast(channel_idx) <
+                                 multi_output_channel_.size()) { // NOLINT
       readers_[i]->SetOutputChannel(multi_output_channel_[channel_idx].get());
       readers_[i]->SetConsumeChannel(multi_consume_channel_[channel_idx].get());
       readers_[i]->SetOutputPvChannel(multi_pv_output_[channel_idx].get());
@@ -1722,7 +1722,7 @@ void MultiSlotDataset::PreprocessChannel(
     const std::set& slots_to_replace,
     std::unordered_set& index_slots) { // NOLINT
   int out_channel_size = 0;
-  if (cur_channel_ == 0) {
+  if (cur_channel_ == 0) { // NOLINT
     for (auto& item : multi_output_channel_) {
       out_channel_size += static_cast(item->Size());
     }
@@ -1757,7 +1757,7 @@ void MultiSlotDataset::PreprocessChannel(
     input_channel_->ReadAll(slots_shuffle_original_data_);
   } else {
     CHECK(out_channel_size > 0); // NOLINT
-    if (cur_channel_ == 0) {
+    if (cur_channel_ == 0) { // NOLINT
       for (auto& item : multi_output_channel_) {
         std::vector vec_data;
         item->Close();
@@ -1792,7 +1792,7 @@ void MultiSlotDataset::PreprocessChannel(
   } else {
     // if already have original data for slots shuffle, clear channel
     input_channel_->Clear();
-    if (cur_channel_ == 0) {
+    if (cur_channel_ == 0) { // NOLINT
       for (auto& item : multi_output_channel_) {
         if (!item) {
           continue;
@@ -1809,7 +1809,7 @@ void MultiSlotDataset::PreprocessChannel(
     }
   }
   int end_size = 0;
-  if (cur_channel_ == 0) {
+  if (cur_channel_ == 0) { // NOLINT
     for (auto& item : multi_output_channel_) {
       if (!item) {
         continue;
diff --git a/paddle/fluid/framework/details/nan_inf_utils_detail.cc b/paddle/fluid/framework/details/nan_inf_utils_detail.cc
index 551a10f1ccacd1..d18cee16b19a64 100644
--- a/paddle/fluid/framework/details/nan_inf_utils_detail.cc
+++ b/paddle/fluid/framework/details/nan_inf_utils_detail.cc
@@ -264,7 +264,7 @@ void CheckOpHasNanOrInf(const framework::OperatorBase& op,
 
   if (IsSkipOp(op)) return;
 
-  if (op_var_nan_inf_white_list().count(op.Type()) == 0) {
+  if (op_var_nan_inf_white_list().count(op.Type()) == 0) { // NOLINT
     // NOTE. vname may destruct in the end of this func.
     for (auto& vname : op.OutputVars(true)) {
       auto* var = exec_scope.FindVar(vname);
diff --git a/paddle/fluid/framework/dist_multi_trainer.cc b/paddle/fluid/framework/dist_multi_trainer.cc
index 6fd95267ef6ab3..119b6e569cef32 100644
--- a/paddle/fluid/framework/dist_multi_trainer.cc
+++ b/paddle/fluid/framework/dist_multi_trainer.cc
@@ -157,7 +157,7 @@ void DistMultiTrainer::Run() {
   std::vector> wait_futures;
   CHECK_EQ(static_cast(pool.size()), thread_num_);
   for (int i = 0; i < thread_num_; ++i) {
-    if (!debug_) {
+    if (!debug_) { // NOLINT
       wait_futures.emplace_back(
           pool[i]->Run([this, i]() { workers_[i]->TrainFiles(); }));
     } else {
diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc
index d935e9ea066bd3..fbc2565e755fa5 100644
--- a/paddle/fluid/framework/executor.cc
+++ b/paddle/fluid/framework/executor.cc
@@ -99,7 +99,7 @@ void Executor::CreateVariables(const ProgramDesc& pdesc,
     while (ancestor_scope->parent()) {
       ancestor_scope = ancestor_scope->parent();
     }
-    if (ancestor_scope != scope) {
+    if (ancestor_scope != scope) { // NOLINT
       for (auto& var : global_block.AllVars()) {
         if (var->Name() == framework::kEmptyVarName) {
           continue;
diff --git a/paddle/fluid/framework/heter_section_worker.cc b/paddle/fluid/framework/heter_section_worker.cc
index 24be8e04d8d507..938a67712fdf8e 100644
--- a/paddle/fluid/framework/heter_section_worker.cc
+++ b/paddle/fluid/framework/heter_section_worker.cc
@@ -126,7 +126,7 @@ void HeterSectionWorker::Initialize(const TrainerDesc& desc) {
 
   bool is_first_stage = (pipeline_stage_ == 0);
   bool is_last_stage = (pipeline_stage_ + 1 == num_pipeline_stages_);
-  if (is_first_stage) {
+  if (is_first_stage) { // NOLINT
     for (auto& op_desc : program_->Block(0).AllOps()) {
       auto op = std::move(OpRegistry::CreateOp(*op_desc));
       auto op_type = op->Type();
diff --git a/paddle/fluid/framework/infershape_utils.cc b/paddle/fluid/framework/infershape_utils.cc
index 88f0b496a8e4c6..f897a075353a73 100644
--- a/paddle/fluid/framework/infershape_utils.cc
+++ b/paddle/fluid/framework/infershape_utils.cc
@@ -658,7 +658,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
   if (attr_ptr && !is_attr_var) {
     auto& attr = *attr_ptr;
     switch (AttrTypeID(attr)) {
-      case framework::proto::AttrType::INTS:
+      case framework::proto::AttrType::INTS: // NOLINT
         infer_meta_context.EmplaceBackAttr(std::move(
phi::IntArray(PADDLE_GET_CONST(std::vector, attr)))); break; @@ -836,7 +836,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, attr_names[i])); } break; - case phi::AttributeType::FLOAT32S: + case phi::AttributeType::FLOAT32S: // NOLINT infer_meta_context.EmplaceBackAttr( PADDLE_GET_CONST(std::vector, attr)); break; diff --git a/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc index 44cb004fec1729..966f4ea14967d9 100644 --- a/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc +++ b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc @@ -134,7 +134,7 @@ class CoalesceGradTensorPass : public ir::Pass { auto &pinned_var_set = graph->GetOrInit(details::kPinnedVars); - if (IsUnifiedDtype(p_g_dense_grad, vars_info)) { + if (IsUnifiedDtype(p_g_dense_grad, vars_info)) { // NOLINT RecordGradients(p_g_dense_grad, vars_info, &pinned_var_set); CoalesceTensors(vars_info, p_g_dense_grad, &result); } else { diff --git a/paddle/fluid/framework/ir/generate_pass_tester.cc b/paddle/fluid/framework/ir/generate_pass_tester.cc index 760e1e8ce4ef83..58a3741a924aab 100644 --- a/paddle/fluid/framework/ir/generate_pass_tester.cc +++ b/paddle/fluid/framework/ir/generate_pass_tester.cc @@ -25,7 +25,7 @@ REGISTER_GENERATE_PASS(generate_fc_fuse) { VLOG(3) << "exec lambda func."; auto mul = OP_(mul)({{"X", x}, {"Y", y}}).Out("Out"); auto ewadd = OP_(elementwise_add)({{"X", mul}, {"Y", z}}).Out("Out"); - if (with_relu) { + if (with_relu) { // NOLINT return OP_(relu)({"X", ewadd}).Out("Out"); } else { return ewadd; diff --git a/paddle/fluid/framework/ir/identity_op_clean_pass.cc b/paddle/fluid/framework/ir/identity_op_clean_pass.cc index ab9df0ae4abee7..55316c1b823105 100644 --- a/paddle/fluid/framework/ir/identity_op_clean_pass.cc +++ b/paddle/fluid/framework/ir/identity_op_clean_pass.cc @@ -70,7 +70,7 @@ FindUselessOpPattern::FindUselessOpPattern(PDPattern* pattern, auto in_dtype = x->Op()->GetAttrIfExists("in_dtype"); auto out_dtype = x->Op()->GetAttrIfExists("out_dtype"); return in_dtype == out_dtype; - } else if (op_type == "c_identity") { + } else if (op_type == "c_identity") { // NOLINT return true; } else if (op_type == "assign") { const auto& in_name = x->Op()->Input("X")[0]; diff --git a/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc index 7a811aae50e236..e5d270a7d57b61 100644 --- a/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc @@ -161,7 +161,7 @@ class ComputePropagateScalesMkldnnPassTest : public testing::Test { begin(wh[i]), end(wh[i]), wh_tensor->mutable_data(phi::CPUPlace()) + i * wh[0].size()); - if (type == "gru") { + if (type == "gru") { // NOLINT ComputeGruWeightScales( graph, &scope, wx_name, wh_name, &var_quant_scales); } else { diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc index 5ffc2db337ada8..015bc6842788df 100644 --- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc @@ -61,7 +61,7 @@ void SetOp(ProgramDesc* prog, op->SetOutput("Output", {outputs[0]}); } else if (type == "pool2d" || type == "fused_transpose" || type == "reshape2" || type == "nearest_interp" || - type == "nearest_interp_v2") { + type == 
"nearest_interp_v2" || type == "dropout") { op->SetInput("X", {inputs[0]}); op->SetOutput("Out", {outputs[0]}); } else if (type == "slice") { @@ -70,9 +70,6 @@ void SetOp(ProgramDesc* prog, } else if (type == "split") { op->SetInput("X", {inputs[0]}); op->SetOutput("Out", {outputs}); - } else if (type == "dropout") { - op->SetInput("X", {inputs[0]}); - op->SetOutput("Out", {outputs[0]}); } else if (type == "fc") { op->SetInput("Input", {inputs[0]}); if (inputs.size() > 1) op->SetInput("W", {inputs[1]}); diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc index 5b1cd5fe87aed0..afbcf97ab1e29b 100644 --- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc @@ -41,7 +41,7 @@ void SetOp(ProgramDesc* prog, if (type != "dropout" && type != "quantize" && type != "dequantize") { op->SetAttr("mkldnn_data_type", mkldnn_data_type); } - if (type == "pool2d") { + if (type == "pool2d") { // NOLINT op->SetInput("X", {inputs[0]}); op->SetOutput("Out", {outputs[0]}); if (!scale.empty()) op->SetAttr("Scale_in", scale[0]); diff --git a/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass_tester.cc index fd6c7d20a95d2e..bd272635f15f1d 100644 --- a/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/int8_scale_calculation_mkldnn_pass_tester.cc @@ -70,14 +70,7 @@ ProgramDesc BuildProgramDesc(bool convWithExistingBias, } } - if (convWithExistingBias) { - SetOp(&prog, - "conv2d", - "conv", - std::vector({"c", "weights", "conv_bias"}), - std::vector({"f"}), - scale_weights); - } else if (scale_weights.size() > 1) { + if (convWithExistingBias || scale_weights.size() > 1) { SetOp(&prog, "conv2d", "conv", diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc index 295ef57cfdfead..cc20f521808718 100644 --- a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc @@ -933,7 +933,7 @@ bool ReduceSSAGraphBuilder::DealWithSpecialOp(ir::Graph *result, void ReduceSSAGraphBuilder::InsertPostprocessOps(ir::Graph *result) const { if (UseGPU()) { - if (strategy_.fuse_broadcast_ops_ == true) { + if (strategy_.fuse_broadcast_ops_ == true) { // NOLINT CreateFusedBroadcastOp(result, bcast_var_name_set_); } else { for (size_t dev_id = 0; dev_id < bcast_var_name_set_.size(); ++dev_id) { @@ -1193,7 +1193,7 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const { node->Op()->Type())); // Create fetch_barrier op handle to enable output on all devices. // **NOTE** fetch_barrier should output variables list same as recv op does. 
- if (node->Op()->Type() == "fetch_barrier") { + if (node->Op()->Type() == "fetch_barrier") { // NOLINT result->Get(kGraphOps).emplace_back( new details::FetchBarrierOpHandle( result->CreateOpNode(node->Op()), local_scopes_, places_)); @@ -1354,7 +1354,7 @@ void DistSSAGraphBuilder::InsertPostprocessOps(ir::Graph *result) const { strategy_.reduce_ == details::BuildStrategy::ReduceStrategy::kReduce) { return; } - if (strategy_.fuse_broadcast_ops_ == true) { + if (strategy_.fuse_broadcast_ops_ == true) { // NOLINT CreateFusedBroadcastOp(result, bcast_var_name_set_); } else { for (size_t dev_id = 0; dev_id < bcast_var_name_set_.size(); ++dev_id) { diff --git a/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc b/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc index 9ca4d4f482f085..c8cab82d950e0e 100644 --- a/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc +++ b/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc @@ -239,7 +239,7 @@ void TransferLayoutElimPass::ApplyImpl(ir::Graph *graph) const { FusePassBase::Init(pattern_name, graph); auto transfer_format = [&](std::string data_format) -> std::string { - if (data_format == "NCHW") { + if (data_format == "NCHW") { // NOLINT return "NHWC"; } else if (data_format == "NHWC") { return "NCHW"; diff --git a/paddle/fluid/framework/new_executor/garbage_collector/garbage_collector.cc b/paddle/fluid/framework/new_executor/garbage_collector/garbage_collector.cc index 166853e2b18daf..0d73e2d3fede93 100644 --- a/paddle/fluid/framework/new_executor/garbage_collector/garbage_collector.cc +++ b/paddle/fluid/framework/new_executor/garbage_collector/garbage_collector.cc @@ -32,14 +32,14 @@ CreateInterpreterCoreGarbageCollector( const platform::Place& place, const std::vector>& vec_instruction) { if (platform::is_gpu_place(place)) { - if (IsInterpretercoreFastGCEnabled()) { + if (IsInterpretercoreFastGCEnabled()) { // NOLINT return std::unique_ptr( new InterpreterCoreFastGarbageCollector()); } else { return std::unique_ptr( new InterpreterCoreEventGarbageCollector(vec_instruction)); } - } else if (platform::is_xpu_place(place)) { + } else if (platform::is_xpu_place(place)) { // NOLINT // Because there is no multi-stream on XPU device, fast GC can // be used. // Previously, XPU used no_event GC. But `Wait` in no_event GC @@ -62,14 +62,14 @@ CreateInterpreterCoreGarbageCollector( const platform::Place& place, const std::vector& vec_instruction) { if (platform::is_gpu_place(place)) { - if (IsInterpretercoreFastGCEnabled()) { + if (IsInterpretercoreFastGCEnabled()) { // NOLINT return std::unique_ptr( new InterpreterCoreFastGarbageCollector()); } else { return std::unique_ptr( new InterpreterCoreEventGarbageCollector(vec_instruction)); } - } else if (platform::is_xpu_place(place)) { + } else if (platform::is_xpu_place(place)) { // NOLINT // Because there is no multi-stream on XPU device, fast GC can // be used. // Previously, XPU used no_event GC. 
But `Wait` in no_event GC diff --git a/paddle/fluid/framework/new_executor/garbage_collector/no_event_garbage_collector.cc b/paddle/fluid/framework/new_executor/garbage_collector/no_event_garbage_collector.cc index 3b7ebc18f36da1..d236e740679dd8 100644 --- a/paddle/fluid/framework/new_executor/garbage_collector/no_event_garbage_collector.cc +++ b/paddle/fluid/framework/new_executor/garbage_collector/no_event_garbage_collector.cc @@ -49,9 +49,10 @@ void InterpreterCoreNoEventGarbageCollector::Add( if (var->IsType()) { Add(var->GetMutable()->MoveMemoryHolder(), ctx); - } else if (var->IsType< - operators::reader:: - OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) { + } else if ( + var->IsType< + operators::reader:: + OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) { // NOLINT // TODO(xiongkun03) in old executor, this type of variable is not support // eager deletion. so we just leave it here ? } else if (var->IsType()) { diff --git a/paddle/fluid/framework/new_executor/new_executor_defs.cc b/paddle/fluid/framework/new_executor/new_executor_defs.cc index bfa7542b65b75c..2a2393142dce37 100644 --- a/paddle/fluid/framework/new_executor/new_executor_defs.cc +++ b/paddle/fluid/framework/new_executor/new_executor_defs.cc @@ -96,7 +96,7 @@ void VariableScope::AddVar(const std::string& name, auto id = VarSize(); name2id_[name] = static_cast(id); vec_meta_info_.emplace_back(0, var_desc); - if (local_scope_ != nullptr) { + if (local_scope_ != nullptr) { // NOLINT var_list_.push_back(local_scope_->FindVar(name)); } else { var_list_.push_back(scope_->FindVar(name)); diff --git a/paddle/fluid/framework/new_executor/pir_interpreter.cc b/paddle/fluid/framework/new_executor/pir_interpreter.cc index 76bb419742d376..60a69f70b74585 100644 --- a/paddle/fluid/framework/new_executor/pir_interpreter.cc +++ b/paddle/fluid/framework/new_executor/pir_interpreter.cc @@ -701,7 +701,7 @@ void PirInterpreter::BuildInstruction() { continue; } } else if (op.dialect()->name() == "pd_op") { - if (op.isa()) { + if (op.isa()) { // NOLINT vec_instruction_base_.emplace_back(std::make_unique( op_idx++, place_, &op, value_exe_info_.get(), execution_config_)); sub_blocks_.insert( @@ -742,7 +742,7 @@ void PirInterpreter::BuildInstruction() { } VLOG(6) << "process " << op_name; - if (op.isa()) { + if (op.isa()) { // NOLINT CREATE_INSTR(LegacyKernelInstruction); } else { CREATE_INSTR(PhiKernelInstruction); diff --git a/paddle/fluid/framework/new_executor/standalone_executor.cc b/paddle/fluid/framework/new_executor/standalone_executor.cc index 2bb0a7197774ee..74e09a15d62464 100644 --- a/paddle/fluid/framework/new_executor/standalone_executor.cc +++ b/paddle/fluid/framework/new_executor/standalone_executor.cc @@ -57,7 +57,7 @@ StandaloneExecutor::StandaloneExecutor(const platform::Place& place, const std::string& job_type = job->Type(); std::shared_ptr program = nullptr; std::shared_ptr<::pir::Program> ir_program = nullptr; - if (FLAGS_enable_pir_api || FLAGS_enable_pir_in_executor) { + if (FLAGS_enable_pir_api || FLAGS_enable_pir_in_executor) { // NOLINT ir_program = plan_.IrProgram(job_type); } else { // NOTE (liuchenghao): std::make_shared will duplicate ProgramDesc object, diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index d731dd4b2086b4..edd7cf47037ceb 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -1754,7 +1754,8 @@ void OperatorWithKernel::RunImpl(const Scope& scope, std::string phi_kernel_name; if 
(phi::KernelFactory::Instance().HasCompatiblePhiKernel(type_)) { if (kernel_signature_ == nullptr || phi_kernel_ == nullptr) { - if (phi::KernelFactory::Instance().HasStructuredKernel(type_)) { + if (phi::KernelFactory::Instance().HasStructuredKernel( + type_)) { // NOLINT kernel_signature_ = std::make_unique(type_.c_str()); } else { @@ -1989,7 +1990,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope, 1, platform::EventRole::kInnerOp); if (need_prepare_data_) { - if (fallback_to_cpu) { + if (fallback_to_cpu) { // NOLINT transfer_scope = PrepareData(scope, phi_cpu_kernel_key, &transfered_inplace_vars, @@ -2278,7 +2279,7 @@ OpKernelType OperatorWithKernel::InnerGetExpectedKernelType( phi::KernelKey OperatorWithKernel::ChoosePhiKernel( const ExecutionContext& ctx) const { std::string phi_kernel_name; - if (phi::KernelFactory::Instance().HasStructuredKernel(type_)) { + if (phi::KernelFactory::Instance().HasStructuredKernel(type_)) { // NOLINT kernel_signature_ = std::make_unique(type_.c_str()); } else { kernel_signature_ = std::make_unique( @@ -3104,7 +3105,7 @@ static void SetDnnAttrIntoDeviceContext( case proto::AttrType::STRING: one_dnn_ctx->SetDnnAttr(attr_name, PADDLE_GET_CONST(std::string, attr)); break; - case proto::AttrType::INTS: + case proto::AttrType::INTS: // NOLINT one_dnn_ctx->SetDnnAttr(attr_name, PADDLE_GET_CONST(std::vector, attr)); break; @@ -3358,7 +3359,7 @@ void OperatorWithKernel::BuildPhiKernelContext( case phi::AttributeType::INT_ARRAY: if (attr_iter != Attrs().end()) { switch (AttrTypeID(attr_iter->second)) { - case proto::AttrType::INTS: + case proto::AttrType::INTS: // NOLINT phi_kernel_context->EmplaceBackAttr(std::move(phi::IntArray( PADDLE_GET_CONST(std::vector, attr_iter->second)))); break; @@ -3497,7 +3498,7 @@ void OperatorWithKernel::BuildPhiKernelContext( phi_kernel_context->EmplaceBackAttr( PADDLE_GET_CONST(int64_t, attr_iter->second)); break; - case phi::AttributeType::INT32S: + case phi::AttributeType::INT32S: // NOLINT phi_kernel_context->EmplaceBackAttr( PADDLE_GET_CONST(std::vector, attr_iter->second)); break; @@ -3536,7 +3537,7 @@ void OperatorWithKernel::BuildPhiKernelContext( attr_names[i])); } break; - case phi::AttributeType::FLOAT32S: + case phi::AttributeType::FLOAT32S: // NOLINT phi_kernel_context->EmplaceBackAttr( PADDLE_GET_CONST(std::vector, attr_iter->second)); break; diff --git a/paddle/fluid/framework/section_worker.cc b/paddle/fluid/framework/section_worker.cc index f88dbc409d1704..02f2bfb3c568d7 100644 --- a/paddle/fluid/framework/section_worker.cc +++ b/paddle/fluid/framework/section_worker.cc @@ -238,7 +238,7 @@ void SectionWorker::TrainFiles() { #endif } // max_memory_size >= 0 - if (schedule_mode_ == 0) { + if (schedule_mode_ == 0) { // NOLINT RunFThenB(gc); } else { Run1F1B(gc); diff --git a/paddle/fluid/imperative/amp_auto_cast.cc b/paddle/fluid/imperative/amp_auto_cast.cc index 6637ddbe9ee670..ee8f855adf6304 100644 --- a/paddle/fluid/imperative/amp_auto_cast.cc +++ b/paddle/fluid/imperative/amp_auto_cast.cc @@ -187,7 +187,7 @@ AmpOperators::GetMutableUnsupportedOps(const phi::DataType& data_type) { true, phi::errors::InvalidArgument( "The data_type mismatch. 
It should be FLOAT16 or BFLOAT16.")); - if (data_type == phi::DataType::FLOAT16) { + if (data_type == phi::DataType::FLOAT16) { // NOLINT return unsupported_fp16_ops_; } else { return unsupported_bf16_ops_; @@ -377,7 +377,8 @@ template NameVarMap AutoCastInputs(const std::string& op_type, const NameVarMap& ins) { NameVarMap new_ins(ins); - if (AmpOperators::Instance().GetMutableAllowOps()->count(op_type)) { + if (AmpOperators::Instance().GetMutableAllowOps()->count( + op_type)) { // NOLINT for (auto& pair : new_ins) { // NOTE(zhiqiu): batch_norm and layer_norm support only input x is fp16. if ((op_type == "batch_norm" || op_type == "layer_norm" || diff --git a/paddle/fluid/imperative/gradient_accumulator.cc b/paddle/fluid/imperative/gradient_accumulator.cc index f92755025bed70..8e4e7504eddaa6 100644 --- a/paddle/fluid/imperative/gradient_accumulator.cc +++ b/paddle/fluid/imperative/gradient_accumulator.cc @@ -518,7 +518,7 @@ void VariableWrapperAdd(std::shared_ptr var, static platform::Place GetPlaceOfVar( const std::shared_ptr& var) { platform::Place place; - if (var->Var().IsType()) { + if (var->Var().IsType()) { // NOLINT place = var->Var().Get().place(); } else if (var->Var().IsType()) { place = var->Var().Get().place(); @@ -735,7 +735,7 @@ void SortedGradientAccumulator::SumGrad(std::shared_ptr var, } #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - if (paddle::platform::is_gpu_place(place)) { + if (paddle::platform::is_gpu_place(place)) { // NOLINT // sum selected rows firstly for (auto& var_info : tmp_grad_vars_) { if (!var_info.var->Var().IsType()) { diff --git a/paddle/fluid/imperative/layout_autotune.cc b/paddle/fluid/imperative/layout_autotune.cc index 80e216563f07d4..2e0e5ea26da9a4 100644 --- a/paddle/fluid/imperative/layout_autotune.cc +++ b/paddle/fluid/imperative/layout_autotune.cc @@ -145,7 +145,7 @@ LayoutAutotuneGuard::LayoutAutotuneGuard(std::shared_ptr tracer, } LayoutAutotuneGuard::~LayoutAutotuneGuard() { - if (pre_layout_autotune_) { + if (pre_layout_autotune_) { // NOLINT tracer_->EnableLayoutAutoTune(); } else { tracer_->DisableLayoutAutoTune(); diff --git a/paddle/fluid/imperative/nccl_context.cc b/paddle/fluid/imperative/nccl_context.cc index d70d40808f915d..3ed9b97bfc362d 100644 --- a/paddle/fluid/imperative/nccl_context.cc +++ b/paddle/fluid/imperative/nccl_context.cc @@ -67,7 +67,7 @@ void NCCLParallelContext::Init() { std::vector nccl_ids; nccl_ids.resize(strategy_.nrings_); - if (strategy_.local_rank_ == 0) { + if (strategy_.local_rank_ == 0) { // NOLINT // generate the unique ncclid on the root worker for (auto &nccl_id : nccl_ids) { platform::dynload::ncclGetUniqueId(&nccl_id); diff --git a/paddle/fluid/imperative/partial_grad_engine.cc b/paddle/fluid/imperative/partial_grad_engine.cc index 3fd37d5ec36744..c107c9f80ef524 100644 --- a/paddle/fluid/imperative/partial_grad_engine.cc +++ b/paddle/fluid/imperative/partial_grad_engine.cc @@ -366,7 +366,7 @@ class GradientAccumulationInfo { if (!grad_var_) { grad_var_ = std::make_shared(true, mapped_grad_var_->Name()); grad_var_->SetOverriddenStopGradient(false); - if (sort_gradient_) { + if (sort_gradient_) { // NOLINT accumulator_ = std::make_unique( grad_var_->SharedVar().get()); } else { diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc index 8129ea244f4895..a60c81a4c22d9e 100644 --- a/paddle/fluid/imperative/prepared_operator.cc +++ b/paddle/fluid/imperative/prepared_operator.cc @@ -660,7 +660,7 @@ void PreparedOp::Run(const NameVarMap& ins, 
const NameVarMap& outs, const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs) { - if (run_phi_kernel_) { + if (run_phi_kernel_) { // NOLINT PreparedOpRunPtImpl(op_, kernel_key_, arg_map_fn_, @@ -692,7 +692,7 @@ void PreparedOp::Run(const NameVarMap& ins, const NameVarMap& outs, const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs) { - if (run_phi_kernel_) { + if (run_phi_kernel_) { // NOLINT PreparedOpRunPtImpl(op_, kernel_key_, arg_map_fn_, @@ -724,7 +724,7 @@ void PreparedOp::Run(const NameVarMap& ins, const NameVarMap& outs, const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs) { - if (run_phi_kernel_) { + if (run_phi_kernel_) { // NOLINT PreparedOpRunPtImpl(op_, kernel_key_, arg_map_fn_, diff --git a/paddle/fluid/imperative/reducer.cc b/paddle/fluid/imperative/reducer.cc index 461c2d3ff4bb87..5b8dc28d03111e 100644 --- a/paddle/fluid/imperative/reducer.cc +++ b/paddle/fluid/imperative/reducer.cc @@ -227,7 +227,7 @@ void SplitTensorsWithType( void Group::ConcatTensors(const platform::DeviceContext &context) { auto place = context.GetPlace(); - if (platform::is_gpu_place(place)) { + if (platform::is_gpu_place(place)) { // NOLINT #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) ConcatTensorsWithType(static_cast(context), dense_tensors_, @@ -263,7 +263,7 @@ void Group::ConcatTensors(const platform::DeviceContext &context) { void Group::SplitTensors(const platform::DeviceContext &context) { auto place = context.GetPlace(); - if (platform::is_gpu_place(place)) { + if (platform::is_gpu_place(place)) { // NOLINT #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) SplitTensorsWithType(static_cast(context), &dense_contents_, diff --git a/paddle/fluid/imperative/var_helper.cc b/paddle/fluid/imperative/var_helper.cc index bafea5a720d3a7..9561962935ffe4 100644 --- a/paddle/fluid/imperative/var_helper.cc +++ b/paddle/fluid/imperative/var_helper.cc @@ -50,7 +50,8 @@ void InitializeVariable(paddle::framework::Variable *var, var->GetMutable(); } else if (var_type == paddle::framework::proto::VarType::FEED_MINIBATCH) { var->GetMutable(); - } else if (var_type == paddle::framework::proto::VarType::FETCH_LIST) { + } else if (var_type == + paddle::framework::proto::VarType::FETCH_LIST) { // NOLINT var->GetMutable(); } else if (var_type == paddle::framework::proto::VarType::STEP_SCOPES) { var->GetMutable>(); diff --git a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc index ad95fe3091ce18..286fe961bed06e 100644 --- a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc @@ -271,7 +271,7 @@ void LiteSubgraphPass::SetUpEngine( Get>("nnadapter_model_cache_token"); lite_api::TargetType target_type = TARGET(kX86); - if (use_gpu) { + if (use_gpu) { // NOLINT target_type = TARGET(kCUDA); } else if (use_xpu) { target_type = TARGET(kXPU); @@ -417,13 +417,11 @@ void LiteSubgraphPass::ApplyImpl(framework::ir::Graph* graph) const { auto& lite_ops_filter = Get>("lite_ops_filter"); auto teller = [&lite_ops_filter](const Node* node) { - if (!node->IsOp() || !node->Op()) - return false; - else if (node->Op()->Type() == "feed" || node->Op()->Type() == "fetch") - return false; - else if (std::find(lite_ops_filter.begin(), - lite_ops_filter.end(), - node->Op()->Type()) != lite_ops_filter.end()) + if (!node->IsOp() || !node->Op() || node->Op()->Type() == "feed" || 
+ node->Op()->Type() == "fetch" || + std::find(lite_ops_filter.begin(), + lite_ops_filter.end(), + node->Op()->Type()) != lite_ops_filter.end()) return false; return inference::lite::OpTeller::Global().Tell(node->Op()->Type(), *node->Op()); diff --git a/paddle/fluid/inference/analysis/passes/ir_graph_build_pass.cc b/paddle/fluid/inference/analysis/passes/ir_graph_build_pass.cc index 8106dfbb9e6aae..ea97be8f90a606 100644 --- a/paddle/fluid/inference/analysis/passes/ir_graph_build_pass.cc +++ b/paddle/fluid/inference/analysis/passes/ir_graph_build_pass.cc @@ -121,7 +121,7 @@ std::unique_ptr IrGraphBuildPass::LoadModel( bool model_from_memory, bool skip_load_params) { framework::Executor exe(place); - if (!model_from_memory) { + if (!model_from_memory) { // NOLINT return Load(&exe, scope, program_path, params_path, !skip_load_params); } else { return LoadFromMemory(&exe, scope, program_path, params_path); diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 96faaae05d8c00..9e6c50443ec646 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -1170,7 +1170,7 @@ bool AnalysisPredictor::LoadConverterConfig( int64_t key = std::stoll(one_line[0]); for (size_t i = 1; i < one_line.size(); ++i) { int64_t val = std::stoll(one_line[i]); - if (ring_to_rank) { + if (ring_to_rank) { // NOLINT if (ring_id_to_ranks->find(key) == ring_id_to_ranks->end()) { ring_id_to_ranks->insert({key, std::vector()}); } @@ -1310,7 +1310,7 @@ bool AnalysisPredictor::Run(const std::vector &inputs, HookCollectShapeRangeInfo(); } - if (config_.new_executor_enabled()) { + if (config_.new_executor_enabled()) { // NOLINT executor_->RunInterpreterCore(); } else { // Run the inference program @@ -1383,7 +1383,7 @@ bool AnalysisPredictor::Run(const std::vector &inputs, HookCollectShapeRangeInfo(); } - if (config_.new_executor_enabled()) { + if (config_.new_executor_enabled()) { // NOLINT executor_->RunInterpreterCore(); } else { // Run the inference program @@ -1804,7 +1804,7 @@ void AnalysisPredictor::PrepareArgument() { if (deleted_passes.count(pass)) continue; pass_builder->AppendPass(pass); } - } else if (config_.use_xpu()) { + } else if (config_.use_xpu()) { // NOLINT // All passes support fp16. Not reset pass_builder. } else if (config_.use_custom_device()) { // All passes support fp16. Not reset pass_builder. 
@@ -1927,7 +1927,8 @@ void AnalysisPredictor::OptimizeInferenceProgram() { #else if (config_.mkldnn_enabled() || (config_.tensorrt_engine_enabled() && - config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8)) { + config_.tensorrt_precision_mode_ == + AnalysisConfig::Precision::kInt8)) { // NOLINT argument_->PartiallyRelease(); } else { argument_.reset(nullptr); @@ -2221,7 +2222,7 @@ std::unique_ptr AnalysisPredictor::GetInputTensor( const std::string &name) { framework::Scope *scope = nullptr; #if defined(PADDLE_WITH_DISTRIBUTE) && defined(PADDLE_WITH_PSCORE) - if (config_.dist_config().use_dist_model()) { + if (config_.dist_config().use_dist_model()) { // NOLINT scope = scope_.get(); } else { scope = executor_->GetScope(); @@ -2272,7 +2273,7 @@ std::unique_ptr AnalysisPredictor::GetOutputTensor( const std::string &name) { framework::Scope *scope; // NOLINT #if defined(PADDLE_WITH_DISTRIBUTE) && defined(PADDLE_WITH_PSCORE) - if (config_.dist_config().use_dist_model()) { + if (config_.dist_config().use_dist_model()) { // NOLINT scope = scope_.get(); } else { scope = executor_->GetScope(); @@ -2322,7 +2323,7 @@ std::unique_ptr AnalysisPredictor::GetOutputTensor( bool AnalysisPredictor::ZeroCopyRun(bool switch_stream) { inference::DisplayMemoryInfo(place_, "before run"); #if defined(PADDLE_WITH_DISTRIBUTE) && defined(PADDLE_WITH_PSCORE) - if (config_.dist_config().use_dist_model()) { + if (config_.dist_config().use_dist_model()) { // NOLINT VLOG(3) << "ZeroCopyRun will use the fleet executor."; fleet_exe_->Run(config_.dist_config().carrier_id()); return true; @@ -2381,7 +2382,7 @@ bool AnalysisPredictor::ZeroCopyRun(bool switch_stream) { } #endif - if (config_.new_executor_enabled()) { + if (config_.new_executor_enabled()) { // NOLINT executor_->RunInterpreterCore({}, false, switch_stream); } else { executor_->Run(); @@ -2647,7 +2648,7 @@ void AnalysisPredictor::StatisticShapeRangeInfo() { bool AnalysisPredictor::LoadProgramDesc() { // Initialize the inference program std::string filename; - if (!config_.model_dir().empty()) { + if (!config_.model_dir().empty()) { // NOLINT filename = config_.model_dir() + "/__model__"; } else if (!config_.prog_file().empty()) { // All parameters are saved in a single file. diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc index d886885edb5ba5..e00d5d129a19e4 100644 --- a/paddle/fluid/inference/api/api_impl.cc +++ b/paddle/fluid/inference/api/api_impl.cc @@ -101,7 +101,7 @@ bool NativePaddlePredictor::Init( executor_ = std::make_unique(place_); // Initialize the inference program - if (!config_.model_dir.empty()) { + if (!config_.model_dir.empty()) { // NOLINT // Parameters are saved in separate files sited in // the specified `dirname`. 
inference_program_ = paddle::inference::Load( @@ -286,7 +286,7 @@ bool NativePaddlePredictor::SetFeed(const std::vector &inputs, } input.set_lod(lod); int idx = -1; - if (config_.specify_input_name) { + if (config_.specify_input_name) { // NOLINT idx = static_cast(feed_names_[inputs[i].name]); } else { idx = PADDLE_GET_CONST(int, feeds_[i]->GetAttr("col")); diff --git a/paddle/fluid/inference/api/mkldnn_quantizer.cc b/paddle/fluid/inference/api/mkldnn_quantizer.cc index 54a198b4e2f590..6ddcd7b37ba0bd 100644 --- a/paddle/fluid/inference/api/mkldnn_quantizer.cc +++ b/paddle/fluid/inference/api/mkldnn_quantizer.cc @@ -78,7 +78,7 @@ void AnalysisPredictor::MkldnnQuantizer::CalculateScalesForRNNWeights( check_var(wh_var, wh_name); phi::DenseTensor* wx_tensor = wx_var->GetMutable(); phi::DenseTensor* wh_tensor = wh_var->GetMutable(); - if (gru) { + if (gru) { // NOLINT scales_[wx_name] = GetMaxChGRUScalingFactor(*wx_tensor, *wh_tensor); } else { scales_[wx_name] = GetMaxChLSTMScalingFactor(*wx_tensor, *wh_tensor); @@ -215,6 +215,7 @@ void AnalysisPredictor::MkldnnQuantizer::CalculateSingleScale( switch (rule) { case ScaleAlgo::MAX: + case ScaleAlgo::KL: scales_[var_name] = GetMaxScalingFactor(var_tensor, is_unsigned); break; case ScaleAlgo::MAX_CH: @@ -227,9 +228,6 @@ void AnalysisPredictor::MkldnnQuantizer::CalculateSingleScale( is_unsigned, /*is_transposed*/ true); break; - case ScaleAlgo::KL: - scales_[var_name] = GetKLScalingFactor(var_tensor, is_unsigned); - break; default: throw std::runtime_error( "MkldnnQuantizer: Unexpected ScaleAlgo specified."); diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc index 9295e5c643e5f0..f6cf5a7c62d711 100644 --- a/paddle/fluid/ir_adaptor/translator/op_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc @@ -1857,7 +1857,7 @@ struct FillConstant2FullTranscriber : public OpTranscriber { } } switch (place_type) { - case -1: + case -1: // NOLINT attribute_map["place"] = paddle::dialect::PlaceAttribute::get( ctx, phi::Place(phi::AllocationType::UNDEFINED)); break; diff --git a/paddle/fluid/jit/property.cc b/paddle/fluid/jit/property.cc index 687468df83a3dc..37c426bb5401b0 100644 --- a/paddle/fluid/jit/property.cc +++ b/paddle/fluid/jit/property.cc @@ -99,7 +99,7 @@ std::unordered_map> Property::Values() { case ValueProto::STRING: *var->GetMutable() = GetString(n); break; - case ValueProto::FLOATS: + case ValueProto::FLOATS: // NOLINT *var->GetMutable>() = GetFloats(n); break; case ValueProto::INTS: diff --git a/paddle/fluid/operators/reader/buffered_reader.cc b/paddle/fluid/operators/reader/buffered_reader.cc index b73ffe4319be78..cc5034c86f90fb 100644 --- a/paddle/fluid/operators/reader/buffered_reader.cc +++ b/paddle/fluid/operators/reader/buffered_reader.cc @@ -380,7 +380,7 @@ void BufferedReader::ReadNextImpl(paddle::framework::LoDTensorArray *out) { return; } - if (platform::is_gpu_place(place_)) { + if (platform::is_gpu_place(place_)) { // NOLINT *out = std::move(cuda_buffer_[i]); } else if (platform::is_xpu_place(place_)) { *out = std::move(xpu_buffer_[i]); diff --git a/paddle/fluid/pir/drr/src/ir_operation_factory.cc b/paddle/fluid/pir/drr/src/ir_operation_factory.cc index f792ccbdaff922..61c12c281e1398 100644 --- a/paddle/fluid/pir/drr/src/ir_operation_factory.cc +++ b/paddle/fluid/pir/drr/src/ir_operation_factory.cc @@ -81,7 +81,7 @@ pir::Attribute CreateIrAttribute(const std::any& obj) { std::any_cast(obj)); } else if (obj.type() == typeid(phi::Place)) { return 
IrAttrbuteCreator()(std::any_cast(obj)); - } else if (obj.type() == typeid(std::vector)) { + } else if (obj.type() == typeid(std::vector)) { // NOLINT return IrAttrbuteCreator>()( std::any_cast>(obj)); } else if (obj.type() == typeid(std::vector)) { diff --git a/paddle/fluid/platform/place.cc b/paddle/fluid/platform/place.cc index 118ba7d6b782c6..df66cc63e39863 100644 --- a/paddle/fluid/platform/place.cc +++ b/paddle/fluid/platform/place.cc @@ -62,8 +62,6 @@ bool is_same_place(const Place &p1, const Place &p2) { if (places_are_same_class(p1, p2)) { if (is_cpu_place(p1) || is_cuda_pinned_place(p1)) { return true; - } else if (is_xpu_place(p1) || is_ipu_place(p1) || is_custom_place(p1)) { - return p1 == p2; } else { return p1 == p2; } diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 816ae57ff4c068..2630b36d0e8ad9 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -200,8 +200,8 @@ RecordMemEvent::RecordMemEvent(const void *ptr, RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][3]; RecordMemEvent::has_initialized["gpu"][place.GetDeviceId()] = true; } else { - current_allocated = - DEVICE_MEMORY_STAT_CURRENT_VALUE(Allocated, place.GetDeviceId()); + current_allocated = DEVICE_MEMORY_STAT_CURRENT_VALUE( + Allocated, place.GetDeviceId()); // NOLINT peak_allocated = DEVICE_MEMORY_STAT_PEAK_VALUE(Allocated, place.GetDeviceId()); RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][0] = @@ -283,10 +283,10 @@ RecordMemEvent::RecordMemEvent(const void *ptr, RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][3]; RecordMemEvent::has_initialized["gpu"][place.GetDeviceId()] = true; } else { - current_reserved = - DEVICE_MEMORY_STAT_CURRENT_VALUE(Reserved, place.GetDeviceId()); - peak_reserved = - DEVICE_MEMORY_STAT_PEAK_VALUE(Reserved, place.GetDeviceId()); + current_reserved = DEVICE_MEMORY_STAT_CURRENT_VALUE( + Reserved, place.GetDeviceId()); // NOLINT + peak_reserved = DEVICE_MEMORY_STAT_PEAK_VALUE( + Reserved, place.GetDeviceId()); // NOLINT RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][1] = current_reserved; RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][3] = @@ -366,10 +366,10 @@ RecordMemEvent::RecordMemEvent(const void *ptr, RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][3]; RecordMemEvent::has_initialized["gpu"][place.GetDeviceId()] = true; } else { - current_allocated = - DEVICE_MEMORY_STAT_CURRENT_VALUE(Allocated, place.GetDeviceId()); - peak_allocated = - DEVICE_MEMORY_STAT_PEAK_VALUE(Allocated, place.GetDeviceId()); + current_allocated = DEVICE_MEMORY_STAT_CURRENT_VALUE( + Allocated, place.GetDeviceId()); // NOLINT + peak_allocated = DEVICE_MEMORY_STAT_PEAK_VALUE( + Allocated, place.GetDeviceId()); // NOLINT RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][0] = current_allocated; RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][2] = @@ -449,10 +449,10 @@ RecordMemEvent::RecordMemEvent(const void *ptr, RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][3]; RecordMemEvent::has_initialized["gpu"][place.GetDeviceId()] = true; } else { - current_reserved = - DEVICE_MEMORY_STAT_CURRENT_VALUE(Reserved, place.GetDeviceId()); - peak_reserved = - DEVICE_MEMORY_STAT_PEAK_VALUE(Reserved, place.GetDeviceId()); + current_reserved = DEVICE_MEMORY_STAT_CURRENT_VALUE( + Reserved, place.GetDeviceId()); // NOLINT + peak_reserved = DEVICE_MEMORY_STAT_PEAK_VALUE( + Reserved, place.GetDeviceId()); // NOLINT RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][1] = current_reserved; 
RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][3] = diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc index 3cb3ccf964ec81..00b6ba994233f4 100644 --- a/paddle/fluid/pybind/eager.cc +++ b/paddle/fluid/pybind/eager.cc @@ -442,7 +442,7 @@ Placements ParsePlacementsArgs( Placements placements; const std::string& placements_key = "placements"; - if (kw_order_map[placements_key] <= args_num) { + if (kw_order_map[placements_key] <= args_num) { // NOLINT placements = CastPyArg2VectorOfPlacement( PyTuple_GET_ITEM(args, kw_order_map[placements_key] - 1), kw_order_map[placements_key] - 1); diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc index 60472c25c71827..17d9d50d8ab26a 100644 --- a/paddle/fluid/pybind/eager_functions.cc +++ b/paddle/fluid/pybind/eager_functions.cc @@ -644,7 +644,7 @@ PyObject* eager_api_run_custom_op(PyObject* self, } else if (attr_type_str == "std::string") { ctx.EmplaceBackAttr( CastPyArg2AttrString(obj, attr_start_idx + i)); // NOLINT - } else if (attr_type_str == "std::vector") { + } else if (attr_type_str == "std::vector") { // NOLINT ctx.EmplaceBackAttr(CastPyArg2VectorOfInt(obj, attr_start_idx + i)); } else if (attr_type_str == "std::vector") { ctx.EmplaceBackAttr(CastPyArg2VectorOfFloat(obj, attr_start_idx + i)); diff --git a/paddle/fluid/pybind/eager_math_op_patch.cc b/paddle/fluid/pybind/eager_math_op_patch.cc index 21fd549cb0b2db..17b36e9237e78f 100644 --- a/paddle/fluid/pybind/eager_math_op_patch.cc +++ b/paddle/fluid/pybind/eager_math_op_patch.cc @@ -818,10 +818,10 @@ static PyObject* tensor__rdiv__method(TensorObject* self, bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - if (PyFloat_Check(other_obj)) { + if (PyFloat_Check(other_obj)) { // NOLINT other_double = CastPyArg2Double(other_obj, "__rdiv__", 0); has_other_double = true; - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { + } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { // NOLINT other_double = CastPyArg2Double(other_obj, "__rdiv__", 0); has_other_double = true; } diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc index d613c008b4958a..a2ce701f2aa952 100644 --- a/paddle/fluid/pybind/eager_utils.cc +++ b/paddle/fluid/pybind/eager_utils.cc @@ -647,7 +647,7 @@ std::vector> CastPyArg2VectorOfVectorOfSize_t( platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) { platform::Place place; - if (PyObject_TypeCheck(obj, g_place_pytype)) { + if (PyObject_TypeCheck(obj, g_place_pytype)) { // NOLINT place = ::pybind11::handle(obj).cast(); } else if (PyObject_TypeCheck(obj, g_cudaplace_pytype)) { place = ::pybind11::handle(obj).cast(); @@ -761,7 +761,8 @@ std::vector CastPyArg2VectorOfTensorBase(PyObject* obj, i)); } } - } else if (PyObject_TypeCheck(obj, g_framework_lodtensorarray_pytype)) { + } else if (PyObject_TypeCheck(obj, + g_framework_lodtensorarray_pytype)) { // NOLINT for (auto& tensor : (::pybind11::handle(obj).cast())) { result.emplace_back(tensor); @@ -788,7 +789,7 @@ using phi::distributed::Shard; Placements CastPyArg2VectorOfPlacement(PyObject* obj, ssize_t arg_pos) { Placements result; auto check_and_emplace = [&](PyObject* item, ssize_t i) { - if (PyObject_TypeCheck(item, g_placement_shard_pytype)) { + if (PyObject_TypeCheck(item, g_placement_shard_pytype)) { // NOLINT result.emplace_back( std::make_shared(::pybind11::handle(item).cast())); } else if (PyObject_TypeCheck(item, 
g_placement_replicated_pytype)) { diff --git a/paddle/fluid/pybind/parallel_executor.cc b/paddle/fluid/pybind/parallel_executor.cc index 9060e158c9ed9a..1b567fb51ba1e1 100644 --- a/paddle/fluid/pybind/parallel_executor.cc +++ b/paddle/fluid/pybind/parallel_executor.cc @@ -931,7 +931,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT .def_property( "memory_optimize", [](const BuildStrategy &self) -> py::object { - if (self.memory_optimize_) { + if (self.memory_optimize_) { // NOLINT return py::cast(self.memory_optimize_.get()); } else { return py::cast(nullptr); diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index f1d53f3f88750c..58ef7c4cc8bbbf 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -1240,7 +1240,7 @@ All parameter, weight, gradient are variables in Paddle. py::return_value_policy::reference) .def("get_bytes", [](Variable &self) { - if (self.IsType()) { + if (self.IsType()) { // NOLINT return py::bytes(*(self.GetMutable())); } else { return py::bytes( @@ -2229,7 +2229,7 @@ All parameter, weight, gradient are variables in Paddle. const std::string &var_name, size_t index) -> py::object { auto &var = framework::GetFetchVariable(scope, var_name, index); - if (data_is_lod_tensor(var)) { + if (data_is_lod_tensor(var)) { // NOLINT return py::cast(PADDLE_GET(phi::DenseTensor, var)); } else { return py::cast(PADDLE_GET(LoDTensorArray, var)); diff --git a/paddle/phi/core/compat/convert_utils.cc b/paddle/phi/core/compat/convert_utils.cc index d4c5de0dbe6dc9..37053cc0c09ec2 100644 --- a/paddle/phi/core/compat/convert_utils.cc +++ b/paddle/phi/core/compat/convert_utils.cc @@ -63,6 +63,7 @@ phi::Place TransToPhiPlace(const Backend& backend, bool set_device_id) { return phi::Place(); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) case phi::Backend::GPU: + case phi::Backend::GPUDNN: return phi::GPUPlace( set_device_id ? phi::backends::gpu::GetCurrentDeviceId() : 0); #endif @@ -70,11 +71,6 @@ phi::Place TransToPhiPlace(const Backend& backend, bool set_device_id) { case phi::Backend::ONEDNN: // NOLINT return phi::CPUPlace(); #endif -#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - case phi::Backend::GPUDNN: - return phi::GPUPlace( - set_device_id ? 
phi::backends::gpu::GetCurrentDeviceId() : 0); -#endif #if defined(PADDLE_WITH_XPU) case phi::Backend::XPU: return phi::XPUPlace( diff --git a/paddle/phi/core/kernel_registry.cc b/paddle/phi/core/kernel_registry.cc index fa9d531b6534d6..6ce1af187e9a3f 100644 --- a/paddle/phi/core/kernel_registry.cc +++ b/paddle/phi/core/kernel_registry.cc @@ -47,139 +47,159 @@ void SetKernelArgsDef(const std::vector& args_type, ) { #endif // do nothing, skip context arg now - } else if (arg_type == std::type_index(typeid(const DenseTensor&))) { + } else if (arg_type == + std::type_index(typeid(const DenseTensor&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); } else if (arg_type == - std::type_index(typeid(const paddle::optional&))) { + std::type_index( + typeid(const paddle::optional&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); } else if (arg_type == - std::type_index(typeid( - const paddle::optional>&))) { + std::type_index( + typeid(const paddle::optional< + std::vector>&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); } else if (arg_type == - std::type_index(typeid(const paddle::optional&))) { + std::type_index( + typeid(const paddle::optional&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid( - const std::vector&))) { + } else if (arg_type == + std::type_index( + typeid(const std::vector&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); } else if (arg_type == - std::type_index(typeid(const phi::ExtendedTensor&))) { + std::type_index(typeid(const phi::ExtendedTensor&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid( - const std::vector&))) { + } else if (arg_type == + std::type_index(typeid( + const std::vector&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid( - const std::vector&))) { + } else if (arg_type == + std::type_index(typeid( + const std::vector&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); } else if (arg_type == - std::type_index(typeid(const std::vector&))) { + std::type_index( + typeid(const std::vector&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid( - const std::vector&))) { + } else if (arg_type == + std::type_index( + typeid(const std::vector&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(const SelectedRows&))) { + } else if (arg_type == + std::type_index(typeid(const SelectedRows&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(const StringTensor&))) { + } else if (arg_type == + std::type_index(typeid(const StringTensor&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == 
std::type_index(typeid(const SparseCooTensor&))) { + } else if (arg_type == + std::type_index(typeid(const SparseCooTensor&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid( - paddle::optional))) { + } else if (arg_type == + std::type_index(typeid( + paddle::optional))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(const SparseCsrTensor&))) { + } else if (arg_type == + std::type_index(typeid(const SparseCsrTensor&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid( - paddle::optional))) { + } else if (arg_type == + std::type_index(typeid( + paddle::optional))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(const TensorArray&))) { + } else if (arg_type == + std::type_index(typeid(const TensorArray&))) { // NOLINT args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(DenseTensor*))) { + } else if (arg_type == std::type_index(typeid(DenseTensor*))) { // NOLINT args_def->AppendOutput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(std::vector))) { + } else if (arg_type == + std::type_index(typeid(std::vector))) { // NOLINT args_def->AppendOutput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(SelectedRows*))) { + } else if (arg_type == std::type_index(typeid(SelectedRows*))) { // NOLINT args_def->AppendOutput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(TensorArray*))) { + } else if (arg_type == std::type_index(typeid(TensorArray*))) { // NOLINT args_def->AppendOutput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(SparseCooTensor*))) { + } else if (arg_type == + std::type_index(typeid(SparseCooTensor*))) { // NOLINT args_def->AppendOutput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(SparseCsrTensor*))) { + } else if (arg_type == + std::type_index(typeid(SparseCsrTensor*))) { // NOLINT args_def->AppendOutput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(StringTensor*))) { + } else if (arg_type == std::type_index(typeid(StringTensor*))) { // NOLINT args_def->AppendOutput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid(ExtendedTensor*))) { + } else if (arg_type == + std::type_index(typeid(ExtendedTensor*))) { // NOLINT args_def->AppendOutput(default_key.backend(), default_tensor_layout, default_key.dtype(), diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc index 9bee9357e1ed13..392d25fbc2787e 100644 --- a/paddle/phi/infermeta/unary.cc +++ b/paddle/phi/infermeta/unary.cc @@ -236,7 +236,7 @@ void ArgMinMaxInferMeta(const MetaTensor& x, if (!config.is_runtime && axis.FromTensor()) { std::vector 
     if (flatten) {
-      if (keepdims) {
+      if (keepdims) { // NOLINT
         vec = std::vector<int64_t>(x.dims().size(), -1);
       } else {
         vec = {};
       }
@@ -307,7 +307,7 @@ void ArgMinMaxInferMeta(const MetaTensor& x,
     std::vector<int64_t> vec;
     if (flatten) {
-      if (keepdims) {
+      if (keepdims) { // NOLINT
         vec = std::vector<int64_t>(x.dims().size(), 1);
       } else {
         vec = {};
       }
@@ -3989,7 +3989,8 @@ void SplitInferMeta(const MetaTensor& x,
   if ((sections.FromTensor() && !config.is_runtime) || axis_value == -1 ||
       (axis_value >= 0 && x.dims().at(axis_value) <= 0)) {
     std::vector<phi::DDim> out_dims;
-    if ((sections.FromTensor() && !config.is_runtime) || axis_value == -1) {
+    if ((sections.FromTensor() && !config.is_runtime) ||
+        axis_value == -1) { // NOLINT
       out_dims = std::vector<phi::DDim>(
           sections_data.size(),
           common::make_ddim(std::vector<int>(x.dims().size(), -1)));
@@ -4081,7 +4082,7 @@ void SplitWithNumInferMeta(const MetaTensor& x,
   // fill out dims with -1
   if (axis_value == -1 || (axis_value >= 0 && x.dims().at(axis_value) <= 0)) {
     std::vector<phi::DDim> out_dims;
-    if (axis_value == -1) {
+    if (axis_value == -1) { // NOLINT
       out_dims = std::vector<phi::DDim>(
           num, common::make_ddim(std::vector<int>(x.dims().size(), -1)));
     } else {
@@ -5370,7 +5371,7 @@ void WeightQuantizeInferMeta(const MetaTensor& x,
   }
   std::vector<int64_t> dim_out;
-  if (algo == "weight_only_int8" || algo == "llm.int8") {
+  if (algo == "weight_only_int8" || algo == "llm.int8") { // NOLINT
     dim_out = std::vector<int64_t>({x_dims[1], x_dims[0]});
   } else if (algo == "weight_only_int4") {
     dim_out = std::vector<int64_t>({x_dims[1] / 2, x_dims[0]});
diff --git a/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc b/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc
index 1bdf25dd4eb82d..e9c5ae6a39e4ab 100644
--- a/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc
@@ -611,7 +611,7 @@ void BatchNormDoubleGradKernel(
   EigenArrayMap<T> ddy_arr(
       ctx.template Alloc<T>(&transformed_ddy), C, sample_size);
   ddy_arr.setZero();
-  if (use_global_stats) {
+  if (use_global_stats) { // NOLINT
     // math: ddy = r * ddx * inv_var + ddbias +
     //           ddscale * (x - mean) * inv_var
     if (ddX) {
diff --git a/paddle/phi/kernels/cpu/batch_norm_kernel.cc b/paddle/phi/kernels/cpu/batch_norm_kernel.cc
index 39d53fec10a9fa..f6d5e97dc7245a 100644
--- a/paddle/phi/kernels/cpu/batch_norm_kernel.cc
+++ b/paddle/phi/kernels/cpu/batch_norm_kernel.cc
@@ -159,7 +159,7 @@ void BatchNormKernel(const Context& ctx,
   // use SavedMean and SavedVariance to do normalize
   Eigen::Array<T, Eigen::Dynamic, 1> inv_std(C);
-  if (global_stats) {
+  if (global_stats) { // NOLINT
     ConstEigenVectorArrayMap<T> var_arr(variance.data<T>(), C);
     inv_std = (var_arr + epsilon).sqrt().inverse();
   } else {
@@ -178,7 +178,7 @@ void BatchNormKernel(const Context& ctx,
   auto* Bias = bias.get_ptr();
   Eigen::Array<T, Eigen::Dynamic, 1> new_scale(C);
   Eigen::Array<T, Eigen::Dynamic, 1> new_bias(C);
-  if (Scale && Bias) {
+  if (Scale && Bias) { // NOLINT
     ConstEigenVectorArrayMap<T> scale_arr(Scale->data<T>(), C);
     ConstEigenVectorArrayMap<T> bias_arr(Bias->data<T>(), C);
     new_scale = inv_std * scale_arr;
diff --git a/paddle/phi/kernels/cpu/elementwise_divide_kernel.cc b/paddle/phi/kernels/cpu/elementwise_divide_kernel.cc
index b7fdefe023e73d..ed80148344e1f3 100644
--- a/paddle/phi/kernels/cpu/elementwise_divide_kernel.cc
+++ b/paddle/phi/kernels/cpu/elementwise_divide_kernel.cc
@@ -35,7 +35,7 @@ void DivideKernel(const Context& dev_ctx,
   } else {
     auto x_dims = x.dims();
     auto y_dims = y.dims();
-    if (x_dims.size() >= y_dims.size()) {
+    if (x_dims.size() >= y_dims.size()) { // NOLINT
       funcs::ElementwiseCompute<funcs::DivideFunctor<T>, T>(
           dev_ctx, x, y, funcs::DivideFunctor<T>(), out, -1);
     } else {
diff --git a/paddle/phi/kernels/cpu/rnn_grad_kernel.cc b/paddle/phi/kernels/cpu/rnn_grad_kernel.cc
index 3e0e4c7a3d7a5a..284f12381bd358 100644
--- a/paddle/phi/kernels/cpu/rnn_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/rnn_grad_kernel.cc
@@ -1311,7 +1311,7 @@ void RnnGradKernel(const Context& dev_ctx,
                 pre_state_grad,
                 weight_grad_list);
     // run gru
-  } else if (is_rnn_relu(mode)) {
+  } else if (is_rnn_relu(mode)) { // NOLINT
     gate_num = 1;
     RnnGradFunc, SingleGradLayer,
diff --git a/paddle/phi/kernels/cpu/rnn_kernel.cc b/paddle/phi/kernels/cpu/rnn_kernel.cc
index a0035c6db4a75d..5b594089793c8c 100644
--- a/paddle/phi/kernels/cpu/rnn_kernel.cc
+++ b/paddle/phi/kernels/cpu/rnn_kernel.cc
@@ -868,7 +868,7 @@ void RnnKernel(const Context& dev_ctx,
             is_test,
             seed,
             reserve);
-  } else if (is_rnn_relu(mode)) {
+  } else if (is_rnn_relu(mode)) { // NOLINT
     gate_num = 1;
     RnnFunc {
       int64_t h = static_cast<int64_t>(lod[i + 1] - lod[i]);
       auto in_e = EigenMatrix<T>::From(in_t, common::make_ddim({h, w}));
       auto out_e = EigenVector<T>::Flatten(out_t);
-      if (pooltype == "AVERAGE") {
+      if (pooltype == "AVERAGE") { // NOLINT
         out_e.device(place) = in_e.mean(Eigen::array<int, 1>({{0}}));
       } else if (pooltype == "SQRT") {
         out_e.device(place) = in_e.sum(Eigen::array<int, 1>({{0}})) /
diff --git a/paddle/phi/kernels/legacy/cpu/elementwise_kernel.cc b/paddle/phi/kernels/legacy/cpu/elementwise_kernel.cc
index dafbf2889277d1..84ebbf04fee11c 100644
--- a/paddle/phi/kernels/legacy/cpu/elementwise_kernel.cc
+++ b/paddle/phi/kernels/legacy/cpu/elementwise_kernel.cc
@@ -55,7 +55,7 @@ void RemainderRawKernel(const Context& dev_ctx,
   dev_ctx.template Alloc<T>(out);
   auto x_dims = x.dims();
   auto y_dims = y.dims();
-  if (x_dims.size() >= y_dims.size()) {
+  if (x_dims.size() >= y_dims.size()) { // NOLINT
     funcs::ElementwiseCompute<funcs::RemainderFunctor<T>, T>(
         dev_ctx, x, y, funcs::RemainderFunctor<T>(), out, axis);
   } else {
@@ -74,7 +74,7 @@ void FloorDivideRawKernel(const Context& dev_ctx,
   dev_ctx.template Alloc<T>(out);
   auto x_dims = x.dims();
   auto y_dims = y.dims();
-  if (x_dims.size() >= y_dims.size()) {
+  if (x_dims.size() >= y_dims.size()) { // NOLINT
     funcs::ElementwiseCompute<funcs::FloorDivideFunctor<T>, T>(
         dev_ctx, x, y, funcs::FloorDivideFunctor<T>(), out, axis);
   } else {
diff --git a/test/cpp/fluid/framework/details/fused_broadcast_op_handle_test.cc b/test/cpp/fluid/framework/details/fused_broadcast_op_handle_test.cc
index 786b857a80dcca..aee187d77f4843 100644
--- a/test/cpp/fluid/framework/details/fused_broadcast_op_handle_test.cc
+++ b/test/cpp/fluid/framework/details/fused_broadcast_op_handle_test.cc
@@ -56,7 +56,7 @@ struct TestFusedBroadcastOpHandle : TestBroadcastOpHandle {
     // create op handle node
     nodes_.emplace_back(
         ir::CreateNodeForTest("fused_broadcast", ir::Node::Type::kOperation));
-    if (use_device_ == p::kCUDA) {
+    if (use_device_ == p::kCUDA) { // NOLINT
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
       op_handle_ = new FusedBroadcastOpHandle(
           nodes_.back().get(), local_scopes_, place_list_, nccl_ctxs_.get());
diff --git a/test/cpp/imperative/test_gradient_accmulator.cc b/test/cpp/imperative/test_gradient_accmulator.cc
index 58fb2c54df7741..303c3589534e43 100644
--- a/test/cpp/imperative/test_gradient_accmulator.cc
+++ b/test/cpp/imperative/test_gradient_accmulator.cc
@@ -379,7 +379,7 @@ static framework::Variable RandomSelectedRows(framework::DDim dims,
 static std::unique_ptr<GradientAccumulator> CreateAccumulator(
     const std::shared_ptr<VariableWrapper>& var, bool sort_gradient) {
-  if (sort_gradient) {
+  if (sort_gradient) { // NOLINT
     return std::unique_ptr<GradientAccumulator>(
         new SortedGradientAccumulator(var.get()));
   } else {
@@ -403,7 +403,7 @@ static void TestGradientAccumulatorTestUnchangeInput(
   std::mt19937 engine(seed);
   auto create_var = [&](bool use_tensor) {
-    if (use_tensor) {
+    if (use_tensor) { // NOLINT
       return RandomTensor(dim, place);
     } else {
       return RandomSelectedRows(dim, place, dist(engine));