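Nearly every hunk in this diff makes the same change: a trailing // NOLINT comment is appended to an existing line so that clang-tidy stops reporting on it. As a hedged aside (the diff itself never names the diagnostics being silenced, and the check name below is an illustrative assumption only): a bare NOLINT silences every clang-tidy finding on its own line, NOLINT(check-name) silences only the named check, and NOLINTNEXTLINE applies the suppression to the line that follows. A minimal C++ sketch:

// Sketch of clang-tidy suppression comments; the check name is an
// illustrative assumption, not taken from this pull request.
#include <string>

bool IsGpuPlace(const std::string& place) {
  // Bare form: silences every clang-tidy finding reported on this line.
  if (place == "GPU") {  // NOLINT
    return true;
  }
  // NOLINTNEXTLINE silences only the named check, and only on the
  // line that immediately follows it.
  // NOLINTNEXTLINE(readability-magic-numbers)
  return place.size() == 42;
}

All of the suppressions added in the hunks below use the bare form, so they mute every clang-tidy check on the annotated line, not just the one that motivated the change.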
@@ -176,7 +176,7 @@ bool ComputeInterceptor::IsInputReady() {
flag = flag && (ready_size_map.at(i) != 0);
}
if (flag) {
-if (scope_id_to_finish_flag.empty()) {
+if (scope_id_to_finish_flag.empty()) { // NOLINT
cur_scope_id_ = i;
return true;
} else if (scope_id_to_finish_flag.find(i) !=
@@ -303,7 +303,7 @@ void ComputeInterceptor::RunOps() {
cur_scope_id_));
}

-if (!cores_.empty()) {
+if (!cores_.empty()) { // NOLINT
cores_[cur_scope_id_]->Run(/*feed_names=*/{}, /*need_fetch=*/false);
} else {
for (auto op : node_->ops()) {

paddle/fluid/distributed/fleet_executor/dist_model.cc (2 changes: 1 addition & 1 deletion)

@@ -215,7 +215,7 @@ bool DistModel::Init() {
}

bool DistModel::PreparePlace() {
if (config_.place == "GPU") {
if (config_.place == "GPU") { // NOLINT
place_ = paddle::platform::CUDAPlace(config_.device_id);
} else if (config_.place == "CPU") {
place_ = paddle::platform::CPUPlace();

paddle/fluid/eager/custom_operator/custom_operator_utils.cc (4 changes: 2 additions & 2 deletions)

@@ -558,7 +558,7 @@ std::vector<std::vector<phi::DDim>> RunInferShapeFn(
out_dims =
RunInferShapeFunc(ctx, infer_shape_func, inputs, outputs, inplace_map);
} else {
-if (is_forward) {
+if (is_forward) { // NOLINT
out_dims = RunDefaultInferShapeFunc(ctx, inputs, outputs, inplace_map);
} else {
out_dims =
@@ -592,7 +592,7 @@ std::vector<std::vector<phi::DataType>> RunInferDtypeFn(
out_dtypes =
RunInferDtypeFunc(ctx, infer_dtype_func, inputs, outputs, inplace_map);
} else {
-if (is_forward) {
+if (is_forward) { // NOLINT
out_dtypes = RunDefaultInferDtypeFunc(ctx, inputs, outputs, inplace_map);
} else {
out_dtypes =

paddle/fluid/eager/grad_tensor_holder.cc (2 changes: 1 addition & 1 deletion)

@@ -79,7 +79,7 @@ void GradTensorHolder::CopyValueFromTensor(size_t slot_id,
// Create new tensor->impl and fill it with 1.0
if (t.defined()) {
// Fill 1.0, use full to support complex, one_like don't support it.
-if (t.is_dense_tensor()) {
+if (t.is_dense_tensor()) { // NOLINT
buffer_[slot_id][rank] =
paddle::experimental::full(t.shape(), 1, t.dtype(), t.place());
} else if (t.is_sparse_csr_tensor() || t.is_sparse_coo_tensor()) {

paddle/fluid/framework/data_feed.cc (8 changes: 4 additions & 4 deletions)

@@ -1813,7 +1813,7 @@ int PaddleBoxDataFeed::Next() {
this->batch_size_ = index;
VLOG(3) << "pv_batch_size_=" << this->batch_size_
<< ", thread_id=" << thread_id_;
-if (this->batch_size_ != 0) {
+if (this->batch_size_ != 0) { // NOLINT
PutToFeedVec(pv_vec);
} else {
VLOG(3) << "finish reading, output_pv_channel_ size="
@@ -2113,7 +2113,7 @@ void SlotRecordInMemoryDataFeed::Init(const DataFeedDesc& data_feed_desc) {
finish_init_ = true;
input_type_ = data_feed_desc.input_type();
size_t pos = pipe_command_.find(".so");
-if (pos != std::string::npos) {
+if (pos != std::string::npos) { // NOLINT
pos = pipe_command_.rfind('|');
if (pos == std::string::npos) {
so_parser_name_ = pipe_command_;
@@ -2129,7 +2129,7 @@ void SlotRecordInMemoryDataFeed::Init(const DataFeedDesc& data_feed_desc) {
#if defined(PADDLE_WITH_GPU_GRAPH) && defined(PADDLE_WITH_HETERPS)
gpu_graph_data_generator_.SetConfig(data_feed_desc);
#endif
-if (gpu_graph_mode_) {
+if (gpu_graph_mode_) { // NOLINT
train_mode_ = true;
} else {
train_mode_ = data_feed_desc.graph_config().gpu_graph_training();
@@ -2780,7 +2780,7 @@ int SlotRecordInMemoryDataFeed::Next() {
this->batch_size_ = batch.second;
VLOG(3) << "batch_size_=" << this->batch_size_
<< ", thread_id=" << thread_id_;
-if (this->batch_size_ != 0) {
+if (this->batch_size_ != 0) { // NOLINT
PutToFeedVec(&records_[batch.first], this->batch_size_);
} else {
VLOG(3) << "finish reading for heterps, batch size zero, thread_id="

paddle/fluid/framework/data_set.cc (14 changes: 7 additions & 7 deletions)

@@ -966,7 +966,7 @@ void DatasetImpl<T>::DynamicAdjustChannelNum(int channel_num,
CHECK(output_channels_data_size == 0); // NOLINT
cur_channel = 1;
}
-if (cur_channel == 0) {
+if (cur_channel == 0) { // NOLINT
origin_channels = &multi_output_channel_;
other_channels = &multi_consume_channel_;
origin_pv_channels = &multi_pv_output_;
@@ -1111,8 +1111,8 @@ void DatasetImpl<T>::CreateReaders() {
if (input_pv_channel_ != nullptr) {
readers_[i]->SetInputPvChannel(input_pv_channel_.get());
}
if (cur_channel_ == 0 &&
static_cast<size_t>(channel_idx) < multi_output_channel_.size()) {
if (cur_channel_ == 0 && static_cast<size_t>(channel_idx) <
multi_output_channel_.size()) { // NOLINT
readers_[i]->SetOutputChannel(multi_output_channel_[channel_idx].get());
readers_[i]->SetConsumeChannel(multi_consume_channel_[channel_idx].get());
readers_[i]->SetOutputPvChannel(multi_pv_output_[channel_idx].get());
@@ -1722,7 +1722,7 @@ void MultiSlotDataset::PreprocessChannel(
const std::set<std::string>& slots_to_replace,
std::unordered_set<uint16_t>& index_slots) { // NOLINT
int out_channel_size = 0;
-if (cur_channel_ == 0) {
+if (cur_channel_ == 0) { // NOLINT
for (auto& item : multi_output_channel_) {
out_channel_size += static_cast<int>(item->Size());
}
@@ -1757,7 +1757,7 @@ void MultiSlotDataset::PreprocessChannel(
input_channel_->ReadAll(slots_shuffle_original_data_);
} else {
CHECK(out_channel_size > 0); // NOLINT
-if (cur_channel_ == 0) {
+if (cur_channel_ == 0) { // NOLINT
for (auto& item : multi_output_channel_) {
std::vector<Record> vec_data;
item->Close();
@@ -1792,7 +1792,7 @@ void MultiSlotDataset::PreprocessChannel(
} else {
// if already have original data for slots shuffle, clear channel
input_channel_->Clear();
-if (cur_channel_ == 0) {
+if (cur_channel_ == 0) { // NOLINT
for (auto& item : multi_output_channel_) {
if (!item) {
continue;
@@ ... @@
}
}
int end_size = 0;
-if (cur_channel_ == 0) {
+if (cur_channel_ == 0) { // NOLINT
for (auto& item : multi_output_channel_) {
if (!item) {
continue;

paddle/fluid/framework/details/nan_inf_utils_detail.cc (2 changes: 1 addition & 1 deletion)

@@ -264,7 +264,7 @@ void CheckOpHasNanOrInf(const framework::OperatorBase& op,

if (IsSkipOp(op)) return;

-if (op_var_nan_inf_white_list().count(op.Type()) == 0) {
+if (op_var_nan_inf_white_list().count(op.Type()) == 0) { // NOLINT
// NOTE. vname may destruct in the end of this func.
for (auto& vname : op.OutputVars(true)) {
auto* var = exec_scope.FindVar(vname);

paddle/fluid/framework/dist_multi_trainer.cc (2 changes: 1 addition & 1 deletion)

@@ -157,7 +157,7 @@ void DistMultiTrainer::Run() {
std::vector<std::future<void>> wait_futures;
CHECK_EQ(static_cast<int>(pool.size()), thread_num_);
for (int i = 0; i < thread_num_; ++i) {
-if (!debug_) {
+if (!debug_) { // NOLINT
wait_futures.emplace_back(
pool[i]->Run([this, i]() { workers_[i]->TrainFiles(); }));
} else {

paddle/fluid/framework/executor.cc (2 changes: 1 addition & 1 deletion)

@@ -99,7 +99,7 @@ void Executor::CreateVariables(const ProgramDesc& pdesc,
while (ancestor_scope->parent()) {
ancestor_scope = ancestor_scope->parent();
}
-if (ancestor_scope != scope) {
+if (ancestor_scope != scope) { // NOLINT
for (auto& var : global_block.AllVars()) {
if (var->Name() == framework::kEmptyVarName) {
continue;

paddle/fluid/framework/heter_section_worker.cc (2 changes: 1 addition & 1 deletion)

@@ -126,7 +126,7 @@ void HeterSectionWorker::Initialize(const TrainerDesc& desc) {
bool is_first_stage = (pipeline_stage_ == 0);
bool is_last_stage = (pipeline_stage_ + 1 == num_pipeline_stages_);

-if (is_first_stage) {
+if (is_first_stage) { // NOLINT
for (auto& op_desc : program_->Block(0).AllOps()) {
auto op = std::move(OpRegistry::CreateOp(*op_desc));
auto op_type = op->Type();

paddle/fluid/framework/infershape_utils.cc (4 changes: 2 additions & 2 deletions)

@@ -658,7 +658,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
if (attr_ptr && !is_attr_var) {
auto& attr = *attr_ptr;
switch (AttrTypeID(attr)) {
-case framework::proto::AttrType::INTS:
+case framework::proto::AttrType::INTS: // NOLINT
infer_meta_context.EmplaceBackAttr(std::move(
phi::IntArray(PADDLE_GET_CONST(std::vector<int32_t>, attr))));
break;
@@ -836,7 +836,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
attr_names[i]));
}
break;
-case phi::AttributeType::FLOAT32S:
+case phi::AttributeType::FLOAT32S: // NOLINT
infer_meta_context.EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<float>, attr));
break;

paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc (2 changes: 1 addition & 1 deletion)

@@ -134,7 +134,7 @@ class CoalesceGradTensorPass : public ir::Pass {

auto &pinned_var_set =
graph->GetOrInit<details::PinnedVars>(details::kPinnedVars);
-if (IsUnifiedDtype(p_g_dense_grad, vars_info)) {
+if (IsUnifiedDtype(p_g_dense_grad, vars_info)) { // NOLINT
RecordGradients(p_g_dense_grad, vars_info, &pinned_var_set);
CoalesceTensors(vars_info, p_g_dense_grad, &result);
} else {

paddle/fluid/framework/ir/generate_pass_tester.cc (2 changes: 1 addition & 1 deletion)

@@ -25,7 +25,7 @@ REGISTER_GENERATE_PASS(generate_fc_fuse) {
VLOG(3) << "exec lambda func.";
auto mul = OP_(mul)({{"X", x}, {"Y", y}}).Out("Out");
auto ewadd = OP_(elementwise_add)({{"X", mul}, {"Y", z}}).Out("Out");
-if (with_relu) {
+if (with_relu) { // NOLINT
return OP_(relu)({"X", ewadd}).Out("Out");
} else {
return ewadd;

paddle/fluid/framework/ir/identity_op_clean_pass.cc (2 changes: 1 addition & 1 deletion)

@@ -70,7 +70,7 @@ FindUselessOpPattern::FindUselessOpPattern(PDPattern* pattern,
auto in_dtype = x->Op()->GetAttrIfExists<int>("in_dtype");
auto out_dtype = x->Op()->GetAttrIfExists<int>("out_dtype");
return in_dtype == out_dtype;
} else if (op_type == "c_identity") {
} else if (op_type == "c_identity") { // NOLINT
return true;
} else if (op_type == "assign") {
const auto& in_name = x->Op()->Input("X")[0];

@@ -161,7 +161,7 @@ class ComputePropagateScalesMkldnnPassTest : public testing::Test {
begin(wh[i]),
end(wh[i]),
wh_tensor->mutable_data<float>(phi::CPUPlace()) + i * wh[0].size());
if (type == "gru") {
if (type == "gru") { // NOLINT
ComputeGruWeightScales(
graph, &scope, wx_name, wh_name, &var_quant_scales);
} else {

paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc (5 changes: 1 addition & 4 deletions)

@@ -61,7 +61,7 @@ void SetOp(ProgramDesc* prog,
op->SetOutput("Output", {outputs[0]});
} else if (type == "pool2d" || type == "fused_transpose" ||
type == "reshape2" || type == "nearest_interp" ||
type == "nearest_interp_v2") {
type == "nearest_interp_v2" || type == "dropout") {
op->SetInput("X", {inputs[0]});
op->SetOutput("Out", {outputs[0]});
} else if (type == "slice") {
@@ ... @@
} else if (type == "split") {
op->SetInput("X", {inputs[0]});
op->SetOutput("Out", {outputs});
} else if (type == "dropout") {
op->SetInput("X", {inputs[0]});
op->SetOutput("Out", {outputs[0]});
} else if (type == "fc") {
op->SetInput("Input", {inputs[0]});
if (inputs.size() > 1) op->SetInput("W", {inputs[1]});

@@ -41,7 +41,7 @@ void SetOp(ProgramDesc* prog,
if (type != "dropout" && type != "quantize" && type != "dequantize") {
op->SetAttr("mkldnn_data_type", mkldnn_data_type);
}
if (type == "pool2d") {
if (type == "pool2d") { // NOLINT
op->SetInput("X", {inputs[0]});
op->SetOutput("Out", {outputs[0]});
if (!scale.empty()) op->SetAttr("Scale_in", scale[0]);

@@ -70,14 +70,7 @@ ProgramDesc BuildProgramDesc(bool convWithExistingBias,
}
}

-if (convWithExistingBias) {
-SetOp(&prog,
-"conv2d",
-"conv",
-std::vector<std::string>({"c", "weights", "conv_bias"}),
-std::vector<std::string>({"f"}),
-scale_weights);
-} else if (scale_weights.size() > 1) {
+if (convWithExistingBias || scale_weights.size() > 1) {
SetOp(&prog,
"conv2d",
"conv",

@@ -933,7 +933,7 @@ bool ReduceSSAGraphBuilder::DealWithSpecialOp(ir::Graph *result,

void ReduceSSAGraphBuilder::InsertPostprocessOps(ir::Graph *result) const {
if (UseGPU()) {
-if (strategy_.fuse_broadcast_ops_ == true) {
+if (strategy_.fuse_broadcast_ops_ == true) { // NOLINT
CreateFusedBroadcastOp(result, bcast_var_name_set_);
} else {
for (size_t dev_id = 0; dev_id < bcast_var_name_set_.size(); ++dev_id) {
@@ -1193,7 +1193,7 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const {
node->Op()->Type()));
// Create fetch_barrier op handle to enable output on all devices.
// **NOTE** fetch_barrier should output variables list same as recv op does.
if (node->Op()->Type() == "fetch_barrier") {
if (node->Op()->Type() == "fetch_barrier") { // NOLINT
result->Get<GraphOps>(kGraphOps).emplace_back(
new details::FetchBarrierOpHandle(
result->CreateOpNode(node->Op()), local_scopes_, places_));
@@ -1354,7 +1354,7 @@ void DistSSAGraphBuilder::InsertPostprocessOps(ir::Graph *result) const {
strategy_.reduce_ == details::BuildStrategy::ReduceStrategy::kReduce) {
return;
}
-if (strategy_.fuse_broadcast_ops_ == true) {
+if (strategy_.fuse_broadcast_ops_ == true) { // NOLINT
CreateFusedBroadcastOp(result, bcast_var_name_set_);
} else {
for (size_t dev_id = 0; dev_id < bcast_var_name_set_.size(); ++dev_id) {

paddle/fluid/framework/ir/transfer_layout_elim_pass.cc (2 changes: 1 addition & 1 deletion)

@@ -239,7 +239,7 @@ void TransferLayoutElimPass::ApplyImpl(ir::Graph *graph) const {
FusePassBase::Init(pattern_name, graph);

auto transfer_format = [&](std::string data_format) -> std::string {
if (data_format == "NCHW") {
if (data_format == "NCHW") { // NOLINT
return "NHWC";
} else if (data_format == "NHWC") {
return "NCHW";

@@ -32,14 +32,14 @@ CreateInterpreterCoreGarbageCollector(
const platform::Place& place,
const std::vector<std::unique_ptr<InstructionBase>>& vec_instruction) {
if (platform::is_gpu_place(place)) {
-if (IsInterpretercoreFastGCEnabled()) {
+if (IsInterpretercoreFastGCEnabled()) { // NOLINT
return std::unique_ptr<InterpreterCoreGarbageCollector>(
new InterpreterCoreFastGarbageCollector());
} else {
return std::unique_ptr<InterpreterCoreGarbageCollector>(
new InterpreterCoreEventGarbageCollector(vec_instruction));
}
-} else if (platform::is_xpu_place(place)) {
+} else if (platform::is_xpu_place(place)) { // NOLINT
// Because there is no multi-stream on XPU device, fast GC can
// be used.
// Previously, XPU used no_event GC. But `Wait` in no_event GC
@@ ... @@
const platform::Place& place,
const std::vector<Instruction>& vec_instruction) {
if (platform::is_gpu_place(place)) {
-if (IsInterpretercoreFastGCEnabled()) {
+if (IsInterpretercoreFastGCEnabled()) { // NOLINT
return std::unique_ptr<InterpreterCoreGarbageCollector>(
new InterpreterCoreFastGarbageCollector());
} else {
return std::unique_ptr<InterpreterCoreGarbageCollector>(
new InterpreterCoreEventGarbageCollector(vec_instruction));
}
-} else if (platform::is_xpu_place(place)) {
+} else if (platform::is_xpu_place(place)) { // NOLINT
// Because there is no multi-stream on XPU device, fast GC can
// be used.
// Previously, XPU used no_event GC. But `Wait` in no_event GC

@@ -49,9 +49,10 @@ void InterpreterCoreNoEventGarbageCollector::Add(

if (var->IsType<phi::DenseTensor>()) {
Add(var->GetMutable<phi::DenseTensor>()->MoveMemoryHolder(), ctx);
-} else if (var->IsType<
-operators::reader::
-OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) {
+} else if (
+var->IsType<
+operators::reader::
+OrderedMultiDeviceLoDTensorBlockingQueueHolder>()) { // NOLINT
// TODO(xiongkun03) in old executor, this type of variable is not support
// eager deletion. so we just leave it here ?
} else if (var->IsType<LoDRankTable>()) {

paddle/fluid/framework/new_executor/new_executor_defs.cc (2 changes: 1 addition & 1 deletion)

@@ -96,7 +96,7 @@ void VariableScope::AddVar(const std::string& name,
auto id = VarSize();
name2id_[name] = static_cast<int>(id);
vec_meta_info_.emplace_back(0, var_desc);
-if (local_scope_ != nullptr) {
+if (local_scope_ != nullptr) { // NOLINT
var_list_.push_back(local_scope_->FindVar(name));
} else {
var_list_.push_back(scope_->FindVar(name));

paddle/fluid/framework/new_executor/pir_interpreter.cc (4 changes: 2 additions & 2 deletions)

@@ -701,7 +701,7 @@ void PirInterpreter::BuildInstruction() {
continue;
}
} else if (op.dialect()->name() == "pd_op") {
-if (op.isa<paddle::dialect::IfOp>()) {
+if (op.isa<paddle::dialect::IfOp>()) { // NOLINT
vec_instruction_base_.emplace_back(std::make_unique<IfInstruction>(
op_idx++, place_, &op, value_exe_info_.get(), execution_config_));
sub_blocks_.insert(
@@ -742,7 +742,7 @@ void PirInterpreter::BuildInstruction() {
}
VLOG(6) << "process " << op_name;

-if (op.isa<paddle::dialect::LegacyKernelOp>()) {
+if (op.isa<paddle::dialect::LegacyKernelOp>()) { // NOLINT
CREATE_INSTR(LegacyKernelInstruction);
} else {
CREATE_INSTR(PhiKernelInstruction);

paddle/fluid/framework/new_executor/standalone_executor.cc (2 changes: 1 addition & 1 deletion)

@@ -57,7 +57,7 @@ StandaloneExecutor::StandaloneExecutor(const platform::Place& place,
const std::string& job_type = job->Type();
std::shared_ptr<ProgramDesc> program = nullptr;
std::shared_ptr<::pir::Program> ir_program = nullptr;
-if (FLAGS_enable_pir_api || FLAGS_enable_pir_in_executor) {
+if (FLAGS_enable_pir_api || FLAGS_enable_pir_in_executor) { // NOLINT
ir_program = plan_.IrProgram(job_type);
} else {
// NOTE (liuchenghao): std::make_shared will duplicate ProgramDesc object,