diff --git a/paddle/fluid/framework/new_executor/interpreter/plan.cc b/paddle/fluid/framework/new_executor/interpreter/plan.cc index ee2f5dc57acc6a..18a354e79de41f 100644 --- a/paddle/fluid/framework/new_executor/interpreter/plan.cc +++ b/paddle/fluid/framework/new_executor/interpreter/plan.cc @@ -68,6 +68,7 @@ const std::vector>& Plan::JobList() const { const std::vector Plan::JobTypes() const { std::vector res; + res.reserve(type_to_ir_program_.size()); for (auto kv : type_to_ir_program_) { res.emplace_back(kv.first); } diff --git a/paddle/fluid/jit/function_schema.cc b/paddle/fluid/jit/function_schema.cc index 0d2014153e1d76..3e19e5d63fcd37 100644 --- a/paddle/fluid/jit/function_schema.cc +++ b/paddle/fluid/jit/function_schema.cc @@ -28,6 +28,7 @@ const std::string& Argument::Name() const { return name_; } const std::vector FunctionSchema::InputArgNames() const { std::vector input_arg_names; + input_arg_names.reserve(input_args.size()); for (auto& arg : input_args) { input_arg_names.emplace_back(arg.Name()); } @@ -36,6 +37,7 @@ const std::vector FunctionSchema::InputArgNames() const { const std::vector FunctionSchema::OutputArgNames() const { std::vector output_arg_names; + output_arg_names.reserve(output_args.size()); for (auto& arg : output_args) { output_arg_names.emplace_back(arg.Name()); } diff --git a/paddle/fluid/pir/dialect/operator/utils/utils.cc b/paddle/fluid/pir/dialect/operator/utils/utils.cc index c0f88cc3dc4b9a..feb73367898b41 100644 --- a/paddle/fluid/pir/dialect/operator/utils/utils.cc +++ b/paddle/fluid/pir/dialect/operator/utils/utils.cc @@ -188,6 +188,7 @@ static std::unordered_map< if (element_type == AttrType::BOOL) { std::vector vec_bools; + vec_bools.reserve(attr_vec.size()); for (auto vec_element : attr_vec) { vec_bools.push_back( vec_element.dyn_cast().data()); @@ -195,6 +196,7 @@ static std::unordered_map< return VariantType{vec_bools}; } else if (element_type == AttrType::INT32) { std::vector vec_int32; + 
vec_int32.reserve(attr_vec.size()); for (auto vec_element : attr_vec) { vec_int32.push_back( vec_element.dyn_cast().data()); @@ -202,6 +204,7 @@ static std::unordered_map< return VariantType{vec_int32}; } else if (element_type == AttrType::INT64) { std::vector vec_int64; + vec_int64.reserve(attr_vec.size()); for (auto vec_element : attr_vec) { vec_int64.push_back( vec_element.dyn_cast().data()); @@ -209,6 +212,7 @@ static std::unordered_map< return VariantType{vec_int64}; } else if (element_type == AttrType::FLOAT) { std::vector vec_float; + vec_float.reserve(attr_vec.size()); for (auto vec_element : attr_vec) { vec_float.push_back( vec_element.dyn_cast().data()); @@ -216,6 +220,7 @@ static std::unordered_map< return VariantType{vec_float}; } else if (element_type == AttrType::DOUBLE) { std::vector vec_double; + vec_double.reserve(attr_vec.size()); for (auto vec_element : attr_vec) { vec_double.push_back( vec_element.dyn_cast().data()); @@ -223,6 +228,7 @@ static std::unordered_map< return VariantType{vec_double}; } else if (element_type == AttrType::STRING) { std::vector vec_string; + vec_string.reserve(attr_vec.size()); for (auto vec_element : attr_vec) { vec_string.push_back( vec_element.dyn_cast().AsString()); diff --git a/paddle/fluid/pir/drr/src/rewrite_pattern.cc b/paddle/fluid/pir/drr/src/rewrite_pattern.cc index d4efc8284b60fa..ee1742c2c9b5fb 100644 --- a/paddle/fluid/pir/drr/src/rewrite_pattern.cc +++ b/paddle/fluid/pir/drr/src/rewrite_pattern.cc @@ -75,6 +75,7 @@ bool DrrRewritePattern::PatternGraphMatch( return false; } std::vector drr_output_sequence; + drr_output_sequence.reserve(bind_map.size()); std::vector ir_output_sequence; std::unordered_map output_op_map; for (const auto& pair : bind_map) { diff --git a/paddle/phi/backends/device_manager.cc b/paddle/phi/backends/device_manager.cc index 87a163b2cb4fa2..e3ec68e7f91824 100644 --- a/paddle/phi/backends/device_manager.cc +++ b/paddle/phi/backends/device_manager.cc @@ -323,6 +323,7 @@ std::vector 
DeviceManager::GetAllDeviceTypes() { phi::AutoRDLock lock(&_global_device_manager_rw_lock); auto& dev_impl_map = Instance().device_impl_map_; std::vector devices; + devices.reserve(dev_impl_map.size()); for (const auto& map_item : dev_impl_map) { devices.push_back(map_item.first); } diff --git a/paddle/phi/core/distributed/auto_parallel/dist_mapper.cc b/paddle/phi/core/distributed/auto_parallel/dist_mapper.cc index 6b5ff5625f63ee..0e3a636fa0a1b1 100644 --- a/paddle/phi/core/distributed/auto_parallel/dist_mapper.cc +++ b/paddle/phi/core/distributed/auto_parallel/dist_mapper.cc @@ -26,6 +26,7 @@ void DistributedMapper::set_process_id_to_device_ids( const std::map>>& process_id_to_device_ids) { std::vector device_mesh_names; + device_mesh_names.reserve(device_meshes_.size()); for (const auto& item : device_meshes_) { device_mesh_names.push_back(item.first); } @@ -83,6 +84,7 @@ DistributedMapper DistributedMapper::from_proto( proto.process_id_to_device_ids(i).device_mesh_name(); std::vector device_ids; int num_devices = proto.process_id_to_device_ids(i).device_ids_size(); + device_ids.reserve(num_devices); for (int j = 0; j < num_devices; ++j) { device_ids.push_back(proto.process_id_to_device_ids(i).device_ids(j)); } diff --git a/paddle/phi/core/distributed/auto_parallel/reshard/s_to_r_reshard_function.cc b/paddle/phi/core/distributed/auto_parallel/reshard/s_to_r_reshard_function.cc index 892f34f90176b7..c094c5bf580f54 100644 --- a/paddle/phi/core/distributed/auto_parallel/reshard/s_to_r_reshard_function.cc +++ b/paddle/phi/core/distributed/auto_parallel/reshard/s_to_r_reshard_function.cc @@ -76,6 +76,7 @@ void ReshardSToRWithPadding(DeviceContext* dev_ctx, // Concat the result after split on correct axis. 
std::vector concat_input_vec; + concat_input_vec.reserve(split_out_vec.size()); for (const auto& tensor : split_out_vec) { concat_input_vec.emplace_back(&tensor); } diff --git a/paddle/phi/infermeta/spmd_rules/utils.cc b/paddle/phi/infermeta/spmd_rules/utils.cc index 1786e141f400fc..b67d7bd251b1b2 100644 --- a/paddle/phi/infermeta/spmd_rules/utils.cc +++ b/paddle/phi/infermeta/spmd_rules/utils.cc @@ -407,7 +407,7 @@ void AlignDimsSharding(std::vector* input_attrs_ptr, for (auto pair : partial_dim_to_type) { placements[pair.first] = std::make_shared(pair.second); } - new_input_attrs.emplace_back(FromPlacements(e, placements)); + new_input_attrs.emplace_back(FromPlacements(e, placements)); // NOLINT } std::swap(input_attrs, new_input_attrs); } diff --git a/paddle/phi/kernels/cpu/overlap_add_grad_kernel.cc b/paddle/phi/kernels/cpu/overlap_add_grad_kernel.cc index fe0ab8c309fc4a..16415533df18f7 100644 --- a/paddle/phi/kernels/cpu/overlap_add_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/overlap_add_grad_kernel.cc @@ -137,6 +137,7 @@ void OverlapAddGradKernel(const Context& dev_ctx, // Restore output dims when the number of dims is larger than 2. if (out_grad_rank > 2) { std::vector restored_x_grad_shape; + restored_x_grad_shape.reserve(preserved_dims.size()); for (int i = 0; i < preserved_dims.size(); i++) { restored_x_grad_shape.push_back(preserved_dims[i]); } diff --git a/paddle/phi/kernels/cpu/overlap_add_kernel.cc b/paddle/phi/kernels/cpu/overlap_add_kernel.cc index ac00eec3e9f138..3f55ad14deeb19 100644 --- a/paddle/phi/kernels/cpu/overlap_add_kernel.cc +++ b/paddle/phi/kernels/cpu/overlap_add_kernel.cc @@ -127,6 +127,7 @@ void OverlapAddKernel(const Context& dev_ctx, // Restore output dims when the number of dims is larger than 2. 
if (out_rank > 2) { std::vector restored_out_shape; + restored_out_shape.reserve(preserved_dims.size()); for (int i = 0; i < preserved_dims.size(); i++) { restored_out_shape.push_back(preserved_dims[i]); } diff --git a/paddle/phi/kernels/funcs/jit/benchmark.cc b/paddle/phi/kernels/funcs/jit/benchmark.cc index 4599937c481ca2..fac7227d806085 100644 --- a/paddle/phi/kernels/funcs/jit/benchmark.cc +++ b/paddle/phi/kernels/funcs/jit/benchmark.cc @@ -113,6 +113,7 @@ void BenchAllImpls(const typename KernelTuple::attr_type& attr, Args... args) { BenchFunc benchmark; std::vector> infos; auto funcs = jit::GetAllCandidateFuncsWithTypes(attr); + infos.reserve(funcs.size()); for (auto const& f : funcs) { infos.push_back(std::make_pair(f.first, benchmark(f.second, args...))); } diff --git a/paddle/phi/kernels/sparse/cpu/sum_grad_kernel.cc b/paddle/phi/kernels/sparse/cpu/sum_grad_kernel.cc index a73f876920ee33..177b89be10a869 100644 --- a/paddle/phi/kernels/sparse/cpu/sum_grad_kernel.cc +++ b/paddle/phi/kernels/sparse/cpu/sum_grad_kernel.cc @@ -82,6 +82,7 @@ void SumCooGradCPUKernel(const Context& dev_ctx, std::map, int64_t> indices_map; for (auto j = 0; j < dout_indices.dims()[1]; ++j) { std::vector pos; + pos.reserve(dout_indices.dims()[0]); for (int i = 0; i < dout_indices.dims()[0]; ++i) { pos.push_back(dout_indices_data[j + i * dout_indices.dims()[1]]); } diff --git a/paddle/phi/kernels/stride/split_kernel.cc b/paddle/phi/kernels/stride/split_kernel.cc index 5a92cd3bf78040..b5d9d0af696283 100644 --- a/paddle/phi/kernels/stride/split_kernel.cc +++ b/paddle/phi/kernels/stride/split_kernel.cc @@ -56,6 +56,7 @@ void SplitWithNumStridedKernel(const Context& dev_ctx, int axis_value = axis_scalar.to(); auto input_axis_dim = x.dims().at(axis_value); std::vector sections_vec; + sections_vec.reserve(num); for (int i = 0; i < num; ++i) { sections_vec.push_back(input_axis_dim / num); } diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc index 
2ec070913bab25..9a857fe0eee6a6 100644 --- a/paddle/testing/paddle_gtest_main.cc +++ b/paddle/testing/paddle_gtest_main.cc @@ -26,6 +26,7 @@ int main(int argc, char** argv) { // NOLINT paddle::memory::allocation::UseAllocatorStrategyGFlag(); testing::InitGoogleTest(&argc, argv); std::vector new_argv; + new_argv.reserve(argc); for (int i = 0; i < argc; ++i) { new_argv.push_back(argv[i]); } diff --git a/test/cpp/fluid/framework/attribute_test.cc b/test/cpp/fluid/framework/attribute_test.cc index 83b9f099d2bd78..a1244729465e39 100644 --- a/test/cpp/fluid/framework/attribute_test.cc +++ b/test/cpp/fluid/framework/attribute_test.cc @@ -296,6 +296,7 @@ TEST(Attribute, ProtoAttrToAttribute_scalars) { proto_attr_scalars.set_type(paddle::framework::proto::SCALARS); std::vector scalars; + scalars.reserve(10); for (int i = 0; i < 10; i++) { scalars.emplace_back(i); } diff --git a/test/cpp/fluid/framework/op_desc_test.cc b/test/cpp/fluid/framework/op_desc_test.cc index 174612e6b3f738..ef604a16d231ec 100644 --- a/test/cpp/fluid/framework/op_desc_test.cc +++ b/test/cpp/fluid/framework/op_desc_test.cc @@ -29,6 +29,7 @@ TEST(OpDesc, SetScalarsAttr) { paddle::experimental::Scalar scalar(std::complex(42.1, 42.1)); std::vector scalars; + scalars.reserve(4); for (int i = 0; i < 4; i++) { scalars.emplace_back(i); } diff --git a/test/cpp/phi/kernels/test_fused_adam_kernel.cc b/test/cpp/phi/kernels/test_fused_adam_kernel.cc index b4edd6b0c19770..73e1b21ac31208 100644 --- a/test/cpp/phi/kernels/test_fused_adam_kernel.cc +++ b/test/cpp/phi/kernels/test_fused_adam_kernel.cc @@ -71,6 +71,7 @@ auto GenerateConstantTensorVectors( static auto ToConstTensorPtrVector(const std::vector &tensors) { std::vector results; + results.reserve(tensors.size()); for (const auto &t : tensors) { results.push_back(&t); } @@ -80,6 +81,7 @@ static auto ToConstTensorPtrVector(const std::vector &tensors) { static auto ToMutableTensorPtrVector( std::vector &tensors) { // NOLINT std::vector results; + 
results.reserve(tensors.size()); for (auto &t : tensors) { results.push_back(&t); } @@ -88,6 +90,7 @@ static auto ToMutableTensorPtrVector( static auto ToMetaTensorVector(const std::vector &tensors) { std::vector results; + results.reserve(tensors.size()); for (auto &t : tensors) { results.emplace_back(t); } @@ -97,6 +100,7 @@ static auto ToMetaTensorVector(const std::vector &tensors) { static auto ToConstMetaTensorPtrVector( const std::vector &meta_tensors) { std::vector results; + results.reserve(meta_tensors.size()); for (auto &t : meta_tensors) { results.push_back(&t); } @@ -106,6 +110,7 @@ static auto ToConstMetaTensorPtrVector( static auto ToMutableMetaTensorPtrVector( std::vector &meta_tensors) { // NOLINT std::vector results; + results.reserve(meta_tensors.size()); for (auto &t : meta_tensors) { results.push_back(&t); }