diff --git a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc
index 19cf30d24db406..66c62085faed2b 100644
--- a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc
@@ -49,8 +49,8 @@ FastThreadedSSAGraphExecutor::FastThreadedSSAGraphExecutor(
       /*disable_setting_default_stream_for_allocator=*/true,
       /*stream_priority=*/0);
   if (ir::IsTopologySortOperationsUnique(*graph_)) {
-    VLOG(10)
-        << "Change thread number to 1 because the toposort order is unique";
+    VLOG(10) << "Change thread number to 1 because the topology sort order is "
+                "unique";
     strategy_.num_threads_ = 1;
     traced_ops_.clear();
     for (auto *op_node : TopologySortOperations(*graph_)) {
diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc
index 27be4b77176350..25108148af3494 100644
--- a/paddle/fluid/framework/details/fetch_op_handle.cc
+++ b/paddle/fluid/framework/details/fetch_op_handle.cc
@@ -39,7 +39,7 @@ FetchOpHandle::~FetchOpHandle() = default;
 
 void FetchOpHandle::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) {
   PADDLE_THROW(platform::errors::PermissionDenied(
-      "No nodes need to wait FetchOp. Unexpceted Error."));
+      "No nodes need to wait FetchOp. Unexpected Error."));
 }
 
 static void CheckDims(const framework::DDim &tensor_dims,
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 99ccbbe50d241e..7446afc87ccf71 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -2037,7 +2037,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
   phi::KernelContext phi_kernel_context;
   if (enable_cache_runtime_context_ && !need_prepare_phi_data_ &&
       !need_prepare_data_) {
-    // TODO(inference): Now we only suppor dense_tensor cache, we may be
+    // TODO(inference): Now we only support dense_tensor cache, we may be
     // support ScalarTensor, SparseTensor in future.
     bool all_dense_tensor_input_{true};
     for (auto& iter : Inputs()) {
@@ -2572,7 +2572,7 @@ Scope* OperatorWithKernel::PrepareData(
         // for some situation like InferShape().
         // In this situation We cannot skip Var analysis, as
         // oneDNN shape of Var may differ from kNHWC Var
-        // In such situation corressponding resized Var
+        // In such situation corresponding resized Var
         // has to be created and registered
         if ((tensor_in->layout() == DataLayout::ONEDNN) &&
             (var->IsType<phi::DenseTensor>() == true) &&
@@ -3192,7 +3192,7 @@ void OperatorWithKernel::BuildPhiKernelContext(
 
   for (size_t i = 0; i < input_names.size(); ++i) {
     auto it = ctx.inputs.find(input_names[i]);
-    // calcute the start and end index of the input tensors
+    // calculate the start and end index of the input tensors
     size_t start_idx =
         (i == 0 ? 0 : phi_kernel_context->InputRangeAt(i - 1).second);
     // deal with optional here
@@ -3398,7 +3398,7 @@ void OperatorWithKernel::BuildPhiKernelContext(
           attr_iter,
           Attrs().end(),
           platform::errors::NotFound("(%s) is not found in AttributeMap when "
-                                     "buildind static KernelContext.",
+                                     "building static KernelContext.",
                                      attr_names[i]));
       switch (AttrTypeID(attr_iter->second)) {
         case proto::AttrType::INTS: {
@@ -3472,7 +3472,7 @@ void OperatorWithKernel::BuildPhiKernelContext(
             RuntimeAttrs().end(),
             platform::errors::NotFound(
                 "(%s) is not found in AttributeMap when "
-                "buildind static KernelContext.",
+                "building static KernelContext.",
                 attr_names[i]));
       }
 
diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc
index 897e520813809c..c2b6c37e7dd6e6 100644
--- a/paddle/fluid/framework/parallel_executor.cc
+++ b/paddle/fluid/framework/parallel_executor.cc
@@ -639,15 +639,15 @@ void InitP2P(const std::vector<platform::Place> &places) {
     for (int i = 0; i < count; ++i) {
       for (int j = 0; j < count; ++j) {
         if (devices[i] == devices[j]) continue;
-        int can_acess = -1;
+        int can_access = -1;
 #ifdef PADDLE_WITH_HIP
         hipError_t ret =
-            hipDeviceCanAccessPeer(&can_acess, devices[i], devices[j]);
-        if (ret != hipSuccess || can_acess != 1) {
+            hipDeviceCanAccessPeer(&can_access, devices[i], devices[j]);
+        if (ret != hipSuccess || can_access != 1) {
 #else
         cudaError_t ret =
-            cudaDeviceCanAccessPeer(&can_acess, devices[i], devices[j]);
-        if (ret != cudaSuccess || can_acess != 1) {
+            cudaDeviceCanAccessPeer(&can_access, devices[i], devices[j]);
+        if (ret != cudaSuccess || can_access != 1) {
 #endif
           LOG(WARNING) << "Cannot enable P2P access from " << devices[i]
                        << " to " << devices[j];
diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc
index fafde716b7bba7..bd869a05880671 100644
--- a/paddle/fluid/framework/tensor_util.cc
+++ b/paddle/fluid/framework/tensor_util.cc
@@ -710,8 +710,9 @@ void TensorFromStream(std::istream& is,
       PADDLE_THROW(platform::errors::Unimplemented(
           "XPUPlace is not supported when not compiled with XPU"));
     } else {
-      PADDLE_THROW(platform::errors::Unimplemented(
-          "CutomPlace is not supported when not compiled with CustomDevice"));
+      PADDLE_THROW(
+          platform::errors::Unimplemented("CustomPlace is not supported when "
+                                          "not compiled with CustomDevice"));
     }
 #endif
   } else {
@@ -887,7 +888,8 @@ std::ostream& print_tensor(std::ostream& os, const phi::DenseTensor& tensor) {
   auto element_num = tensor.numel();
 
   os << "  - data: [";
-  // Note: int8_t && uint8_t is typedf of char, ostream unable to print properly
+  // Note: int8_t && uint8_t is typedef of char, ostream unable to print
+  // properly
   if (typeid(int8_t) == typeid(T) || typeid(uint8_t) == typeid(T)) {
     if (element_num > 0) {
       os << signed(inspect[0]);
diff --git a/paddle/fluid/framework/trainer_factory.cc b/paddle/fluid/framework/trainer_factory.cc
index ba5dac4830aa18..81b2df6efc723d 100644
--- a/paddle/fluid/framework/trainer_factory.cc
+++ b/paddle/fluid/framework/trainer_factory.cc
@@ -26,8 +26,8 @@ namespace framework {
 
 class TrainerBase;
 
-typedef std::shared_ptr<TrainerBase> (*CreatetrainerFunction)();
-typedef std::unordered_map<std::string, CreatetrainerFunction> trainerMap;
+typedef std::shared_ptr<TrainerBase> (*CreateTrainerFunction)();
+typedef std::unordered_map<std::string, CreateTrainerFunction> trainerMap;
 trainerMap g_trainer_map;
 
 #define REGISTER_TRAINER_CLASS(trainer_class) \
diff --git a/paddle/fluid/operators/cvm_op.cc b/paddle/fluid/operators/cvm_op.cc
index 578a59130495ac..1e414ff217c2f1 100644
--- a/paddle/fluid/operators/cvm_op.cc
+++ b/paddle/fluid/operators/cvm_op.cc
@@ -127,7 +127,7 @@ class CVMOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("X",
              "(LodTensor, default LodTensor<float>), a 2-D tensor with shape "
             "[N x D],"
-             " where N is the batch size and D is the emebdding dim. ");
+             " where N is the batch size and D is the embedding dim. ");
     AddInput("CVM",
              "(Tensor), a 2-D Tensor with shape [N x 2], where N is the batch "
             "size, 2 is show and click.");
diff --git a/paddle/fluid/platform/float16_test.cu b/paddle/fluid/platform/float16_test.cu
index 4575b54d48c9bf..555f83d61675ef 100644
--- a/paddle/fluid/platform/float16_test.cu
+++ b/paddle/fluid/platform/float16_test.cu
@@ -282,7 +282,7 @@ TEST(float16, compound_on_gpu) {
   TestDivAssign(6, 2, 3);
 }
 
-TEST(float16, comparision_on_gpu) {
+TEST(float16, comparison_on_gpu) {
   TestEqual(1, 1, true);
   TestEqual(1, 2, false);
   TestNotEqual(2, 3, true);
diff --git a/paddle/fluid/prim/api/manual_prim/utils/utils.h b/paddle/fluid/prim/api/manual_prim/utils/utils.h
index 90a25f8bf1e1fd..f3b21169e57f1a 100644
--- a/paddle/fluid/prim/api/manual_prim/utils/utils.h
+++ b/paddle/fluid/prim/api/manual_prim/utils/utils.h
@@ -29,7 +29,7 @@ namespace prim {
 // We put some api like utils here
 template <typename T>
 Tensor empty(const paddle::experimental::IntArray& shape,
-             phi::DataType dype,
+             phi::DataType dtype,
              const paddle::Place& place);
 
 template <typename T>
@@ -37,7 +37,7 @@ Tensor empty_like(const Tensor& x,
                   phi::DataType dtype,
                   const paddle::Place& place);
 
-// copy tensor for output ptr, in static need use assigh op
+// copy tensor for output ptr, in static need use assign op
 template <typename T>
 void by_pass(const Tensor& x, Tensor* out);
 
@@ -114,7 +114,7 @@ static std::vector<VT> unsafe_vector_cast(const std::vector<T>& src) {
   return dst;
 }
 
-// This fucction compute unsqueeze dims for reshape to replace unsqueeze.
+// This function compute unsqueeze dims for reshape to replace unsqueeze.
 static std::vector<int64_t> get_unsqueeze_dims(
     const Tensor& origin, const std::vector<int64_t>& axis) {
   auto origin_dims = origin.shape();
diff --git a/paddle/phi/kernels/prior_box_kernel.h b/paddle/phi/kernels/prior_box_kernel.h
index 45a741c7a3a72b..132efb7b6cc722 100644
--- a/paddle/phi/kernels/prior_box_kernel.h
+++ b/paddle/phi/kernels/prior_box_kernel.h
@@ -35,25 +35,25 @@ void PriorBoxKernel(const Context& ctx,
                     DenseTensor* out,
                     DenseTensor* var);
 
-inline void ExpandAspectRatios(const std::vector<float>& input_aspect_ratior,
+inline void ExpandAspectRatios(const std::vector<float>& input_aspect_ratio,
                                bool flip,
-                               std::vector<float>* output_aspect_ratior) {
+                               std::vector<float>* output_aspect_ratio) {
   constexpr float epsilon = 1e-6;
-  output_aspect_ratior->clear();
-  output_aspect_ratior->push_back(1.0f);
-  for (size_t i = 0; i < input_aspect_ratior.size(); ++i) {
-    float ar = input_aspect_ratior[i];
+  output_aspect_ratio->clear();
+  output_aspect_ratio->push_back(1.0f);
+  for (size_t i = 0; i < input_aspect_ratio.size(); ++i) {
+    float ar = input_aspect_ratio[i];
     bool already_exist = false;
-    for (size_t j = 0; j < output_aspect_ratior->size(); ++j) {
-      if (fabs(ar - output_aspect_ratior->at(j)) < epsilon) {
+    for (size_t j = 0; j < output_aspect_ratio->size(); ++j) {
+      if (fabs(ar - output_aspect_ratio->at(j)) < epsilon) {
         already_exist = true;
         break;
       }
     }
     if (!already_exist) {
-      output_aspect_ratior->push_back(ar);
+      output_aspect_ratio->push_back(ar);
       if (flip) {
-        output_aspect_ratior->push_back(1.0f / ar);
+        output_aspect_ratio->push_back(1.0f / ar);
       }
     }
   }
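
Not part of the patch: the last hunk renames the parameters of ExpandAspectRatios, whose behavior is easy to sanity-check in isolation. Below is a minimal standalone sketch that restates the dedup-and-flip logic from that hunk in a hypothetical expand_aspect_ratios_demo.cc, so the renamed input_aspect_ratio/output_aspect_ratio parameters can be exercised without a Paddle build; the file name and demo values are illustrative, not from the PR.

// expand_aspect_ratios_demo.cc (hypothetical, illustration only): restates
// the dedup-and-flip logic of ExpandAspectRatios from the hunk above.
#include <cmath>
#include <cstdio>
#include <vector>

static void ExpandAspectRatios(const std::vector<float>& input_aspect_ratio,
                               bool flip,
                               std::vector<float>* output_aspect_ratio) {
  constexpr float epsilon = 1e-6;
  output_aspect_ratio->clear();
  output_aspect_ratio->push_back(1.0f);  // ratio 1.0 is always emitted first
  for (float ar : input_aspect_ratio) {
    bool already_exist = false;
    for (float seen : *output_aspect_ratio) {
      if (std::fabs(ar - seen) < epsilon) {  // treat near-equal as duplicate
        already_exist = true;
        break;
      }
    }
    if (!already_exist) {
      output_aspect_ratio->push_back(ar);
      // flip also emits the reciprocal ratio, e.g. 2:1 -> 1:2
      if (flip) output_aspect_ratio->push_back(1.0f / ar);
    }
  }
}

int main() {
  std::vector<float> out;
  ExpandAspectRatios({2.0f, 3.0f, 2.0f}, /*flip=*/true, &out);
  for (float ar : out) std::printf("%g ", ar);
  std::printf("\n");  // prints: 1 2 0.5 3 0.333333 (duplicate 2.0 dropped)
  return 0;
}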