diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
index 5b2bed7745fcf6..1b29ba37f5e669 100644
--- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
+++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
@@ -754,7 +754,7 @@ std::string TensorRtSubgraphPass::CreateTensorRTOp(
   bool calibration_mode =
       (enable_int8 && calibration_data.empty() && use_calib_mode);
   if (calibration_mode) {
-    // calibraion mode means generate int8 calibration table data process.
+    // calibration mode means generate int8 calibration table data process.
     return calibration_engine_key;
   }
diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h
index cae544ff2c234c..134c0799ec663d 100644
--- a/paddle/fluid/inference/api/paddle_analysis_config.h
+++ b/paddle/fluid/inference/api/paddle_analysis_config.h
@@ -253,7 +253,7 @@ struct PD_INFER_DECL AnalysisConfig {
   void SetModel(const std::string& model_dir) { model_dir_ = model_dir; }
 
   ///
-  /// \brief Set the combined model with two specific pathes for program and
+  /// \brief Set the combined model with two specific paths for program and
   /// parameters.
   ///
   /// \param prog_file_path model file path of the combined model.
@@ -596,12 +596,12 @@ struct PD_INFER_DECL AnalysisConfig {
   /// \brief Control whether to perform IR graph optimization.
   /// If turned off, the AnalysisConfig will act just like a NativeConfig.
   ///
-  /// \param x Whether the ir graph optimization is actived.
+  /// \param x Whether the ir graph optimization is activated.
   ///
   void SwitchIrOptim(int x = true) { enable_ir_optim_ = x; }
   ///
   /// \brief A boolean state telling whether the ir graph optimization is
-  /// actived.
+  /// activated.
   ///
   /// \return bool Whether to use ir graph optimization.
   ///
@@ -1213,7 +1213,7 @@ struct PD_INFER_DECL AnalysisConfig {
   std::string SerializeInfoCache();
 
  protected:
-  // Model pathes.
+  // Model paths.
   std::string model_dir_;
   mutable std::string prog_file_;
   mutable std::string params_file_;
diff --git a/paddle/fluid/inference/api/resource_manager.cc b/paddle/fluid/inference/api/resource_manager.cc
index b18ca6e1c2a555..9f8a6651ebdf8c 100644
--- a/paddle/fluid/inference/api/resource_manager.cc
+++ b/paddle/fluid/inference/api/resource_manager.cc
@@ -191,7 +191,7 @@ void GPUContextResource::InitGpuEigenDevice() {
   gpu_eigen_device_ = std::make_unique<Eigen::GpuDevice>(eigen_stream_.get());
 }
 
-void GPUContextResource::InitDnnHanlde() {
+void GPUContextResource::InitDnnHandle() {
   phi::InitDnnHandle(&dnn_handle_, stream_, place_);
 }
@@ -237,7 +237,7 @@ dnnHandle_t GPUContextResource::GetDnnHandle() const { return dnn_handle_; }
 std::function<phi::dnnHandle_t()> GPUContextResource::GetDnnHandleCreator() {
   return [&]() -> phi::dnnHandle_t {
-    InitDnnHanlde();
+    InitDnnHandle();
     return dnn_handle_;
   };
 }
@@ -367,7 +367,7 @@ ResourceManager& ResourceManager::Instance() {
 }
 
 void ResourceManager::InitCPUResource() {
-  std::lock_guard<std::mutex> lock_gurad(cpu_mutex_);
+  std::lock_guard<std::mutex> lock_guard(cpu_mutex_);
   if (cpu_resource_ == nullptr) {
     cpu_resource_ = std::make_unique<CPUContextResource>();
   }
@@ -382,7 +382,7 @@ CPUContextResource* ResourceManager::GetCPUResource() const {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 void* ResourceManager::InitGPUResource(const phi::Place& place, void* stream) {
-  std::lock_guard<std::mutex> lock_gurad(gpu_mutex_);
+  std::lock_guard<std::mutex> lock_guard(gpu_mutex_);
   if (gpu_resources_.count(stream)) {
     Increase(stream);
     return stream;
@@ -427,7 +427,7 @@ GPUContextResource* ResourceManager::GetGPUResource(void* stream) const {
 void ResourceManager::GpuResourceSwitchStream(void* old_stream,
                                               void* new_stream) {
   // NOTE: add lock to support stream rebind in multi-thread
-  std::lock_guard<std::mutex> lock_gurad(gpu_mutex_);
+  std::lock_guard<std::mutex> lock_guard(gpu_mutex_);
   if (old_stream == new_stream) return;
   PADDLE_ENFORCE_EQ(
       gpu_resources_.count(old_stream),
diff --git a/paddle/fluid/inference/api/resource_manager.h b/paddle/fluid/inference/api/resource_manager.h
index 1f4d4ea420e1b6..25b4050e7c4ddf 100644
--- a/paddle/fluid/inference/api/resource_manager.h
+++ b/paddle/fluid/inference/api/resource_manager.h
@@ -88,7 +88,7 @@ class GPUContextResource {
   void DestroyGPUResource();
   void InitGpuProperties();
   void InitGpuEigenDevice();
-  void InitDnnHanlde();
+  void InitDnnHandle();
   void DestroyDnnHandle();
   void DestroyBlasHandle();
   void InitBlasLtHandle();
diff --git a/paddle/fluid/inference/capi/pd_config.cc b/paddle/fluid/inference/capi/pd_config.cc
index 5197b8dede1927..c2c8036ece7a87 100644
--- a/paddle/fluid/inference/capi/pd_config.cc
+++ b/paddle/fluid/inference/capi/pd_config.cc
@@ -275,7 +275,7 @@ void PD_EnableDlnne(
     int max_batch_size,
     bool use_static_batch,
     std::string weight_share_mode,
-    std::unordered_set<std::string> disable_nodes_by_ouputs,
+    std::unordered_set<std::string> disable_nodes_by_outputs,
     std::map<std::string, std::vector<int64_t>> dlnne_input_shape_dict,
     bool use_calib_mode,
     PD_ACPrecision precision_mode) {
@@ -287,7 +287,7 @@ void PD_EnableDlnne(
       max_batch_size,
       use_static_batch,
       weight_share_mode,
-      disable_nodes_by_ouputs,
+      disable_nodes_by_outputs,
       dlnne_input_shape_dict,
       use_calib_mode,
       precision_mode);
diff --git a/paddle/fluid/inference/capi/pd_predictor.cc b/paddle/fluid/inference/capi/pd_predictor.cc
index 39575a196e4f91..72f1b6c2771532 100644
--- a/paddle/fluid/inference/capi/pd_predictor.cc
+++ b/paddle/fluid/inference/capi/pd_predictor.cc
@@ -92,7 +92,7 @@ bool PD_PredictorRun(const PD_AnalysisConfig* config,
       config,
       paddle::platform::errors::InvalidArgument(
           "The pointer of analysis configuration shouldn't be nullptr"));
-  VLOG(3) << "Predoctor: PD_PredictorRun. ";
+  VLOG(3) << "Predictor: PD_PredictorRun. ";
   static std::map<std::string, std::unique_ptr<paddle::PaddlePredictor>>
       predictors;
   if (!predictors.count(config->config.model_dir())) {