@@ -754,7 +754,7 @@ std::string TensorRtSubgraphPass::CreateTensorRTOp(
   bool calibration_mode =
       (enable_int8 && calibration_data.empty() && use_calib_mode);
   if (calibration_mode) {
-    // calibraion mode means generate int8 calibration table data process.
+    // calibration mode means generate int8 calibration table data process.
     return calibration_engine_key;
   }
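For context, this branch is reached when int8 precision is requested with calibration enabled but no calibration table has been generated yet. Below is a minimal sketch of the user-side configuration that triggers it, assuming the C++ inference API of this era; the model path and sizing arguments are placeholders:

```cpp
#include "paddle_inference_api.h"  // header name may differ by install layout

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("./model_dir");          // placeholder model directory
  config.EnableUseGpu(100 /*MiB pool*/, 0 /*device id*/);
  // kInt8 + use_calib_mode=true + no existing calibration table
  // puts the TensorRT subgraph pass into the calibration branch above.
  config.EnableTensorRtEngine(1 << 30 /*workspace bytes*/,
                              1 /*max_batch_size*/,
                              3 /*min_subgraph_size*/,
                              paddle::AnalysisConfig::Precision::kInt8,
                              false /*use_static*/,
                              true /*use_calib_mode*/);
  auto predictor = paddle::CreatePaddlePredictor(config);
  return 0;
}
```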

8 changes: 4 additions & 4 deletions paddle/fluid/inference/api/paddle_analysis_config.h
@@ -253,7 +253,7 @@ struct PD_INFER_DECL AnalysisConfig {
   void SetModel(const std::string& model_dir) { model_dir_ = model_dir; }

   ///
-  /// \brief Set the combined model with two specific pathes for program and
+  /// \brief Set the combined model with two specific paths for program and
   /// parameters.
   ///
   /// \param prog_file_path model file path of the combined model.
@@ -596,12 +596,12 @@ struct PD_INFER_DECL AnalysisConfig {
   /// \brief Control whether to perform IR graph optimization.
   /// If turned off, the AnalysisConfig will act just like a NativeConfig.
   ///
-  /// \param x Whether the ir graph optimization is actived.
+  /// \param x Whether the ir graph optimization is activated.
   ///
   void SwitchIrOptim(int x = true) { enable_ir_optim_ = x; }
   ///
   /// \brief A boolean state telling whether the ir graph optimization is
-  /// actived.
+  /// activated.
   ///
   /// \return bool Whether to use ir graph optimization.
   ///
@@ -1213,7 +1213,7 @@ struct PD_INFER_DECL AnalysisConfig {
   std::string SerializeInfoCache();

  protected:
-  // Model pathes.
+  // Model paths.
   std::string model_dir_;
   mutable std::string prog_file_;
   mutable std::string params_file_;
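Taken together, the corrected comments describe the two entry points touched in this file. A brief usage sketch; the file names are placeholders:

```cpp
paddle::AnalysisConfig config;
// Combined model: one program file plus one parameters file, matching the
// two-path SetModel overload documented above.
config.SetModel("./inference.pdmodel", "./inference.pdiparams");
// IR graph optimization on; SwitchIrOptim(false) would make the config
// behave like a NativeConfig, as the doc comment notes.
config.SwitchIrOptim(true);
bool ir_on = config.ir_optim();  // query the boolean state back
```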
10 changes: 5 additions & 5 deletions paddle/fluid/inference/api/resource_manager.cc
@@ -191,7 +191,7 @@ void GPUContextResource::InitGpuEigenDevice() {
   gpu_eigen_device_ = std::make_unique<Eigen::GpuDevice>(eigen_stream_.get());
 }

-void GPUContextResource::InitDnnHanlde() {
+void GPUContextResource::InitDnnHandle() {
   phi::InitDnnHandle(&dnn_handle_, stream_, place_);
 }

@@ -237,7 +237,7 @@ dnnHandle_t GPUContextResource::GetDnnHandle() const { return dnn_handle_; }

 std::function<phi::dnnHandle_t()> GPUContextResource::GetDnnHandleCreator() {
   return [&]() -> phi::dnnHandle_t {
-    InitDnnHanlde();
+    InitDnnHandle();
     return dnn_handle_;
   };
 }
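GetDnnHandleCreator hands back a callable rather than a handle, so the DNN handle is only created when the consuming context first asks for it. A self-contained sketch of this lazy-creator idiom, with simplified stand-in types rather than the real phi API:

```cpp
#include <functional>

struct Handle { int id = 0; };  // stand-in for phi::dnnHandle_t

class GpuResource {
 public:
  // Returns a callable that initializes the handle on demand; construction
  // cost is deferred until the consumer actually invokes the creator.
  // Usage: auto creator = resource.GetHandleCreator(); Handle h = creator();
  std::function<Handle()> GetHandleCreator() {
    return [this]() -> Handle {
      InitHandle();
      return handle_;
    };
  }

 private:
  void InitHandle() { handle_ = Handle{42}; }  // stands in for phi::InitDnnHandle
  Handle handle_;
};
```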
Expand Down Expand Up @@ -367,7 +367,7 @@ ResourceManager& ResourceManager::Instance() {
}

void ResourceManager::InitCPUResource() {
std::lock_guard<std::mutex> lock_gurad(cpu_mutex_);
std::lock_guard<std::mutex> lock_guard(cpu_mutex_);
if (cpu_resource_ == nullptr) {
cpu_resource_ = std::make_unique<CPUContextResource>();
}
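The renamed lock_guard variable is the usual RAII guard for this check-then-create pattern. A minimal generic sketch, with the member declarations filled in for completeness (assumed, since the class body is not shown in this diff):

```cpp
#include <memory>
#include <mutex>

struct CPUContextResource {};  // simplified placeholder

class Manager {
 public:
  void InitCPUResource() {
    // The guard releases cpu_mutex_ at scope exit, so only one thread at a
    // time runs the null check and at most one constructs the resource.
    std::lock_guard<std::mutex> lock_guard(cpu_mutex_);
    if (cpu_resource_ == nullptr) {
      cpu_resource_ = std::make_unique<CPUContextResource>();
    }
  }

 private:
  std::mutex cpu_mutex_;
  std::unique_ptr<CPUContextResource> cpu_resource_;
};
```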
@@ -382,7 +382,7 @@ CPUContextResource* ResourceManager::GetCPUResource() const {

 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 void* ResourceManager::InitGPUResource(const phi::Place& place, void* stream) {
-  std::lock_guard<std::mutex> lock_gurad(gpu_mutex_);
+  std::lock_guard<std::mutex> lock_guard(gpu_mutex_);
   if (gpu_resources_.count(stream)) {
     Increase(stream);
     return stream;
@@ -427,7 +427,7 @@ GPUContextResource* ResourceManager::GetGPUResource(void* stream) const {
 void ResourceManager::GpuResourceSwitchStream(void* old_stream,
                                               void* new_stream) {
   // NOTE: add lock to support stream rebind in multi-thread
-  std::lock_guard<std::mutex> lock_gurad(gpu_mutex_);
+  std::lock_guard<std::mutex> lock_guard(gpu_mutex_);
   if (old_stream == new_stream) return;
   PADDLE_ENFORCE_EQ(
       gpu_resources_.count(old_stream),
2 changes: 1 addition & 1 deletion paddle/fluid/inference/api/resource_manager.h
@@ -88,7 +88,7 @@ class GPUContextResource {
   void DestroyGPUResource();
   void InitGpuProperties();
   void InitGpuEigenDevice();
-  void InitDnnHanlde();
+  void InitDnnHandle();
   void DestroyDnnHandle();
   void DestroyBlasHandle();
   void InitBlasLtHandle();
4 changes: 2 additions & 2 deletions paddle/fluid/inference/capi/pd_config.cc
@@ -275,7 +275,7 @@ void PD_EnableDlnne(
     int max_batch_size,
     bool use_static_batch,
     std::string weight_share_mode,
-    std::unordered_set<std::string> disable_nodes_by_ouputs,
+    std::unordered_set<std::string> disable_nodes_by_outputs,
     std::map<std::string, std::vector<int64_t>> dlnne_input_shape_dict,
     bool use_calib_mode,
     PD_ACPrecision precision_mode) {
@@ -287,7 +287,7 @@ void PD_EnableDlnne(
       max_batch_size,
       use_static_batch,
       weight_share_mode,
-      disable_nodes_by_ouputs,
+      disable_nodes_by_outputs,
       dlnne_input_shape_dict,
       use_calib_mode,
       precision_mode);
2 changes: 1 addition & 1 deletion paddle/fluid/inference/capi/pd_predictor.cc
@@ -92,7 +92,7 @@ bool PD_PredictorRun(const PD_AnalysisConfig* config,
       config,
       paddle::platform::errors::InvalidArgument(
           "The pointer of analysis configuration shouldn't be nullptr"));
-  VLOG(3) << "Predoctor: PD_PredictorRun. ";
+  VLOG(3) << "Predictor: PD_PredictorRun. ";
   static std::map<std::string, std::unique_ptr<paddle::PaddlePredictor>>
       predictors;
   if (!predictors.count(config->config.model_dir())) {
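For context, PD_PredictorRun caches one predictor per model directory in a function-local static map, so repeated C-API calls on the same model reuse a single predictor. A simplified sketch of that caching shape; the types and the CreatePredictor factory here are stand-ins, not the real API:

```cpp
#include <map>
#include <memory>
#include <string>

struct Predictor {};  // stand-in for paddle::PaddlePredictor

// Hypothetical factory; the real code builds a predictor from the analysis config.
std::unique_ptr<Predictor> CreatePredictor(const std::string& /*model_dir*/) {
  return std::make_unique<Predictor>();
}

bool Run(const std::string& model_dir) {
  // Function-local static: constructed once, shared across calls, keyed by
  // model directory. Note: like the original, this cache is not mutex-guarded.
  static std::map<std::string, std::unique_ptr<Predictor>> predictors;
  if (!predictors.count(model_dir)) {
    predictors[model_dir] = CreatePredictor(model_dir);
  }
  // ... feed inputs and run predictors[model_dir] ...
  return true;
}
```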