4 changes: 2 additions & 2 deletions paddle/fluid/framework/details/build_strategy.h
@@ -141,8 +141,8 @@ struct BuildStrategy {
   // Fuse ResUnit
   bool fuse_resunit_{false};
   // mkldnn_enabled_op_types specify the operator type list to
-  // use MKLDNN acceleration. It is null in default, means
-  // that all the operators supported by MKLDNN will be
+  // use OneDNN acceleration. It is null in default, means
+  // that all the operators supported by OneDNN will be
   // accelerated. And it should not be set when
   // FLAGS_use_mkldnn=false
   std::unordered_set<std::string> mkldnn_enabled_op_types_;
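For context on the field documented in this hunk: it acts as an op-type allow-list. A minimal sketch of the documented semantics — the helper name is hypothetical, not a Paddle API:

```cpp
#include <string>
#include <unordered_set>

// Sketch of the documented semantics of mkldnn_enabled_op_types_: an
// empty set means every operator with a OneDNN kernel is accelerated;
// a non-empty set restricts acceleration to the listed op types.
// ShouldUseOneDNNFor is a hypothetical helper for illustration only.
bool ShouldUseOneDNNFor(
    const std::unordered_set<std::string>& enabled_op_types,
    const std::string& op_type) {
  return enabled_op_types.empty() || enabled_op_types.count(op_type) > 0;
}
```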
@@ -702,7 +702,7 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key,
                          &arguments);
   }
 #ifdef PADDLE_WITH_DNNL
-  // For input that is Extra, only MKLDNN will use Extra Inputs
+  // For input that is Extra, only OneDNN will use Extra Inputs
   auto& extra_input_names =
       paddle::operators::ExtraInfoUtils::Instance().GetExtraInputNamesMap(
           op_with_kernel->Type());
2 changes: 1 addition & 1 deletion paddle/fluid/framework/op_kernel_type.h
@@ -107,7 +107,7 @@ inline bool NeedTransformLayout(const DataLayout& l, const DataLayout& r) {
   bool ret =
       (l != DataLayout::kAnyLayout && r != DataLayout::kAnyLayout && l != r);
 #ifdef PADDLE_WITH_DNNL
-  // Layout transform needed for either non-MKLDNN to MKLDNN or vice versa
+  // Layout transform needed for either non-MKLDNN to OneDNN or vice versa
   ret |= (l != DataLayout::ONEDNN && r == DataLayout::ONEDNN);
   ret |= (l == DataLayout::ONEDNN && r != DataLayout::ONEDNN);
 #endif
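Read as a predicate, the patched function requires a transform when two concrete layouts differ, and always when exactly one side is ONEDNN. A standalone sketch of that logic, with a simplified enum standing in for Paddle's real DataLayout:

```cpp
#include <cassert>

// Simplified stand-in for phi::DataLayout; values are illustrative.
enum class DataLayout { kAnyLayout, kNCHW, kNHWC, ONEDNN };

// Mirrors NeedTransformLayout() as patched above: plain layouts must
// differ (and neither may be kAnyLayout), and crossing the OneDNN
// boundary in either direction always requires a transform.
bool NeedTransformLayout(DataLayout l, DataLayout r) {
  bool ret =
      (l != DataLayout::kAnyLayout && r != DataLayout::kAnyLayout && l != r);
  ret |= (l != DataLayout::ONEDNN && r == DataLayout::ONEDNN);
  ret |= (l == DataLayout::ONEDNN && r != DataLayout::ONEDNN);
  return ret;
}

int main() {
  assert(NeedTransformLayout(DataLayout::kNCHW, DataLayout::ONEDNN));  // into OneDNN
  assert(NeedTransformLayout(DataLayout::ONEDNN, DataLayout::kNHWC));  // out of OneDNN
  assert(!NeedTransformLayout(DataLayout::kNCHW, DataLayout::kNCHW));  // same layout
  assert(!NeedTransformLayout(DataLayout::kAnyLayout, DataLayout::kNCHW));
  return 0;
}
```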
28 changes: 14 additions & 14 deletions paddle/fluid/framework/operator.cc
@@ -504,7 +504,7 @@ void RuntimeInferShapeContext::ShareLoD(const std::string& in,
   // Workaround:
   // Skip set_layout() when input layout is kMKLDNN
   // This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
-  // OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
+  // OPKernel. In all OneDNN OPkernel, set_layout(kMKLDNN) should be called
   // in Compute()
   if (in_tensor.layout() != DataLayout::ONEDNN)
 #endif
@@ -1571,12 +1571,12 @@ bool OperatorWithKernel::SupportsKernelType(
   }
 #endif

-// NOTE(jiahongyu): If MKLDNN can be used, the function SupportsKernelType needs
-// to check whether current op supports MKLDNN kernel. There are three
+// NOTE(jiahongyu): If OneDNN can be used, the function SupportsKernelType needs
+// to check whether current op supports OneDNN kernel. There are three
 // statements in if condition:
-// 1. Whether mkldnn kernel fallbacks to plain kernel;
+// 1. Whether onednn kernel fallbacks to plain kernel;
 // 2. Whether this op has specific implementation;
-// 3. Whether mkldnn kernel can be used.
+// 3. Whether onednn kernel can be used.
 #ifdef PADDLE_WITH_DNNL
   if (!this->DnnFallback() && !paddle::platform::in_mkldnn_white_list(type_) &&
       this->CanMKLDNNBeUsed(exe_ctx, kernel_type.data_type_)) {
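The three numbered conditions in that NOTE recur throughout this PR and combine into a single guard. A minimal sketch of the combination, using hypothetical stubs in place of Paddle's real checks:

```cpp
#include <string>

// Hypothetical stubs for the three checks named in the NOTE above; the
// real code consults op state, a OneDNN white list, and the execution
// context. Return values here are placeholders.
static bool DnnFallback() { return false; }                          // 1. already fell back?
static bool InOneDNNWhiteList(const std::string&) { return false; }  // 2. op special-cased?
static bool CanOneDNNBeUsed() { return true; }                       // 3. OneDNN usable here?

// Mirrors the guard shape in SupportsKernelType and the other hunks:
// pick the OneDNN kernel only when all three conditions allow it.
bool ShouldPickOneDNNKernel(const std::string& op_type) {
  return !DnnFallback() && !InOneDNNWhiteList(op_type) && CanOneDNNBeUsed();
}
```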
@@ -1771,7 +1771,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
   // TODO(chenweihang): Now we are still reusing a lot of the original fluid
   // implementation, this is a gradual replacement process
   // TODO(chenweihang): in the first phase of project, we only support CPU, CUDA
-  // and RCOM backend, the XPU, NPU and MKLDNN will be supported in the second
+  // and RCOM backend, the XPU, NPU and OneDNN will be supported in the second
   // phase
   phi::KernelKey phi_kernel_key;
   std::string phi_kernel_name;
@@ -1846,13 +1846,13 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
     }
   } else {
     phi_kernel_name = kernel_signature_->name;
-// NOTE(jiahongyu): The registered MKLDNN kernel have library_type =
+// NOTE(jiahongyu): The registered OneDNN kernel have library_type =
 // LibraryType::kMKLDNN and data_layout_ = DataLayout::ONEDNN. But the default
 // values are kPlain, so we need to modify the library_type and data_layout_
 // here. There are three statements in if condition:
-// 1. Whether mkldnn kernel fallbacks to plain kernel;
+// 1. Whether onednn kernel fallbacks to plain kernel;
 // 2. Whether this op has specific implementation;
-// 3. Whether mkldnn kernel can be used.
+// 3. Whether onednn kernel can be used.
 #ifdef PADDLE_WITH_DNNL
     if (!this->DnnFallback() &&
         !paddle::platform::in_mkldnn_white_list(type_) &&
@@ -2121,7 +2121,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
   }

   if (FLAGS_enable_unused_var_check) {
-    // skip op that uses mkldnn because it has different memory reuse strategy.
+    // skip op that uses onednn because it has different memory reuse strategy.
     // use attr here because some GradMakers (like ActivationGradOpMaker) add
     // input when use_mkldnn=true;
     if (!(HasAttr("use_mkldnn") && Attr<bool>("use_mkldnn"))) {
@@ -2181,12 +2181,12 @@ OpKernelType OperatorWithKernel::InnerGetExpectedKernelType(
       framework::TransPhiKernelKeyToOpKernelType(phi_kernel_key);

 // NOTE(jiahongyu): PADDLE_WITH_DNNL codes are moved outside function
-// GetExpectedKernelType, so that if MKLDNN can be used, the library_type_ and
+// GetExpectedKernelType, so that if OneDNN can be used, the library_type_ and
 // data_layout_ of expected_kernel_key need to be adjusted. There are three
 // statements in if condition:
-// 1. Whether mkldnn kernel fallbacks to plain kernel;
+// 1. Whether onednn kernel fallbacks to plain kernel;
 // 2. Whether this op has specific implementation;
-// 3. Whether mkldnn kernel can be used.
+// 3. Whether onednn kernel can be used.
 #ifdef PADDLE_WITH_DNNL
   if (!this->DnnFallback() && !paddle::platform::in_mkldnn_white_list(type_) &&
       this->CanMKLDNNBeUsed(ctx, expected_kernel_key.data_type_)) {
@@ -2815,7 +2815,7 @@ Scope* OperatorWithKernel::PrepareData(
     prepare_input_data(input_name, &ins_vector, &in_def, should_skip_input);
   }
 #ifdef PADDLE_WITH_DNNL
-  // For input that is Extra, only MKLDNN will use Extra Inputs
+  // For input that is Extra, only OneDNN will use Extra Inputs
   auto& extra_input_names =
       paddle::operators::ExtraInfoUtils::Instance().GetExtraInputNamesMap(
           Type());
2 changes: 1 addition & 1 deletion paddle/fluid/framework/operator.h
@@ -921,7 +921,7 @@ class OperatorWithKernel : public OperatorBase {
   mutable std::mutex cache_update_mutex_;
   mutable bool enable_cache_transfer_scope_ = false;
   // NOTE(jiahongyu): Whether fallback to plain kernel after calling
-  // GetExpectedKernelType, use this bool flag to solve mkldnn and cudnn hard
+  // GetExpectedKernelType, use this bool flag to solve onednn and cudnn hard
   // code
   mutable bool dnn_fallback_ = false;
   // NOTE(chenweihang): Similar op members are used to adapt to
8 changes: 4 additions & 4 deletions paddle/fluid/imperative/prepared_operator.cc
@@ -166,7 +166,7 @@ PreparedOp PrepareImpl(
   auto* dev_ctx = pool.Get(place);

 #ifdef PADDLE_WITH_DNNL
-  // MKLDNN variant of code reads attributes in some of GetKernelTypeForVar and
+  // OneDNN variant of code reads attributes in some of GetKernelTypeForVar and
   // GetKernelType functions, so we need to copy the attributes there.
   // Const qualifier of Attrs had to be discarded to overwrite it.
   if (FLAGS_use_mkldnn) {
@@ -190,13 +190,13 @@ PreparedOp PrepareImpl(
   phi::KernelSignature kernel_signature;
   std::string phi_kernel_name;

-// NOTE(jiahongyu): The registered MKLDNN kernel have library_type =
+// NOTE(jiahongyu): The registered OneDNN kernel have library_type =
 // LibraryType::kMKLDNN and data_layout_ = DataLayout::ONEDNN. But the default
 // values are kPlain, so we need to modify the library_type and data_layout_
 // here. There are three statements in if condition:
-// 1. Whether mkldnn kernel fallbacks to plain kernel;
+// 1. Whether onednn kernel fallbacks to plain kernel;
 // 2. Whether this op has specific implementation;
-// 3. Whether mkldnn kernel can be used.
+// 3. Whether onednn kernel can be used.
 #ifdef PADDLE_WITH_DNNL
   if (!op.DnnFallback() && !paddle::platform::in_mkldnn_white_list(op.Type()) &&
       op.CanMKLDNNBeUsed(dygraph_exe_ctx, expected_kernel_key.dtype())) {
12 changes: 6 additions & 6 deletions paddle/fluid/inference/api/analysis_config.cc
@@ -505,7 +505,7 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
   CP_MEMBER(dlnne_precision_mode_);
   CP_MEMBER(dlnne_disable_nodes_by_outputs_);
   CP_MEMBER(dlnne_input_shape_dict_);
-  // MKLDNN related.
+  // OneDNN related.
   CP_MEMBER(use_mkldnn_);
   CP_MEMBER(mkldnn_enabled_op_types_);
   CP_MEMBER(mkldnn_cache_capacity_);
@@ -991,18 +991,18 @@ void AnalysisConfig::Update() {
 #ifdef PADDLE_WITH_DNNL
   // Since EnableMKLDNN is default, the pass_builder has created in the first
   // time.
-  // Case1: User manually disable mkldnn after pass_builder
+  // Case1: User manually disable onednn after pass_builder
   // create.(config.disable_mkldnn())
   // Case2: User device is gpu/ipu/xpu, use
   // EnableXpu(), EnableCUDNN(), PassStrategy has been reset in the above code
   // block
   // Case3: pass_builder_ has been created and belongs to
-  // GpuPassStrategy(or IpuPassStrategy), neither enable mkldnn and
-  // disable mkldnn will be executed
+  // GpuPassStrategy(or IpuPassStrategy), neither enable onednn and
+  // disable onednn will be executed
   if ((!use_gpu() && !use_xpu() && !use_ipu() && !use_mkldnn_) ||
       (use_mkldnn_ &&
        !phi::backends::cpu::MayIUse(phi::backends::cpu::cpu_isa_t::avx2))) {
-    // User manually disable mkldnn or disable when not support AVX2
+    // User manually disable onednn or disable when not support AVX2
     use_mkldnn_ = false;
     pass_builder()->DisableMKLDNN();
   }
@@ -1054,7 +1054,7 @@ void AnalysisConfig::Update() {
   if (!use_gpu() && !use_xpu() && !use_ipu()) {
     if (use_mkldnn_ && enable_ir_optim_) {
 #ifdef PADDLE_WITH_DNNL
-      // default enable mkldnn when device is cpu and enable_ir_optim
+      // default enable onednn when device is cpu and enable_ir_optim
       pass_builder()->EnableMKLDNN();
 #endif
     }
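For reference, the Update() branches above are driven by user-facing calls. A minimal usage sketch (the model path is a placeholder), restricted to APIs declared in paddle_analysis_config.h later in this diff:

```cpp
#include "paddle/fluid/inference/api/paddle_analysis_config.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("/path/to/model");  // placeholder path
  // CPU-only config: with IR optimization on, Update() enables the
  // OneDNN passes by default; DisableMKLDNN() would take the Case1
  // branch above instead.
  config.EnableMKLDNN();
  config.SetMkldnnCacheCapacity(10);  // cache at most 10 input shapes
  return config.mkldnn_enabled() ? 0 : 1;
}
```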
4 changes: 2 additions & 2 deletions paddle/fluid/inference/api/analysis_predictor.cc
@@ -408,7 +408,7 @@ bool AnalysisPredictor::Init(
     root_predictor_id_ = predictor_id_;
   }

-  // no matter with or without MKLDNN
+  // no matter with or without OneDNN
   paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

   // Use Optimized model to inference
@@ -781,7 +781,7 @@ bool AnalysisPredictor::PrepareProgram(
   executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

   // if enable_ir_optim_ is false,
-  // the analysis pass(op fuse, graph analysis, trt subgraph, mkldnn etc) will
+  // the analysis pass(op fuse, graph analysis, trt subgraph, onednn etc) will
   // not be executed.
   model_precision_ =
       paddle::inference::GetModelPrecision(*inference_program_);
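The thread count consumed at Init() above comes from the config. A small sketch of setting it from user code (the value 4 is arbitrary):

```cpp
#include "paddle/fluid/inference/api/paddle_analysis_config.h"

int main() {
  paddle::AnalysisConfig config;
  // Applied in AnalysisPredictor::Init() via platform::SetNumThreads(),
  // with or without OneDNN in the build.
  config.SetCpuMathLibraryNumThreads(4);
  return config.cpu_math_library_num_threads() == 4 ? 0 : 1;
}
```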
2 changes: 1 addition & 1 deletion paddle/fluid/inference/api/analysis_predictor.h
@@ -321,7 +321,7 @@ class AnalysisPredictor : public PaddlePredictor {
   void RegisterInputHook(const InputTensorHookFunc &hookfunc) override;

   ///
-  /// \brief Initialize mkldnn quantizer and execute mkldnn quantization pass
+  /// \brief Initialize onednn quantizer and execute onednn quantization pass
   ///
   /// \return Whether the function executed successfully
   ///
2 changes: 1 addition & 1 deletion paddle/fluid/inference/api/api_impl.cc
@@ -71,7 +71,7 @@ bool NativePaddlePredictor::Init(
     platform::EnableProfiler(tracking_device);
   }

-  // no matter with or without MKLDNN
+  // no matter with or without OneDNN
   paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

   if (config_.use_gpu) {
42 changes: 21 additions & 21 deletions paddle/fluid/inference/api/paddle_analysis_config.h
@@ -970,19 +970,19 @@ struct PD_INFER_DECL AnalysisConfig {
   void SwitchIrDebug(int x = true, const std::vector<std::string>& passes = {});

   ///
-  /// \brief Turn on MKLDNN.
+  /// \brief Turn on OneDNN.
   ///
   ///
   void EnableMKLDNN();

   ///
-  /// \brief Turn down MKLDNN.
+  /// \brief Turn down OneDNN.
   ///
   ///
   void DisableMKLDNN();

   ///
-  /// \brief Set the cache capacity of different input shapes for MKLDNN.
+  /// \brief Set the cache capacity of different input shapes for OneDNN.
   /// Default value 0 means not caching any shape.
   /// Please see MKL-DNN Data Caching Design Document:
   /// https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/mkldnn/caching/caching.md
@@ -991,9 +991,9 @@ struct PD_INFER_DECL AnalysisConfig {
   ///
   void SetMkldnnCacheCapacity(int capacity);
   ///
-  /// \brief A boolean state telling whether to use the MKLDNN.
+  /// \brief A boolean state telling whether to use the OneDNN.
   ///
-  /// \return bool Whether to use the MKLDNN.
+  /// \return bool Whether to use the OneDNN.
   ///
   bool mkldnn_enabled() const { return use_mkldnn_; }

@@ -1021,7 +1021,7 @@ struct PD_INFER_DECL AnalysisConfig {
   ///
   NativeConfig ToNativeConfig() const;
   ///
-  /// \brief Specify the operator type list to use MKLDNN acceleration.
+  /// \brief Specify the operator type list to use OneDNN acceleration.
   ///
   /// \param op_list The operator type list.
   ///
@@ -1030,47 +1030,47 @@ struct PD_INFER_DECL AnalysisConfig {
   }

   ///
-  /// \brief Turn on MKLDNN quantization.
+  /// \brief Turn on OneDNN quantization.
   ///
   ///
   void EnableMkldnnQuantizer();

   ///
-  /// \brief Turn on MKLDNN int8.
+  /// \brief Turn on OneDNN int8.
   ///
   /// \param op_list The operator type list.
   ///
   void EnableMkldnnInt8(const std::unordered_set<std::string>& op_list = {});

   ///
-  /// \brief A boolean state telling whether to use the MKLDNN Int8.
+  /// \brief A boolean state telling whether to use the OneDNN Int8.
   ///
-  /// \return bool Whether to use the MKLDNN Int8.
+  /// \return bool Whether to use the OneDNN Int8.
   ///
   bool mkldnn_int8_enabled() const { return use_mkldnn_int8_; }

   ///
-  /// \brief Turn on MKLDNN bfloat16.
+  /// \brief Turn on OneDNN bfloat16.
   ///
   ///
   void EnableMkldnnBfloat16();

   ///
-  /// \brief Turn off MKLDNN fc passes.
+  /// \brief Turn off OneDNN fc passes.
   ///
   void DisableMkldnnFcPasses();

   ///
-  /// \brief A boolean state telling whether to disable the MKLDNN Fc passes.
+  /// \brief A boolean state telling whether to disable the OneDNN Fc passes.
   ///
-  /// \return bool Whether to disable the MKLDNN Fc passes.
+  /// \return bool Whether to disable the OneDNN Fc passes.
   ///
   bool mkldnn_fc_passes_disabled() const { return disable_mkldnn_fc_passes_; }

   ///
-  /// \brief A boolean state telling whether to use the MKLDNN Bfloat16.
+  /// \brief A boolean state telling whether to use the OneDNN Bfloat16.
   ///
-  /// \return bool Whether to use the MKLDNN Bfloat16.
+  /// \return bool Whether to use the OneDNN Bfloat16.
   ///
   bool mkldnn_bfloat16_enabled() const { return use_mkldnn_bfloat16_; }

@@ -1091,16 +1091,16 @@ struct PD_INFER_DECL AnalysisConfig {
   bool thread_local_stream_enabled() const { return thread_local_stream_; }

   ///
-  /// \brief A boolean state telling whether the MKLDNN quantization is enabled.
+  /// \brief A boolean state telling whether the OneDNN quantization is enabled.
   ///
-  /// \return bool Whether the MKLDNN quantization is enabled.
+  /// \return bool Whether the OneDNN quantization is enabled.
   ///
   bool mkldnn_quantizer_enabled() const { return use_mkldnn_quantizer_; }

   ///
-  /// \brief Get MKLDNN quantizer config.
+  /// \brief Get OneDNN quantizer config.
   ///
-  /// \return MkldnnQuantizerConfig* MKLDNN quantizer config.
+  /// \return MkldnnQuantizerConfig* OneDNN quantizer config.
   ///
   MkldnnQuantizerConfig* mkldnn_quantizer_config() const;

@@ -1427,7 +1427,7 @@ struct PD_INFER_DECL AnalysisConfig {
   // NNAdapter related
   LiteNNAdapterConfig nnadapter_config_;

-  // mkldnn related.
+  // onednn related.
   int mkldnn_cache_capacity_{10};
   bool use_mkldnn_quantizer_{false};
   std::shared_ptr<MkldnnQuantizerConfig> mkldnn_quantizer_config_;
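Taken together, the renamed doc comments above cover the OneDNN tuning surface of AnalysisConfig. A brief sketch exercising the int8 and bfloat16 toggles declared in this header (placeholder model path; this combines both toggles only to demonstrate the API, and the effects still require a OneDNN-enabled build):

```cpp
#include "paddle/fluid/inference/api/paddle_analysis_config.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("/path/to/model");  // placeholder path

  config.EnableMKLDNN();
  config.EnableMkldnnBfloat16();              // bfloat16 passes on top of OneDNN
  config.EnableMkldnnInt8({"conv2d", "fc"});  // int8 restricted to listed ops

  return (config.mkldnn_bfloat16_enabled() && config.mkldnn_int8_enabled())
             ? 0
             : 1;
}
```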