From 19a59d37491a9059e96f5b3695efa7961bfc4a1f Mon Sep 17 00:00:00 2001 From: co63oc Date: Thu, 4 Apr 2024 16:57:57 +0800 Subject: [PATCH] Fix, test=document_fix --- .../fluid/framework/details/build_strategy.h | 4 +- .../new_executor/interpreter/data_transfer.cc | 2 +- paddle/fluid/framework/op_kernel_type.h | 2 +- paddle/fluid/framework/operator.cc | 28 ++++++------- paddle/fluid/framework/operator.h | 2 +- paddle/fluid/imperative/prepared_operator.cc | 8 ++-- paddle/fluid/inference/api/analysis_config.cc | 12 +++--- .../fluid/inference/api/analysis_predictor.cc | 4 +- .../fluid/inference/api/analysis_predictor.h | 2 +- paddle/fluid/inference/api/api_impl.cc | 2 +- .../inference/api/paddle_analysis_config.h | 42 +++++++++---------- .../api/paddle_mkldnn_quantizer_config.h | 2 +- .../fluid/inference/api/paddle_pass_builder.h | 30 ++++++------- paddle/fluid/inference/capi_exp/pd_config.h | 28 ++++++------- .../com_baidu_paddle_inference_Config.cpp | 2 +- .../com/baidu/paddle/inference/Config.java | 2 +- paddle/fluid/inference/goapi/config.go | 22 +++++----- paddle/fluid/platform/mkldnn_helper.h | 2 +- paddle/phi/api/lib/backend_set.h | 2 +- paddle/phi/backends/onednn/onednn_context.cc | 6 +-- paddle/phi/backends/onednn/onednn_context.h | 10 ++--- paddle/phi/core/dense_tensor.h | 2 +- paddle/phi/core/tensor_meta.h | 2 +- paddle/phi/infermeta/fusion.cc | 2 +- paddle/phi/kernels/cpu/log_softmax_kernel.cc | 2 +- paddle/phi/kernels/funcs/jit/README.en.md | 2 +- .../phi/kernels/onednn/batch_norm_kernel.cc | 2 +- paddle/phi/kernels/onednn/conv_grad_kernel.cc | 2 +- paddle/phi/kernels/transfer_layout_kernel.cc | 6 +-- test/cpp/inference/api/analyzer_dam_tester.cc | 4 +- .../analyzer_lexical_analysis_gru_tester.cc | 2 +- test/cpp/inference/infer_ut/run.sh | 2 +- .../inference/infer_ut/test_ppyolo_mbv3.cc | 2 +- test/cpp/inference/test_helper.h | 2 +- test/ir/inference/inference_pass_test.py | 2 +- test/ir/inference/quant_dequant_test.py | 2 +- ...st_mkldnn_conv_affine_channel_fuse_pass.py | 2 +- test/legacy_test/op_test.py | 2 +- test/legacy_test/test_conv2d_op.py | 16 +++---- test/legacy_test/test_conv2d_transpose_op.py | 2 +- test/legacy_test/test_conv3d_op.py | 8 ++-- test/legacy_test/test_elementwise_add_op.py | 10 ++--- test/legacy_test/test_elementwise_mul_op.py | 16 +++---- test/legacy_test/test_pool2d_op.py | 12 +++--- test/legacy_test/test_softmax_op.py | 8 ++-- test/mkldnn/test_concat_int8_mkldnn_op.py | 2 +- test/mkldnn/test_conv2d_int8_mkldnn_op.py | 2 +- test/mkldnn/test_conv2d_mkldnn_op.py | 4 +- .../mkldnn/test_conv2d_transpose_mkldnn_op.py | 2 +- test/mkldnn/test_dequantize_mkldnn_op.py | 2 +- test/mkldnn/test_elementwise_add_mkldnn_op.py | 2 +- test/mkldnn/test_elementwise_mul_onednn_op.py | 2 +- test/mkldnn/test_fc_mkldnn_op.py | 2 +- test/mkldnn/test_lrn_mkldnn_op.py | 4 +- test/mkldnn/test_mul_int8_mkldnn_op.py | 2 +- test/mkldnn/test_pool2d_int8_mkldnn_op.py | 2 +- test/mkldnn/test_quantize_mkldnn_op.py | 2 +- test/mkldnn/test_requantize_mkldnn_op.py | 2 +- test/mkldnn/test_softmax_mkldnn_op.py | 4 +- test/mkldnn/test_sum_mkldnn_op.py | 4 +- test/mkldnn/test_transpose_int8_mkldnn_op.py | 2 +- test/mkldnn/test_transpose_mkldnn_op.py | 4 +- test/xpu/test_conv2d_op_xpu.py | 8 ++-- test/xpu/test_conv3d_op_xpu.py | 6 +-- tools/remove_grad_op_and_kernel.py | 2 +- 65 files changed, 193 insertions(+), 193 deletions(-) diff --git a/paddle/fluid/framework/details/build_strategy.h b/paddle/fluid/framework/details/build_strategy.h index e954fd6a7a348b..c0c7e6765b4dc6 100644 --- 
a/paddle/fluid/framework/details/build_strategy.h +++ b/paddle/fluid/framework/details/build_strategy.h @@ -141,8 +141,8 @@ struct BuildStrategy { // Fuse ResUnit bool fuse_resunit_{false}; // mkldnn_enabled_op_types specify the operator type list to - // use MKLDNN acceleration. It is null in default, means - // that all the operators supported by MKLDNN will be + // use OneDNN acceleration. It is null in default, means + // that all the operators supported by OneDNN will be // accelerated. And it should not be set when // FLAGS_use_mkldnn=false std::unordered_set mkldnn_enabled_op_types_; diff --git a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc index 3bc5893a162b33..00b5410247ddcc 100644 --- a/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc +++ b/paddle/fluid/framework/new_executor/interpreter/data_transfer.cc @@ -702,7 +702,7 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key, &arguments); } #ifdef PADDLE_WITH_DNNL - // For input that is Extra, only MKLDNN will use Extra Inputs + // For input that is Extra, only OneDNN will use Extra Inputs auto& extra_input_names = paddle::operators::ExtraInfoUtils::Instance().GetExtraInputNamesMap( op_with_kernel->Type()); diff --git a/paddle/fluid/framework/op_kernel_type.h b/paddle/fluid/framework/op_kernel_type.h index ce0a138eb1a6a5..4839592aa43b7a 100644 --- a/paddle/fluid/framework/op_kernel_type.h +++ b/paddle/fluid/framework/op_kernel_type.h @@ -107,7 +107,7 @@ inline bool NeedTransformLayout(const DataLayout& l, const DataLayout& r) { bool ret = (l != DataLayout::kAnyLayout && r != DataLayout::kAnyLayout && l != r); #ifdef PADDLE_WITH_DNNL - // Layout transform needed for either non-MKLDNN to MKLDNN or vice versa + // Layout transform needed for either non-MKLDNN to OneDNN or vice versa ret |= (l != DataLayout::ONEDNN && r == DataLayout::ONEDNN); ret |= (l == DataLayout::ONEDNN && r != DataLayout::ONEDNN); #endif diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index fe10a16375f340..32093f3383f026 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -504,7 +504,7 @@ void RuntimeInferShapeContext::ShareLoD(const std::string& in, // Workaround: // Skip set_layout() when input layout is kMKLDNN // This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN - // OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called + // OPKernel. In all OneDNN OPkernel, set_layout(kMKLDNN) should be called // in Compute() if (in_tensor.layout() != DataLayout::ONEDNN) #endif @@ -1571,12 +1571,12 @@ bool OperatorWithKernel::SupportsKernelType( } #endif -// NOTE(jiahongyu): If MKLDNN can be used, the function SupportsKernelType needs -// to check whether current op supports MKLDNN kernel. There are three +// NOTE(jiahongyu): If OneDNN can be used, the function SupportsKernelType needs +// to check whether current op supports OneDNN kernel. There are three // statements in if condition: -// 1. Whether mkldnn kernel fallbacks to plain kernel; +// 1. Whether onednn kernel fallbacks to plain kernel; // 2. Whether this op has specific implementation; -// 3. Whether mkldnn kernel can be used. +// 3. Whether onednn kernel can be used. 
#ifdef PADDLE_WITH_DNNL if (!this->DnnFallback() && !paddle::platform::in_mkldnn_white_list(type_) && this->CanMKLDNNBeUsed(exe_ctx, kernel_type.data_type_)) { @@ -1771,7 +1771,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope, // TODO(chenweihang): Now we are still reusing a lot of the original fluid // implementation, this is a gradual replacement process // TODO(chenweihang): in the first phase of project, we only support CPU, CUDA - // and RCOM backend, the XPU, NPU and MKLDNN will be supported in the second + // and RCOM backend, the XPU, NPU and OneDNN will be supported in the second // phase phi::KernelKey phi_kernel_key; std::string phi_kernel_name; @@ -1846,13 +1846,13 @@ void OperatorWithKernel::RunImpl(const Scope& scope, } } else { phi_kernel_name = kernel_signature_->name; -// NOTE(jiahongyu): The registered MKLDNN kernel have library_type = +// NOTE(jiahongyu): The registered OneDNN kernel have library_type = // LibraryType::kMKLDNN and data_layout_ = DataLayout::ONEDNN. But the default // values are kPlain, so we need to modify the library_type and data_layout_ // here. There are three statements in if condition: -// 1. Whether mkldnn kernel fallbacks to plain kernel; +// 1. Whether onednn kernel fallbacks to plain kernel; // 2. Whether this op has specific implementation; -// 3. Whether mkldnn kernel can be used. +// 3. Whether onednn kernel can be used. #ifdef PADDLE_WITH_DNNL if (!this->DnnFallback() && !paddle::platform::in_mkldnn_white_list(type_) && @@ -2121,7 +2121,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope, } if (FLAGS_enable_unused_var_check) { - // skip op that uses mkldnn because it has different memory reuse strategy. + // skip op that uses onednn because it has different memory reuse strategy. // use attr here because some GradMakers (like ActivationGradOpMaker) add // input when use_mkldnn=true; if (!(HasAttr("use_mkldnn") && Attr("use_mkldnn"))) { @@ -2181,12 +2181,12 @@ OpKernelType OperatorWithKernel::InnerGetExpectedKernelType( framework::TransPhiKernelKeyToOpKernelType(phi_kernel_key); // NOTE(jiahongyu): PADDLE_WITH_DNNL codes are moved outside function -// GetExpectedKernelType, so that if MKLDNN can be used, the library_type_ and +// GetExpectedKernelType, so that if OneDNN can be used, the library_type_ and // data_layout_ of expected_kernel_key need to be adjusted. There are three // statements in if condition: -// 1. Whether mkldnn kernel fallbacks to plain kernel; +// 1. Whether onednn kernel fallbacks to plain kernel; // 2. Whether this op has specific implementation; -// 3. Whether mkldnn kernel can be used. +// 3. Whether onednn kernel can be used. 
#ifdef PADDLE_WITH_DNNL if (!this->DnnFallback() && !paddle::platform::in_mkldnn_white_list(type_) && this->CanMKLDNNBeUsed(ctx, expected_kernel_key.data_type_)) { @@ -2815,7 +2815,7 @@ Scope* OperatorWithKernel::PrepareData( prepare_input_data(input_name, &ins_vector, &in_def, should_skip_input); } #ifdef PADDLE_WITH_DNNL - // For input that is Extra, only MKLDNN will use Extra Inputs + // For input that is Extra, only OneDNN will use Extra Inputs auto& extra_input_names = paddle::operators::ExtraInfoUtils::Instance().GetExtraInputNamesMap( Type()); diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index 3ad9ec6c9d6983..dc025998cc0991 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -921,7 +921,7 @@ class OperatorWithKernel : public OperatorBase { mutable std::mutex cache_update_mutex_; mutable bool enable_cache_transfer_scope_ = false; // NOTE(jiahongyu): Whether fallback to plain kernel after calling - // GetExpectedKernelType, use this bool flag to solve mkldnn and cudnn hard + // GetExpectedKernelType, use this bool flag to solve onednn and cudnn hard // code mutable bool dnn_fallback_ = false; // NOTE(chenweihang): Similar op members are used to adapt to diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc index a60c81a4c22d9e..79ccc4465a0ca9 100644 --- a/paddle/fluid/imperative/prepared_operator.cc +++ b/paddle/fluid/imperative/prepared_operator.cc @@ -166,7 +166,7 @@ PreparedOp PrepareImpl( auto* dev_ctx = pool.Get(place); #ifdef PADDLE_WITH_DNNL - // MKLDNN variant of code reads attributes in some of GetKernelTypeForVar and + // OneDNN variant of code reads attributes in some of GetKernelTypeForVar and // GetKernelType functions, so we need to copy the attributes there. // Const qualifier of Attrs had to be discarded to overwrite it. if (FLAGS_use_mkldnn) { @@ -190,13 +190,13 @@ PreparedOp PrepareImpl( phi::KernelSignature kernel_signature; std::string phi_kernel_name; -// NOTE(jiahongyu): The registered MKLDNN kernel have library_type = +// NOTE(jiahongyu): The registered OneDNN kernel have library_type = // LibraryType::kMKLDNN and data_layout_ = DataLayout::ONEDNN. But the default // values are kPlain, so we need to modify the library_type and data_layout_ // here. There are three statements in if condition: -// 1. Whether mkldnn kernel fallbacks to plain kernel; +// 1. Whether onednn kernel fallbacks to plain kernel; // 2. Whether this op has specific implementation; -// 3. Whether mkldnn kernel can be used. +// 3. Whether onednn kernel can be used. #ifdef PADDLE_WITH_DNNL if (!op.DnnFallback() && !paddle::platform::in_mkldnn_white_list(op.Type()) && op.CanMKLDNNBeUsed(dygraph_exe_ctx, expected_kernel_key.dtype())) { diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index efe7b83f7df168..b8570fa05e7c47 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -505,7 +505,7 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) { CP_MEMBER(dlnne_precision_mode_); CP_MEMBER(dlnne_disable_nodes_by_outputs_); CP_MEMBER(dlnne_input_shape_dict_); - // MKLDNN related. + // OneDNN related. 
CP_MEMBER(use_mkldnn_); CP_MEMBER(mkldnn_enabled_op_types_); CP_MEMBER(mkldnn_cache_capacity_); @@ -991,18 +991,18 @@ void AnalysisConfig::Update() { #ifdef PADDLE_WITH_DNNL // Since EnableMKLDNN is default, the pass_builder has created in the first // time. - // Case1: User manually disable mkldnn after pass_builder + // Case1: User manually disable onednn after pass_builder // create.(config.disable_mkldnn()) // Case2: User device is gpu/ipu/xpu, use // EnableXpu(), EnableCUDNN(), PassStrategy has been reset in the above code // block // Case3: pass_builder_ has been created and belongs to - // GpuPassStrategy(or IpuPassStrategy), neither enable mkldnn and - // disable mkldnn will be executed + // GpuPassStrategy(or IpuPassStrategy), neither enable onednn and + // disable onednn will be executed if ((!use_gpu() && !use_xpu() && !use_ipu() && !use_mkldnn_) || (use_mkldnn_ && !phi::backends::cpu::MayIUse(phi::backends::cpu::cpu_isa_t::avx2))) { - // User manually disable mkldnn or disable when not support AVX2 + // User manually disable onednn or disable when not support AVX2 use_mkldnn_ = false; pass_builder()->DisableMKLDNN(); } @@ -1054,7 +1054,7 @@ void AnalysisConfig::Update() { if (!use_gpu() && !use_xpu() && !use_ipu()) { if (use_mkldnn_ && enable_ir_optim_) { #ifdef PADDLE_WITH_DNNL - // default enable mkldnn when device is cpu and enable_ir_optim + // default enable onednn when device is cpu and enable_ir_optim pass_builder()->EnableMKLDNN(); #endif } diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index a0a61c034d831b..5c82e249db42d7 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -408,7 +408,7 @@ bool AnalysisPredictor::Init( root_predictor_id_ = predictor_id_; } - // no matter with or without MKLDNN + // no matter with or without OneDNN paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads()); // Use Optimized model to inference @@ -781,7 +781,7 @@ bool AnalysisPredictor::PrepareProgram( executor_->CreateVariables(*inference_program_, 0, true, sub_scope_); // if enable_ir_optim_ is false, - // the analysis pass(op fuse, graph analysis, trt subgraph, mkldnn etc) will + // the analysis pass(op fuse, graph analysis, trt subgraph, onednn etc) will // not be executed. 
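As a concrete illustration of the OneDNN switches this patch touches in paddle_analysis_config.h and analysis_config.cc, the following minimal C++ sketch shows how they are typically driven from user code. It uses only calls that appear in this patch (EnableMKLDNN, DisableMKLDNN, SetMkldnnCacheCapacity, SetMKLDNNOp, mkldnn_enabled); the function name, the chosen cache capacity, and the op set are illustrative assumptions, and model loading / predictor creation are omitted.

#include <string>
#include <unordered_set>
#include "paddle/fluid/inference/api/paddle_analysis_config.h"

// Sketch only: drive the OneDNN knobs documented in paddle_analysis_config.h.
void ConfigureOneDnnCpu(paddle::AnalysisConfig* config) {
  config->EnableMKLDNN();              // turn OneDNN on for CPU kernels
  config->SetMkldnnCacheCapacity(10);  // cache up to 10 input shapes (0 = no caching)
  std::unordered_set<std::string> ops = {"conv2d", "fc", "softmax"};
  config->SetMKLDNNOp(ops);            // restrict OneDNN acceleration to these op types
  // Per the Update() logic above, use_mkldnn_ is dropped again on CPUs without
  // AVX2 or when a GPU/XPU/IPU target is configured, so re-check the flag.
  const bool onednn_active = config->mkldnn_enabled();
  // onednn_active is false when OneDNN was rejected; plain CPU kernels run then.
  (void)onednn_active;
}

The C API and Go bindings changed later in this patch expose the same knobs one-to-one (PD_ConfigEnableMKLDNN / PD_ConfigSetMkldnnCacheCapacity and Config.EnableMKLDNN / Config.SetMkldnnCacheCapacity, respectively).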
model_precision_ = paddle::inference::GetModelPrecision(*inference_program_); diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h index fe494cab93a90b..d44ad5cec1a905 100644 --- a/paddle/fluid/inference/api/analysis_predictor.h +++ b/paddle/fluid/inference/api/analysis_predictor.h @@ -321,7 +321,7 @@ class AnalysisPredictor : public PaddlePredictor { void RegisterInputHook(const InputTensorHookFunc &hookfunc) override; /// - /// \brief Initialize mkldnn quantizer and execute mkldnn quantization pass + /// \brief Initialize onednn quantizer and execute onednn quantization pass /// /// \return Whether the function executed successfully /// diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc index 1ae582feb4acfa..9ae284402f196e 100644 --- a/paddle/fluid/inference/api/api_impl.cc +++ b/paddle/fluid/inference/api/api_impl.cc @@ -71,7 +71,7 @@ bool NativePaddlePredictor::Init( platform::EnableProfiler(tracking_device); } - // no matter with or without MKLDNN + // no matter with or without OneDNN paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads()); if (config_.use_gpu) { diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h index dcf17dc4399c23..34db3504d35453 100644 --- a/paddle/fluid/inference/api/paddle_analysis_config.h +++ b/paddle/fluid/inference/api/paddle_analysis_config.h @@ -970,19 +970,19 @@ struct PD_INFER_DECL AnalysisConfig { void SwitchIrDebug(int x = true, const std::vector& passes = {}); /// - /// \brief Turn on MKLDNN. + /// \brief Turn on OneDNN. /// /// void EnableMKLDNN(); /// - /// \brief Turn down MKLDNN. + /// \brief Turn down OneDNN. /// /// void DisableMKLDNN(); /// - /// \brief Set the cache capacity of different input shapes for MKLDNN. + /// \brief Set the cache capacity of different input shapes for OneDNN. /// Default value 0 means not caching any shape. /// Please see MKL-DNN Data Caching Design Document: /// https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/mkldnn/caching/caching.md @@ -991,9 +991,9 @@ struct PD_INFER_DECL AnalysisConfig { /// void SetMkldnnCacheCapacity(int capacity); /// - /// \brief A boolean state telling whether to use the MKLDNN. + /// \brief A boolean state telling whether to use the OneDNN. /// - /// \return bool Whether to use the MKLDNN. + /// \return bool Whether to use the OneDNN. /// bool mkldnn_enabled() const { return use_mkldnn_; } @@ -1021,7 +1021,7 @@ struct PD_INFER_DECL AnalysisConfig { /// NativeConfig ToNativeConfig() const; /// - /// \brief Specify the operator type list to use MKLDNN acceleration. + /// \brief Specify the operator type list to use OneDNN acceleration. /// /// \param op_list The operator type list. /// @@ -1030,47 +1030,47 @@ struct PD_INFER_DECL AnalysisConfig { } /// - /// \brief Turn on MKLDNN quantization. + /// \brief Turn on OneDNN quantization. /// /// void EnableMkldnnQuantizer(); /// - /// \brief Turn on MKLDNN int8. + /// \brief Turn on OneDNN int8. /// /// \param op_list The operator type list. /// void EnableMkldnnInt8(const std::unordered_set& op_list = {}); /// - /// \brief A boolean state telling whether to use the MKLDNN Int8. + /// \brief A boolean state telling whether to use the OneDNN Int8. /// - /// \return bool Whether to use the MKLDNN Int8. + /// \return bool Whether to use the OneDNN Int8. 
/// bool mkldnn_int8_enabled() const { return use_mkldnn_int8_; } /// - /// \brief Turn on MKLDNN bfloat16. + /// \brief Turn on OneDNN bfloat16. /// /// void EnableMkldnnBfloat16(); /// - /// \brief Turn off MKLDNN fc passes. + /// \brief Turn off OneDNN fc passes. /// void DisableMkldnnFcPasses(); /// - /// \brief A boolean state telling whether to disable the MKLDNN Fc passes. + /// \brief A boolean state telling whether to disable the OneDNN Fc passes. /// - /// \return bool Whether to disable the MKLDNN Fc passes. + /// \return bool Whether to disable the OneDNN Fc passes. /// bool mkldnn_fc_passes_disabled() const { return disable_mkldnn_fc_passes_; } /// - /// \brief A boolean state telling whether to use the MKLDNN Bfloat16. + /// \brief A boolean state telling whether to use the OneDNN Bfloat16. /// - /// \return bool Whether to use the MKLDNN Bfloat16. + /// \return bool Whether to use the OneDNN Bfloat16. /// bool mkldnn_bfloat16_enabled() const { return use_mkldnn_bfloat16_; } @@ -1091,16 +1091,16 @@ struct PD_INFER_DECL AnalysisConfig { bool thread_local_stream_enabled() const { return thread_local_stream_; } /// - /// \brief A boolean state telling whether the MKLDNN quantization is enabled. + /// \brief A boolean state telling whether the OneDNN quantization is enabled. /// - /// \return bool Whether the MKLDNN quantization is enabled. + /// \return bool Whether the OneDNN quantization is enabled. /// bool mkldnn_quantizer_enabled() const { return use_mkldnn_quantizer_; } /// - /// \brief Get MKLDNN quantizer config. + /// \brief Get OneDNN quantizer config. /// - /// \return MkldnnQuantizerConfig* MKLDNN quantizer config. + /// \return MkldnnQuantizerConfig* OneDNN quantizer config. /// MkldnnQuantizerConfig* mkldnn_quantizer_config() const; @@ -1427,7 +1427,7 @@ struct PD_INFER_DECL AnalysisConfig { // NNAdapter related LiteNNAdapterConfig nnadapter_config_; - // mkldnn related. + // onednn related. int mkldnn_cache_capacity_{10}; bool use_mkldnn_quantizer_{false}; std::shared_ptr mkldnn_quantizer_config_; diff --git a/paddle/fluid/inference/api/paddle_mkldnn_quantizer_config.h b/paddle/fluid/inference/api/paddle_mkldnn_quantizer_config.h index 1208c29c79a9cc..c44f7a3e0d0495 100644 --- a/paddle/fluid/inference/api/paddle_mkldnn_quantizer_config.h +++ b/paddle/fluid/inference/api/paddle_mkldnn_quantizer_config.h @@ -53,7 +53,7 @@ enum class ScaleAlgo { /// /// \class MkldnnQuantizerConfig /// -/// \brief Config for mkldnn quantize. +/// \brief Config for onednn quantize. /// /// The MkldnnQuantizerConfig is used to configure Mkldnn's quantization /// parameters, including scale algorithm, warmup data, warmup batch size, diff --git a/paddle/fluid/inference/api/paddle_pass_builder.h b/paddle/fluid/inference/api/paddle_pass_builder.h index 79ef68c853cfb7..9c8ada8699829f 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.h +++ b/paddle/fluid/inference/api/paddle_pass_builder.h @@ -139,24 +139,24 @@ class PD_INFER_DECL PassStrategy : public PaddlePassBuilder { /// \brief Enable the use of cuDNN kernel. virtual void EnableCUDNN() {} - /// \brief Enable the use of MKLDNN. - /// The MKLDNN control exists in both CPU and GPU mode, because there can + /// \brief Enable the use of OneDNN. + /// The OneDNN control exists in both CPU and GPU mode, because there can /// still be some CPU kernels running in GPU mode. virtual void EnableMKLDNN() {} - /// \brief Disable the use of MKLDNN. + /// \brief Disable the use of OneDNN. 
virtual void DisableMKLDNN() {} - /// \brief Enable MKLDNN quantize optimization. + /// \brief Enable OneDNN quantize optimization. virtual void EnableMkldnnQuantizer() {} - /// \brief Enable MKLDNN bfloat16. + /// \brief Enable OneDNN bfloat16. virtual void EnableMkldnnBfloat16() {} - /// \brief Enable MKLDNN int8. + /// \brief Enable OneDNN int8. virtual void EnableMkldnnInt8() {} - /// \brief Disable MKLDNN fc passes. + /// \brief Disable OneDNN fc passes. virtual void DisableMkldnnFcPasses() {} /// \brief Check if we are using gpu. @@ -214,26 +214,26 @@ class PD_INFER_DECL CpuPassStrategy : public PassStrategy { /// \brief Enable the use of cuDNN kernel. void EnableCUDNN() override; - /// \brief Enable the use of MKLDNN. + /// \brief Enable the use of OneDNN. void EnableMKLDNN() override; - /// \brief Disable the use of MKLDNN. + /// \brief Disable the use of OneDNN. void DisableMKLDNN() override; - /// \brief Enable MKLDNN quantize optimization. + /// \brief Enable OneDNN quantize optimization. void EnableMkldnnQuantizer() override; - /// \brief Enable MKLDNN bfloat16. + /// \brief Enable OneDNN bfloat16. void EnableMkldnnBfloat16() override; - /// \brief Enable MKLDNN int8. + /// \brief Enable OneDNN int8. void EnableMkldnnInt8() override; - /// \brief Disable MKLDNN fc passes. + /// \brief Disable OneDNN fc passes. void DisableMkldnnFcPasses() override; protected: - /// \brief Erase MKLDNN fc passes. + /// \brief Erase OneDNN fc passes. void EraseFcMkldnnPasses(); /// \cond Protected @@ -276,7 +276,7 @@ class PD_INFER_DECL GpuPassStrategy : public PassStrategy { /// \brief Not supported in GPU mode yet. void EnableMkldnnInt8() override; - /// \brief Disable MKLDNN fc passes. + /// \brief Disable OneDNN fc passes. void DisableMkldnnFcPasses() override; /// \brief Default destructor. diff --git a/paddle/fluid/inference/capi_exp/pd_config.h b/paddle/fluid/inference/capi_exp/pd_config.h index 427e9b95ac499b..f1bfe828cbcf2a 100644 --- a/paddle/fluid/inference/capi_exp/pd_config.h +++ b/paddle/fluid/inference/capi_exp/pd_config.h @@ -526,14 +526,14 @@ PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigLiteEngineEnabled( PADDLE_CAPI_EXPORT extern void PD_ConfigSwitchIrDebug( __pd_keep PD_Config* pd_config, PD_Bool x); /// -/// \brief Turn on MKLDNN. +/// \brief Turn on OneDNN. /// /// \param[in] pd_config config /// PADDLE_CAPI_EXPORT extern void PD_ConfigEnableMKLDNN( __pd_keep PD_Config* pd_config); /// -/// \brief Set the cache capacity of different input shapes for MKLDNN. +/// \brief Set the cache capacity of different input shapes for OneDNN. /// Default value 0 means not caching any shape. /// Please see MKL-DNN Data Caching Design Document: /// https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/mkldnn/caching/caching.md @@ -544,10 +544,10 @@ PADDLE_CAPI_EXPORT extern void PD_ConfigEnableMKLDNN( PADDLE_CAPI_EXPORT extern void PD_ConfigSetMkldnnCacheCapacity( __pd_keep PD_Config* pd_config, int32_t capacity); /// -/// \brief A boolean state telling whether to use the MKLDNN. +/// \brief A boolean state telling whether to use the OneDNN. /// /// \param[in] pd_config config -/// \return Whether to use the MKLDNN. +/// \return Whether to use the OneDNN. 
/// PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigMkldnnEnabled( __pd_keep PD_Config* pd_config); @@ -570,7 +570,7 @@ PADDLE_CAPI_EXPORT extern void PD_ConfigSetCpuMathLibraryNumThreads( PADDLE_CAPI_EXPORT extern int32_t PD_ConfigGetCpuMathLibraryNumThreads( __pd_keep PD_Config* pd_config); /// -/// \brief Specify the operator type list to use MKLDNN acceleration. +/// \brief Specify the operator type list to use OneDNN acceleration. /// /// \param[in] pd_config config /// \param[in] ops_num The number of operator type list. @@ -579,32 +579,32 @@ PADDLE_CAPI_EXPORT extern int32_t PD_ConfigGetCpuMathLibraryNumThreads( PADDLE_CAPI_EXPORT extern void PD_ConfigSetMkldnnOp( __pd_keep PD_Config* pd_config, size_t ops_num, const char** op_list); /// -/// \brief Turn on MKLDNN quantization. +/// \brief Turn on OneDNN quantization. /// /// \param[in] pd_config config /// PADDLE_CAPI_EXPORT extern void PD_ConfigEnableMkldnnQuantizer( __pd_keep PD_Config* pd_config); /// -/// \brief A boolean state telling whether the MKLDNN quantization is enabled. +/// \brief A boolean state telling whether the OneDNN quantization is enabled. /// /// \param[in] pd_config config -/// \return Whether the MKLDNN quantization is enabled. +/// \return Whether the OneDNN quantization is enabled. /// PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigMkldnnQuantizerEnabled( __pd_keep PD_Config* pd_config); /// -/// \brief Turn on MKLDNN bfloat16. +/// \brief Turn on OneDNN bfloat16. /// /// \param[in] pd_config config /// PADDLE_CAPI_EXPORT extern void PD_ConfigEnableMkldnnBfloat16( __pd_keep PD_Config* pd_config); /// -/// \brief A boolean state telling whether to use the MKLDNN Bfloat16. +/// \brief A boolean state telling whether to use the OneDNN Bfloat16. /// /// \param[in] pd_config config -/// \return Whether to use the MKLDNN Bfloat16. +/// \return Whether to use the OneDNN Bfloat16. /// PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigMkldnnBfloat16Enabled( __pd_keep PD_Config* pd_config); @@ -617,17 +617,17 @@ PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigMkldnnBfloat16Enabled( PADDLE_CAPI_EXPORT extern void PD_ConfigSetBfloat16Op( __pd_keep PD_Config* pd_config, size_t ops_num, const char** op_list); /// -/// \brief Turn on MKLDNN int8. +/// \brief Turn on OneDNN int8. /// /// \param[in] pd_config config /// PADDLE_CAPI_EXPORT extern void PD_ConfigEnableMkldnnInt8( __pd_keep PD_Config* pd_config); /// -/// \brief A boolean state telling whether to use the MKLDNN int8. +/// \brief A boolean state telling whether to use the OneDNN int8. /// /// \param[in] pd_config config -/// \return Whether to use the MKLDNN int8. +/// \return Whether to use the OneDNN int8. /// PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigMkldnnInt8Enabled( __pd_keep PD_Config* pd_config); diff --git a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.cpp b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.cpp index 0d585f938be8c8..b4cf4a09531692 100644 --- a/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.cpp +++ b/paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.cpp @@ -122,7 +122,7 @@ Java_com_baidu_paddle_inference_Config_cpuMathLibraryNumThreads( return mathThreadsNum; } -// 5. MKLDNN settings +// 5. 
OneDNN settings JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableMKLDNN( JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) { diff --git a/paddle/fluid/inference/experimental/javaapi/src/main/java/com/baidu/paddle/inference/Config.java b/paddle/fluid/inference/experimental/javaapi/src/main/java/com/baidu/paddle/inference/Config.java index a312cc73fde223..e9bef0d271f05f 100644 --- a/paddle/fluid/inference/experimental/javaapi/src/main/java/com/baidu/paddle/inference/Config.java +++ b/paddle/fluid/inference/experimental/javaapi/src/main/java/com/baidu/paddle/inference/Config.java @@ -208,7 +208,7 @@ public void resetCppPaddleConfigPointer() { private native int cpuMathLibraryNumThreads(long cppPaddleConfigPointer); - // 5. MKLDNN settings + // 5. OneDNN settings private native void enableMKLDNN(long cppPaddleConfigPointer); diff --git a/paddle/fluid/inference/goapi/config.go b/paddle/fluid/inference/goapi/config.go index 9d0a1e5864418e..c2e2b410e40613 100644 --- a/paddle/fluid/inference/goapi/config.go +++ b/paddle/fluid/inference/goapi/config.go @@ -554,14 +554,14 @@ func (config *Config) SwitchIrDebug(x bool) { } /// -/// \brief Turn on MKLDNN. +/// \brief Turn on OneDNN. /// func (config *Config) EnableMKLDNN() { C.PD_ConfigEnableMKLDNN(config.c) } /// -/// \brief Set the cache capacity of different input shapes for MKLDNN. +/// \brief Set the cache capacity of different input shapes for OneDNN. /// Default value 0 means not caching any shape. /// Please see MKL-DNN Data Caching Design Document: /// https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/mkldnn/caching/caching.md @@ -573,9 +573,9 @@ func (config *Config) SetMkldnnCacheCapacity(capacity int32) { } /// -/// \brief A boolean state telling whether to use the MKLDNN. +/// \brief A boolean state telling whether to use the OneDNN. /// -/// \return bool Whether to use the MKLDNN. +/// \return bool Whether to use the OneDNN. /// func (config *Config) MkldnnEnabled() bool { return cvtPDBoolToGo(C.PD_ConfigMkldnnEnabled(config.c)) @@ -609,7 +609,7 @@ func (config *Config) CpuMathLibraryNumThreads() int32 { // NativeConfig ToNativeConfig() const; /// -/// \brief Specify the operator type list to use MKLDNN acceleration. +/// \brief Specify the operator type list to use OneDNN acceleration. /// /// \param opList The operator type list. /// @@ -627,23 +627,23 @@ func (config *Config) SetMKLDNNOp(opList []string) { } /// -/// \brief Turn on MKLDNN quantization. +/// \brief Turn on OneDNN quantization. /// func (config *Config) EnableMkldnnQuantizer() { C.PD_ConfigEnableMkldnnQuantizer(config.c) } /// -/// \brief Turn on MKLDNN bfloat16. +/// \brief Turn on OneDNN bfloat16. /// func (config *Config) EnableMkldnnBfloat16() { C.PD_ConfigEnableMkldnnBfloat16(config.c) } /// -/// \brief A boolean state telling whether to use the MKLDNN Bfloat16. +/// \brief A boolean state telling whether to use the OneDNN Bfloat16. /// -/// \return bool Whether to use the MKLDNN Bfloat16. +/// \return bool Whether to use the OneDNN Bfloat16. /// func (config *Config) MkldnnBfloat16Enabled() bool { return cvtPDBoolToGo(C.PD_ConfigMkldnnBfloat16Enabled(config.c)) @@ -677,9 +677,9 @@ func (config *Config) ThreadLocalStreamEnabled() bool { } /// -/// \brief A boolean state telling whether the MKLDNN quantization is enabled. +/// \brief A boolean state telling whether the OneDNN quantization is enabled. /// -/// \return bool Whether the MKLDNN quantization is enabled. 
+/// \return bool Whether the OneDNN quantization is enabled. /// func (config *Config) MkldnnQuantizerEnabled() bool { return cvtPDBoolToGo(C.PD_ConfigMkldnnQuantizerEnabled(config.c)) diff --git a/paddle/fluid/platform/mkldnn_helper.h b/paddle/fluid/platform/mkldnn_helper.h index 6132aa9292e56e..145f42f669d9d0 100644 --- a/paddle/fluid/platform/mkldnn_helper.h +++ b/paddle/fluid/platform/mkldnn_helper.h @@ -52,7 +52,7 @@ inline void DontClearMKLDNNCache(const platform::Place& place) { } } -// If MKLDNN build and CPU place then register suffix in DeviceContext +// If OneDNN build and CPU place then register suffix in DeviceContext inline void AttachPointerHashToMKLDNNKey(void* ptr, const platform::Place& place) { if (platform::is_cpu_place(place)) { diff --git a/paddle/phi/api/lib/backend_set.h b/paddle/phi/api/lib/backend_set.h index af4de2580f5784..13077fb9167ad4 100644 --- a/paddle/phi/api/lib/backend_set.h +++ b/paddle/phi/api/lib/backend_set.h @@ -26,7 +26,7 @@ namespace experimental { * and the higher backend bit has a higher priority. * * A Tensor may belong to multiple backends at the same time, such CPU and - * MKLDNN. Only one backend value cannot + * OneDNN. Only one backend value cannot */ class BackendSet final { public: diff --git a/paddle/phi/backends/onednn/onednn_context.cc b/paddle/phi/backends/onednn/onednn_context.cc index b7789f29740f06..1a27e83af50fb8 100644 --- a/paddle/phi/backends/onednn/onednn_context.cc +++ b/paddle/phi/backends/onednn/onednn_context.cc @@ -189,7 +189,7 @@ struct OneDNNContext::Impl { std::lock_guard lock(*p_mutex_); - // Find ShapeBlob for current mkldnn session id. + // Find ShapeBlob for current onednn session id. auto map_it = pMap->find(sid); if (map_it == pMap->end()) { @@ -259,7 +259,7 @@ struct OneDNNContext::Impl { std::lock_guard lock(*p_mutex_); - // Find ShapeBlob for current mkldnn session id firstly + // Find ShapeBlob for current onednn session id firstly auto map_it = pMap->find(sid); // (jczaja): After first iteration of model's execution we // should have all elements cached (mostly) so failures are unlikely (less @@ -366,7 +366,7 @@ struct OneDNNContext::Impl { unsigned int block_next_cache_clearing_ = 0; // Holds some attributes only used by the onednn kernel calculation - // Since original mkldnn op kernel directly adds the operations that require + // Since original onednn op kernel directly adds the operations that require // fusion to the native kernel operations, and uses the attribute `fuse_xxx` // to control, for onednn, there will be some attributes that seem to be // independent of the device are also saved here. diff --git a/paddle/phi/backends/onednn/onednn_context.h b/paddle/phi/backends/onednn/onednn_context.h index 499be346500988..0e4654cb50a77e 100644 --- a/paddle/phi/backends/onednn/onednn_context.h +++ b/paddle/phi/backends/onednn/onednn_context.h @@ -28,7 +28,7 @@ namespace phi { using TensorNameMap = std::map>; class OneDNNContextThreadLocals { - // default mkldnn session id + // default onednn session id typedef OneDNNContextThreadLocals self; struct Body { @@ -38,7 +38,7 @@ class OneDNNContextThreadLocals { // - For fixed-shape, it's a null string in default. // - For dynamic-shape, it's user specific. std::string cur_input_shape_str; - // the cache capacity of different input shapes for MKLDNN. + // the cache capacity of different input shapes for OneDNN. // Default 1 means fixed input shape, not dynamic shape. int cur_input_shape_cache_capacity; // Recently registered data_format. 
This is needed to @@ -73,9 +73,9 @@ class OneDNNContextThreadLocals { OneDNNContextThreadLocals(const OneDNNContextThreadLocals& c) = delete; public: - // default mkldnn session id + // default onednn session id static constexpr size_t kMKLDNNSessionID_Default = 0; - // mkldnn session id for cache clearing mode + // onednn session id for cache clearing mode static constexpr size_t kMKLDNNSessionID_CacheClearing = -1; TEST_API static Body& fetch(); }; @@ -89,7 +89,7 @@ class OneDNNContext : public CPUContext { template using umap_key_string_t = umap_value_smart_t; - // Following three maps are used to cache MKLDNN primitives. + // Following three maps are used to cache OneDNN primitives. // There relations are: // - BlobMap = Map // - ShapeBlob = Map diff --git a/paddle/phi/core/dense_tensor.h b/paddle/phi/core/dense_tensor.h index b78cec14832722..c2d804199d2c72 100644 --- a/paddle/phi/core/dense_tensor.h +++ b/paddle/phi/core/dense_tensor.h @@ -203,7 +203,7 @@ class TEST_API DenseTensor : public TensorBase, * * 1. Some hardware or third-party libraries add some additional storage * properties on top of the description of the basic DenseTensor, such as - * memory desc of MKLDNN, storage_format and storage_layout of NPU, + * memory desc of OneDNN, storage_format and storage_layout of NPU, * these members are necessary for optimal performance, but if the properties * of each device are added to the DenseTensor with different macro isolation, * the memory layout of the DenseTensor will become more fragmented. diff --git a/paddle/phi/core/tensor_meta.h b/paddle/phi/core/tensor_meta.h index f493e0249d7bf4..613ba5f1f7f1f7 100644 --- a/paddle/phi/core/tensor_meta.h +++ b/paddle/phi/core/tensor_meta.h @@ -75,7 +75,7 @@ struct TEST_API DenseTensorMeta { bool is_scalar{false}; /// \brief Determine whether using gpudnn speed-up library in the new dygraph. - /// It maybe also support MKLDNN library in the near future. + /// It maybe also support OneDNN library in the near future. bool use_gpudnn{true}; DDim dims; DataType dtype{DataType::UNDEFINED}; diff --git a/paddle/phi/infermeta/fusion.cc b/paddle/phi/infermeta/fusion.cc index b56e7fab0bfe67..50cd6b6a23f759 100644 --- a/paddle/phi/infermeta/fusion.cc +++ b/paddle/phi/infermeta/fusion.cc @@ -3032,7 +3032,7 @@ void FusedConv2dAddActInferMeta(const MetaTensor& input, MetaTensor* output, std::vector outputs, MetaConfig config) { - // TODO(liuyuanle): mkldnn seems only support nchw. + // TODO(liuyuanle): onednn seems only support nchw. 
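Expanding on the OneDNN primitive cache described in the onednn_context.h hunk above: the maps nest by session id, then by the current input-shape string, then by blob name. Below is a simplified C++ sketch of that nesting; the placeholder types (Blob, and the innermost KeyBlob level) are illustrative stand-ins, not the exact smart-pointer typedefs used inside OneDNNContext::Impl.

#include <map>
#include <memory>
#include <string>

struct Blob {};  // placeholder for a cached OneDNN primitive/memory object

// Innermost level: blob name -> cached object.
using KeyBlob = std::map<std::string, std::shared_ptr<Blob>>;
// Middle level: cur_input_shape_str -> KeyBlob; its size is what
// SetMkldnnCacheCapacity / cur_input_shape_cache_capacity bounds.
using ShapeBlob = std::map<std::string, std::shared_ptr<KeyBlob>>;
// Outermost level: OneDNN session id (kMKLDNNSessionID_Default or
// kMKLDNNSessionID_CacheClearing) -> ShapeBlob.
using BlobMap = std::map<size_t, std::shared_ptr<ShapeBlob>>;

// Lookup path, sketched from SetBlob/GetBlob in onednn_context.cc:
//   blob_map[session_id] -> shapes; shapes[input_shape_str] -> blobs;
//   blobs[name] -> blob.

Keeping the session id as the outermost key lets the cache-clearing mode work in its own slot without disturbing primitives cached under the default session.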
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); std::vector out_shape = ComputeOutputShape(input, filter, diff --git a/paddle/phi/kernels/cpu/log_softmax_kernel.cc b/paddle/phi/kernels/cpu/log_softmax_kernel.cc index a57ab908d24caa..26e894945284c8 100644 --- a/paddle/phi/kernels/cpu/log_softmax_kernel.cc +++ b/paddle/phi/kernels/cpu/log_softmax_kernel.cc @@ -122,7 +122,7 @@ void LogSoftmaxKernel(const Context& dev_ctx, } // namespace phi -// TODO(YuanRisheng): The layout of mkldnn kernel should be MKLDNN, we should +// TODO(YuanRisheng): The layout of onednn kernel should be OneDNN, we should // support specifying the exact layout when the kernel is registered PD_REGISTER_KERNEL( log_softmax, CPU, ALL_LAYOUT, phi::LogSoftmaxKernel, float, double) {} diff --git a/paddle/phi/kernels/funcs/jit/README.en.md b/paddle/phi/kernels/funcs/jit/README.en.md index 0e1958a5c14151..cf661d5468a6cf 100644 --- a/paddle/phi/kernels/funcs/jit/README.en.md +++ b/paddle/phi/kernels/funcs/jit/README.en.md @@ -100,4 +100,4 @@ Add more implementations of `your_key` for performance enhancement. 1. Add functions based on generated code in `gen`. It should be derived from `JitCode` and should have corresponding creator from `JitCodeCreator` which will be registered on the `your_key`. 2. If new attribute type is added, you should specialize `JitCodeKey` of this type. -3. Add more functions in `more`,you can use any third party you wish, like mkl, mkldnn or intrinsic code to reach the best performance. +3. Add more functions in `more`,you can use any third party you wish, like mkl, onednn or intrinsic code to reach the best performance. diff --git a/paddle/phi/kernels/onednn/batch_norm_kernel.cc b/paddle/phi/kernels/onednn/batch_norm_kernel.cc index 9925aed9932565..ede572b243a553 100644 --- a/paddle/phi/kernels/onednn/batch_norm_kernel.cc +++ b/paddle/phi/kernels/onednn/batch_norm_kernel.cc @@ -100,7 +100,7 @@ void BatchNormKernel(const Context &dev_ctx, if (!global_stats) { const unsigned int C = common::vectorize(mean.dims())[0]; - // mkldnn only compute stats for current batch + // onednn only compute stats for current batch // so we need compute momentum stats via Eigen lib EigenVectorArrayMap batch_mean_e(dev_ctx.template Alloc(saved_mean), C); diff --git a/paddle/phi/kernels/onednn/conv_grad_kernel.cc b/paddle/phi/kernels/onednn/conv_grad_kernel.cc index 8c7121740370e2..359e14c7f5a2a5 100644 --- a/paddle/phi/kernels/onednn/conv_grad_kernel.cc +++ b/paddle/phi/kernels/onednn/conv_grad_kernel.cc @@ -86,7 +86,7 @@ void ConvGradKernel(const Context& dev_ctx, input_grad, unique_name); - // create mkldnn memory from input tensors (data/weights) + // create onednn memory from input tensors (data/weights) auto& astream = OneDNNContext::tls().get_stream(); if (filter_grad) { diff --git a/paddle/phi/kernels/transfer_layout_kernel.cc b/paddle/phi/kernels/transfer_layout_kernel.cc index 569be5ce9781fa..8bbd77beba4673 100644 --- a/paddle/phi/kernels/transfer_layout_kernel.cc +++ b/paddle/phi/kernels/transfer_layout_kernel.cc @@ -151,7 +151,7 @@ void TransferLayoutMKLDNN(const Context& dev_ctx, } if (src_layout != DataLayout::ONEDNN && dst_layout == DataLayout::ONEDNN) { - // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel + // Case1 - transform from Non-MKLDNN OPKernel to OneDNN OPKernel // Just set layout/format. 
No real transform occur out->ShareDataWith(x); // For NHWC data we need reshape of tensors as MKL-DNN @@ -166,8 +166,8 @@ void TransferLayoutMKLDNN(const Context& dev_ctx, out->set_mem_desc(out_mem_desc); } else if (src_layout == DataLayout::ONEDNN && dst_layout != DataLayout::ONEDNN) { - // Case2 - transform from MKLDNN OPKernel to Non-MKLDNN OPKernel - // Do transform via MKLDNN lib + // Case2 - transform from OneDNN OPKernel to Non-MKLDNN OPKernel + // Do transform via OneDNN lib funcs::TransDataLayoutFromOneDNN( src_layout, dst_layout, x, out, dev_ctx.GetPlace()); } else if (src_layout == DataLayout::ONEDNN && diff --git a/test/cpp/inference/api/analyzer_dam_tester.cc b/test/cpp/inference/api/analyzer_dam_tester.cc index 3770aac10e3716..f33e8b52bfaaf0 100644 --- a/test/cpp/inference/api/analyzer_dam_tester.cc +++ b/test/cpp/inference/api/analyzer_dam_tester.cc @@ -210,7 +210,7 @@ void profile(bool use_mkldnn = false) { if (use_mkldnn) { cfg.EnableMKLDNN(); - // Enable all the mkldnn supported ops except conv3d in dam + // Enable all the onednn supported ops except conv3d in dam std::unordered_set op_list = { "softmax", "elementwise_add", "relu", "fc"}; cfg.SetMKLDNNOp(op_list); @@ -270,7 +270,7 @@ void compare(bool use_mkldnn = false) { SetConfig(&cfg); if (use_mkldnn) { cfg.EnableMKLDNN(); - // Enable all the mkldnn supported ops except conv3d in dam + // Enable all the onednn supported ops except conv3d in dam std::unordered_set op_list = { "softmax", "elementwise_add", "relu"}; cfg.SetMKLDNNOp(op_list); diff --git a/test/cpp/inference/api/analyzer_lexical_analysis_gru_tester.cc b/test/cpp/inference/api/analyzer_lexical_analysis_gru_tester.cc index 2d0355d361b2d3..5a6e59248ec04a 100644 --- a/test/cpp/inference/api/analyzer_lexical_analysis_gru_tester.cc +++ b/test/cpp/inference/api/analyzer_lexical_analysis_gru_tester.cc @@ -274,7 +274,7 @@ TEST(Analyzer_lexical_test, Analyzer_lexical_analysis) { } else if (FLAGS_enable_int8_qat) { analysis_cfg.EnableMkldnnInt8(); } else { - // if fp32 => disable mkldnn fc passes + // if fp32 => disable onednn fc passes // when passes are enabled dnnl error occurs for iterations==0 analysis_cfg.DisableMkldnnFcPasses(); } diff --git a/test/cpp/inference/infer_ut/run.sh b/test/cpp/inference/infer_ut/run.sh index f55f7811606aa9..88cdb3bacc1e58 100755 --- a/test/cpp/inference/infer_ut/run.sh +++ b/test/cpp/inference/infer_ut/run.sh @@ -38,7 +38,7 @@ current_dir=`pwd` build_dir=${current_dir}/build log_dir=${current_dir}/log -# check mkldnn installation +# check onednn installation if [ $2 == ON ]; then # You can export yourself if move the install path MKL_LIB=${inference_install_dir}/third_party/install/mklml/lib diff --git a/test/cpp/inference/infer_ut/test_ppyolo_mbv3.cc b/test/cpp/inference/infer_ut/test_ppyolo_mbv3.cc index f0b128d97b5c76..90c3f1655b1a42 100644 --- a/test/cpp/inference/infer_ut/test_ppyolo_mbv3.cc +++ b/test/cpp/inference/infer_ut/test_ppyolo_mbv3.cc @@ -104,7 +104,7 @@ TEST(tensorrt_tester_ppyolo_mbv3, multi_thread4_trt_fp32_bz2) { } TEST(DISABLED_mkldnn_tester_ppyolo_mbv3, multi_thread4_mkl_bz2) { - // TODO(OliverLPH): mkldnn multi thread will fail + // TODO(OliverLPH): onednn multi thread will fail int thread_num = 4; // init input data auto input_data_map = PrepareInput(2); diff --git a/test/cpp/inference/test_helper.h b/test/cpp/inference/test_helper.h index cbef6a3f588096..3700ed0b602b1c 100644 --- a/test/cpp/inference/test_helper.h +++ b/test/cpp/inference/test_helper.h @@ -225,7 +225,7 @@ void TestInference(const 
std::string& dirname, fetch_targets[fetch_target_names[i]] = cpu_fetchs[i]; } - // 6. If export Flags_use_mkldnn=True, use mkldnn related ops. + // 6. If export Flags_use_mkldnn=True, use onednn related ops. if (FLAGS_use_mkldnn) executor.EnableMKLDNN(*inference_program); // 7. Run the inference program diff --git a/test/ir/inference/inference_pass_test.py b/test/ir/inference/inference_pass_test.py index 6b9f531201e5c5..03da6514a0674c 100644 --- a/test/ir/inference/inference_pass_test.py +++ b/test/ir/inference/inference_pass_test.py @@ -280,7 +280,7 @@ def check_output_with_option( err_msg='Output has diff between GPU and TensorRT. ', ) - # Check whether the mkldnn results and the CPU results are the same. + # Check whether the onednn results and the CPU results are the same. if (not use_gpu) and self.enable_mkldnn: mkldnn_outputs = self._get_inference_outs( self._get_analysis_config( diff --git a/test/ir/inference/quant_dequant_test.py b/test/ir/inference/quant_dequant_test.py index 9a3715176d1599..c176e802a525cb 100644 --- a/test/ir/inference/quant_dequant_test.py +++ b/test/ir/inference/quant_dequant_test.py @@ -387,7 +387,7 @@ def check_output_with_option( err_msg='Output has diff between GPU and TensorRT. ', ) - # Check whether the mkldnn results and the CPU results are the same. + # Check whether the onednn results and the CPU results are the same. if (not use_gpu) and self.enable_mkldnn: mkldnn_outputs = self._get_inference_outs( self._get_analysis_config( diff --git a/test/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py b/test/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py index a075a6a19021a1..e83b5494c294e2 100644 --- a/test/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py +++ b/test/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py @@ -141,7 +141,7 @@ def teller1(program_config, predictor_config): return True return False - # mkldnn Output has diff with bias! + # onednn Output has diff with bias! def teller2(program_config, predictor_config): return ( predictor_config.mkldnn_enabled() diff --git a/test/legacy_test/op_test.py b/test/legacy_test/op_test.py index 21984c78dcb6f3..60c648719573e7 100644 --- a/test/legacy_test/op_test.py +++ b/test/legacy_test/op_test.py @@ -501,7 +501,7 @@ def is_complex_test(): "This test of %s op needs check_grad." 
% cls.op_type ) - # check for op test with fp64 precision, but not check mkldnn op test for now + # check for op test with fp64 precision, but not check onednn op test for now if ( cls.dtype in [np.float32, np.float64] and cls.op_type diff --git a/test/legacy_test/test_conv2d_op.py b/test/legacy_test/test_conv2d_op.py index 37ef304015f33e..a3bfa75d1225f8 100644 --- a/test/legacy_test/test_conv2d_op.py +++ b/test/legacy_test/test_conv2d_op.py @@ -498,7 +498,7 @@ def has_cuda(self): def test_check_output(self): place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output_with_place( place, atol=1e-5, @@ -512,7 +512,7 @@ def test_check_grad(self): ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad_with_place( place, {'Input', 'Filter'}, @@ -528,7 +528,7 @@ def test_check_grad_no_filter(self): ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad_with_place( place, ['Input'], @@ -545,7 +545,7 @@ def test_check_grad_no_input(self): ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad_with_place( place, ['Filter'], @@ -831,7 +831,7 @@ def has_cuda(self): ) def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() self.check_output_with_place( place, @@ -841,7 +841,7 @@ def test_check_output(self): ) def test_check_grad(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() @@ -855,7 +855,7 @@ def test_check_grad(self): ) def test_check_grad_no_filter(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() @@ -870,7 +870,7 @@ def test_check_grad_no_filter(self): ) def test_check_grad_no_input(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() diff --git a/test/legacy_test/test_conv2d_transpose_op.py b/test/legacy_test/test_conv2d_transpose_op.py index 3bb2a6f09f4d26..0c8000003de8ce 100644 --- a/test/legacy_test/test_conv2d_transpose_op.py +++ b/test/legacy_test/test_conv2d_transpose_op.py @@ -223,7 +223,7 @@ def setUp(self): self.outputs = {'Output': output} def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( diff --git a/test/legacy_test/test_conv3d_op.py b/test/legacy_test/test_conv3d_op.py index cd0d6449020ca0..143deb493c756a 100644 --- a/test/legacy_test/test_conv3d_op.py +++ 
b/test/legacy_test/test_conv3d_op.py @@ -454,7 +454,7 @@ def has_cudnn(self): return core.is_compiled_with_cuda() and self.use_cudnn def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() self.check_output_with_place( place, @@ -466,7 +466,7 @@ def test_check_output(self): def test_check_grad(self): place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad_with_place( place, {'Input', 'Filter'}, @@ -479,7 +479,7 @@ def test_check_grad(self): def test_check_grad_no_filter(self): place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad_with_place( place, ['Input'], @@ -493,7 +493,7 @@ def test_check_grad_no_filter(self): def test_check_grad_no_input(self): place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad_with_place( place, ['Filter'], diff --git a/test/legacy_test/test_elementwise_add_op.py b/test/legacy_test/test_elementwise_add_op.py index 81fc27044e04d8..1f640e61056bb7 100644 --- a/test/legacy_test/test_elementwise_add_op.py +++ b/test/legacy_test/test_elementwise_add_op.py @@ -54,7 +54,7 @@ def check_dygraph(self): return not self.use_mkldnn and self.axis == -1 def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output( check_dygraph=self.check_dygraph(), check_pir=self.check_dygraph(), @@ -62,7 +62,7 @@ def test_check_output(self): ) def test_check_grad_normal(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.dtype == np.float16: return self.check_grad( @@ -76,7 +76,7 @@ def test_check_grad_normal(self): ) def test_check_grad_ignore_x(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.dtype == np.float16: return self.check_grad( @@ -91,7 +91,7 @@ def test_check_grad_ignore_x(self): ) def test_check_grad_ignore_y(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.dtype == np.float16: return self.check_grad( @@ -152,7 +152,7 @@ def init_dtype(self): self.dtype = np.float16 def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode place = core.CUDAPlace(0) self.check_output_with_place( place, diff --git a/test/legacy_test/test_elementwise_mul_op.py b/test/legacy_test/test_elementwise_mul_op.py index e6959b299ae9d0..5f100a064fff66 100644 --- a/test/legacy_test/test_elementwise_mul_op.py +++ b/test/legacy_test/test_elementwise_mul_op.py @@ -46,7 +46,7 @@ def setUp(self): self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output( check_dygraph=(not self.use_mkldnn), check_pir=(not self.use_mkldnn), @@ -54,7 +54,7 @@ def test_check_output(self): 
) def test_check_grad_normal(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad( ['X', 'Y'], 'Out', @@ -66,7 +66,7 @@ def test_check_grad_normal(self): ) def test_check_grad_ignore_x(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad( ['Y'], 'Out', @@ -79,7 +79,7 @@ def test_check_grad_ignore_x(self): ) def test_check_grad_ignore_y(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad( ['X'], 'Out', @@ -451,14 +451,14 @@ def if_enable_cinn(self): pass def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output( check_dygraph=(not self.use_mkldnn), check_pir_onednn=self.check_pir_onednn, ) def test_check_grad_normal(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad( ['X', 'Y'], 'Out', @@ -470,7 +470,7 @@ def test_check_grad_normal(self): ) def test_check_grad_ignore_x(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad( ['Y'], 'Out', @@ -483,7 +483,7 @@ def test_check_grad_ignore_x(self): ) def test_check_grad_ignore_y(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad( ['X'], 'Out', diff --git a/test/legacy_test/test_pool2d_op.py b/test/legacy_test/test_pool2d_op.py index 0a63c3f85352dd..b2f10e3af1b266 100644 --- a/test/legacy_test/test_pool2d_op.py +++ b/test/legacy_test/test_pool2d_op.py @@ -419,7 +419,7 @@ def has_cudnn(self): return core.is_compiled_with_cuda() and self.use_cudnn def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.has_cudnn(): place = core.CUDAPlace(0) self.check_output_with_place( @@ -440,7 +440,7 @@ def test_check_output(self): def test_check_grad(self): if self.dtype == np.float16: return - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.has_cudnn() and self.pool_type != "max": place = core.CUDAPlace(0) self.check_grad_with_place( @@ -595,7 +595,7 @@ def init_kernel_type(self): self.dtype = np.float16 def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): @@ -607,7 +607,7 @@ def test_check_output(self): ) def test_check_grad(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode place = core.CUDAPlace(0) if ( core.is_float16_supported(place) @@ -638,7 +638,7 @@ def init_kernel_type(self): self.dtype = np.float16 def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): @@ -650,7 +650,7 @@ def test_check_output(self): ) def test_check_grad(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode place = 
core.CUDAPlace(0) if ( core.is_float16_supported(place) diff --git a/test/legacy_test/test_softmax_op.py b/test/legacy_test/test_softmax_op.py index 1876424cf4d4ba..facf487ebafc5e 100644 --- a/test/legacy_test/test_softmax_op.py +++ b/test/legacy_test/test_softmax_op.py @@ -82,7 +82,7 @@ def init_kernel_type(self): pass def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( @@ -102,7 +102,7 @@ def test_check_output(self): ) def test_check_grad(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.use_cudnn or self.dtype == np.float16: place = core.CUDAPlace(0) if core.is_float16_supported(place): @@ -160,7 +160,7 @@ def setUp(self): self.enable_cinn = False def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( @@ -207,7 +207,7 @@ def setUp(self): self.enable_cinn = False def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( diff --git a/test/mkldnn/test_concat_int8_mkldnn_op.py b/test/mkldnn/test_concat_int8_mkldnn_op.py index 546f6d4978f500..9b258ebb15d344 100644 --- a/test/mkldnn/test_concat_int8_mkldnn_op.py +++ b/test/mkldnn/test_concat_int8_mkldnn_op.py @@ -36,7 +36,7 @@ def setUp(self): self.outputs = {'Out': self.output} def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output(check_dygraph=False, check_pir_onednn=True) # --------------------test concat s8 in with axis 0-------------------- diff --git a/test/mkldnn/test_conv2d_int8_mkldnn_op.py b/test/mkldnn/test_conv2d_int8_mkldnn_op.py index c72f70b07e2183..5d28ea8e3bacf6 100644 --- a/test/mkldnn/test_conv2d_int8_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_int8_mkldnn_op.py @@ -182,7 +182,7 @@ def residual_helper(init_low, init_high, output_): self.outputs = {'Output': output} def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode # the atol for integer tests should be 1 self.check_output_with_place( core.CPUPlace(), diff --git a/test/mkldnn/test_conv2d_mkldnn_op.py b/test/mkldnn/test_conv2d_mkldnn_op.py index 606c86ce62f4b7..8ff67267f71422 100644 --- a/test/mkldnn/test_conv2d_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_mkldnn_op.py @@ -65,7 +65,7 @@ def setUp(self): output = self.outputs['Output'] - # mkldnn only support either conv-sum-relu, or conv-relu. + # onednn only support either conv-sum-relu, or conv-relu. if self.fuse_bias and self.bias_size is not None: bias = np.random.random(self.bias_size).astype(self.dtype) output = conv2d_bias_naive(output, bias) @@ -146,7 +146,7 @@ def setUp(self): output = self.outputs['Output'] - # mkldnn only support either conv-sum-relu, or conv-relu. + # onednn only support either conv-sum-relu, or conv-relu. 
if self.fuse_bias and self.bias_size is not None: bias = np.random.random(self.bias_size).astype(self.dtype) output = conv2d_bias_naive(output, bias) diff --git a/test/mkldnn/test_conv2d_transpose_mkldnn_op.py b/test/mkldnn/test_conv2d_transpose_mkldnn_op.py index 54fa3f4eabea5d..2ff110386fe704 100644 --- a/test/mkldnn/test_conv2d_transpose_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_transpose_mkldnn_op.py @@ -41,7 +41,7 @@ def test_check_grad_no_filter(self): return def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( diff --git a/test/mkldnn/test_dequantize_mkldnn_op.py b/test/mkldnn/test_dequantize_mkldnn_op.py index 81772ae6e254c1..6c09e85e595e01 100644 --- a/test/mkldnn/test_dequantize_mkldnn_op.py +++ b/test/mkldnn/test_dequantize_mkldnn_op.py @@ -65,7 +65,7 @@ def prepare_output_int8(self): self.outputs = {'Output': output} def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output(check_dygraph=False, check_pir_onednn=True) def check_raise_error(self, msg): diff --git a/test/mkldnn/test_elementwise_add_mkldnn_op.py b/test/mkldnn/test_elementwise_add_mkldnn_op.py index 8b9dded0129bdd..8f5e62aa80aea9 100644 --- a/test/mkldnn/test_elementwise_add_mkldnn_op.py +++ b/test/mkldnn/test_elementwise_add_mkldnn_op.py @@ -149,7 +149,7 @@ def init_scales(self): self.attrs['scale_out'] = 1.0 def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.init_scales() self.check_output( check_dygraph=(not self.use_mkldnn), diff --git a/test/mkldnn/test_elementwise_mul_onednn_op.py b/test/mkldnn/test_elementwise_mul_onednn_op.py index 7e1bcdde300024..71d4057e428fe9 100644 --- a/test/mkldnn/test_elementwise_mul_onednn_op.py +++ b/test/mkldnn/test_elementwise_mul_onednn_op.py @@ -149,7 +149,7 @@ def init_scales(self): self.attrs['scale_out'] = 1.0 def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.init_scales() self.check_output(check_dygraph=(not self.use_mkldnn)) diff --git a/test/mkldnn/test_fc_mkldnn_op.py b/test/mkldnn/test_fc_mkldnn_op.py index 9a587ea2e95cf4..e6605a4fa2331f 100644 --- a/test/mkldnn/test_fc_mkldnn_op.py +++ b/test/mkldnn/test_fc_mkldnn_op.py @@ -54,7 +54,7 @@ def setUp(self): } def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output(check_dygraph=False, check_pir_onednn=True) def test_check_grad_normal(self): diff --git a/test/mkldnn/test_lrn_mkldnn_op.py b/test/mkldnn/test_lrn_mkldnn_op.py index b8de14359cebfb..c5aab505a54952 100644 --- a/test/mkldnn/test_lrn_mkldnn_op.py +++ b/test/mkldnn/test_lrn_mkldnn_op.py @@ -25,7 +25,7 @@ def get_attrs(self): def test_check_output(self): # We cannot validate MidOut as LRN REF has different meaning in it - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output( atol=0.002, no_check_set=['MidOut'], @@ -34,7 +34,7 @@ def test_check_output(self): ) def test_check_grad_normal(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad( ['X'], 
'Out', max_relative_error=0.01, check_dygraph=False ) diff --git a/test/mkldnn/test_mul_int8_mkldnn_op.py b/test/mkldnn/test_mul_int8_mkldnn_op.py index 56b9966cbeaeab..039255b2a18d66 100644 --- a/test/mkldnn/test_mul_int8_mkldnn_op.py +++ b/test/mkldnn/test_mul_int8_mkldnn_op.py @@ -77,7 +77,7 @@ def init_data(self): self.outputs = {'Out': output} def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output_with_place( core.CPUPlace(), atol=0, check_dygraph=False, check_pir_onednn=True ) diff --git a/test/mkldnn/test_pool2d_int8_mkldnn_op.py b/test/mkldnn/test_pool2d_int8_mkldnn_op.py index ece60311054267..5b97cb5856675d 100644 --- a/test/mkldnn/test_pool2d_int8_mkldnn_op.py +++ b/test/mkldnn/test_pool2d_int8_mkldnn_op.py @@ -53,7 +53,7 @@ def setUp(self): self.outputs = {'Out': output} def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output_with_place( core.CPUPlace(), atol=1e-5, diff --git a/test/mkldnn/test_quantize_mkldnn_op.py b/test/mkldnn/test_quantize_mkldnn_op.py index cb175f62219e17..0d2d73d069e1c0 100644 --- a/test/mkldnn/test_quantize_mkldnn_op.py +++ b/test/mkldnn/test_quantize_mkldnn_op.py @@ -64,7 +64,7 @@ def prepare_output(self): self.outputs = {'Output': output} def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output(check_dygraph=False, check_pir_onednn=True) def check_raise_error(self, msg): diff --git a/test/mkldnn/test_requantize_mkldnn_op.py b/test/mkldnn/test_requantize_mkldnn_op.py index 43daa9baadb8ca..786d7ca484df86 100644 --- a/test/mkldnn/test_requantize_mkldnn_op.py +++ b/test/mkldnn/test_requantize_mkldnn_op.py @@ -90,7 +90,7 @@ def prepare_output(self): self.outputs = {'Output': self.output} def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.assertTrue( self.input_data_type == 'uint8' or self.shift_in == 0.0, 'Input data must be unsigned if it has nonzero shift.', diff --git a/test/mkldnn/test_softmax_mkldnn_op.py b/test/mkldnn/test_softmax_mkldnn_op.py index c8e4e3d8948e8c..2bc06aee3b80d8 100644 --- a/test/mkldnn/test_softmax_mkldnn_op.py +++ b/test/mkldnn/test_softmax_mkldnn_op.py @@ -69,7 +69,7 @@ def setUp(self): } def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( @@ -79,7 +79,7 @@ def test_check_output(self): self.check_output(check_dygraph=False, check_pir_onednn=True) def test_check_grad(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if self.use_cudnn or self.dtype == np.float16: place = core.CUDAPlace(0) if core.is_float16_supported(place): diff --git a/test/mkldnn/test_sum_mkldnn_op.py b/test/mkldnn/test_sum_mkldnn_op.py index fc86c6834b9404..5874fd102d2519 100644 --- a/test/mkldnn/test_sum_mkldnn_op.py +++ b/test/mkldnn/test_sum_mkldnn_op.py @@ -38,11 +38,11 @@ def init_data_type(self): self.dtype = np.float32 def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output(check_dygraph=False, check_pir_onednn=True) 
def test_check_grad(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad( ['x0'], 'Out', check_dygraph=False, check_pir_onednn=True ) diff --git a/test/mkldnn/test_transpose_int8_mkldnn_op.py b/test/mkldnn/test_transpose_int8_mkldnn_op.py index e2a3fba8d2bc07..2787e40dd5259f 100644 --- a/test/mkldnn/test_transpose_int8_mkldnn_op.py +++ b/test/mkldnn/test_transpose_int8_mkldnn_op.py @@ -48,7 +48,7 @@ def init_op_type(self): self.op_type = "transpose2" def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output_with_place( core.CPUPlace(), 1e-5, diff --git a/test/mkldnn/test_transpose_mkldnn_op.py b/test/mkldnn/test_transpose_mkldnn_op.py index 34a25cf2f8b1e7..2ca4d2ad538026 100644 --- a/test/mkldnn/test_transpose_mkldnn_op.py +++ b/test/mkldnn/test_transpose_mkldnn_op.py @@ -37,13 +37,13 @@ def init_op_type(self): self.use_mkldnn = True def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_output( no_check_set=['XShape'], check_dygraph=False, check_pir_onednn=True ) def test_check_grad(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad( ['X'], 'Out', check_dygraph=False, check_pir_onednn=True ) diff --git a/test/xpu/test_conv2d_op_xpu.py b/test/xpu/test_conv2d_op_xpu.py index 1e2795b1d29643..df36f226408ebe 100644 --- a/test/xpu/test_conv2d_op_xpu.py +++ b/test/xpu/test_conv2d_op_xpu.py @@ -415,13 +415,13 @@ def has_cuda(self): ) def test_check_output(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if core.is_compiled_with_xpu(): paddle.enable_static() self.check_output_with_place(place=self.place) def test_check_grad(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): @@ -431,7 +431,7 @@ def test_check_grad(self): ) def test_check_grad_no_filter(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): @@ -441,7 +441,7 @@ def test_check_grad_no_filter(self): ) def test_check_grad_no_input(self): - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): diff --git a/test/xpu/test_conv3d_op_xpu.py b/test/xpu/test_conv3d_op_xpu.py index 9c65c81d12cf39..021c57821c12d4 100644 --- a/test/xpu/test_conv3d_op_xpu.py +++ b/test/xpu/test_conv3d_op_xpu.py @@ -255,7 +255,7 @@ def test_check_output(self): def test_check_grad(self): place = paddle.XPUPlace(0) - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad_with_place( place, {'Input', 'Filter'}, @@ -265,7 +265,7 @@ def test_check_grad(self): def test_check_grad_no_filter(self): place = paddle.XPUPlace(0) - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode 
self.check_grad_with_place( place, ['Input'], @@ -276,7 +276,7 @@ def test_check_grad_no_filter(self): def test_check_grad_no_input(self): place = paddle.XPUPlace(0) - # TODO(wangzhongpu): support mkldnn op in dygraph mode + # TODO(wangzhongpu): support onednn op in dygraph mode self.check_grad_with_place( place, ['Filter'], diff --git a/tools/remove_grad_op_and_kernel.py b/tools/remove_grad_op_and_kernel.py index cb2201dc4b72b4..4db50a23ab166f 100644 --- a/tools/remove_grad_op_and_kernel.py +++ b/tools/remove_grad_op_and_kernel.py @@ -126,7 +126,7 @@ def update_operator_cmake(cmake_file): xpu_kernel_pattern1 = r'REGISTER_OP_XPU_KERNEL\(.*?\);?' xpu_kernel_pattern2 = r'REGISTER_OP_XPU_KERNEL\(.*?_grad,.*?\);?' - # remove custom grad kernel, mkldnn or cudnn etc. + # remove custom grad kernel, onednn or cudnn etc. op_kernel_pattern1 = r'REGISTER_OP_KERNEL\(.*?\);?' op_kernel_pattern2 = r'REGISTER_OP_KERNEL\(.*?_grad,.*?\);?'
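The two patterns in the final hunk differ only in the "_grad," requirement: op_kernel_pattern1 matches any REGISTER_OP_KERNEL(...) registration, while op_kernel_pattern2 matches only registrations that carry a "_grad," argument, i.e. the backward kernels the tool strips out. A minimal sketch of that distinction follows; the registration strings, kernel names, and the re.search/re.sub calls are illustrative assumptions, not code taken from remove_grad_op_and_kernel.py.

import re

# Patterns as they appear in tools/remove_grad_op_and_kernel.py.
op_kernel_pattern1 = r'REGISTER_OP_KERNEL\(.*?\);?'
op_kernel_pattern2 = r'REGISTER_OP_KERNEL\(.*?_grad,.*?\);?'

# Hypothetical single-line registrations, for illustration only.
fwd = 'REGISTER_OP_KERNEL(relu, MKLDNN, paddle::platform::CPUPlace, ReluOneDNNKernel);'
bwd = 'REGISTER_OP_KERNEL(relu_grad, MKLDNN, paddle::platform::CPUPlace, ReluGradOneDNNKernel);'

print(bool(re.search(op_kernel_pattern1, fwd)))  # True: every registration matches pattern1
print(bool(re.search(op_kernel_pattern2, fwd)))  # False: no '_grad,' argument, so it is kept
print(bool(re.search(op_kernel_pattern2, bwd)))  # True: a grad registration
print(re.sub(op_kernel_pattern2, '', bwd))       # the grad registration is stripped entirely

How the script applies these patterns to registrations that span several source lines (for example, whether it compiles them with re.DOTALL) is not shown here.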