Commit e0caaf7

Replace is_run_mkldnn_kernel with is_run_onednn_kernel [fluid_ops] (#74314)
1 parent c6742c5 commit e0caaf7

7 files changed: 16 additions (+16), 16 deletions (-16)


paddle/fluid/framework/new_executor/instruction/onednn/onednn_instruction.cc

Lines changed: 2 additions & 2 deletions
@@ -375,9 +375,9 @@ OneDNNPhiKernelInstruction::OneDNNPhiKernelInstruction(
   }
   TensorNameMap(op, *value_exec_info_, yaml_info_parser, inputs_, outputs_);

-  // Step4: Mark is_run_mkldnn_kernel=true
+  // Step4: Mark is_run_onednn_kernel=true
   phi::MetaConfig new_config = infer_meta_context_.GetMetaConfig();
-  new_config.is_run_mkldnn_kernel = true;
+  new_config.is_run_onednn_kernel = true;
   infer_meta_context_.SetMetaConfig(new_config);

   // Step5: Handle skip_transform_inputs

paddle/fluid/framework/new_executor/instruction/onednn/onednn_legacy_instruction.cc

Lines changed: 2 additions & 2 deletions
@@ -229,9 +229,9 @@ OneDNNLegacyKernelInstruction::OneDNNLegacyKernelInstruction(
     }
   }

-  // Step3: Mark is_run_mkldnn_kernel=true
+  // Step3: Mark is_run_onednn_kernel=true
   phi::MetaConfig new_config = infer_meta_context_.GetMetaConfig();
-  new_config.is_run_mkldnn_kernel = true;
+  new_config.is_run_onednn_kernel = true;
   infer_meta_context_.SetMetaConfig(new_config);

   // Step4: Handle skip_transform_inputs

paddle/phi/core/meta_tensor.h

Lines changed: 3 additions & 3 deletions
@@ -25,13 +25,13 @@ namespace phi {

 struct TEST_API MetaConfig {
   bool is_runtime{true};
-  bool is_run_mkldnn_kernel{false};
+  bool is_run_onednn_kernel{false};
   MetaConfig() = default;

   // supporting implicit construction is easier to use
-  MetaConfig(bool is_runtime, bool is_run_mkldnn_kernel)
+  MetaConfig(bool is_runtime, bool is_run_onednn_kernel)
       : is_runtime(is_runtime),
-        is_run_mkldnn_kernel(is_run_mkldnn_kernel) {}  // NOLINT
+        is_run_onednn_kernel(is_run_onednn_kernel) {}  // NOLINT
 };

 class TEST_API MetaTensor {
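
For reference, a minimal standalone sketch of the renamed struct as it can be used after this change. The TEST_API macro and phi namespace are dropped so the snippet compiles on its own, and the main() caller is hypothetical, not part of the commit:

#include <iostream>

struct MetaConfig {
  bool is_runtime{true};
  bool is_run_onednn_kernel{false};  // renamed from is_run_mkldnn_kernel

  MetaConfig() = default;

  // supporting implicit construction is easier to use
  MetaConfig(bool is_runtime, bool is_run_onednn_kernel)
      : is_runtime(is_runtime),
        is_run_onednn_kernel(is_run_onednn_kernel) {}
};

int main() {
  // Only the member name changes; construction and defaults stay the same.
  MetaConfig config(/*is_runtime=*/true, /*is_run_onednn_kernel=*/true);
  std::cout << config.is_run_onednn_kernel << std::endl;  // prints 1
  return 0;
}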

paddle/phi/infermeta/binary.cc

Lines changed: 4 additions & 4 deletions
@@ -587,7 +587,7 @@ void ConvInferMeta(const MetaTensor& input,
             "dilation is %d.",
             dilations[i]));
   }
-  const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
+  const bool channel_last = (config.is_run_onednn_kernel == false) &&
                             (data_format == "NHWC" || data_format == "NDHWC");

   PADDLE_ENFORCE_EQ(
@@ -773,7 +773,7 @@ void ConvTransposeInferMeta(const MetaTensor& x,
   std::vector<int> paddings_ = paddings;
   std::vector<int> dilations_ = dilations;

-  const DataLayout data_layout = config.is_run_mkldnn_kernel
+  const DataLayout data_layout = config.is_run_onednn_kernel
                                      ? DataLayout::kNCHW
                                      : common::StringToDataLayout(data_format);

@@ -1700,7 +1700,7 @@ void ElementwiseRawInferMeta(const MetaTensor& x,

 #ifdef PADDLE_WITH_DNNL
   bool should_rotate =
-      config.is_run_mkldnn_kernel &&
+      config.is_run_onednn_kernel &&
       (phi::OneDNNContext::tls().get_cur_paddle_data_layout() ==
        phi::DataLayout::kNHWC) &&
       (x_dims.size() >= 3 || y_dims.size() >= 3);
@@ -3374,7 +3374,7 @@ void PReluInferMeta(const MetaTensor& x,
                         "For mode 'channel', data_format must be one of "
                         "NCHW and NHWC. But received data_format: %s",
                         data_format));
-    if (data_format == "NCHW" || config.is_run_mkldnn_kernel) {
+    if (data_format == "NCHW" || config.is_run_onednn_kernel) {
      PADDLE_ENFORCE_EQ(product(alpha.dims()) == x_dim[1],
                        true,
                        common::errors::InvalidArgument(
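
As a hedged sketch of the layout decision these infer-meta hunks touch: oneDNN kernels describe dims in NCHW order, so data_format is ignored whenever the flag is set. The free-standing helper below only mirrors the channel_last expression above; it is illustrative and not a function in the repository:

#include <string>

// Illustrative only: when the oneDNN kernel flag is set, the layout is
// treated as channel-first regardless of the requested data_format.
bool IsChannelLast(const std::string& data_format, bool is_run_onednn_kernel) {
  return !is_run_onednn_kernel &&
         (data_format == "NHWC" || data_format == "NDHWC");
}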

paddle/phi/infermeta/fusion.cc

Lines changed: 1 addition & 1 deletion
@@ -2914,7 +2914,7 @@ void BNActXPUInferMeta(const MetaTensor& x,
                            x_dims,
                            x_dims.size()));

-  const int64_t C = ((config.is_run_mkldnn_kernel == true) ||
+  const int64_t C = ((config.is_run_onednn_kernel == true) ||
                              (data_layout_str == DataLayout::kNCHW)
                          ? x_dims[1]
                          : x_dims[x_dims.size() - 1]);

paddle/phi/infermeta/multiary.cc

Lines changed: 1 addition & 1 deletion
@@ -965,7 +965,7 @@ void BatchNormInferMeta(const MetaTensor& x,
                            x_dims,
                            x_dims.size()));

-  const int64_t C = ((config.is_run_mkldnn_kernel == true) ||
+  const int64_t C = ((config.is_run_onednn_kernel == true) ||
                              (data_layout == DataLayout::kNCHW)
                          ? x_dims[1]
                          : x_dims[x_dims.size() - 1]);
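
A small illustrative sketch of the channel-count selection pattern used in the two batch-norm infer-meta functions above; the helper name and free-standing form are assumptions made for illustration, not code from the repository:

#include <cstdint>
#include <vector>

// Illustrative only: with the oneDNN kernel flag set (or an NCHW layout),
// channels live at index 1; otherwise they are the last dimension.
int64_t ChannelCount(const std::vector<int64_t>& x_dims,
                     bool is_run_onednn_kernel,
                     bool layout_is_nchw) {
  return (is_run_onednn_kernel || layout_is_nchw) ? x_dims[1]
                                                  : x_dims[x_dims.size() - 1];
}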

paddle/phi/infermeta/unary.cc

Lines changed: 3 additions & 3 deletions
@@ -3625,7 +3625,7 @@ void Pool2DInferMeta(const MetaTensor& x,
                      const std::string& padding_algorithm,
                      MetaTensor* out,
                      MetaConfig config) {
-  const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
+  const bool channel_last = (config.is_run_onednn_kernel == false) &&
                             (data_format == "NHWC" || data_format == "NDHWC");
   if (!config.is_runtime && kernel_size.FromTensor()) {
     auto x_dims = x.dims();
@@ -3755,7 +3755,7 @@ void PoolInferMeta(const MetaTensor& x,

   // MKL-DNN Kernels are using NCHW order of dims description
   // so we ignore data_format consideration for MKL-DNN kernel
-  const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
+  const bool channel_last = (config.is_run_onednn_kernel == false) &&
                             (data_format == "NHWC" || data_format == "NDHWC");

   // update paddings if "SAME" or global_pooling
@@ -4430,7 +4430,7 @@ void Shape64InferMeta(const MetaTensor& input,
                       MetaConfig config) {
   auto in_dim = input.dims();
   out->set_dims(common::make_ddim({in_dim.size()}));
-  if (config.is_run_mkldnn_kernel) {
+  if (config.is_run_onednn_kernel) {
     out->set_dtype(DataType::INT32);
   } else {
     out->set_dtype(DataType::INT64);

0 commit comments
