9 changes: 6 additions & 3 deletions paddle/fluid/pir/dialect/operator/ir/ops_onednn_extra.yaml
@@ -55,9 +55,11 @@
   extra_args : bool is_test=false
   data_format_tensors : input, out_grad

-# - op : depthwise_conv2d
+- op : depthwise_conv2d
+  extra_args : bool is_test=false, bool fuse_relu_before_depthwise_conv=false, bool use_quantizer=false, str mkldnn_data_type="float32", bool fuse_relu=false, str fuse_activation="", float fuse_alpha=0.0, float fuse_beta=0.0, bool use_addto=false, bool fuse_residual_connection=false, float scale_in=1.0, float scale_out=1.0, float scale_in_eltwise=1.0, float[] scale_weights={1.0f}, bool force_fp32_output=false

-# - op : depthwise_conv2d_grad
+- op : depthwise_conv2d_grad
+  extra_args : bool is_test=false, bool fuse_relu_before_depthwise_conv=false, bool use_quantizer=false, str mkldnn_data_type="float32", bool fuse_relu=false, str fuse_activation="", float fuse_alpha=0.0, float fuse_beta=0.0, bool use_addto=false, bool fuse_residual_connection=false, float scale_in=1.0, float scale_out=1.0, float scale_in_eltwise=1.0, float[] scale_weights={1.0f}, bool force_fp32_output=false

 - op : divide

@@ -75,7 +77,8 @@

 # - op : expand_grad

-# - op : fc
+- op : fc
+  extra_args : bool ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE=true, bool use_quantizer=false, str mkldnn_data_type="float32", float scale_in=1.0, float[] scale_weights={1.0f}, float scale_out=1.0, bool force_fp32_output=false

 # - op : flatten

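Note: with the two entries above uncommented, depthwise_conv2d and its grad are now declared for the PIR OneDNN dialect, so these kernels can be selected on CPU. As a minimal sketch (shapes and values are illustrative, not from this PR, and assume a CPU build with OneDNN enabled), a depthwise convolution is just conv2d with groups equal to the input channel count:

# Illustrative sketch only; shapes/values are hypothetical.
import paddle
import paddle.nn.functional as F

paddle.set_device("cpu")
x = paddle.randn([1, 8, 32, 32])   # NCHW input with 8 channels
w = paddle.randn([8, 1, 3, 3])     # one 3x3 filter per channel -> depthwise
y = F.conv2d(x, w, stride=1, padding=1, groups=8)
print(y.shape)                     # [1, 8, 32, 32]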
4 changes: 4 additions & 0 deletions paddle/phi/api/yaml/op_compat.yaml
@@ -733,6 +733,8 @@
     {input : Input, filter : Filter}
   outputs :
     out : Output
+  attrs :
+    {scale_in : Scale_in, scale_out : Scale_out, scale_in_eltwise : Scale_in_eltwise, scale_weights : Scale_weights}
   extra :
     attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
@@ -1070,6 +1072,8 @@
     bias : Bias
   outputs :
     out : Out
+  attrs :
+    {scale_in : Scale_in, scale_out : Scale_out, scale_weights : Scale_weights}
   extra :
     attrs : [bool ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE = true, bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32", float Scale_in = 1.0f, "float[] Scale_weights = {1.0f}", float Scale_out = 1.0f, bool force_fp32_output = false]

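These op_compat.yaml entries map the legacy capitalized attribute names (as they appear in old-IR programs and the existing tests) onto the snake_case names used by the extra_args declared above. A small sketch of what the mapping amounts to (the dict values are made up; only the name pairs come from the entries above):

# Illustrative only: mirrors the fc attr mapping added above.
legacy_attrs = {"Scale_in": 1.0, "Scale_weights": [1.0], "Scale_out": 1.0}
compat = {"Scale_in": "scale_in", "Scale_weights": "scale_weights", "Scale_out": "scale_out"}
pir_attrs = {compat[k]: v for k, v in legacy_attrs.items()}
print(pir_attrs)  # {'scale_in': 1.0, 'scale_weights': [1.0], 'scale_out': 1.0}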
2 changes: 1 addition & 1 deletion test/mkldnn/test_fc_bf16_mkldnn_op.py
@@ -67,7 +67,7 @@ def setUp(self):
         self.outputs = {'Out': self.output}

     def test_check_output(self):
-        self.check_output_with_place(core.CPUPlace())
+        self.check_output_with_place(core.CPUPlace(), check_pir_onednn=True)

     def test_check_grad_normal(self):
         pass
2 changes: 1 addition & 1 deletion test/mkldnn/test_fc_int8_mkldnn_op.py
@@ -94,7 +94,7 @@ def generate_data(self):

     def test_check_output(self):
         int_atol = 2
-        self.check_output(int_atol)
+        self.check_output(int_atol, check_pir_onednn=True, check_dygraph=False)


 class TestFCINT8NoBiasOneDNNOp(TestFCINT8OneDNNOp):
2 changes: 1 addition & 1 deletion test/mkldnn/test_fc_mkldnn_op.py
@@ -55,7 +55,7 @@ def setUp(self):

     def test_check_output(self):
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
-        self.check_output(check_dygraph=False)
+        self.check_output(check_dygraph=False, check_pir_onednn=True)

     def test_check_grad_normal(self):
         pass
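All three test changes flip on check_pir_onednn, so OpTest additionally runs the operator under the PIR OneDNN executor and compares against the reference output. A minimal sketch of the pattern (the class body, input names, and shapes below are hypothetical; only the check_output flags come from the diffs above):

# Hypothetical minimal FC OpTest following the pattern in the files above.
import numpy as np
from op_test import OpTest

class TestFCOneDNNSketch(OpTest):
    def setUp(self):
        self.op_type = "fc"
        x = np.random.random((4, 8)).astype("float32")
        w = np.random.random((8, 16)).astype("float32")
        b = np.random.random(16).astype("float32")
        self.inputs = {"Input": x, "W": w, "Bias": b}
        self.attrs = {"use_mkldnn": True}
        self.outputs = {"Out": np.dot(x, w) + b}

    def test_check_output(self):
        # OneDNN fc is not covered in dygraph mode (see the TODO above)
        self.check_output(check_dygraph=False, check_pir_onednn=True)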