6 changes: 2 additions & 4 deletions paddle/fluid/pir/dialect/operator/ir/ops_onednn_extra.yaml
@@ -25,7 +25,7 @@
extra_args : bool fuse_with_relu=false
data_format_tensors : x, out_grad

-- op : bilinear_interp
+# - op : bilinear_interp

# - op : cast

@@ -165,7 +165,7 @@

- op : mish_grad

-- op : multi_gru
+# - op : multi_gru

- op : multiply

@@ -255,10 +255,8 @@
- op : subtract_grad

- op : sum
-  extra_args : str mkldnn_data_type="float32"

- op : sum_grad
-  extra_args : str mkldnn_data_type="float32"

# - op : swish

5 changes: 1 addition & 4 deletions test/legacy_test/test_activation_op.py
@@ -83,10 +83,7 @@ def test_check_output(self):
def test_check_grad(self):
if self.dtype == np.float16:
return
-        self.check_grad(
-            ['X'],
-            'Out',
-        )
+        self.check_grad(['X'], 'Out', check_pir_onednn=self.check_pir_onednn)

def init_dtype(self):
self.dtype = np.float64
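Note on the pattern above: a per-test flag is stored on the test case and forwarded explicitly to the check call. A minimal, self-contained sketch of that pattern follows; FakeOpTest and its check_grad are hypothetical stand-ins for illustration, not Paddle's actual OpTest API.

    # Sketch: class-level default, overridden per test case, forwarded at the call site,
    # mirroring the check_grad change in the diff above.
    class FakeOpTest:
        check_pir_onednn = False  # default; oneDNN test cases set this to True

        def check_grad(self, inputs_to_check, output_name, check_pir_onednn=False):
            mode = "PIR oneDNN" if check_pir_onednn else "legacy"
            print(f"grad check of {output_name} w.r.t. {inputs_to_check} ({mode})")

    class TestActivationLike(FakeOpTest):
        check_pir_onednn = True  # opt in, as the oneDNN tests in this PR do

        def test_check_grad(self):
            self.check_grad(['X'], 'Out', check_pir_onednn=self.check_pir_onednn)

    if __name__ == "__main__":
        TestActivationLike().test_check_grad()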
2 changes: 2 additions & 0 deletions test/mkldnn/test_gaussian_random_mkldnn_op.py
@@ -24,12 +24,14 @@
class TestMKLDNNGaussianRandomOpSeed10(TestGaussianRandomOp):
def init_kernel_type(self):
self.use_mkldnn = True
+        self.check_pir_onednn = True


class TestMKLDNNGaussianRandomOpSeed0(TestGaussianRandomOp):
def setUp(self):
TestGaussianRandomOp.setUp(self)
self.use_mkldnn = True
+        self.check_pir_onednn = True
self.attrs = {
"shape": [123, 92],
"mean": 1.0,
18 changes: 16 additions & 2 deletions test/mkldnn/test_reduce_mkldnn_op.py
@@ -27,14 +27,25 @@ def setUp(self):
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
self.attrs = {'use_mkldnn': self.use_mkldnn}
+        self.check_pir_onednn = True

def test_check_output(self):
-        self.check_output(check_dygraph=False, check_pir=False)
+        self.check_output(
+            check_dygraph=False,
+            check_pir=False,
+            check_pir_onednn=self.check_pir_onednn,
+        )


class TestReduceDefaultWithGradOneDNNOp(TestReduceSumDefaultOneDNNOp):
def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_dygraph=False, check_pir=False)
+        self.check_grad(
+            ['X'],
+            'Out',
+            check_dygraph=False,
+            check_pir=False,
+            check_pir_onednn=self.check_pir_onednn,
+        )


class TestReduceSum4DOneDNNOp(TestReduceDefaultWithGradOneDNNOp):
@@ -96,6 +107,7 @@ def setUp(self):
self.outputs = {
'Out': self.inputs['X'].sum(keepdims=self.attrs['keep_dim'])
}
+        self.check_pir_onednn = False


class TestReduceSum4DReduceAllOneDNNOp(TestReduceDefaultWithGradOneDNNOp):
@@ -105,6 +117,7 @@ def setUp(self):
self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float32")}
self.attrs = {'reduce_all': True, 'use_mkldnn': self.use_mkldnn}
self.outputs = {'Out': self.inputs['X'].sum()}
+        self.check_pir_onednn = False


@OpTestTool.skip_if(
@@ -238,6 +251,7 @@ def setUp(self):
'Out': self.inputs['X'].sum()
/ np.asarray(self.inputs['X'].shape).prod()
}
+        self.check_pir_onednn = False


if __name__ == '__main__':
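The reduce tests enable the flag once in the base setUp and have specific variants (keep_dim, reduce_all, mean) opt back out by resetting it. A minimal sketch of that override pattern, again with hypothetical stand-in classes rather than Paddle's OpTest, and with the subclass simplified to call the parent setUp rather than rebuilding inputs as the real tests do:

    # Sketch: base test enables the flag in setUp; a variant subclass
    # re-runs setUp and then disables the PIR oneDNN check for itself.
    class FakeReduceOpTest:
        def setUp(self):
            self.check_pir_onednn = True  # base default, as in the diff

        def test_check_output(self):
            print(f"output check, check_pir_onednn={self.check_pir_onednn}")

    class FakeReduceKeepDim(FakeReduceOpTest):
        def setUp(self):
            super().setUp()
            self.check_pir_onednn = False  # this variant opts out, as in the diff

    if __name__ == "__main__":
        for cls in (FakeReduceOpTest, FakeReduceKeepDim):
            case = cls()
            case.setUp()
            case.test_check_output()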