19 changes: 13 additions & 6 deletions test/legacy_test/op_test.py
@@ -60,6 +60,7 @@
from paddle.autograd.ir_backward import grad as ir_grad
from paddle.base import Scope, core, unique_name
from paddle.base.backward import append_backward
+from paddle.base.core import DataType, VarDesc
from paddle.base.executor import Executor, scope_guard
from paddle.base.framework import (
OpProtoHolder,
@@ -164,19 +165,25 @@ def product(dim):
tensor_to_check = scope.find_var(input_to_check).get_tensor()
tensor_size = product(tensor_to_check.shape())
tensor_to_check_dtype = tensor_to_check._dtype()
-if tensor_to_check_dtype == paddle.float32:
+if tensor_to_check_dtype in [VarDesc.VarType.FP32, DataType.FLOAT32]:
tensor_to_check_dtype = np.float32
-elif tensor_to_check_dtype == paddle.float64:
+elif tensor_to_check_dtype in [VarDesc.VarType.FP64, DataType.FLOAT64]:
tensor_to_check_dtype = np.float64
-elif tensor_to_check_dtype == paddle.float16:
+elif tensor_to_check_dtype in [VarDesc.VarType.FP16, DataType.FLOAT16]:
tensor_to_check_dtype = np.float16
# set delta as np.float16, will automatic convert to float32, float64
delta = np.array(delta).astype(np.float16)
-elif tensor_to_check_dtype == paddle.bfloat16:
+elif tensor_to_check_dtype in [VarDesc.VarType.BF16, DataType.BFLOAT16]:
tensor_to_check_dtype = np.float32
-elif tensor_to_check_dtype == paddle.complex64:
+elif tensor_to_check_dtype in [
+VarDesc.VarType.COMPLEX64,
+DataType.COMPLEX64,
+]:
tensor_to_check_dtype = np.complex64
-elif tensor_to_check_dtype == paddle.complex128:
+elif tensor_to_check_dtype in [
+VarDesc.VarType.COMPLEX128,
+DataType.COMPLEX128,
+]:
tensor_to_check_dtype = np.complex128
else:
raise ValueError(
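The op_test.py change above teaches the numeric-gradient helper to recognise a tensor's dtype whether it is reported as a legacy VarDesc.VarType enum or as a PIR DataType enum, mapping both to the NumPy dtype used for finite-difference checking. The same mapping can also be written as a lookup table; the sketch below is only an illustration of that idea, not code the PR adds, and _NUMPY_DTYPE / _to_numpy_dtype are hypothetical names. The enum members and NumPy targets are taken from the branches above.

import numpy as np

from paddle.base.core import DataType, VarDesc

# Hypothetical table-driven equivalent of the branch chain above: the legacy
# VarDesc.VarType enum and the PIR DataType enum both map to the NumPy dtype
# used for numeric gradient checking.
_NUMPY_DTYPE = {
    VarDesc.VarType.FP32: np.float32,
    DataType.FLOAT32: np.float32,
    VarDesc.VarType.FP64: np.float64,
    DataType.FLOAT64: np.float64,
    VarDesc.VarType.FP16: np.float16,
    DataType.FLOAT16: np.float16,
    VarDesc.VarType.BF16: np.float32,  # bfloat16 gradients are compared in float32
    DataType.BFLOAT16: np.float32,
    VarDesc.VarType.COMPLEX64: np.complex64,
    DataType.COMPLEX64: np.complex64,
    VarDesc.VarType.COMPLEX128: np.complex128,
    DataType.COMPLEX128: np.complex128,
}

def _to_numpy_dtype(tensor_dtype):
    # Unsupported dtypes raise ValueError, as in the original branch chain.
    try:
        return _NUMPY_DTYPE[tensor_dtype]
    except KeyError:
        raise ValueError(f"Unsupported tensor dtype: {tensor_dtype}")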
53 changes: 30 additions & 23 deletions test/legacy_test/test_conv2d_op.py
@@ -15,7 +15,13 @@
import unittest

import numpy as np
-from op_test import OpTest, convert_float_to_uint16, get_numeric_gradient
+from op_test import (
+OpTest,
+convert_float_to_uint16,
+get_device_place,
+get_numeric_gradient,
+is_custom_device,
+)
from testsuite import create_op

import paddle
@@ -162,7 +168,8 @@ def init_kernel_type(self):

def create_test_cudnn_fp16_class(parent, grad_check=True):
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
not (core.is_compiled_with_cuda() or is_custom_device()),
"core is not compiled with CUDA",
)
class TestConv2DCUDNNFp16(parent):
def init_kernel_type(self):
@@ -171,19 +178,19 @@ def init_kernel_type(self):

def test_check_output(self):
if core.is_compiled_with_cuda():
-place = core.CUDAPlace(0)
+place = get_device_place()
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)

def test_check_grad_no_filter(self):
-place = core.CUDAPlace(0)
+place = get_device_place()
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Input'], 'Output', no_grad_set={'Filter'}
)

def test_check_grad_no_input(self):
-place = core.CUDAPlace(0)
+place = get_device_place()
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Filter'], 'Output', no_grad_set={'Input'}
@@ -196,8 +203,8 @@ def test_check_grad_no_input(self):

def create_test_cudnn_bf16_class(parent):
@unittest.skipIf(
-not core.is_compiled_with_cuda()
-or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+not (core.is_compiled_with_cuda() or is_custom_device())
+or not core.is_bfloat16_supported(get_device_place()),
"core is not compiled with CUDA and do not support bfloat16",
)
class TestConv2DCUDNNBF16(parent):
@@ -217,11 +224,11 @@ def init_kernel_type(self):
self.dtype = np.uint16

def test_check_output(self):
-place = core.CUDAPlace(0)
+place = get_device_place()
self.check_output_with_place(place, atol=1e-2)

def test_check_grad_no_filter(self):
-place = core.CUDAPlace(0)
+place = get_device_place()
numeric_grads = self.get_numeric_grad(place, 'Input')
self.check_grad_with_place(
place,
@@ -232,7 +239,7 @@ def test_check_grad_no_filter(self):
)

def test_check_grad_no_input(self):
-place = core.CUDAPlace(0)
+place = get_device_place()
numeric_grads = self.get_numeric_grad(place, 'Filter')
self.check_grad_with_place(
place,
@@ -294,20 +301,20 @@ def init_kernel_type(self):
self.dtype = np.float16

def test_check_output(self):
-if core.is_compiled_with_cuda():
-place = core.CUDAPlace(0)
+if core.is_compiled_with_cuda() or is_custom_device():
+place = get_device_place()
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)

def test_check_grad_no_filter(self):
-place = core.CUDAPlace(0)
+place = get_device_place()
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Input'], 'Output', no_grad_set={'Filter'}
)

def test_check_grad_no_input(self):
-place = core.CUDAPlace(0)
+place = get_device_place()
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Filter'], 'Output', no_grad_set={'Input'}
@@ -491,12 +498,12 @@ def setUp(self):
self.outputs = {'Output': output}

def has_cuda(self):
-return core.is_compiled_with_cuda() and (
+return (core.is_compiled_with_cuda() or is_custom_device()) and (
self.use_cudnn or self.use_cuda
)

def test_check_output(self):
-place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
+place = get_device_place() if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support onednn op in dygraph mode
self.check_output_with_place(
place,
@@ -510,7 +517,7 @@ def test_check_grad(self):
hasattr(self, "no_need_check_grad") and self.no_need_check_grad
):
return
-place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
+place = get_device_place() if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support onednn op in dygraph mode
self.check_grad_with_place(
place,
@@ -526,7 +533,7 @@ def test_check_grad_no_filter(self):
hasattr(self, "no_need_check_grad") and self.no_need_check_grad
):
return
-place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
+place = get_device_place() if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support onednn op in dygraph mode
self.check_grad_with_place(
place,
@@ -543,7 +550,7 @@ def test_check_grad_no_input(self):
hasattr(self, "no_need_check_grad") and self.no_need_check_grad
):
return
-place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
+place = get_device_place() if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support onednn op in dygraph mode
self.check_grad_with_place(
place,
@@ -830,7 +837,7 @@ def has_cuda(self):

def test_check_output(self):
# TODO(wangzhongpu): support onednn op in dygraph mode
-place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
+place = get_device_place() if self.has_cuda() else core.CPUPlace()
self.check_output_with_place(
place,
atol=1e-5,
@@ -842,7 +849,7 @@ def test_check_grad(self):
# TODO(wangzhongpu): support onednn op in dygraph mode
if self.dtype == np.float16:
return
-place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
+place = get_device_place() if self.has_cuda() else core.CPUPlace()
self.check_grad_with_place(
place,
{'Input', 'Filter'},
@@ -856,7 +863,7 @@ def test_check_grad_no_filter(self):
# TODO(wangzhongpu): support onednn op in dygraph mode
if self.dtype == np.float16:
return
-place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
+place = get_device_place() if self.has_cuda() else core.CPUPlace()
self.check_grad_with_place(
place,
['Input'],
@@ -871,7 +878,7 @@ def test_check_grad_no_input(self):
# TODO(wangzhongpu): support onednn op in dygraph mode
if self.dtype == np.float16:
return
-place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
+place = get_device_place() if self.has_cuda() else core.CPUPlace()
self.check_grad_with_place(
place,
['Filter'],
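Throughout test_conv2d_op.py the PR swaps hard-coded core.CUDAPlace(0) for the op_test helper get_device_place() and widens the core.is_compiled_with_cuda() guards with is_custom_device(), so the same CUDNN fp16/bf16 test classes can run on custom-device builds. Below is a minimal sketch of that place-selection pattern; it assumes it runs from test/legacy_test so that op_test is importable, and the helper names _accelerator_available and pick_test_place are illustrative, not part of the PR.

from op_test import get_device_place, is_custom_device

from paddle.base import core

def _accelerator_available():
    # Mirrors the updated guards in the diff: a CUDA build or a custom-device
    # build both count as having an accelerator.
    return core.is_compiled_with_cuda() or is_custom_device()

def pick_test_place():
    # get_device_place() returns the accelerator place; fall back to CPU
    # when neither CUDA nor a custom device is available.
    return get_device_place() if _accelerator_available() else core.CPUPlace()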