25 changes: 12 additions & 13 deletions test/legacy_test/test_variable.py
@@ -34,32 +34,31 @@ def setUp(self):
         np.random.seed(2022)

     def test_np_dtype_convert(self):
-        DT = core.VarDesc.VarType
         convert = convert_np_dtype_to_dtype_
-        self.assertEqual(DT.FP32, convert(np.float32))
-        self.assertEqual(DT.FP16, convert("float16"))
-        self.assertEqual(DT.FP64, convert("float64"))
-        self.assertEqual(DT.INT32, convert("int32"))
-        self.assertEqual(DT.INT16, convert("int16"))
-        self.assertEqual(DT.INT64, convert("int64"))
-        self.assertEqual(DT.BOOL, convert("bool"))
-        self.assertEqual(DT.INT8, convert("int8"))
-        self.assertEqual(DT.UINT8, convert("uint8"))
+        self.assertEqual(paddle.float32, convert(np.float32))
+        self.assertEqual(paddle.float16, convert("float16"))
+        self.assertEqual(paddle.float64, convert("float64"))
+        self.assertEqual(paddle.int32, convert("int32"))
+        self.assertEqual(paddle.int16, convert("int16"))
+        self.assertEqual(paddle.int64, convert("int64"))
+        self.assertEqual(paddle.bool, convert("bool"))
+        self.assertEqual(paddle.int8, convert("int8"))
+        self.assertEqual(paddle.uint8, convert("uint8"))

     def test_var(self):
         b = default_main_program().current_block()
         w = b.create_var(
             dtype="float64", shape=[784, 100], lod_level=0, name="fc.w"
         )
         self.assertNotEqual(str(w), "")
-        self.assertEqual(core.VarDesc.VarType.FP64, w.dtype)
+        self.assertEqual(paddle.float64, w.dtype)
         self.assertEqual((784, 100), w.shape)
         self.assertEqual("fc.w", w.name)
         self.assertEqual("fc.w@GRAD", w.grad_name)
         self.assertEqual(0, w.lod_level)

         w = b.create_var(name='fc.w')
-        self.assertEqual(core.VarDesc.VarType.FP64, w.dtype)
+        self.assertEqual(paddle.float64, w.dtype)
         self.assertEqual((784, 100), w.shape)
         self.assertEqual("fc.w", w.name)
         self.assertEqual("fc.w@GRAD", w.grad_name)
@@ -440,7 +439,7 @@ def test_variable_in_dygraph_mode(self):

         self.assertTrue(var.name.startswith('_generated_var_'))
         self.assertEqual(var.shape, (1, 1))
-        self.assertEqual(var.dtype, base.core.VarDesc.VarType.FP64)
+        self.assertEqual(var.dtype, paddle.float64)
         self.assertEqual(var.type, base.core.VarDesc.VarType.LOD_TENSOR)

     def test_create_selected_rows(self):
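Note: the `test_variable.py` changes lean on Paddle's top-level dtype aliases (`paddle.float32`, `paddle.int64`, ...) comparing equal both to what `convert_np_dtype_to_dtype_` returns and to a `Variable`'s compile-time `dtype`. A minimal sketch of that equivalence, assuming a recent Paddle build where the aliases are exposed at the package root (the converter's import path below is an assumption; the diff only shows the bare name):

# Sketch of the equivalence these tests now rely on; assumes a recent
# paddle build with dtype aliases at the package root.
import numpy as np
import paddle
from paddle.base.framework import convert_np_dtype_to_dtype_  # assumed path

# Converted numpy dtypes (or their string names) compare equal to the
# paddle.* aliases, so tests no longer need core.VarDesc.VarType.*.
assert convert_np_dtype_to_dtype_(np.float32) == paddle.float32
assert convert_np_dtype_to_dtype_("int64") == paddle.int64
assert convert_np_dtype_to_dtype_("bool") == paddle.bool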
3 changes: 1 addition & 2 deletions test/sequence/test_sequence_pad_op.py
@@ -19,7 +19,6 @@
 from op_test import OpTest

 import paddle
-from paddle.base import core


 class TestSequencePadOp(OpTest):
@@ -191,7 +190,7 @@ def test_length_dtype(self):
             x=x, pad_value=pad_value
         )
         # check if the dtype of length is int64 in compile time
-        self.assertEqual(length.dtype, core.VarDesc.VarType.INT64)
+        self.assertEqual(length.dtype, paddle.int64)


 if __name__ == '__main__':
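The `sequence_pad` change is the same pattern at the static-graph level: a compile-time `Variable.dtype` now compares equal to `paddle.int64` directly. A self-contained sketch of that comparison, using a plain `paddle.static.data` variable instead of the sequence-pad output so it runs without any LoD setup:

# Sketch: compile-time Variable dtypes compare equal to paddle.* aliases.
import paddle

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    x = paddle.static.data(name="x", shape=[-1, 10], dtype="int64")
    assert x.dtype == paddle.int64  # formerly core.VarDesc.VarType.INT64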
18 changes: 9 additions & 9 deletions test/xpu/test_gaussian_random_op_xpu.py
@@ -24,9 +24,9 @@

 import paddle
 from paddle import base
-from paddle.base import core

 paddle.enable_static()
+from paddle.base import core
 from paddle.tensor import random

 typeid_dict = {
@@ -285,22 +285,22 @@ def test_default_dtype(self):
         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
             out = paddle.tensor.random.gaussian([2, 3])
-            self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP16)
+            self.assertEqual(out.dtype, paddle.float16)

         def test_default_bf16():
             paddle.framework.set_default_dtype('bfloat16')
             out = paddle.tensor.random.gaussian([2, 3])
-            self.assertEqual(out.dtype, base.core.VarDesc.VarType.BF16)
+            self.assertEqual(out.dtype, paddle.bfloat16)

         def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
             out = paddle.tensor.random.gaussian([2, 3])
-            self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP32)
+            self.assertEqual(out.dtype, paddle.float32)

         def test_default_fp64():
             paddle.framework.set_default_dtype('float64')
             out = paddle.tensor.random.gaussian([2, 3])
-            self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP64)
+            self.assertEqual(out.dtype, paddle.float64)

         test_default_fp64()
         test_default_fp32()
@@ -317,22 +317,22 @@ def test_default_dtype(self):
         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
             out = paddle.tensor.random.standard_normal([2, 3])
-            self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP16)
+            self.assertEqual(out.dtype, paddle.float16)

         def test_default_bf16():
             paddle.framework.set_default_dtype('bfloat16')
             out = paddle.tensor.random.standard_normal([2, 3])
-            self.assertEqual(out.dtype, base.core.VarDesc.VarType.BF16)
+            self.assertEqual(out.dtype, paddle.bfloat16)

         def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
             out = paddle.tensor.random.standard_normal([2, 3])
-            self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP32)
+            self.assertEqual(out.dtype, paddle.float32)

         def test_default_fp64():
             paddle.framework.set_default_dtype('float64')
             out = paddle.tensor.random.standard_normal([2, 3])
-            self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP64)
+            self.assertEqual(out.dtype, paddle.float64)

         test_default_fp64()
         test_default_fp32()
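In eager mode the same aliases work against Tensor dtypes, which is what these random-op tests check after `set_default_dtype`. A CPU-safe sketch restricted to fp32/fp64, since fp16/bf16 sampling may need specific hardware support:

# Sketch: the default dtype propagates to random ops, and the output's
# dtype compares equal to the paddle.* alias. fp32/fp64 only, so the
# check stays runnable on plain CPU builds.
import paddle

paddle.disable_static()
for name, alias in [("float32", paddle.float32), ("float64", paddle.float64)]:
    paddle.framework.set_default_dtype(name)
    out = paddle.tensor.random.gaussian([2, 3])
    assert out.dtype == alias
paddle.framework.set_default_dtype("float32")  # restore the default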