Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 12 additions & 14 deletions test/dygraph_to_static/test_tensor_memcpy_on_cpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
import numpy as np
from dygraph_to_static_utils import (
Dy2StTestBase,
enable_to_static_guard,
test_legacy_and_pt,
test_legacy_and_pt_and_pir,
)
Expand All @@ -43,19 +44,18 @@ def tensor_copy_to_cuda_with_warning(x, device_id=None, blocking=True):


class TestTensorCopyToCpuOnDefaultCPU(Dy2StTestBase):
def _run(self, to_static):
paddle.jit.enable_to_static(to_static)
def _run(self):
    """Copy a ones tensor to CPU through a to_static-compiled function.

    Returns a (source place, copied place, copied ndarray) triple so the
    caller can verify both tensor placement and values.
    """
    src = paddle.ones([1, 2, 3])
    static_fn = paddle.jit.to_static(tensor_copy_to_cpu)
    dst = static_fn(src)
    return src.place, dst.place, dst.numpy()

@test_legacy_and_pt_and_pir
def test_tensor_cpu_on_default_cpu(self):
paddle.framework._set_expected_place(paddle.CPUPlace())
dygraph_x1_place, dygraph_place, dygraph_res = self._run(
to_static=False
)
static_x1_place, static_place, static_res = self._run(to_static=True)
with enable_to_static_guard(False):
dygraph_x1_place, dygraph_place, dygraph_res = self._run()

static_x1_place, static_place, static_res = self._run()
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
self.assertTrue(dygraph_x1_place.is_cpu_place())
self.assertTrue(static_x1_place.is_cpu_place())
Expand All @@ -64,8 +64,7 @@ def test_tensor_cpu_on_default_cpu(self):


class TestTensorCopyToCUDAOnDefaultCPU(Dy2StTestBase):
def _run(self, to_static):
paddle.jit.enable_to_static(to_static)
def _run(self):
    """Copy a ones tensor to CUDA through a to_static-compiled function.

    Returns a (source place, copied place, copied ndarray) triple so the
    caller can verify both tensor placement and values.
    """
    src = paddle.ones([1, 2, 3])
    static_fn = paddle.jit.to_static(tensor_copy_to_cuda)
    dst = static_fn(src)
    return src.place, dst.place, dst.numpy()
Expand All @@ -82,10 +81,10 @@ def test_tensor_cuda_on_default_cpu(self):
See ConstructDeviceContext() in interpreter_util.cc.
"""
paddle.framework._set_expected_place(paddle.CPUPlace())
dygraph_x1_place, dygraph_place, dygraph_res = self._run(
to_static=False
)
static_x1_place, static_place, static_res = self._run(to_static=True)
with enable_to_static_guard(False):
dygraph_x1_place, dygraph_place, dygraph_res = self._run()

static_x1_place, static_place, static_res = self._run()
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
self.assertTrue(dygraph_x1_place.is_cpu_place())
self.assertTrue(static_x1_place.is_cpu_place())
Expand All @@ -94,8 +93,7 @@ def test_tensor_cuda_on_default_cpu(self):


class TestTensorCopyToCUDAWithWarningOnCPU(unittest.TestCase):
def _run(self, to_static):
paddle.jit.enable_to_static(to_static)
def _run(self):
x1 = paddle.ones([1, 2, 3])
x2 = paddle.jit.to_static(tensor_copy_to_cuda_with_warning)(
x1, device_id=1, blocking=False
Expand Down
26 changes: 12 additions & 14 deletions test/dygraph_to_static/test_tensor_memcpy_on_gpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
import numpy as np
from dygraph_to_static_utils import (
Dy2StTestBase,
enable_to_static_guard,
test_legacy_and_pt_and_pir,
)

Expand All @@ -43,8 +44,7 @@ def tensor_copy_to_cuda_with_warning(x, device_id=None, blocking=True):


class TestTensorCopyToCpuOnDefaultGPU(Dy2StTestBase):
def _run(self, to_static):
paddle.jit.enable_to_static(to_static)
def _run(self):
    """Copy a ones tensor to CPU (default device GPU) via to_static.

    Returns a (source place, copied place, copied ndarray) triple so the
    caller can verify both tensor placement and values.
    """
    src = paddle.ones([1, 2, 3])
    static_fn = paddle.jit.to_static(tensor_copy_to_cpu)
    dst = static_fn(src)
    return src.place, dst.place, dst.numpy()
Expand All @@ -55,10 +55,10 @@ def test_tensor_cpu_on_default_gpu(self):
return
place = paddle.CUDAPlace(int(os.environ.get('FLAGS_selected_gpus', 0)))
paddle.framework._set_expected_place(place)
dygraph_x1_place, dygraph_place, dygraph_res = self._run(
to_static=False
)
static_x1_place, static_place, static_res = self._run(to_static=True)
with enable_to_static_guard(False):
dygraph_x1_place, dygraph_place, dygraph_res = self._run()

static_x1_place, static_place, static_res = self._run()
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
self.assertTrue(dygraph_x1_place.is_gpu_place())
self.assertTrue(static_x1_place.is_gpu_place())
Expand All @@ -67,8 +67,7 @@ def test_tensor_cpu_on_default_gpu(self):


class TestTensorCopyToCUDAOnDefaultGPU(Dy2StTestBase):
def _run(self, to_static):
paddle.jit.enable_to_static(to_static)
def _run(self):
    """Copy a ones tensor to CUDA (default device GPU) via to_static.

    Returns a (source place, copied place, copied ndarray) triple so the
    caller can verify both tensor placement and values.
    """
    src = paddle.ones([1, 2, 3])
    static_fn = paddle.jit.to_static(tensor_copy_to_cuda)
    dst = static_fn(src)
    return src.place, dst.place, dst.numpy()
Expand All @@ -79,10 +78,10 @@ def test_tensor_cuda_on_default_gpu(self):
return
place = paddle.CUDAPlace(int(os.environ.get('FLAGS_selected_gpus', 0)))
paddle.framework._set_expected_place(place)
dygraph_x1_place, dygraph_place, dygraph_res = self._run(
to_static=False
)
static_x1_place, static_place, static_res = self._run(to_static=True)
with enable_to_static_guard(False):
dygraph_x1_place, dygraph_place, dygraph_res = self._run()

static_x1_place, static_place, static_res = self._run()
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
self.assertTrue(dygraph_x1_place.is_gpu_place())
self.assertTrue(static_x1_place.is_gpu_place())
Expand All @@ -91,8 +90,7 @@ def test_tensor_cuda_on_default_gpu(self):


class TestTensorCopyToCUDAWithWarningOnGPU(unittest.TestCase):
def _run(self, to_static):
paddle.jit.enable_to_static(to_static)
def _run(self):
x1 = paddle.ones([1, 2, 3])
x2 = paddle.jit.to_static(tensor_copy_to_cuda_with_warning)(
x1, device_id=1, blocking=False
Expand Down
43 changes: 24 additions & 19 deletions test/dygraph_to_static/test_tensor_methods.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
import numpy as np
from dygraph_to_static_utils import (
Dy2StTestBase,
enable_to_static_guard,
test_ast_only,
test_legacy_and_pt_and_pir,
)
Expand All @@ -31,16 +32,17 @@ def tensor_clone(x):


class TestTensorClone(Dy2StTestBase):
def _run(self, to_static):
paddle.jit.enable_to_static(to_static)
def _run(self):
    """Run tensor_clone under to_static and return the result as ndarray."""
    static_clone = paddle.jit.to_static(tensor_clone)
    return static_clone(paddle.ones([1, 2, 3])).numpy()

@test_legacy_and_pt_and_pir
def test_tensor_clone(self):
    """Dygraph and static executions of tensor_clone must agree."""
    paddle.disable_static()
    # Run once in pure dygraph mode (to_static disabled via the guard)...
    with enable_to_static_guard(False):
        dygraph_res = self._run()
    # ...and once with to_static enabled (the default outside the guard).
    static_res = self._run()
    np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)


Expand All @@ -51,8 +53,7 @@ def tensor_numpy(x):


class TestTensorDygraphOnlyMethodError(Dy2StTestBase):
def _run(self, to_static):
paddle.jit.enable_to_static(to_static)
def _run(self):
    """Invoke tensor_numpy through to_static and return its ndarray value."""
    inp = paddle.zeros([2, 2])
    out = paddle.jit.to_static(tensor_numpy)(inp)
    return out.numpy()
Expand All @@ -61,9 +62,11 @@ def _run(self, to_static):
@test_legacy_and_pt_and_pir
def test_to_static_numpy_report_error(self):
    """Tensor.numpy() on an intermediate value must fail under to_static."""
    paddle.disable_static()
    # Dygraph mode: calling .numpy() on a Tensor is legal.
    with enable_to_static_guard(False):
        self._run()
    # Static mode: .numpy() on a non-materialized value is rejected.
    with self.assertRaises(AssertionError):
        self._run()


def tensor_item(x):
Expand All @@ -73,16 +76,17 @@ def tensor_item(x):


class TestTensorItem(Dy2StTestBase):
def _run(self, to_static):
paddle.jit.enable_to_static(to_static)
def _run(self):
    """Run tensor_item under to_static on a single-element tensor."""
    static_item = paddle.jit.to_static(tensor_item)
    return static_item(paddle.ones([1]))

@test_legacy_and_pt_and_pir
def test_tensor_clone(self):
    """Dygraph and static executions of tensor_item must agree.

    NOTE(review): the method name says "clone" but this class exercises
    tensor_item; name kept unchanged so test discovery is unaffected.
    """
    paddle.disable_static()
    # Dygraph baseline (to_static disabled via the guard).
    with enable_to_static_guard(False):
        dygraph_res = self._run()
    # Static run with to_static enabled (the default).
    static_res = self._run()
    np.testing.assert_allclose(dygraph_res, static_res)


Expand All @@ -95,7 +99,6 @@ def tensor_size(x):

class TestTensorSize(Dy2StTestBase):
def _run(self, to_static):
paddle.jit.enable_to_static(to_static)
x = paddle.ones([1, 2, 3])
if not to_static:
return tensor_size(x)
Expand All @@ -107,7 +110,9 @@ def _run(self, to_static):
@test_legacy_and_pt_and_pir
def test_tensor_clone(self):
    """Dygraph and static executions of tensor_size must agree.

    NOTE(review): the method name says "clone" but this class exercises
    tensor_size; name kept unchanged so test discovery is unaffected.
    """
    paddle.disable_static()
    # Dygraph baseline: guard disables to_static for the whole run.
    with enable_to_static_guard(False):
        dygraph_res = self._run(to_static=False)
    # Static run with to_static enabled.
    static_res = self._run(to_static=True)
    np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-5)

Expand All @@ -118,17 +123,17 @@ def true_div(x, y):


class TestTrueDiv(Dy2StTestBase):
def _run(self, to_static):
paddle.jit.enable_to_static(to_static)
def _run(self):
    """Run true_div on int64 inputs under to_static; return the ndarray."""
    numerator = paddle.to_tensor([3], dtype='int64')
    denominator = paddle.to_tensor([4], dtype='int64')
    static_div = paddle.jit.to_static(true_div)
    return static_div(numerator, denominator).numpy()

@test_legacy_and_pt_and_pir
def test_true_div(self):
    """Integer true-division must match between dygraph and static modes."""
    paddle.disable_static()
    # Dygraph baseline (to_static disabled via the guard).
    with enable_to_static_guard(False):
        dygraph_res = self._run()
    # Static run with to_static enabled (the default).
    static_res = self._run()
    np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-5)


Expand Down
11 changes: 6 additions & 5 deletions test/dygraph_to_static/test_tsm.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import numpy as np
from dygraph_to_static_utils import (
Dy2StTestBase,
enable_to_static_guard,
test_default_and_pir,
)
from tsm_config_utils import merge_configs, parse_config, print_configs
Expand Down Expand Up @@ -292,9 +293,7 @@ def create_optimizer(cfg, params):
return optimizer


def train(args, fake_data_reader, to_static):
paddle.jit.enable_to_static(to_static)

def train(args, fake_data_reader):
config = parse_config(args.config)
train_config = merge_configs(config, 'train', vars(args))
valid_config = merge_configs(config, 'valid', vars(args))
Expand Down Expand Up @@ -381,8 +380,10 @@ def test_dygraph_static_same_loss(self):
paddle.set_flags({"FLAGS_cudnn_deterministic": True})
args = parse_args()
fake_data_reader = FakeDataReader("train", parse_config(args.config))
dygraph_loss = train(args, fake_data_reader, to_static=False)
static_loss = train(args, fake_data_reader, to_static=True)
with enable_to_static_guard(False):
dygraph_loss = train(args, fake_data_reader)

static_loss = train(args, fake_data_reader)
np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05)


Expand Down
11 changes: 6 additions & 5 deletions test/dygraph_to_static/test_word2vec.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
import numpy as np
from dygraph_to_static_utils import (
Dy2StTestBase,
enable_to_static_guard,
test_legacy_and_pt_and_pir,
)

Expand Down Expand Up @@ -275,9 +276,7 @@ def forward(self, center_words, target_words, label):
total_steps = len(dataset) * epoch_num // batch_size


def train(to_static):
paddle.jit.enable_to_static(to_static)

def train():
random.seed(0)
np.random.seed(0)

Expand Down Expand Up @@ -322,8 +321,10 @@ def train(to_static):
class TestWord2Vec(Dy2StTestBase):
@test_legacy_and_pt_and_pir
def test_dygraph_static_same_loss(self):
    """Word2Vec training loss must match between dygraph and static graphs."""
    # Dygraph baseline: guard disables to_static for the whole training run.
    with enable_to_static_guard(False):
        dygraph_loss = train()
    # Static run with to_static enabled (the default).
    static_loss = train()
    np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05)


Expand Down
11 changes: 6 additions & 5 deletions test/dygraph_to_static/test_yolov3.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
import numpy as np
from dygraph_to_static_utils import (
Dy2StTestBase,
enable_to_static_guard,
test_default_and_pir,
)
from yolov3 import YOLOv3, cfg
Expand Down Expand Up @@ -81,9 +82,7 @@ def generator():
fake_data_reader = FakeDataReader()


def train(to_static):
paddle.jit.enable_to_static(to_static)

def train():
random.seed(0)
np.random.seed(0)

Expand Down Expand Up @@ -172,8 +171,10 @@ def train(to_static):
class TestYolov3(Dy2StTestBase):
@test_default_and_pir
def test_dygraph_static_same_loss(self):
    """YOLOv3 training loss must match between dygraph and static graphs."""
    # Dygraph baseline: guard disables to_static for the whole training run.
    with enable_to_static_guard(False):
        dygraph_loss = train()
    # Static run with to_static enabled (the default).
    static_loss = train()
    np.testing.assert_allclose(
        dygraph_loss, static_loss, rtol=0.001, atol=1e-05
    )
Expand Down