Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion paddlenlp/trainer/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -2304,8 +2304,8 @@ def _load_optimizer_and_scheduler(self, checkpoint):
checkpoint, OPTIMIZER_NAME, self.model_wrapped
)
else:
use_unified_checkpoint = False
if self.args.unified_checkpoint:
use_unified_checkpoint = False
if self.is_unified_checkpoint(checkpoint):
use_unified_checkpoint = True
else:
Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ testpaths = [
"tests/layers",
"tests/metrics",
"tests/ops",
"tests/trainer",
"tests/transformers",
"tests/peft",
"tests/prompt",
Expand Down
15 changes: 15 additions & 0 deletions llm/llama/tests/parallel_launch.py → tests/parallel_launch.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@
watch_local_trainers,
)

from paddlenlp.utils.downloader import get_path_from_url_with_filelock

logger = logging.getLogger("root")


Expand Down Expand Up @@ -187,6 +189,11 @@ def run_8gpu(self, *args, **kwargs):
self.selected_gpus = get_gpus("0,1,2,3,4,5,6,7")
self.run_n_gpu(*args, **kwargs)

def run_n1c2(self, *args, **kwargs):
    """Run the distributed job on one node with two GPUs (cards 0 and 1)."""
    self.num_nodes = 1
    self.selected_gpus = get_gpus("0,1")
    self.run_n_gpu(*args, **kwargs)

def run_n1c8(self, *args, **kwargs):
self.selected_gpus = get_gpus("0,1,2,3,4,5,6,7")
self.num_nodes = 1
Expand Down Expand Up @@ -247,6 +254,14 @@ def run_n_gpu(
finally:
terminate_local_procs(procs)

def prepare_inputs_data(self, input_dir, files):
    """Ensure every URL in *files* has been downloaded into *input_dir*.

    Creates *input_dir* if needed; a file whose basename already exists
    there is not downloaded again.
    """
    os.makedirs(input_dir, exist_ok=True)
    for url in files:
        # The remote file is saved under its URL basename inside input_dir.
        target = os.path.join(input_dir, url.split("/")[-1])
        if not os.path.exists(target):
            get_path_from_url_with_filelock(url, root_dir=input_dir)


class TestMultipleWithGloo(unittest.TestCase):
def run_2cpu(self, target_file_name):
Expand Down
65 changes: 64 additions & 1 deletion tests/testing_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
import yaml

from paddlenlp.trainer.argparser import strtobool
from paddlenlp.utils.import_utils import is_package_available
from paddlenlp.utils.import_utils import is_package_available, is_paddle_available

__all__ = ["get_vocab_list", "stable_softmax", "cross_entropy"]

Expand Down Expand Up @@ -407,3 +407,66 @@ def run_command(command: list[str], return_stdout=False):
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e


def require_paddle_multi_gpu(test_case):
    """Skip *test_case* unless PaddlePaddle is installed and more than one GPU is visible.

    To run *only* the multi_gpu tests, assuming all test names contain multi_gpu:
    $ pytest -sv ./tests -k "multi_gpu"
    """
    if not is_paddle_available():
        return unittest.skip("test requires PaddlePaddle")(test_case)

    import paddle

    gpu_count = paddle.device.cuda.device_count()
    return unittest.skipIf(gpu_count <= 1, "test requires multiple GPUs")(test_case)


def require_paddle_non_multi_gpu(test_case):
    """Skip *test_case* unless at most one GPU is visible (requires PaddlePaddle)."""
    if not is_paddle_available():
        return unittest.skip("test requires PaddlePaddle")(test_case)

    import paddle

    gpu_count = paddle.device.cuda.device_count()
    return unittest.skipIf(gpu_count >= 2, "test requires 0 or 1 GPU")(test_case)


def require_paddle_at_least_2_gpu(test_case):
    """Skip *test_case* unless PaddlePaddle is installed and at least two GPUs are visible."""
    if not is_paddle_available():
        return unittest.skip("test requires PaddlePaddle")(test_case)

    import paddle

    gpu_count = paddle.device.cuda.device_count()
    return unittest.skipIf(gpu_count < 2, "test requires at least 2 GPUs")(test_case)


def require_paddle_at_least_8_gpu(test_case):
    """Skip *test_case* unless PaddlePaddle is installed and at least eight GPUs are visible."""
    if not is_paddle_available():
        return unittest.skip("test requires PaddlePaddle")(test_case)

    import paddle

    gpu_count = paddle.device.cuda.device_count()
    return unittest.skipIf(gpu_count < 8, "test requires at least 8 GPUs")(test_case)


def require_paddle_up_to_2_gpus(test_case):
    """Skip *test_case* unless at most two GPUs are visible (requires PaddlePaddle)."""
    if not is_paddle_available():
        return unittest.skip("test requires PaddlePaddle")(test_case)

    import paddle

    gpu_count = paddle.device.cuda.device_count()
    return unittest.skipIf(gpu_count >= 3, "test requires 0 or 1 or 2 GPUs")(test_case)
Loading