
Commit 950b711

Replace lm-eval bash script with pytest and use enforce_eager for faster CI (#17717)
Signed-off-by: mgoin <[email protected]>
1 parent e50a1f1 commit 950b711

File tree

4 files changed: +52 -91 lines changed

.buildkite/lm-eval-harness/conftest.py (added)
.buildkite/lm-eval-harness/run-tests.sh (deleted)
.buildkite/lm-eval-harness/test_lm_eval_correctness.py (modified)
.buildkite/test-pipeline.yaml (modified)
.buildkite/lm-eval-harness/conftest.py

Lines changed: 39 additions & 0 deletions

@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: Apache-2.0
+from pathlib import Path
+
+import pytest
+
+
+def pytest_addoption(parser):
+    parser.addoption(
+        "--config-list-file",
+        action="store",
+        help="Path to the file listing model config YAMLs (one per line)")
+    parser.addoption("--tp-size",
+                     action="store",
+                     default="1",
+                     help="Tensor parallel size to use for evaluation")
+
+
+@pytest.fixture(scope="session")
+def config_list_file(pytestconfig, config_dir):
+    rel_path = pytestconfig.getoption("--config-list-file")
+    return config_dir / rel_path
+
+
+@pytest.fixture(scope="session")
+def tp_size(pytestconfig):
+    return pytestconfig.getoption("--tp-size")
+
+
+def pytest_generate_tests(metafunc):
+    if "config_filename" in metafunc.fixturenames:
+        rel_path = metafunc.config.getoption("--config-list-file")
+        config_list_file = Path(rel_path).resolve()
+        config_dir = config_list_file.parent
+        with open(config_list_file, encoding="utf-8") as f:
+            configs = [
+                config_dir / line.strip() for line in f
+                if line.strip() and not line.startswith("#")
+            ]
+        metafunc.parametrize("config_filename", configs)
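This conftest.py is what replaces the bash loop: pytest_generate_tests reads the file passed via --config-list-file, skips blank lines and # comments, resolves each remaining entry against the list file's directory, and parametrizes the test with one YAML path per model. A minimal sketch of that expansion outside pytest; the list-file contents are illustrative (only Meta-Llama-3-8B-Instruct.yaml appears elsewhere in this commit):

# Sketch: the same filtering pytest_generate_tests applies to the list file.
from pathlib import Path

config_dir = Path(".buildkite/lm-eval-harness/configs")
# Illustrative list-file lines, as read from e.g. configs/models-small.txt:
lines = [
    "# comment lines and blanks are skipped",
    "Meta-Llama-3-8B-Instruct.yaml",
    "",
    "Qwen2-1.5B-Instruct.yaml",
]

# Drop blanks and comments; resolve entries relative to the configs dir.
configs = [
    config_dir / line.strip() for line in lines
    if line.strip() and not line.startswith("#")
]
print(configs)  # one path per collected test_lm_eval_correctness_param case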

.buildkite/lm-eval-harness/run-tests.sh

Lines changed: 0 additions & 59 deletions
This file was deleted.

.buildkite/lm-eval-harness/test_lm_eval_correctness.py

Lines changed: 11 additions & 30 deletions
@@ -3,67 +3,48 @@
 LM eval harness on model to compare vs HF baseline computed offline.
 Configs are found in configs/$MODEL.yaml
 
-* export LM_EVAL_TEST_DATA_FILE=configs/Meta-Llama-3-70B-Instruct.yaml
-* export LM_EVAL_TP_SIZE=4
-* pytest -s test_lm_eval_correctness.py
+pytest -s -v test_lm_eval_correctness.py \
+    --config-list-file=configs/models-small.txt \
+    --tp-size=1
 """
 
-import os
-from pathlib import Path
-
 import lm_eval
-import numpy
-import pytest
+import numpy as np
 import yaml
 
 RTOL = 0.08
-TEST_DATA_FILE = os.environ.get(
-    "LM_EVAL_TEST_DATA_FILE",
-    ".buildkite/lm-eval-harness/configs/Meta-Llama-3-8B-Instruct.yaml")
-
-TP_SIZE = os.environ.get("LM_EVAL_TP_SIZE", 1)
 
 
-def launch_lm_eval(eval_config):
+def launch_lm_eval(eval_config, tp_size):
     trust_remote_code = eval_config.get('trust_remote_code', False)
-
     model_args = f"pretrained={eval_config['model_name']}," \
-                 f"tensor_parallel_size={TP_SIZE}," \
+                 f"tensor_parallel_size={tp_size}," \
+                 f"enforce_eager=true," \
                  f"add_bos_token=true," \
                  f"trust_remote_code={trust_remote_code}"
-
     results = lm_eval.simple_evaluate(
         model="vllm",
         model_args=model_args,
         tasks=[task["name"] for task in eval_config["tasks"]],
         num_fewshot=eval_config["num_fewshot"],
         limit=eval_config["limit"],
         batch_size="auto")
-
     return results
 
 
-def test_lm_eval_correctness():
-    eval_config = yaml.safe_load(
-        Path(TEST_DATA_FILE).read_text(encoding="utf-8"))
-
-    if eval_config[
-            "model_name"] == "nm-testing/Meta-Llama-3-70B-Instruct-FBGEMM-nonuniform":  #noqa: E501
-        pytest.skip("FBGEMM is currently failing on main.")
+def test_lm_eval_correctness_param(config_filename, tp_size):
+    eval_config = yaml.safe_load(config_filename.read_text(encoding="utf-8"))
 
-    # Launch eval requests.
-    results = launch_lm_eval(eval_config)
+    results = launch_lm_eval(eval_config, tp_size)
 
-    # Confirm scores match ground truth.
     success = True
     for task in eval_config["tasks"]:
         for metric in task["metrics"]:
             ground_truth = metric["value"]
             measured_value = results["results"][task["name"]][metric["name"]]
             print(f'{task["name"]} | {metric["name"]}: '
                   f'ground_truth={ground_truth} | measured={measured_value}')
-            success = success and numpy.isclose(
+            success = success and np.isclose(
                 ground_truth, measured_value, rtol=RTOL)
 
-    # Assert at the end, print all scores even on failure for debugging.
     assert success
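For reference, each entry in the config list points at a YAML file whose keys the test reads directly: model_name, tasks (each with a name and a list of metrics carrying ground-truth values), num_fewshot, and limit. A hedged sketch of that schema; the model, task, metric, and values below are illustrative assumptions, not taken from this commit:

# Illustrative config with the keys the test reads; the model, task,
# metric, and values are assumptions, not from this commit.
import yaml

example_config = yaml.safe_load("""
model_name: meta-llama/Meta-Llama-3-8B-Instruct
tasks:
- name: gsm8k
  metrics:
  - name: exact_match,strict-match
    value: 0.756
num_fewshot: 5
limit: 250
""")

# Mirrors the access pattern in test_lm_eval_correctness_param:
for task in example_config["tasks"]:
    for metric in task["metrics"]:
        print(task["name"], metric["name"], metric["value"])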

.buildkite/test-pipeline.yaml

Lines changed: 2 additions & 2 deletions
@@ -408,7 +408,7 @@ steps:
   - vllm/model_executor/layers/quantization
   commands:
   - export VLLM_WORKER_MULTIPROC_METHOD=spawn
-  - bash ./run-tests.sh -c configs/models-small.txt -t 1
+  - pytest -s -v test_lm_eval_correctness.py --config-list-file=configs/models-small.txt --tp-size=1
 
 - label: OpenAI API correctness
   source_file_dependencies:
@@ -713,4 +713,4 @@ steps:
   - vllm/model_executor/layers/quantization
   commands:
   - export VLLM_WORKER_MULTIPROC_METHOD=spawn
-  - bash ./run-tests.sh -c configs/models-large.txt -t 4
+  - pytest -s -v test_lm_eval_correctness.py --config-list-file=configs/models-large.txt --tp-size=4
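The other half of the commit title, enforce_eager=true, is appended to model_args so that vLLM skips CUDA graph capture at engine startup, trading some steady-state decode throughput for faster startup, a sensible trade for short CI evals. A minimal sketch of the same flag passed directly to the vLLM engine (the model name is a placeholder):

# Sketch: the same flag passed directly to the vLLM engine instead of
# through lm-eval's model_args string. The model name is a placeholder.
from vllm import LLM, SamplingParams

llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct",
          tensor_parallel_size=1,
          enforce_eager=True)  # skip CUDA graph capture: faster startup
outputs = llm.generate(["The capital of France is"],
                       SamplingParams(max_tokens=8))
print(outputs[0].outputs[0].text)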
