
Commit a195cc5

add CI test
Signed-off-by: Nick Hill <[email protected]>
1 parent a73c2c0 commit a195cc5

File tree

2 files changed, +315 -0 lines changed


.buildkite/test-pipeline.yaml

Lines changed: 4 additions & 0 deletions
@@ -145,6 +145,7 @@ steps:
   - examples/offline_inference/rlhf_colocate.py
   - tests/examples/offline_inference/data_parallel.py
   - tests/v1/test_async_llm_dp.py
+  - tests/v1/test_external_lb_dp.py
   - tests/v1/engine/test_engine_core_client.py
   commands:
   # test with tp=2 and external_dp=2
@@ -155,6 +156,7 @@ steps:
   # test with internal dp
   - python3 ../examples/offline_inference/data_parallel.py
   - TP_SIZE=2 DP_SIZE=2 pytest -v -s v1/test_async_llm_dp.py
+  - TP_SIZE=2 DP_SIZE=2 pytest -v -s v1/test_external_lb_dp.py
   - pytest -v -s v1/engine/test_engine_core_client.py::test_kv_cache_events_dp
   - pytest -v -s distributed/test_utils.py
   - pytest -v -s compile/test_basic_correctness.py
@@ -644,10 +646,12 @@ steps:
   - vllm/worker/model_runner.py
   - entrypoints/llm/test_collective_rpc.py
   - tests/v1/test_async_llm_dp.py
+  - tests/v1/test_external_lb_dp.py
   - tests/v1/entrypoints/openai/test_multi_api_servers.py
   - vllm/v1/engine/
   commands:
   - TP_SIZE=1 DP_SIZE=2 pytest -v -s v1/test_async_llm_dp.py
+  - TP_SIZE=1 DP_SIZE=2 pytest -v -s v1/test_external_lb_dp.py
   - DP_SIZE=2 pytest -v -s v1/entrypoints/openai/test_multi_api_servers.py
   - pytest -v -s entrypoints/llm/test_collective_rpc.py
   - pytest -v -s ./compile/test_basic_correctness.py
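The pipeline changes above register the new test file as a source dependency of the affected steps and run it next to the existing data parallel tests: with TP_SIZE=2 DP_SIZE=2 in the first step and TP_SIZE=1 DP_SIZE=2 in the second. For context, "external LB" data parallel mode starts one API server per DP rank (port 8000 + rank in the test below) and leaves request routing to an external load balancer or client. A minimal sketch of what such a client could look like, assuming two ranks are already serving locally; the endpoint URLs, api_key value, request count, and round-robin policy are illustrative assumptions, not part of this commit:

# Illustrative only: spread requests over the per-rank endpoints that an
# external load balancer would normally front.
import asyncio
import itertools

import openai

ENDPOINTS = [
    "http://localhost:8000/v1",  # DP rank 0
    "http://localhost:8001/v1",  # DP rank 1
]


async def main():
    clients = [
        openai.AsyncOpenAI(base_url=url, api_key="EMPTY") for url in ENDPOINTS
    ]
    rotation = itertools.cycle(clients)
    # Round-robin a small batch of completion requests across the ranks.
    tasks = [
        next(rotation).completions.create(model="ibm-research/PowerMoE-3b",
                                          prompt="Hello, my name is",
                                          max_tokens=10)
        for _ in range(8)
    ]
    results = await asyncio.gather(*tasks)
    print(f"Completed {len(results)} requests across {len(ENDPOINTS)} ranks")


if __name__ == "__main__":
    asyncio.run(main())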

tests/v1/test_external_lb_dp.py

Lines changed: 311 additions & 0 deletions
@@ -0,0 +1,311 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import os
import threading
import time

import openai  # use the official client for correctness check
import pytest
import pytest_asyncio

from tests.utils import RemoteOpenAIServer

MODEL_NAME = "ibm-research/PowerMoE-3b"

# Number of data parallel ranks for external LB testing
DP_SIZE = int(os.getenv("DP_SIZE", "2"))
# Default tensor parallel size to use
TP_SIZE = int(os.getenv("TP_SIZE", "1"))

class ExternalLBServerManager:
    """Manages data parallel vLLM server instances for external
    load balancer testing."""

    def __init__(self, model_name: str, dp_size: int, api_server_count: int,
                 base_server_args: list, tp_size: int = TP_SIZE):
        self.model_name = model_name
        self.dp_size = dp_size
        self.tp_size = tp_size
        self.api_server_count = api_server_count
        self.base_server_args = base_server_args
        self.servers: list[tuple[RemoteOpenAIServer, list[str]]] = []
        self.server_threads = []

    def __enter__(self) -> list[tuple[RemoteOpenAIServer, list[str]]]:
        """Start all server instances for external LB mode."""
        for rank in range(self.dp_size):
            # Create server args for this specific rank
            server_args = self.base_server_args.copy()

            # Add external LB specific arguments
            server_args.extend([
                "--data-parallel-size",
                str(self.dp_size),
                "--data-parallel-rank",
                str(rank),
                "--data-parallel-size-local",
                "1",
                "--tensor-parallel-size",
                str(self.tp_size),
                "--port",
                str(8000 + rank),  # Different port for each rank
                "--api-server-count",
                str(self.api_server_count),
            ])

            # Use a thread to start each server to allow parallel initialization
            def start_server(r, sargs):
                try:
                    # Start the server
                    server = RemoteOpenAIServer(
                        self.model_name, sargs, auto_port=False,
                        env_dict={"CUDA_VISIBLE_DEVICES": str(r)})
                    server.__enter__()
                    print(
                        f"Server rank {r} started successfully with "
                        f"{self.api_server_count} API servers"
                    )
                    self.servers.append((server, sargs))
                except Exception as e:
                    print(f"Failed to start server rank {r}: {e}")
                    raise

            thread = threading.Thread(target=start_server,
                                      args=(rank, server_args))
            thread.start()

            self.server_threads.append(thread)

        # Wait for all servers to start
        for thread in self.server_threads:
            thread.join()

        # Give servers additional time to fully initialize and coordinate
        time.sleep(2)

        if len(self.servers) != self.dp_size:
            raise Exception("Servers failed to start")

        return self.servers

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Stop all server instances."""
        while self.servers:
            try:
                self.servers.pop()[0].__exit__(exc_type, exc_val, exc_tb)
            except Exception as e:
                print(f"Error stopping server: {e}")

@pytest.fixture(scope="module")
def default_server_args():
    return [
        # use half precision for speed and memory savings in CI environment
        "--dtype",
        "bfloat16",
        "--max-model-len",
        "2048",
        "--max-num-seqs",
        "128",
        "--enforce-eager",
    ]


@pytest.fixture(scope="module", params=[1, 4])
def servers(request, default_server_args):
    api_server_count = request.param
    with ExternalLBServerManager(MODEL_NAME, DP_SIZE, api_server_count,
                                 default_server_args) as server_list:
        yield server_list


@pytest_asyncio.fixture
async def clients(servers: list[tuple[RemoteOpenAIServer, list[str]]]):
    # Create a client for each server
    async_clients = []
    for server, _ in servers:
        client = await server.get_async_client().__aenter__()
        async_clients.append(client)

    try:
        yield async_clients
    finally:
        # Clean up all clients
        for client in async_clients:
            try:
                await client.__aexit__(None, None, None)
            except Exception as e:
                print(f"Error closing client: {e}")

@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_external_lb_single_completion(
        clients: list[openai.AsyncOpenAI],
        servers: list[tuple[RemoteOpenAIServer, list[str]]],
        model_name: str) -> None:

    async def make_request(client: openai.AsyncOpenAI):
        completion = await client.completions.create(
            model=model_name,
            prompt="Hello, my name is",
            max_tokens=10,
            temperature=1.0)

        assert completion.id is not None
        assert completion.choices is not None and len(completion.choices) == 1

        choice = completion.choices[0]
        # The exact number of tokens can vary slightly with temperature=1.0,
        # so we check for a reasonable minimum length.
        assert len(choice.text) >= 1
        # Finish reason might not always be 'length' if the model finishes early
        # or due to other reasons, especially with high temperature.
        # So, we'll accept 'length' or 'stop'.
        assert choice.finish_reason in ("length", "stop")

        # Token counts can also vary, so we check they are positive.
        assert completion.usage.completion_tokens > 0
        assert completion.usage.prompt_tokens > 0
        assert completion.usage.total_tokens > 0
        return completion

    # Test single request to each server
    for i, client in enumerate(clients):
        result = await make_request(client)
        assert result is not None
        print(f"Server {i} handled single completion request successfully")

    await asyncio.sleep(0.5)

    # Send requests to all servers in round-robin fashion
    num_requests_per_server = 25  # Total 50 requests across 2 servers
    all_tasks = []

    for i, client in enumerate(clients):
        tasks = [make_request(client) for _ in range(num_requests_per_server)]
        all_tasks.extend(tasks)

    results = await asyncio.gather(*all_tasks)
    assert len(results) == num_requests_per_server * len(clients)
    assert all(completion is not None for completion in results)

    await asyncio.sleep(0.5)

    # Second burst of requests
    all_tasks = []
    for i, client in enumerate(clients):
        tasks = [make_request(client) for _ in range(num_requests_per_server)]
        all_tasks.extend(tasks)

    results = await asyncio.gather(*all_tasks)
    assert len(results) == num_requests_per_server * len(clients)
    assert all(completion is not None for completion in results)

    _, server_args = servers[0]
    # Look up the configured --api-server-count (default '1') for logging.
    api_server_count = (server_args.count('--api-server-count')
                        and server_args[server_args.index(
                            '--api-server-count') + 1] or '1')
    print(f"Successfully completed external LB test with {len(clients)} "
          f"servers (API server count: {api_server_count})")

@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_external_lb_completion_streaming(
        clients: list[openai.AsyncOpenAI],
        servers: list[tuple[RemoteOpenAIServer, list[str]]],
        model_name: str) -> None:
    prompt = "What is an LLM?"

    async def make_streaming_request(client: openai.AsyncOpenAI):
        # Perform a non-streaming request to get the expected full output
        single_completion = await client.completions.create(
            model=model_name,
            prompt=prompt,
            max_tokens=5,
            temperature=0.0,
        )
        single_output = single_completion.choices[0].text

        # Perform the streaming request
        stream = await client.completions.create(model=model_name,
                                                 prompt=prompt,
                                                 max_tokens=5,
                                                 temperature=0.0,
                                                 stream=True)
        chunks: list[str] = []
        finish_reason_count = 0
        last_chunk = None
        async for chunk in stream:
            chunks.append(chunk.choices[0].text)
            if chunk.choices[0].finish_reason is not None:
                finish_reason_count += 1
            last_chunk = chunk  # Keep track of the last chunk

        # finish reason should only return in the last block for OpenAI API
        assert finish_reason_count == 1, (
            "Finish reason should appear exactly once.")
        assert last_chunk is not None, (
            "Stream should have yielded at least one chunk.")
        assert last_chunk.choices[
            0].finish_reason == "length", "Finish reason should be 'length'."
        # Check that the combined text matches the non-streamed version.
        assert "".join(
            chunks
        ) == single_output, "Streamed output should match non-streamed output."
        return True  # Indicate success for this request

    # Test single request to each server
    for i, client in enumerate(clients):
        result = await make_streaming_request(client)
        assert result is not None
        print(f"Server {i} handled single streaming request successfully")

    await asyncio.sleep(0.5)

    # Send streaming requests to all servers in round-robin fashion
    num_requests_per_server = 25  # Total 50 requests across 2 servers
    all_tasks = []

    for i, client in enumerate(clients):
        tasks = [
            make_streaming_request(client)
            for _ in range(num_requests_per_server)
        ]
        all_tasks.extend(tasks)

    results = await asyncio.gather(*all_tasks)
    assert len(results) == num_requests_per_server * len(clients)
    assert all(results), "Not all streaming requests completed successfully."

    await asyncio.sleep(0.5)

    # Second burst of streaming requests
    all_tasks = []
    for i, client in enumerate(clients):
        tasks = [
            make_streaming_request(client)
            for _ in range(num_requests_per_server)
        ]
        all_tasks.extend(tasks)

    results = await asyncio.gather(*all_tasks)
    assert len(results) == num_requests_per_server * len(clients)
    assert all(results), "Not all streaming requests completed successfully."

    _, server_args = servers[0]
    # Look up the configured --api-server-count (default '1') for logging.
    api_server_count = (server_args.count('--api-server-count')
                        and server_args[server_args.index(
                            '--api-server-count') + 1] or '1')
    print(f"Successfully completed external LB streaming test with "
          f"{len(clients)} servers (API server count: {api_server_count})")
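The fixtures above are what CI uses, but the same ExternalLBServerManager can also be driven directly for a quick local smoke test. A minimal sketch, assuming the vLLM repository root is on PYTHONPATH so the tests package is importable, enough GPUs are visible for DP_SIZE ranks, and one API server per rank; the smoke_test wrapper and the trimmed argument list are illustrative assumptions, not part of this commit:

# Illustrative only: start the per-rank servers with ExternalLBServerManager
# and issue one completion against each rank.
import asyncio

from tests.v1.test_external_lb_dp import (DP_SIZE, MODEL_NAME,
                                          ExternalLBServerManager)


async def smoke_test():
    base_args = [
        "--dtype", "bfloat16", "--max-model-len", "2048", "--enforce-eager"
    ]
    with ExternalLBServerManager(MODEL_NAME, DP_SIZE, 1, base_args) as servers:
        for idx, (server, _) in enumerate(servers):
            async with server.get_async_client() as client:
                completion = await client.completions.create(
                    model=MODEL_NAME, prompt="Hello, my name is", max_tokens=5)
                print(f"server {idx}: {completion.choices[0].text!r}")


if __name__ == "__main__":
    asyncio.run(smoke_test())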
