# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os

import pytest
import ray
from omegaconf import DictConfig
from openai import AsyncOpenAI

from verl.workers.rollout.replica import get_rollout_replica_class


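# Load the default ppo_trainer Hydra config and patch it for this rollout test.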
@pytest.fixture
def init_config() -> DictConfig:
    from hydra import compose, initialize_config_dir

    with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
        config = compose(config_name="ppo_trainer")

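    # Override trainer and rollout settings for a 2-node, 4-GPUs-per-node test setup;
    # the rollout backend is selected via the ROLLOUT_NAME environment variable.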
    config.trainer.n_gpus_per_node = 4
    config.trainer.nnodes = 2
    config.actor_rollout_ref.model.path = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct")
    config.actor_rollout_ref.rollout.name = os.environ["ROLLOUT_NAME"]
    config.actor_rollout_ref.rollout.load_format = "auto"
    config.actor_rollout_ref.rollout.enforce_eager = True

    return config


@pytest.mark.asyncio
@pytest.mark.parametrize("tp_size", [2, 4])
async def test_standalone_rollout(init_config, tp_size):
    """Test standalone rollout servers on a single node and across multiple nodes."""
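    # Start Ray; env_vars in runtime_env are propagated to every worker process.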
    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
                "VLLM_USE_V1": "1",
            }
        }
    )

    init_config.actor_rollout_ref.rollout.skip_tokenizer_init = False
    init_config.actor_rollout_ref.rollout.tensor_model_parallel_size = tp_size
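    # Number of replicas = total GPUs (n_gpus_per_node * nnodes) divided by the tensor-parallel size.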
    num_replicas = (init_config.trainer.n_gpus_per_node * init_config.trainer.nnodes) // tp_size

    # create standalone rollout servers, one per replica; with gpus_per_node=2,
    # a tp_size=2 replica fits on one node while tp_size=4 spans two nodes
    rollout_server_class = get_rollout_replica_class(init_config.actor_rollout_ref.rollout.name)
    rollout_servers = [
        rollout_server_class(replica_rank=replica_rank, config=init_config, gpus_per_node=2)
        for replica_rank in range(num_replicas)
    ]
    await asyncio.gather(*[server.init_standalone() for server in rollout_servers])

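    # Each replica exposes a Ray server handle and an HTTP address once initialized.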
    server_handles = [server._server_handle for server in rollout_servers]
    server_addresses = [server._server_address for server in rollout_servers]
    assert len(server_handles) == num_replicas
    assert len(server_addresses) == num_replicas

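    # Clear proxy settings so the HTTP client connects to the local rollout server directly.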
    os.environ.pop("HTTPS_PROXY", None)
    os.environ.pop("HTTP_PROXY", None)
    os.environ.pop("NO_PROXY", None)

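    # Query the first replica through its OpenAI-compatible endpoint; the API key is a dummy value.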
    client = AsyncOpenAI(
        api_key="123-abc",
        base_url=f"http://{server_addresses[0]}/v1",
    )

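    # Send a simple chat completion request to verify the server is serving the model.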
    completion = await client.chat.completions.create(
        model=init_config.actor_rollout_ref.model.path,
        messages=[{"role": "user", "content": "What can you do?"}],
    )
    print(completion.choices[0].message.content)

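    # Tear down Ray to release GPUs before the next parametrized run.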
    ray.shutdown()
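

# Example invocation (a sketch, not part of the test; assumes 8 visible GPUs and
# vLLM installed as the rollout backend):
#   ROLLOUT_NAME=vllm python -m pytest -s -x <path-to-this-test-file>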