 import argparse
-from typing import Tuple
+import dataclasses
+from dataclasses import dataclass
+from typing import Optional, Tuple
 
 from cacheflow.config import (CacheConfig, ModelConfig, ParallelConfig,
                               SchedulerConfig)
-from cacheflow.server.llm_server import LLMServer
-from cacheflow.server.ray_utils import initialize_cluster
 
-_GiB = 1 << 30
 
+@dataclass
+class ServerArgs:
+    model: str
+    download_dir: Optional[str] = None
+    use_np_weights: bool = False
+    use_dummy_weights: bool = False
+    dtype: str = "default"
+    seed: int = 0
+    use_ray: bool = False
+    pipeline_parallel_size: int = 1
+    tensor_parallel_size: int = 1
+    block_size: int = 16
+    swap_space: int = 4  # GiB
+    gpu_memory_utilization: float = 0.95
+    max_num_batched_tokens: int = 2560
+    max_num_seqs: int = 256
+    disable_log_stats: bool = False
 
-def add_server_arguments(parser: argparse.ArgumentParser):
-    """Shared arguments for CacheFlow servers."""
+    def __post_init__(self):
+        self.max_num_seqs = min(self.max_num_seqs, self.max_num_batched_tokens)
+
+    @staticmethod
+    def add_cli_args(
+        parser: argparse.ArgumentParser,
+    ) -> argparse.ArgumentParser:
+        return _add_server_arguments(parser)
+
+    @classmethod
+    def from_cli_args(cls, args: argparse.Namespace) -> "ServerArgs":
+        # Get the list of attributes of this dataclass.
+        attrs = [attr.name for attr in dataclasses.fields(cls)]
+        # Set the attributes from the parsed arguments.
+        server_args = cls(**{attr: getattr(args, attr) for attr in attrs})
+        return server_args
+
+    def create_server_configs(
+        self,
+    ) -> Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig]:
+        # Initialize the configs.
+        model_config = ModelConfig(
+            self.model, self.download_dir, self.use_np_weights,
+            self.use_dummy_weights, self.dtype, self.seed)
+        cache_config = CacheConfig(self.block_size, self.gpu_memory_utilization,
+                                   self.swap_space)
+        parallel_config = ParallelConfig(self.pipeline_parallel_size,
+                                         self.tensor_parallel_size,
+                                         self.use_ray)
+        scheduler_config = SchedulerConfig(self.max_num_batched_tokens,
+                                           self.max_num_seqs)
+        return model_config, cache_config, parallel_config, scheduler_config
+
+
+def _add_server_arguments(
+    parser: argparse.ArgumentParser,
+) -> argparse.ArgumentParser:
+    """Shared CLI arguments for CacheFlow servers."""
     # Model arguments
-    parser.add_argument('--model', type=str, default='facebook/opt-125m', help='model name')
-    parser.add_argument('--download-dir', type=str, default=None,
+    parser.add_argument('--model', type=str, default='facebook/opt-125m',
+                        help='name or path of the huggingface model to use')
+    parser.add_argument('--download-dir', type=str,
+                        default=ServerArgs.download_dir,
                         help='directory to download and load the weights, '
                              'default to the default cache dir of huggingface')
     parser.add_argument('--use-np-weights', action='store_true',
-                        help='save a numpy copy of model weights for faster loading')
-    parser.add_argument('--use-dummy-weights', action='store_true', help='use dummy values for model weights')
+                        help='save a numpy copy of model weights for faster '
+                             'loading. This can increase the disk usage by up '
+                             'to 2x.')
+    parser.add_argument('--use-dummy-weights', action='store_true',
+                        help='use dummy values for model weights')
     # TODO(woosuk): Support FP32.
-    parser.add_argument('--dtype', type=str, default='default', choices=['default', 'half', 'bfloat16'],
+    parser.add_argument('--dtype', type=str, default=ServerArgs.dtype,
+                        choices=['default', 'half', 'bfloat16'],
                         help=('data type for model weights and activations. '
                               'The "default" option will use FP16 precision '
                               'for FP32 and FP16 models, and BF16 precision '
                               'for BF16 models.'))
     # Parallel arguments
-    parser.add_argument('--use-ray', action='store_true', help='use Ray for distributed serving, will be automatically set when using more than 1 GPU')
-    parser.add_argument('--pipeline-parallel-size', '-pp', type=int, default=1, help='number of pipeline stages')
-    parser.add_argument('--tensor-parallel-size', '-tp', type=int, default=1, help='number of tensor parallel replicas')
+    parser.add_argument('--use-ray', action='store_true',
+                        help='use Ray for distributed serving, will be '
+                             'automatically set when using more than 1 GPU')
+    parser.add_argument('--pipeline-parallel-size', '-pp', type=int,
+                        default=ServerArgs.pipeline_parallel_size,
+                        help='number of pipeline stages')
+    parser.add_argument('--tensor-parallel-size', '-tp', type=int,
+                        default=ServerArgs.tensor_parallel_size,
+                        help='number of tensor parallel replicas')
     # KV cache arguments
-    parser.add_argument('--block-size', type=int, default=16, choices=[1, 2, 4, 8, 16, 32, 64, 128, 256], help='token block size')
+    parser.add_argument('--block-size', type=int, default=ServerArgs.block_size,
+                        choices=[1, 2, 4, 8, 16, 32, 64, 128, 256],
+                        help='token block size')
     # TODO(woosuk): Support fine-grained seeds (e.g., seed per request).
-    parser.add_argument('--seed', type=int, default=0, help='random seed')
-    parser.add_argument('--swap-space', type=int, default=4, help='CPU swap space size (GiB) per GPU')
-    parser.add_argument('--gpu-memory-utilization', type=float, default=0.95, help='the percentage of GPU memory to be used for the model executor')
-    parser.add_argument('--max-num-batched-tokens', type=int, default=2560, help='maximum number of batched tokens per iteration')
-    parser.add_argument('--max-num-seqs', type=int, default=256, help='maximum number of sequences per iteration')
-    parser.add_argument('--disable-log-stats', action='store_true', help='disable logging statistics')
+    parser.add_argument('--seed', type=int, default=ServerArgs.seed,
+                        help='random seed')
+    parser.add_argument('--swap-space', type=int, default=ServerArgs.swap_space,
+                        help='CPU swap space size (GiB) per GPU')
+    parser.add_argument('--gpu-memory-utilization', type=float,
+                        default=ServerArgs.gpu_memory_utilization,
+                        help='the percentage of GPU memory to be used for the '
+                             'model executor')
+    parser.add_argument('--max-num-batched-tokens', type=int,
+                        default=ServerArgs.max_num_batched_tokens,
+                        help='maximum number of batched tokens per iteration')
+    parser.add_argument('--max-num-seqs', type=int,
+                        default=ServerArgs.max_num_seqs,
+                        help='maximum number of sequences per iteration')
+    parser.add_argument('--disable-log-stats', action='store_true',
+                        help='disable logging statistics')
     return parser
-
-
-def create_server_configs_from_args(
-    args: argparse.Namespace,
-) -> Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig]:
-    # Post-process the parsed arguments.
-    args.swap_space = args.swap_space * _GiB
-    args.max_num_seqs = min(args.max_num_seqs, args.max_num_batched_tokens)
-
-    # Initialize the configs.
-    model_config = ModelConfig(
-        args.model, args.download_dir, args.use_np_weights,
-        args.use_dummy_weights, args.dtype, args.seed)
-    cache_config = CacheConfig(args.block_size, args.gpu_memory_utilization,
-                               args.swap_space)
-    parallel_config = ParallelConfig(args.pipeline_parallel_size,
-                                     args.tensor_parallel_size, args.use_ray)
-    scheduler_config = SchedulerConfig(args.max_num_batched_tokens,
-                                       args.max_num_seqs)
-    return model_config, cache_config, parallel_config, scheduler_config
-
-
-def initialize_server_from_args(args: argparse.Namespace) -> LLMServer:
-    server_configs = create_server_configs_from_args(args)
-    parallel_config = server_configs[2]
-
-    # Initialize the cluster.
-    distributed_init_method, devices = initialize_cluster(parallel_config)
-
-    # Create the LLM server.
-    server = LLMServer(*server_configs, distributed_init_method, devices,
-                       log_stats=not args.disable_log_stats)
-    return server
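
Taken together, the change folds the old free functions into a ServerArgs dataclass: __post_init__ clamps max_num_seqs to max_num_batched_tokens (a sequence contributes at least one token per batch), and the field defaults double as the argparse defaults, since dataclass fields with simple defaults are ordinary class attributes (hence default=ServerArgs.block_size is valid at parser-build time). The removed initialize_server_from_args has no replacement in this file; below is a minimal sketch of the equivalent flow on the new API, assuming LLMServer and initialize_cluster keep the signatures visible in the removed code (the ServerArgs import path is hypothetical):

import argparse

from cacheflow.server.llm_server import LLMServer
from cacheflow.server.ray_utils import initialize_cluster
from cacheflow.server.arg_utils import ServerArgs  # hypothetical module path


def main() -> None:
    # Build the parser from the shared CLI arguments defined above.
    parser = argparse.ArgumentParser(description='CacheFlow server')
    parser = ServerArgs.add_cli_args(parser)
    args = parser.parse_args()

    # Namespace -> dataclass -> config objects.
    server_args = ServerArgs.from_cli_args(args)
    server_configs = server_args.create_server_configs()
    parallel_config = server_configs[2]  # ParallelConfig

    # Initialize the cluster, then create the LLM server, mirroring the
    # removed initialize_server_from_args(). Signatures assumed unchanged.
    distributed_init_method, devices = initialize_cluster(parallel_config)
    server = LLMServer(*server_configs, distributed_init_method, devices,
                       log_stats=not server_args.disable_log_stats)


if __name__ == '__main__':
    main()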
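One caveat the diff leaves implicit: the old helper converted swap_space from GiB to bytes (args.swap_space * _GiB) before constructing CacheConfig, whereas the new create_server_configs passes the raw GiB value through. This is only equivalent if CacheConfig now performs the conversion itself, which this file does not show.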