From e0e1386ac66912bdd765895ee66c408991e1807a Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 8 Feb 2024 11:44:52 -0800 Subject: [PATCH 01/47] Write info to local json --- requirements.txt | 1 + vllm/engine/llm_engine.py | 10 +++++++- vllm/usage/usage_lib.py | 52 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 vllm/usage/usage_lib.py diff --git a/requirements.txt b/requirements.txt index 5684b2c29634..37be5c168752 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,7 @@ ray >= 2.9 sentencepiece # Required for LLaMA tokenizer. numpy torch == 2.1.2 +cloud-detect transformers >= 4.37.0 # Required for Qwen2 xformers == 0.0.23.post1 # Required for CUDA 12.1. fastapi diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 02c673c96fd9..af0094731159 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -2,6 +2,7 @@ from collections import defaultdict import os import time +import platform from typing import (TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union) @@ -20,7 +21,9 @@ from vllm.transformers_utils.tokenizer import (detokenize_incrementally, TokenizerGroup) from vllm.utils import Counter, set_cuda_visible_devices, get_ip, get_open_port, get_distributed_init_method - +from vllm.usage.usage_lib import is_usage_stats_enabled, usage_message +import torch +from cloud_detect import provider if ray: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -103,6 +106,11 @@ def __init__( self._init_tokenizer() self.seq_counter = Counter() + #If usage stat is enabled, collect relevant info. + if is_usage_stats_enabled(): + usage_message.report_usage() + usage_message.update_model(model_config.model) + usage_message.write_to_file() # Create the parallel GPU workers. if self.parallel_config.worker_use_ray: # Disable Ray usage stats collection. diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py new file mode 100644 index 000000000000..d512bd85b82b --- /dev/null +++ b/vllm/usage/usage_lib.py @@ -0,0 +1,52 @@ +import requests +import os +import torch +import json +import platform +import sys +from cloud_detect import provider +from typing import Optional +_USAGE_STATS_FILE = 'usage_stats.json' +_USAGE_STATS_ENABLED = None +_USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', 'https://stats.vllm.ai') + +def is_usage_stats_enabled(): + """Determine whether or not we can send usage stats to the server. + The logic is as follows: + - By default, it should be enabled. 
+ - Two environment variables can disable it: + - DO_NOT_TRACK=1 + - VLLM_NO_USAGE_STATS=1 + - A file in the home directory can disable it if it exists: + - $HOME/.config/vllm/do_not_track + """ + global _USAGE_STATS_ENABLED + if _USAGE_STATS_ENABLED is None: + do_not_track = os.environ.get('DO_NOT_TRACK', '0') == '1' + no_usage_stats = os.environ.get('VLLM_NO_USAGE_STATS', '0') == '1' + do_not_track_file = os.path.exists(os.path.expanduser('~/.config/vllm/do_not_track')) + + _USAGE_STATS_ENABLED = not (do_not_track or no_usage_stats or do_not_track_file) + return _USAGE_STATS_ENABLED + + +class UsageMessage: + def __init__(self) -> None: + self.gpu_name : Optional[str] = None + self.provider : Optional[str] = None + self.architecture : Optional[str] = None + self.platform : Optional[str] = None + self.model : Optional[str] = None + self.entry_point : Optional[str] = None + def report_usage(self) -> None: + self.entry_point = sys.argv + self.gpu_name = torch.cuda.get_device_name() + self.provider = provider() + self.architecture = platform.machine() + self.platform = platform.platform() + def update_model(self, model: str) -> None: + self.model = model + def write_to_file(self): + with open(_USAGE_STATS_FILE, "w") as outfile: + json.dump(vars(self), outfile) +usage_message = UsageMessage() \ No newline at end of file From c33b4ccfdb12c9d4d87bf14d38bd913de3975f3d Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 8 Feb 2024 16:49:38 -0800 Subject: [PATCH 02/47] add usage context --- vllm/engine/arg_utils.py | 7 ++++--- vllm/engine/async_llm_engine.py | 4 +++- vllm/engine/llm_engine.py | 10 ++++++---- vllm/entrypoints/api_server.py | 4 ++-- vllm/entrypoints/llm.py | 3 ++- vllm/entrypoints/openai/api_server.py | 5 ++--- vllm/usage/usage_lib.py | 17 +++++++++++++---- 7 files changed, 32 insertions(+), 18 deletions(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index d5e63e25d6e8..746e321872c2 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -5,7 +5,7 @@ from vllm.config import (CacheConfig, DeviceConfig, ModelConfig, ParallelConfig, SchedulerConfig, LoRAConfig) - +from vllm.usage.usage_lib import UsageContext @dataclass class EngineArgs: @@ -44,7 +44,7 @@ class EngineArgs: lora_dtype = 'auto' max_cpu_loras: Optional[int] = None device: str = 'cuda' - + usage_context: Optional[UsageContext] = UsageContext.UNKNOWN_CONTEXT def __post_init__(self): if self.tokenizer is None: self.tokenizer = self.model @@ -267,9 +267,10 @@ def add_cli_args( return parser @classmethod - def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs': + def from_cli_args(cls, args: argparse.Namespace, context: UsageContext=UsageContext.UNKNOWN_CONTEXT) -> 'EngineArgs': # Get the list of attributes of this dataclass. attrs = [attr.name for attr in dataclasses.fields(cls)] + args.usage_context = context # Set the attributes from the parsed arguments. 
engine_args = cls(**{attr: getattr(args, attr) for attr in attrs}) return engine_args diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 7cba65460277..28c4f6086f52 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -629,7 +629,9 @@ def from_engine_args(cls, log_requests=not engine_args.disable_log_requests, log_stats=not engine_args.disable_log_stats, max_log_len=engine_args.max_log_len, - start_engine_loop=start_engine_loop) + start_engine_loop=start_engine_loop, + usage_context = engine_args.usage_context + ) return engine async def do_log_stats(self) -> None: diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 94f73e986bc4..8f4fafc80655 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -22,7 +22,7 @@ from vllm.transformers_utils.tokenizer import (detokenize_incrementally, TokenizerGroup) from vllm.utils import Counter, set_cuda_visible_devices, get_ip, get_open_port, get_distributed_init_method -from vllm.usage.usage_lib import is_usage_stats_enabled, usage_message +from vllm.usage.usage_lib import UsageContext, is_usage_stats_enabled, usage_message import torch from cloud_detect import provider if ray: @@ -78,6 +78,7 @@ def __init__( lora_config: Optional[LoRAConfig], placement_group: Optional["PlacementGroup"], log_stats: bool, + usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT ) -> None: logger.info( "Initializing an LLM engine with config: " @@ -114,9 +115,9 @@ def __init__( #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): - usage_message.report_usage() - usage_message.update_model(model_config.model) + usage_message.report_usage(model_config.model, usage_context) usage_message.write_to_file() + # Create the parallel GPU workers. if self.parallel_config.worker_use_ray: # Disable Ray usage stats collection. @@ -378,7 +379,8 @@ def from_engine_args(cls, engine_args: EngineArgs) -> "LLMEngine": # Create the LLM engine. engine = cls(*engine_configs, placement_group, - log_stats=not engine_args.disable_log_stats) + log_stats=not engine_args.disable_log_stats, + usage_context = engine_args.usage_context) return engine def encode_request( diff --git a/vllm/entrypoints/api_server.py b/vllm/entrypoints/api_server.py index f7b8d258fae4..778d2a3668d8 100644 --- a/vllm/entrypoints/api_server.py +++ b/vllm/entrypoints/api_server.py @@ -10,6 +10,7 @@ from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.sampling_params import SamplingParams from vllm.utils import random_uuid +from vllm.usage.usage_lib import UsageContext TIMEOUT_KEEP_ALIVE = 5 # seconds. app = FastAPI() @@ -85,8 +86,7 @@ async def stream_results() -> AsyncGenerator[bytes, None]: help="FastAPI root_path when app is behind a path based routing proxy") parser = AsyncEngineArgs.add_cli_args(parser) args = parser.parse_args() - - engine_args = AsyncEngineArgs.from_cli_args(args) + engine_args = AsyncEngineArgs.from_cli_args(args, UsageContext.API_SERVER) engine = AsyncLLMEngine.from_engine_args(engine_args) app.root_path = args.root_path diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index fc82018d18eb..ad1b5c9d7c47 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -9,7 +9,7 @@ from vllm.outputs import RequestOutput from vllm.sampling_params import SamplingParams from vllm.utils import Counter - +from vllm.usage.usage_lib import UsageContext class LLM: """An LLM for generating texts from given prompts and sampling parameters. 
@@ -104,6 +104,7 @@ def __init__( enforce_eager=enforce_eager, max_context_len_to_capture=max_context_len_to_capture, disable_custom_all_reduce=disable_custom_all_reduce, + usage_context=UsageContext.LLM, **kwargs, ) self.llm_engine = LLMEngine.from_engine_args(engine_args) diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index deb0fddd643c..6fa5519e627c 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -23,7 +23,7 @@ from vllm.logger import init_logger from vllm.entrypoints.openai.serving_chat import OpenAIServingChat from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion - +from vllm.usage.usage_lib import UsageContext TIMEOUT_KEEP_ALIVE = 5 # seconds openai_serving_chat: OpenAIServingChat = None @@ -212,8 +212,7 @@ async def authentication(request: Request, call_next): served_model = args.served_model_name else: served_model = args.model - - engine_args = AsyncEngineArgs.from_cli_args(args) + engine_args = AsyncEngineArgs.from_cli_args(args, UsageContext.OPENAI_API_SERVER) engine = AsyncLLMEngine.from_engine_args(engine_args) openai_serving_chat = OpenAIServingChat(engine, served_model, args.response_role, diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index d512bd85b82b..cc36a23e569f 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -4,8 +4,10 @@ import json import platform import sys +import pkg_resources from cloud_detect import provider from typing import Optional +from enum import Enum _USAGE_STATS_FILE = 'usage_stats.json' _USAGE_STATS_ENABLED = None _USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', 'https://stats.vllm.ai') @@ -29,6 +31,12 @@ def is_usage_stats_enabled(): _USAGE_STATS_ENABLED = not (do_not_track or no_usage_stats or do_not_track_file) return _USAGE_STATS_ENABLED +class UsageContext(Enum): + UNKNOWN_CONTEXT = "UNKNOWN_CONTEXT" + LLM = "LLM" + API_SERVER = "API_SERVER" + OPENAI_API_SERVER = "OPENAI_API_SERVER" + class UsageMessage: def __init__(self) -> None: @@ -37,14 +45,15 @@ def __init__(self) -> None: self.architecture : Optional[str] = None self.platform : Optional[str] = None self.model : Optional[str] = None - self.entry_point : Optional[str] = None - def report_usage(self) -> None: - self.entry_point = sys.argv + self.vllm_version : Optional[str] = None + self.context : Optional[str] = None + def report_usage(self, model: str, context: UsageContext) -> None: + self.context = context.value self.gpu_name = torch.cuda.get_device_name() self.provider = provider() self.architecture = platform.machine() self.platform = platform.platform() - def update_model(self, model: str) -> None: + self.vllm_version = pkg_resources.get_distribution("vllm").version self.model = model def write_to_file(self): with open(_USAGE_STATS_FILE, "w") as outfile: From b74e3a64e5ee00f482466acb6620f104f20d3935 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 8 Feb 2024 21:02:21 -0800 Subject: [PATCH 03/47] removed usage_context from Engine_args --- vllm/engine/arg_utils.py | 6 ++---- vllm/engine/async_llm_engine.py | 7 ++++--- vllm/engine/llm_engine.py | 5 +++-- vllm/entrypoints/api_server.py | 4 ++-- vllm/entrypoints/llm.py | 3 +-- vllm/entrypoints/openai/api_server.py | 4 ++-- vllm/usage/usage_lib.py | 3 ++- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 746e321872c2..7ed9ba738b5b 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ 
-5,7 +5,6 @@ from vllm.config import (CacheConfig, DeviceConfig, ModelConfig, ParallelConfig, SchedulerConfig, LoRAConfig) -from vllm.usage.usage_lib import UsageContext @dataclass class EngineArgs: @@ -44,7 +43,7 @@ class EngineArgs: lora_dtype = 'auto' max_cpu_loras: Optional[int] = None device: str = 'cuda' - usage_context: Optional[UsageContext] = UsageContext.UNKNOWN_CONTEXT + def __post_init__(self): if self.tokenizer is None: self.tokenizer = self.model @@ -267,10 +266,9 @@ def add_cli_args( return parser @classmethod - def from_cli_args(cls, args: argparse.Namespace, context: UsageContext=UsageContext.UNKNOWN_CONTEXT) -> 'EngineArgs': + def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs': # Get the list of attributes of this dataclass. attrs = [attr.name for attr in dataclasses.fields(cls)] - args.usage_context = context # Set the attributes from the parsed arguments. engine_args = cls(**{attr: getattr(args, attr) for attr in attrs}) return engine_args diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 28c4f6086f52..72eb2dd5ae5f 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -12,7 +12,7 @@ from vllm.logger import init_logger from vllm.outputs import RequestOutput from vllm.sampling_params import SamplingParams - +from vllm.usage.usage_lib import UsageContext logger = init_logger(__name__) @@ -613,7 +613,8 @@ async def get_model_config(self) -> ModelConfig: @classmethod def from_engine_args(cls, engine_args: AsyncEngineArgs, - start_engine_loop: bool = True) -> "AsyncLLMEngine": + start_engine_loop: bool = True, + usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT) -> "AsyncLLMEngine": """Creates an async LLM engine from the engine arguments.""" # Create the engine configs. engine_configs = engine_args.create_engine_configs() @@ -630,7 +631,7 @@ def from_engine_args(cls, log_stats=not engine_args.disable_log_stats, max_log_len=engine_args.max_log_len, start_engine_loop=start_engine_loop, - usage_context = engine_args.usage_context + usage_context=usage_context ) return engine diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 8f4fafc80655..c1feee251f75 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -369,7 +369,7 @@ def _init_cache(self) -> None: self._run_workers("warm_up_model") @classmethod - def from_engine_args(cls, engine_args: EngineArgs) -> "LLMEngine": + def from_engine_args(cls, engine_args: EngineArgs, usage_context: UsageContext=UsageContext.UNKNOWN_CONTEXT) -> "LLMEngine": """Creates an LLM engine from the engine arguments.""" # Create the engine configs. 
engine_configs = engine_args.create_engine_configs() @@ -380,7 +380,8 @@ def from_engine_args(cls, engine_args: EngineArgs) -> "LLMEngine": engine = cls(*engine_configs, placement_group, log_stats=not engine_args.disable_log_stats, - usage_context = engine_args.usage_context) + usage_context = usage_context + ) return engine def encode_request( diff --git a/vllm/entrypoints/api_server.py b/vllm/entrypoints/api_server.py index 778d2a3668d8..ae0df842bbe5 100644 --- a/vllm/entrypoints/api_server.py +++ b/vllm/entrypoints/api_server.py @@ -86,8 +86,8 @@ async def stream_results() -> AsyncGenerator[bytes, None]: help="FastAPI root_path when app is behind a path based routing proxy") parser = AsyncEngineArgs.add_cli_args(parser) args = parser.parse_args() - engine_args = AsyncEngineArgs.from_cli_args(args, UsageContext.API_SERVER) - engine = AsyncLLMEngine.from_engine_args(engine_args) + engine_args = AsyncEngineArgs.from_cli_args(args) + engine = AsyncLLMEngine.from_engine_args(engine_args, usage_context=UsageContext.API_SERVER) app.root_path = args.root_path uvicorn.run(app, diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index ad1b5c9d7c47..52de89aa383d 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -104,10 +104,9 @@ def __init__( enforce_eager=enforce_eager, max_context_len_to_capture=max_context_len_to_capture, disable_custom_all_reduce=disable_custom_all_reduce, - usage_context=UsageContext.LLM, **kwargs, ) - self.llm_engine = LLMEngine.from_engine_args(engine_args) + self.llm_engine = LLMEngine.from_engine_args(engine_args,usage_context=UsageContext.LLM) self.request_counter = Counter() def get_tokenizer( diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 6fa5519e627c..291c90a17d23 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -212,8 +212,8 @@ async def authentication(request: Request, call_next): served_model = args.served_model_name else: served_model = args.model - engine_args = AsyncEngineArgs.from_cli_args(args, UsageContext.OPENAI_API_SERVER) - engine = AsyncLLMEngine.from_engine_args(engine_args) + engine_args = AsyncEngineArgs.from_cli_args(args) + engine = AsyncLLMEngine.from_engine_args(engine_args, usage_context=UsageContext.API_SERVER) openai_serving_chat = OpenAIServingChat(engine, served_model, args.response_role, args.chat_template) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index cc36a23e569f..89193b41675e 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -8,7 +8,8 @@ from cloud_detect import provider from typing import Optional from enum import Enum -_USAGE_STATS_FILE = 'usage_stats.json' +from pathlib import Path +_USAGE_STATS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'usage_stats.json') _USAGE_STATS_ENABLED = None _USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', 'https://stats.vllm.ai') From c988e071984cb15ab04a8afe93938de3c5209de3 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 8 Feb 2024 21:08:45 -0800 Subject: [PATCH 04/47] Move IO to another process --- vllm/engine/llm_engine.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index c1feee251f75..86be9137730b 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -3,7 +3,6 @@ import os import time import pickle -import platform from typing import (TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union) 
@@ -23,8 +22,7 @@ TokenizerGroup) from vllm.utils import Counter, set_cuda_visible_devices, get_ip, get_open_port, get_distributed_init_method from vllm.usage.usage_lib import UsageContext, is_usage_stats_enabled, usage_message -import torch -from cloud_detect import provider +from multiprocessing import Process if ray: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -116,7 +114,7 @@ def __init__( #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): usage_message.report_usage(model_config.model, usage_context) - usage_message.write_to_file() + p = Process(usage_message.write_to_file()) # Create the parallel GPU workers. if self.parallel_config.worker_use_ray: From 88c51875a45299edd57a899ceea4ebddbfa078ad Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Mon, 12 Feb 2024 18:32:15 -0800 Subject: [PATCH 05/47] added http request --- vllm/engine/llm_engine.py | 2 +- vllm/usage/usage_lib.py | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 86be9137730b..0cb03b330d40 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -114,7 +114,7 @@ def __init__( #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): usage_message.report_usage(model_config.model, usage_context) - p = Process(usage_message.write_to_file()) + p = Process(usage_message.send_to_server()) # Create the parallel GPU workers. if self.parallel_config.worker_use_ray: diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 89193b41675e..d8c450b4eb80 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -5,6 +5,7 @@ import platform import sys import pkg_resources +import requests from cloud_detect import provider from typing import Optional from enum import Enum @@ -12,7 +13,7 @@ _USAGE_STATS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'usage_stats.json') _USAGE_STATS_ENABLED = None _USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', 'https://stats.vllm.ai') - +_USAGE_STATS_URL = "http://127.0.0.1:1234" def is_usage_stats_enabled(): """Determine whether or not we can send usage stats to the server. 
The logic is as follows: @@ -59,4 +60,8 @@ def report_usage(self, model: str, context: UsageContext) -> None: def write_to_file(self): with open(_USAGE_STATS_FILE, "w") as outfile: json.dump(vars(self), outfile) + def send_to_server(self): + headers = {'Content-type': 'application/json'} + payload = json.dumps(vars(self)) + response = requests.post(_USAGE_STATS_URL, data=payload, headers=headers) usage_message = UsageMessage() \ No newline at end of file From 33c9dffabca833a600ed3ecb8553e6ce3fbdcd14 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Mon, 12 Feb 2024 21:46:24 -0800 Subject: [PATCH 06/47] Added additional arg for from_engine_args --- vllm/entrypoints/openai/api_server.py | 2 +- vllm/usage/usage_lib.py | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 291c90a17d23..6b1720842180 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -213,7 +213,7 @@ async def authentication(request: Request, call_next): else: served_model = args.model engine_args = AsyncEngineArgs.from_cli_args(args) - engine = AsyncLLMEngine.from_engine_args(engine_args, usage_context=UsageContext.API_SERVER) + engine = AsyncLLMEngine.from_engine_args(engine_args, usage_context=UsageContext.OPENAI_API_SERVER) openai_serving_chat = OpenAIServingChat(engine, served_model, args.response_role, args.chat_template) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index d8c450b4eb80..5abfd950bde9 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -6,14 +6,19 @@ import sys import pkg_resources import requests +import datetime from cloud_detect import provider from typing import Optional from enum import Enum from pathlib import Path + + _USAGE_STATS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'usage_stats.json') _USAGE_STATS_ENABLED = None _USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', 'https://stats.vllm.ai') _USAGE_STATS_URL = "http://127.0.0.1:1234" + + def is_usage_stats_enabled(): """Determine whether or not we can send usage stats to the server. 
The logic is as follows: @@ -33,6 +38,9 @@ def is_usage_stats_enabled(): _USAGE_STATS_ENABLED = not (do_not_track or no_usage_stats or do_not_track_file) return _USAGE_STATS_ENABLED +def _get_current_timestamp_ns() -> int: + return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1e9) + class UsageContext(Enum): UNKNOWN_CONTEXT = "UNKNOWN_CONTEXT" LLM = "LLM" @@ -49,6 +57,8 @@ def __init__(self) -> None: self.model : Optional[str] = None self.vllm_version : Optional[str] = None self.context : Optional[str] = None + self.log_time : Optional[int] = None + def report_usage(self, model: str, context: UsageContext) -> None: self.context = context.value self.gpu_name = torch.cuda.get_device_name() @@ -57,9 +67,12 @@ def report_usage(self, model: str, context: UsageContext) -> None: self.platform = platform.platform() self.vllm_version = pkg_resources.get_distribution("vllm").version self.model = model + self.log_time = _get_current_timestamp_ns() + def write_to_file(self): with open(_USAGE_STATS_FILE, "w") as outfile: json.dump(vars(self), outfile) + def send_to_server(self): headers = {'Content-type': 'application/json'} payload = json.dumps(vars(self)) From ad609f07961c3d8253be5357ea5278168fc5e7cd Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Mon, 12 Feb 2024 21:50:29 -0800 Subject: [PATCH 07/47] comments --- vllm/usage/usage_lib.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 5abfd950bde9..3fe762e3c544 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -13,10 +13,10 @@ from pathlib import Path -_USAGE_STATS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'usage_stats.json') +_USAGE_STATS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'usage_stats.json') #File path to store usage data locally _USAGE_STATS_ENABLED = None _USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', 'https://stats.vllm.ai') -_USAGE_STATS_URL = "http://127.0.0.1:1234" +_USAGE_STATS_URL = "http://127.0.0.1:1234" #Placeholder for sending usage data to vector.dev http server def is_usage_stats_enabled(): From 8a2f18ae81ade20c7743202a0affca27a9c2cfb1 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 8 Feb 2024 11:44:52 -0800 Subject: [PATCH 08/47] Write info to local json MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ROCm] Fix build problem resulted from previous commit related to FP8 kv-cache support (#2790) Add documentation on how to do incremental builds (#2796) [Ray] Integration compiled DAG off by default (#2471) Disable custom all reduce by default (#2808) add usage context removed usage_context from Engine_args Move IO to another process added http request [ROCm] support Radeon™ 7900 series (gfx1100) without using flash-attention (#2768) Add documentation section about LoRA (#2834) Refactor 2 awq gemm kernels into m16nXk32 (#2723) Co-authored-by: Chunan Zeng Added additional arg for from_engine_args comments --- Dockerfile.rocm | 16 +- csrc/quantization/awq/gemm_kernels.cu | 366 ++++-------------- .../getting_started/amd-installation.rst | 3 +- docs/source/getting_started/installation.rst | 10 + docs/source/index.rst | 1 + docs/source/models/lora.rst | 52 +++ requirements.txt | 1 + rocm_patch/rocm_bf16.patch | 15 + setup.py | 7 +- vllm/config.py | 26 +- vllm/engine/arg_utils.py | 1 - vllm/engine/async_llm_engine.py | 9 +- vllm/engine/llm_engine.py | 78 +++- vllm/engine/ray_utils.py | 18 + vllm/entrypoints/api_server.py | 
4 +- vllm/entrypoints/llm.py | 4 +- vllm/entrypoints/openai/api_server.py | 5 +- vllm/model_executor/layers/attention.py | 45 +++ .../model_executor/layers/quantization/awq.py | 2 +- vllm/usage/usage_lib.py | 80 ++++ 20 files changed, 414 insertions(+), 329 deletions(-) create mode 100644 docs/source/models/lora.rst create mode 100644 rocm_patch/rocm_bf16.patch create mode 100644 vllm/usage/usage_lib.py diff --git a/Dockerfile.rocm b/Dockerfile.rocm index 3c7630530303..e0ef4a0f4131 100644 --- a/Dockerfile.rocm +++ b/Dockerfile.rocm @@ -17,6 +17,12 @@ RUN echo "FA_GFX_ARCHS is $FA_GFX_ARCHS" ARG FA_BRANCH="3d2b6f5" RUN echo "FA_BRANCH is $FA_BRANCH" +# whether to build flash-attention +# if 0, will not build flash attention +# this is useful for gfx target where flash-attention is not supported +# In that case, we need to use the python reference attention implementation in vllm +ARG BUILD_FA="1" + # Install some basic utilities RUN apt-get update && apt-get install python3 python3-pip -y @@ -50,7 +56,8 @@ ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/rocm/lib/:/libtorch/lib: ENV CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:/libtorch/include:/libtorch/include/torch/csrc/api/include/:/opt/rocm/include/: # Install ROCm flash-attention -RUN mkdir libs \ +RUN if [ "$BUILD_FA" == "1" ]; then \ + mkdir libs \ && cd libs \ && git clone https://github.com/ROCmSoftwarePlatform/flash-attention.git \ && cd flash-attention \ @@ -60,7 +67,8 @@ RUN mkdir libs \ && if [ "$BASE_IMAGE" = "rocm/pytorch:rocm5.7_ubuntu22.04_py3.10_pytorch_2.0.1" ]; then \ patch /opt/conda/envs/py_3.10/lib/python3.10/site-packages/torch/utils/hipify/hipify_python.py hipify_patch.patch; fi \ && python3 setup.py install \ - && cd .. + && cd ..; \ + fi COPY ./ /app/vllm @@ -75,7 +83,9 @@ RUN if [ "$BASE_IMAGE" = "rocm/pytorch:rocm6.0_ubuntu20.04_py3.9_pytorch_2.1.1" RUN cd /app \ && cd vllm \ && pip install -U -r requirements-rocm.txt \ - && bash patch_xformers.rocm.sh \ + && if [ "$BUILD_FA" == "1" ]; then \ + bash patch_xformers.rocm.sh; fi \ + && patch /opt/rocm/include/hip/amd_detail/amd_hip_bf16.h /app/vllm/rocm_patch/rocm_bf16.patch \ && python3 setup.py install \ && cd .. 
diff --git a/csrc/quantization/awq/gemm_kernels.cu b/csrc/quantization/awq/gemm_kernels.cu index 376c8ebfb9b7..5aefb0bd16ae 100644 --- a/csrc/quantization/awq/gemm_kernels.cu +++ b/csrc/quantization/awq/gemm_kernels.cu @@ -27,72 +27,85 @@ __pack_half2(const half x, const half y) { return (v1 << 16) | v0; } -__global__ void __launch_bounds__(64) gemm_forward_4bit_cuda_m16n128k32(int G, int split_k_iters, half* __restrict__ A, int* __restrict__ B, half* __restrict__ scaling_factors, int* __restrict__ zeros, int M, int IC, int OC, half* __restrict__ C) +template +__global__ void __launch_bounds__(64) gemm_forward_4bit_cuda_m16nXk32( + int G, + int split_k_iters, + half* __restrict__ A, + int* __restrict__ B, + half* __restrict__ scaling_factors, + int* __restrict__ zeros, + int M, + int IC, + int OC, + half* __restrict__ C) { + // Only support matrix n = 64 or 128 + assert(N == 64 || N == 128); #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 750 assert(false); #else static constexpr uint32_t ZERO = 0x0; float C_warp[32]; __shared__ half A_shared[16 * (32 + 8)]; - __shared__ half B_shared[32 * (128 + 8)]; - - __shared__ half scaling_factors_shared[128]; - __shared__ half zeros_shared[128]; + __shared__ half B_shared[32 * (N + 8)]; - int j_factors1 = ((OC + 128 - 1) / 128); + __shared__ half scaling_factors_shared[N]; + __shared__ half zeros_shared[N]; + + int j_factors1 = ((OC + N - 1) / N); int blockIdx_x = 0; int blockIdx_y = blockIdx.x % ((M + 16 - 1) / 16 * j_factors1); int blockIdx_z = blockIdx.x / ((M + 16 - 1) / 16 * j_factors1); half A_shared_warp[8]; - half B_shared_warp[32]; - for (int j_0_4_init = 0; j_0_4_init < 4; ++j_0_4_init) { + half B_shared_warp[N / 4]; + for (int j_0_4_init = 0; j_0_4_init < N / 32; ++j_0_4_init) { for (int i = 0; i < 8; ++i) { C_warp[(j_0_4_init * 8) + i] = 0.0; } } static constexpr int row_stride_warp = 32 * 8 / 32; - static constexpr int row_stride = 2 * 32 * 8 / 128; - bool ld_zero_flag = (threadIdx.y * 32 + threadIdx.x) * 8 < 128; + static constexpr int row_stride = 2 * 32 * 8 / N; + bool ld_zero_flag = (threadIdx.y * 32 + threadIdx.x) * 8 < N; // TODO: Haotian: blockIdx_y / j_factors1 in A loading to support bsz > 16 bool ld_A_flag = (blockIdx_y / j_factors1 * 16 + threadIdx.y * row_stride_warp + threadIdx.x * 8 / 32) < M; // threadIdx.y is warp_id // bool wb_C_flag = (threadIdx.x / 4) < M; - half* A_ptr = A + half* A_ptr = A + (((int)blockIdx_y) / j_factors1 * 16 + (((int)threadIdx.y) * row_stride_warp) + ((int)threadIdx.x) / (32 / 8)) * IC + (((int)threadIdx.x) % (32 / 8)) * 8; - + int* B_ptr = B - + ((int)threadIdx.y) * (OC / 8) * 2 - + (((int)threadIdx.x) / (128 / 8)) * (OC / 8) - + (((int)blockIdx_y) % j_factors1) * (128 / 8) - + (((int)threadIdx.x) % (128 / 8)) * 1; + + ((int)threadIdx.y) * (OC / 8) * (256 / N) + + (((int)threadIdx.x) / (N / 8)) * (OC / 8) + + (((int)blockIdx_y) % j_factors1) * (N / 8) + + (((int)threadIdx.x) % (N / 8)) * 1; // Why * 1 in the above line? 
- - half* A_shared_ptr = A_shared - + ((int)threadIdx.y) * row_stride_warp * (32 + 8) + + half* A_shared_ptr = A_shared + + ((int)threadIdx.y) * row_stride_warp * (32 + 8) + (((int)threadIdx.x) / (32 / 8)) * (32 + 8) + (((int)threadIdx.x) % (32 / 8) ) * 8; half* B_shared_ptr = B_shared - + ((int)threadIdx.y) * (row_stride / 2) * (128 + 8) - + (((int)threadIdx.x) / (128 / 8)) * (128 + 8) - + (((int)threadIdx.x) % (128 / 8)) * 8; - + + ((int)threadIdx.y) * (row_stride / 2) * (N + 8) + + (((int)threadIdx.x) / (N / 8)) * (N + 8) + + (((int)threadIdx.x) % (N / 8)) * 8; + int* zeros_ptr = zeros - + (((int)blockIdx_y) % j_factors1) * (128 / 8) - + ((int)threadIdx.x) % (128 / 8); - + + (((int)blockIdx_y) % j_factors1) * (N / 8) + + ((int)threadIdx.x) % (N / 8); + half* scaling_factors_ptr = scaling_factors - + (((int)blockIdx_y) % j_factors1) * (128) - + (((int)threadIdx.x) % (128 / 8)) * 8; + + (((int)blockIdx_y) % j_factors1) * N + + (((int)threadIdx.x) % (N / 8)) * 8; - half* C_ptr = C + half* C_ptr = C + static_cast(blockIdx_z) * M * OC // blockIdz.x -> split_k dim - + (((int)blockIdx_y) % j_factors1) * 128 - + ((int)threadIdx.y) * 64 + + (((int)blockIdx_y) % j_factors1) * N + + ((int)threadIdx.y) * (N / 2) + (((int)threadIdx.x) % 4) * 2; // preload s.f. and zeros @@ -123,13 +136,13 @@ __global__ void __launch_bounds__(64) gemm_forward_4bit_cuda_m16n128k32(int G, i // uint4 B_loaded_scale = make_uint4(0, 0, 0, 0); int* B_ptr_local = B_ptr + k_0_0 * 32 * (OC / 8); - for (int ax0_ax1_fused_0 = 0; ax0_ax1_fused_0 < 8; ++ax0_ax1_fused_0) { + for (int ax0_ax1_fused_0 = 0; ax0_ax1_fused_0 < N / 16; ++ax0_ax1_fused_0) { // B: 32 x 136 (128+8) float16 // each warp: 32 x 4 // each thr: read 32 bit -> convert to 8xFP16 (a UINT4) -> scale and minus zero -> WB UINT4 // *(uint4*)(B_shared + ((((ax0_ax1_fused_0 * 544) + (((int)threadIdx.y) * 272)) + ((((int)threadIdx.x) >> 4) * 136)) + ((((int)threadIdx.x) & 15) * 8))) = *(uint4*)(B + ((((((k_0_0 * 163840) + (ax0_ax1_fused_0 * 20480)) + (((int)threadIdx.y) * 10240)) + ((((int)threadIdx.x) >> 4) * 5120)) + (((int)blockIdx_y) * 128)) + ((((int)threadIdx.x) & 15) * 8))); - // row stride in shared memory: (NWARPS * 32 * 8 / cta_N) + // row stride in shared memory: (NWARPS * 32 * 8 / cta_N) uint32_t B_loaded = *(uint32_t*)(B_ptr_local + ax0_ax1_fused_0 * row_stride * (OC / 8)); uint4 B_loaded_fp16 = dequantize_s4_to_fp16x2(B_loaded); //uint4 B_loaded_zero = *(uint4*)(zeros_shared + (threadIdx.x % (cta_N / 8)) * 8); @@ -152,7 +165,7 @@ __global__ void __launch_bounds__(64) gemm_forward_4bit_cuda_m16n128k32(int G, i */ // write back - *(uint4*)(B_shared_ptr + ax0_ax1_fused_0 * row_stride * (128 + 8)) = B_loaded_fp16; + *(uint4*)(B_shared_ptr + ax0_ax1_fused_0 * row_stride * (N + 8)) = B_loaded_fp16; } __syncthreads(); @@ -174,13 +187,13 @@ __global__ void __launch_bounds__(64) gemm_forward_4bit_cuda_m16n128k32(int G, i ); } - for (int ax1_0 = 0; ax1_0 < 4; ++ax1_0) { + for (int ax1_0 = 0; ax1_0 < N / 32; ++ax1_0) { { unsigned int addr; __asm__ __volatile__( "{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n" : "=r"(addr) - : "l"((void *)((&(B_shared[(((k_0_1 * 2176) + (((int)threadIdx.y) * 64)) + (ax1_0 * 16))])) + (((((int)threadIdx.x) & 15) * 136) + ((((int)threadIdx.x) >> 4) * 8)))) + : "l"((void *)((&(B_shared[(((k_0_1 * (N * 16 + 128)) + (((int)threadIdx.y) * (N / 2))) + (ax1_0 * 16))])) + (((((int)threadIdx.x) & 15) * (N + 8)) + ((((int)threadIdx.x) >> 4) * 8)))) ); __asm__ __volatile__( "ldmatrix.sync.aligned.m8n8.x4.trans.shared.b16" 
@@ -190,7 +203,7 @@ __global__ void __launch_bounds__(64) gemm_forward_4bit_cuda_m16n128k32(int G, i ); } } - for (int j_0_4 = 0; j_0_4 < 4; ++j_0_4) { + for (int j_0_4 = 0; j_0_4 < N / 32; ++j_0_4) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 { __asm__ __volatile__( @@ -258,241 +271,6 @@ __global__ void __launch_bounds__(64) gemm_forward_4bit_cuda_m16n128k32(int G, i #endif } - -__global__ void __launch_bounds__(64) gemm_forward_4bit_cuda_m16n64k32(int G, int split_k_iters, half* __restrict__ A, int* __restrict__ B, half* __restrict__ scaling_factors, int* __restrict__ zeros, int M, int IC, int OC, half* __restrict__ C) -{ -#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 750 - assert(false); -#else - static constexpr uint32_t ZERO = 0x0; - float C_warp[32]; - __shared__ half A_shared[16 * (32 + 8)]; - __shared__ half B_shared[32 * (64 + 8)]; - - __shared__ half scaling_factors_shared[64]; - __shared__ half zeros_shared[64]; - - int j_factors1 = ((OC + 64 - 1) / 64); - - int blockIdx_x = 0; - int blockIdx_y = blockIdx.x % ((M + 16 - 1) / 16 * j_factors1); - int blockIdx_z = blockIdx.x / ((M + 16 - 1) / 16 * j_factors1); - - half A_shared_warp[8]; - half B_shared_warp[16]; - for (int j_0_4_init = 0; j_0_4_init < 2; ++j_0_4_init) { - for (int i = 0; i < 8; ++i) { - C_warp[(j_0_4_init * 8) + i] = 0.0; - } - } - - static constexpr int row_stride_warp = 32 * 8 / 32; - static constexpr int row_stride = 2 * 32 * 8 / 64; - bool ld_zero_flag = (threadIdx.y * 32 + threadIdx.x) * 8 < 64; - // TODO: Haotian: blockIdx_y / j_factors1 in A loading to support bsz > 16 - bool ld_A_flag = (blockIdx_y / j_factors1 * 16 + threadIdx.y * row_stride_warp + threadIdx.x * 8 / 32) < M; // threadIdx.y is warp_id - // bool wb_C_flag = (threadIdx.x / 4) < M; - - half* A_ptr = A - + (((int)blockIdx_y) / j_factors1 * 16 + (((int)threadIdx.y) * row_stride_warp) + ((int)threadIdx.x) / (32 / 8)) * IC - + (((int)threadIdx.x) % (32 / 8)) * 8; - - int* B_ptr = B - + ((int)threadIdx.y) * (OC / 8) * 4 - + (((int)threadIdx.x) / (64 / 8)) * (OC / 8) - + (((int)blockIdx_y) % j_factors1) * (64 / 8) - + (((int)threadIdx.x) % (64 / 8)) * 1; -// Why * 1 in the above line? - - half* A_shared_ptr = A_shared - + ((int)threadIdx.y) * row_stride_warp * (32 + 8) - + (((int)threadIdx.x) / (32 / 8)) * (32 + 8) - + (((int)threadIdx.x) % (32 / 8) ) * 8; - - half* B_shared_ptr = B_shared - + ((int)threadIdx.y) * (row_stride / 2) * (64 + 8) - + (((int)threadIdx.x) / (64 / 8)) * (64 + 8) - + (((int)threadIdx.x) % (64 / 8)) * 8; - - int* zeros_ptr = zeros - + (((int)blockIdx_y) % j_factors1) * (64 / 8) - + ((int)threadIdx.x) % (64 / 8); - - half* scaling_factors_ptr = scaling_factors - + (((int)blockIdx_y) % j_factors1) * (64) - + (((int)threadIdx.x) % (64 / 8)) * 8; - - half* C_ptr = C - + static_cast(blockIdx_z) * M * OC // blockIdz.x -> split_k dim - + (((int)blockIdx_y) % j_factors1) * 64 - + ((int)threadIdx.y) * 32 - + (((int)threadIdx.x) % 4) * 2; - - // preload s.f. 
and zeros - int k_bound = (IC / 32 + split_k_iters - 1) / split_k_iters; - if ((k_bound - 1) * split_k_iters * 32 + blockIdx_z * 32 >= IC) k_bound -= 1; - for (int _k_0_0 = 0; _k_0_0 < k_bound; ++_k_0_0) { - int k_0_0 = _k_0_0 * split_k_iters + blockIdx_z; - __syncthreads(); - // TODO: Haotian: blockIdx_y / j_factors1 in A loading to support bsz > 16 - if (ld_A_flag) - { - *(uint4*)(A_shared_ptr) = *(uint4*)(A_ptr + (k_0_0 * 32)); - } - else - { - *(uint4*)(A_shared_ptr) = make_uint4(0, 0, 0, 0); - } - - // for (int ax0_ax1_fused_0 = 0; ax0_ax1_fused_0 < 2; ++ax0_ax1_fused_0) { - uint32_t zeros_loaded = *(uint32_t*)(zeros_ptr + k_0_0 * 32 / G * (OC / 8)); - uint4 B_loaded_zero = dequantize_s4_to_fp16x2(zeros_loaded); - uint4 B_loaded_scale = *(uint4*)(scaling_factors_ptr + k_0_0 * 32 / G * (OC)); - /* - if (blockIdx_z == 0 && blockIdx_y == 0 && k_0_0 == 0 && threadIdx.x == 0 && threadIdx.y == 0){ - printf("%x %x %x %x %x %x %x %x\n", B_loaded_scale.x, B_loaded_scale.y, B_loaded_scale.z, B_loaded_scale.w, B_loaded_zero.x, B_loaded_zero.y, B_loaded_zero.z, B_loaded_zero.w); - } - */ - // uint4 B_loaded_scale = make_uint4(0, 0, 0, 0); - int* B_ptr_local = B_ptr + k_0_0 * 32 * (OC / 8); - - for (int ax0_ax1_fused_0 = 0; ax0_ax1_fused_0 < 4; ++ax0_ax1_fused_0) { - - // B: 32 x 136 (128+8) float16 - // each warp: 32 x 4 - // each thr: read 32 bit -> convert to 8xFP16 (a UINT4) -> scale and minus zero -> WB UINT4 - // *(uint4*)(B_shared + ((((ax0_ax1_fused_0 * 544) + (((int)threadIdx.y) * 272)) + ((((int)threadIdx.x) >> 4) * 136)) + ((((int)threadIdx.x) & 15) * 8))) = *(uint4*)(B + ((((((k_0_0 * 163840) + (ax0_ax1_fused_0 * 20480)) + (((int)threadIdx.y) * 10240)) + ((((int)threadIdx.x) >> 4) * 5120)) + (((int)blockIdx_y) * 128)) + ((((int)threadIdx.x) & 15) * 8))); - // row stride in shared memory: (NWARPS * 32 * 8 / cta_N) - uint32_t B_loaded = *(uint32_t*)(B_ptr_local + ax0_ax1_fused_0 * row_stride * (OC / 8)); - uint4 B_loaded_fp16 = dequantize_s4_to_fp16x2(B_loaded); - //uint4 B_loaded_zero = *(uint4*)(zeros_shared + (threadIdx.x % (cta_N / 8)) * 8); - - // uint4 B_loaded_scale = *(uint4*)(scaling_factors_shared + (threadIdx.x % (cta_N / 8)) * 8); - // - zero and * scale - // TODO (Haotian): can save 4 assembly instructions if sormulate as deq = q * scale - zero * scale. 
- asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.x) : "r"(B_loaded_fp16.x), "r"(B_loaded_zero.x)); - asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.x) : "r"(B_loaded_fp16.x), "r"(B_loaded_scale.x), "r"(ZERO)); - asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.y) : "r"(B_loaded_fp16.y), "r"(B_loaded_zero.y)); - asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.y) : "r"(B_loaded_fp16.y), "r"(B_loaded_scale.y), "r"(ZERO)); - asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.z) : "r"(B_loaded_fp16.z), "r"(B_loaded_zero.z)); - asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.z) : "r"(B_loaded_fp16.z), "r"(B_loaded_scale.z), "r"(ZERO)); - asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.w) : "r"(B_loaded_fp16.w), "r"(B_loaded_zero.w)); - asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.w) : "r"(B_loaded_fp16.w), "r"(B_loaded_scale.w), "r"(ZERO)); - /* - if (ax0_ax1_fused_0 == 0 && blockIdx_z == 0 && blockIdx_y == 0 && k_0_0 == 0 && threadIdx.x == 17 && threadIdx.y == 0){ - printf("[x] %X %X %X %X\n", B_loaded_fp16.x, B_loaded_fp16.y, B_loaded_fp16.z, B_loaded_fp16.w); - } - */ - - // write back - *(uint4*)(B_shared_ptr + ax0_ax1_fused_0 * row_stride * (64 + 8)) = B_loaded_fp16; - } - __syncthreads(); - - for (int k_0_1 = 0; k_0_1 < 2; ++k_0_1) - { - { - unsigned int addr; - __asm__ __volatile__( - "{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n" - : "=r"(addr) - : "l"((void *)((&(A_shared[(k_0_1 * 16)])) + (((((int)threadIdx.x) & 15) * 40) + ((((int)threadIdx.x) >> 4) * 8)))) - ); - __asm__ __volatile__( - "ldmatrix.sync.aligned.m8n8.x4.shared.b16" - "{%0, %1, %2, %3}, [%4];\n" - : "=r"(((unsigned *)(A_shared_warp + 0))[0]), "=r"(((unsigned *)(A_shared_warp + 0))[1]), "=r"(((unsigned *)(A_shared_warp + 0))[2]), "=r"(((unsigned *)(A_shared_warp + 0))[3]) - : "r"(addr) - ); - } - - - for (int ax1_0 = 0; ax1_0 < 2; ++ax1_0) - { - { - unsigned int addr; - __asm__ __volatile__( - "{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n" - : "=r"(addr) - : "l"((void *)((&(B_shared[(((k_0_1 * 1152) + (((int)threadIdx.y) * 32)) + (ax1_0 * 16))])) + (((((int)threadIdx.x) & 15) * 72) + ((((int)threadIdx.x) >> 4) * 8)))) - ); - __asm__ __volatile__( - "ldmatrix.sync.aligned.m8n8.x4.trans.shared.b16" - "{%0, %1, %2, %3}, [%4];\n" - : "=r"(((unsigned *)(B_shared_warp + (ax1_0 * 8)))[0]), "=r"(((unsigned *)(B_shared_warp + (ax1_0 * 8)))[1]), "=r"(((unsigned *)(B_shared_warp + (ax1_0 * 8)))[2]), "=r"(((unsigned *)(B_shared_warp + (ax1_0 * 8)))[3]) - : "r"(addr) - ); - } - } - - for (int j_0_4 = 0; j_0_4 < 2; ++j_0_4) - { -#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 - { - __asm__ __volatile__( - "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32" - "{%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n" - : "=f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[3]) - : "r"(((unsigned *)(A_shared_warp + 0))[0]), "r"(((unsigned *)(A_shared_warp + 0))[1]), "r"(((unsigned *)(B_shared_warp + (j_0_4 * 8)))[0]), "f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "f"(((float *)(C_warp + (j_0_4 * 8)))[3])); - } - - { - __asm__ __volatile__( - "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32" - "{%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n" - : 
"=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[1]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3]) - : "r"(((unsigned *)(A_shared_warp + 0))[0]), "r"(((unsigned *)(A_shared_warp + 0))[1]), "r"(((unsigned *)(B_shared_warp + ((j_0_4 * 8) + 4)))[0]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[1]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3])); - } - - { - __asm__ __volatile__( - "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32" - "{%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n" - : "=f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[3]) - : "r"(((unsigned *)(A_shared_warp + 0))[2]), "r"(((unsigned *)(A_shared_warp + 0))[3]), "r"(((unsigned *)(B_shared_warp + (j_0_4 * 8)))[1]), "f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "f"(((float *)(C_warp + (j_0_4 * 8)))[3])); - } - - { - __asm__ __volatile__( - "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32" - "{%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n" - : "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[1]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3]) - : "r"(((unsigned *)(A_shared_warp + 0))[2]), "r"(((unsigned *)(A_shared_warp + 0))[3]), "r"(((unsigned *)(B_shared_warp + ((j_0_4 * 8) + 4)))[1]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[1]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3])); - } -#else - { - __asm__ __volatile__( - "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32" - "{%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%10, %11, %12, %13};\n" - : "=f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "=f"(((float *)(C_warp + (j_0_4 * 8)))[3]) - : "r"(((unsigned *)(A_shared_warp + 0))[0]), "r"(((unsigned *)(A_shared_warp + 0))[1]), "r"(((unsigned *)(A_shared_warp + 0))[2]), "r"(((unsigned *)(A_shared_warp + 0))[3]), "r"(((unsigned *)(B_shared_warp + (j_0_4 * 8)))[0]), "r"(((unsigned *)(B_shared_warp + (j_0_4 * 8)))[1]), "f"(((float *)(C_warp + (j_0_4 * 8)))[0]), "f"(((float *)(C_warp + (j_0_4 * 8)))[1]), "f"(((float *)(C_warp + (j_0_4 * 8)))[2]), "f"(((float *)(C_warp + (j_0_4 * 8)))[3])); - } - - { - __asm__ __volatile__( - "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32" - "{%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%10, %11, %12, %13};\n" - : "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[1]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "=f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3]) - : "r"(((unsigned *)(A_shared_warp + 0))[0]), "r"(((unsigned *)(A_shared_warp + 0))[1]), "r"(((unsigned *)(A_shared_warp + 0))[2]), "r"(((unsigned *)(A_shared_warp + 0))[3]), "r"(((unsigned *)(B_shared_warp + ((j_0_4 * 8) + 4)))[0]), "r"(((unsigned *)(B_shared_warp + ((j_0_4 * 8) + 4)))[1]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[0]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[1]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[2]), "f"(((float *)(C_warp + ((j_0_4 * 8) + 4)))[3])); - } -#endif - } 
- } - } - -// TODO: Shang: Hoist loop invariance. - for (int ax1_0_1 = 0; ax1_0_1 < 2; ++ax1_0_1) { - for (int local_id = 0; local_id < 8; ++local_id) { - int row_offset = (((int)blockIdx_y) / j_factors1) * 16 + ((int)threadIdx.x) / 4 + (local_id % 4) / 2 * 8; - if (row_offset < M) - { - *(C_ptr + ax1_0_1 * 16 + row_offset * OC + (local_id / 4) * 8 + local_id % 2) = __float2half(C_warp[(ax1_0_1 * 8) + local_id]); - } - } - } -#endif -} - __global__ void __launch_bounds__(64) dequantize_weights( int* __restrict__ B, half* __restrict__ scaling_factors, @@ -526,26 +304,24 @@ __global__ void __launch_bounds__(64) dequantize_weights( int index4 = 8 * col + (int)(row / G) * N * 8; half* scaling_factors_ptr2 = scaling_factors + index4; + uint32_t zeros_loaded = *(uint32_t*)(zeros_ptr2); + uint4 B_loaded_zero = dequantize_s4_to_fp16x2(zeros_loaded); + uint4 B_loaded_scale = *(uint4*)(scaling_factors_ptr2); - uint32_t zeros_loaded = *(uint32_t*)(zeros_ptr2); - uint4 B_loaded_zero = dequantize_s4_to_fp16x2(zeros_loaded); - uint4 B_loaded_scale = *(uint4*)(scaling_factors_ptr2); -int j=0; + uint32_t B_loaded = *(uint32_t*)B_ptr2; + uint4 B_loaded_fp16 = dequantize_s4_to_fp16x2(B_loaded); + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.x) : "r"(B_loaded_fp16.x), "r"(B_loaded_zero.x)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.x) : "r"(B_loaded_fp16.x), "r"(B_loaded_scale.x), "r"(ZERO)); + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.y) : "r"(B_loaded_fp16.y), "r"(B_loaded_zero.y)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.y) : "r"(B_loaded_fp16.y), "r"(B_loaded_scale.y), "r"(ZERO)); + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.z) : "r"(B_loaded_fp16.z), "r"(B_loaded_zero.z)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.z) : "r"(B_loaded_fp16.z), "r"(B_loaded_scale.z), "r"(ZERO)); + asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.w) : "r"(B_loaded_fp16.w), "r"(B_loaded_zero.w)); + asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.w) : "r"(B_loaded_fp16.w), "r"(B_loaded_scale.w), "r"(ZERO)); - uint32_t B_loaded = *(uint32_t*)(B_ptr2 + j); - uint4 B_loaded_fp16 = dequantize_s4_to_fp16x2(B_loaded); - asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.x) : "r"(B_loaded_fp16.x), "r"(B_loaded_zero.x)); - asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.x) : "r"(B_loaded_fp16.x), "r"(B_loaded_scale.x), "r"(ZERO)); - asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.y) : "r"(B_loaded_fp16.y), "r"(B_loaded_zero.y)); - asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.y) : "r"(B_loaded_fp16.y), "r"(B_loaded_scale.y), "r"(ZERO)); - asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.z) : "r"(B_loaded_fp16.z), "r"(B_loaded_zero.z)); - asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.z) : "r"(B_loaded_fp16.z), "r"(B_loaded_scale.z), "r"(ZERO)); - asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(B_loaded_fp16.w) : "r"(B_loaded_fp16.w), "r"(B_loaded_zero.w)); - asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(B_loaded_fp16.w) : "r"(B_loaded_fp16.w), "r"(B_loaded_scale.w), "r"(ZERO)); - - *(uint4*)(B_shared_ptr2 + j) = B_loaded_fp16; + *(uint4*)B_shared_ptr2 = B_loaded_fp16; - for (int i=0; i<8; ++i) { + for (int i = 0; i < 8; ++i) { *(C_ptr2 + i) = B_shared[i]; } } @@ -650,19 +426,21 @@ torch::Tensor awq_gemm( // threadIdx.x: 32 // threadIdx.y: i_factors[2] * j_factors[2] dim3 
threads_per_block(32, 2); - vllm::awq::gemm_forward_4bit_cuda_m16n128k32<<>>( - group_size, split_k_iters, in_feats, kernel, scaling_factors, zeros, num_in_feats, num_in_channels, num_out_channels, out_feats); + vllm::awq::gemm_forward_4bit_cuda_m16nXk32<128><<>>( + group_size, split_k_iters, in_feats, kernel, scaling_factors, zeros, num_in_feats, num_in_channels, + num_out_channels, out_feats); } else if (num_out_channels % 64 == 0) { int j_factors1 = num_out_channels / 64 / 1; dim3 num_blocks(1 * (num_out_feats + 16 - 1) / 16 * j_factors1 * split_k_iters); - + // threadIdx.x: 32 // threadIdx.y: i_factors[2] * j_factors[2] dim3 threads_per_block(32, 2); - vllm::awq::gemm_forward_4bit_cuda_m16n64k32<<>>( - group_size, split_k_iters, in_feats, kernel, scaling_factors, zeros, num_in_feats, num_in_channels, num_out_channels, out_feats); + vllm::awq::gemm_forward_4bit_cuda_m16nXk32<64><<>>( + group_size, split_k_iters, in_feats, kernel, scaling_factors, zeros, num_in_feats, num_in_channels, + num_out_channels, out_feats); } return _out_feats.sum(0); } diff --git a/docs/source/getting_started/amd-installation.rst b/docs/source/getting_started/amd-installation.rst index 6851ba136351..5d9fdf405670 100644 --- a/docs/source/getting_started/amd-installation.rst +++ b/docs/source/getting_started/amd-installation.rst @@ -12,7 +12,7 @@ Requirements * OS: Linux * Python: 3.8 -- 3.11 -* GPU: MI200s (gfx90a), MI300 (gfx942) +* GPU: MI200s (gfx90a), MI300 (gfx942), Radeon RX 7900 series (gfx1100) * Pytorch 2.0.1/2.1.1/2.2 * ROCm 5.7 (Verified on python 3.10) or ROCm 6.0 (Verified on python 3.9) @@ -105,6 +105,7 @@ The `Dokerfile.rocm` is designed to support both ROCm 5.7 and ROCm 6.0 and later * `BASE_IMAGE`: specifies the base image used when running ``docker build``, specifically the PyTorch on ROCm base image. We have tested ROCm 5.7 and ROCm 6.0. The default is `rocm/pytorch:rocm6.0_ubuntu20.04_py3.9_pytorch_2.1.1` * `FX_GFX_ARCHS`: specifies the GFX architecture that is used to build flash-attention, for example, `gfx90a;gfx942` for MI200 and MI300. The default is `gfx90a;gfx942` * `FA_BRANCH`: specifies the branch used to build the flash-attention in `ROCmSoftwarePlatform's flash-attention repo `_. The default is `3d2b6f5` +* `BUILD_FA`: specifies whether to build flash-attention. For `Radeon RX 7900 series (gfx1100) `_, this should be set to 0 before flash-attention supports this target. Their values can be passed in when running ``docker build`` with ``--build-arg`` options. diff --git a/docs/source/getting_started/installation.rst b/docs/source/getting_started/installation.rst index 911c3d8f9a4a..77b0ae65838a 100644 --- a/docs/source/getting_started/installation.rst +++ b/docs/source/getting_started/installation.rst @@ -67,3 +67,13 @@ You can also build and install vLLM from source: $ # Use `--ipc=host` to make sure the shared memory is large enough. $ docker run --gpus all -it --rm --ipc=host nvcr.io/nvidia/pytorch:23.10-py3 + +.. note:: + If you are developing the C++ backend of vLLM, consider building vLLM with + + .. code-block:: console + + $ python setup.py develop + + since it will give you incremental builds. The downside is that this method + is `deprecated by setuptools `_. diff --git a/docs/source/index.rst b/docs/source/index.rst index 3e2331907f0f..9b53a643b8d4 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -82,6 +82,7 @@ Documentation models/supported_models models/adding_model models/engine_args + models/lora .. 
toctree:: :maxdepth: 1 diff --git a/docs/source/models/lora.rst b/docs/source/models/lora.rst new file mode 100644 index 000000000000..b773edfc6ff2 --- /dev/null +++ b/docs/source/models/lora.rst @@ -0,0 +1,52 @@ +.. _lora: + +Using LoRA adapters +=================== + +This document shows you how to use `LoRA adapters `_ with vLLM on top of a base model. +Adapters can be efficiently served on a per request basis with minimal overhead. First we download the adapter(s) and save +them locally with + +.. code-block:: python + + from huggingface_hub import snapshot_download + + sql_lora_path = snapshot_download(repo_id="yard1/llama-2-7b-sql-lora-test") + + +Then we instantiate the base model and pass in the ``enable_lora=True`` flag: + +.. code-block:: python + + from vllm import LLM, SamplingParams + from vllm.lora.request import LoRARequest + + llm = LLM(model="meta-llama/Llama-2-7b-hf", enable_lora=True) + + +We can now submit the prompts and call ``llm.generate`` with the ``lora_request`` parameter. The first parameter +of ``LoRARequest`` is a human identifiable name, the second parameter is a globally unique ID for the adapter and +the third parameter is the path to the LoRA adapter. + +.. code-block:: python + + sampling_params = SamplingParams( + temperature=0, + max_tokens=256, + stop=["[/assistant]"] + ) + + prompts = [ + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", + ] + + outputs = llm.generate( + prompts, + sampling_params, + lora_request=LoRARequest("sql_adapter", 1, sql_lora_path) + ) + + +Check out `examples/multilora_inference.py `_ +for an example of how to use LoRA adapters with the async engine and how to use more advanced configuration options. \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 5684b2c29634..37be5c168752 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,7 @@ ray >= 2.9 sentencepiece # Required for LLaMA tokenizer. numpy torch == 2.1.2 +cloud-detect transformers >= 4.37.0 # Required for Qwen2 xformers == 0.0.23.post1 # Required for CUDA 12.1. fastapi diff --git a/rocm_patch/rocm_bf16.patch b/rocm_patch/rocm_bf16.patch new file mode 100644 index 000000000000..a0f07da2a3e2 --- /dev/null +++ b/rocm_patch/rocm_bf16.patch @@ -0,0 +1,15 @@ +--- amd_hip_bf16.h 2024-02-06 18:28:58.268699142 +0000 ++++ amd_hip_bf16.h.new 2024-02-06 18:28:31.988647133 +0000 +@@ -90,10 +90,10 @@ + #include "math_fwd.h" // ocml device functions + + #if defined(__HIPCC_RTC__) +-#define __HOST_DEVICE__ __device__ ++#define __HOST_DEVICE__ __device__ static + #else + #include +-#define __HOST_DEVICE__ __host__ __device__ ++#define __HOST_DEVICE__ __host__ __device__ static inline + #endif + + // Since we are using unsigned short to represent data in bfloat16, it can be of different sizes on diff --git a/setup.py b/setup.py index 9cc4aea0ea75..ea58a1a49e7e 100644 --- a/setup.py +++ b/setup.py @@ -15,11 +15,16 @@ ROOT_DIR = os.path.dirname(__file__) +# If you are developing the C++ backend of vLLM, consider building vLLM with +# `python setup.py develop` since it will give you incremental builds. 
+# The downside is that this method is deprecated, see +# https://github.com/pypa/setuptools/issues/917 + MAIN_CUDA_VERSION = "12.1" # Supported NVIDIA GPU architectures. NVIDIA_SUPPORTED_ARCHS = {"7.0", "7.5", "8.0", "8.6", "8.9", "9.0"} -ROCM_SUPPORTED_ARCHS = {"gfx90a", "gfx942"} +ROCM_SUPPORTED_ARCHS = {"gfx90a", "gfx942", "gfx1100"} # SUPPORTED_ARCHS = NVIDIA_SUPPORTED_ARCHS.union(ROCM_SUPPORTED_ARCHS) diff --git a/vllm/config.py b/vllm/config.py index c35b6302b2cf..27c61d4d5043 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -388,16 +388,26 @@ def _verify_args(self) -> None: if self.pipeline_parallel_size > 1: raise NotImplementedError( "Pipeline parallelism is not supported yet.") - if is_hip(): + if not self.disable_custom_all_reduce and self.world_size > 1: + if is_hip(): + self.disable_custom_all_reduce = True + logger.info( + "Disabled the custom all-reduce kernel because it is not " + "supported on AMD GPUs.") + elif self.pipeline_parallel_size > 1: + self.disable_custom_all_reduce = True + logger.info( + "Disabled the custom all-reduce kernel because it is not " + "supported with pipeline parallelism.") + + # FIXME(woosuk): Fix the stability issues and re-enable the custom + # all-reduce kernel. + if not self.disable_custom_all_reduce and self.world_size > 1: self.disable_custom_all_reduce = True logger.info( - "Disabled the custom all-reduce kernel because it is not " - "supported on AMD GPUs.") - elif self.pipeline_parallel_size > 1: - self.disable_custom_all_reduce = True - logger.info( - "Disabled the custom all-reduce kernel because it is not " - "supported with pipeline parallelism.") + "Custom all-reduce kernels are temporarily disabled due to " + "stability issues. We will re-enable them once the issues are " + "resolved.") class SchedulerConfig: diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index d5e63e25d6e8..7ed9ba738b5b 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -6,7 +6,6 @@ from vllm.config import (CacheConfig, DeviceConfig, ModelConfig, ParallelConfig, SchedulerConfig, LoRAConfig) - @dataclass class EngineArgs: """Arguments for vLLM engine.""" diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 7cba65460277..72eb2dd5ae5f 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -12,7 +12,7 @@ from vllm.logger import init_logger from vllm.outputs import RequestOutput from vllm.sampling_params import SamplingParams - +from vllm.usage.usage_lib import UsageContext logger = init_logger(__name__) @@ -613,7 +613,8 @@ async def get_model_config(self) -> ModelConfig: @classmethod def from_engine_args(cls, engine_args: AsyncEngineArgs, - start_engine_loop: bool = True) -> "AsyncLLMEngine": + start_engine_loop: bool = True, + usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT) -> "AsyncLLMEngine": """Creates an async LLM engine from the engine arguments.""" # Create the engine configs. 
engine_configs = engine_args.create_engine_configs() @@ -629,7 +630,9 @@ def from_engine_args(cls, log_requests=not engine_args.disable_log_requests, log_stats=not engine_args.disable_log_stats, max_log_len=engine_args.max_log_len, - start_engine_loop=start_engine_loop) + start_engine_loop=start_engine_loop, + usage_context=usage_context + ) return engine async def do_log_stats(self) -> None: diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 02c673c96fd9..dc1a794b78ae 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -2,6 +2,8 @@ from collections import defaultdict import os import time +import platform +import pickle from typing import (TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union) @@ -20,7 +22,8 @@ from vllm.transformers_utils.tokenizer import (detokenize_incrementally, TokenizerGroup) from vllm.utils import Counter, set_cuda_visible_devices, get_ip, get_open_port, get_distributed_init_method - +from vllm.usage.usage_lib import UsageContext, is_usage_stats_enabled, usage_message +from multiprocessing import Process if ray: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -30,6 +33,11 @@ logger = init_logger(__name__) _LOCAL_LOGGING_INTERVAL_SEC = 5 +# If the env var is set, it uses the Ray's compiled DAG API +# which optimizes the control plane overhead. +# Run VLLM with VLLM_USE_RAY_COMPILED_DAG=1 to enable it. +USE_RAY_COMPILED_DAG = bool(os.getenv("VLLM_USE_RAY_COMPILED_DAG", 0)) + class LLMEngine: """An LLM engine that receives requests and generates texts. @@ -69,6 +77,7 @@ def __init__( lora_config: Optional[LoRAConfig], placement_group: Optional["PlacementGroup"], log_stats: bool, + usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT ) -> None: logger.info( "Initializing an LLM engine with config: " @@ -103,6 +112,11 @@ def __init__( self._init_tokenizer() self.seq_counter = Counter() + #If usage stat is enabled, collect relevant info. + if is_usage_stats_enabled(): + usage_message.report_usage(model_config.model, usage_context) + p = Process(usage_message.send_to_server()) + # Create the parallel GPU workers. if self.parallel_config.worker_use_ray: # Disable Ray usage stats collection. @@ -124,6 +138,10 @@ def __init__( self.stat_logger = StatLogger( local_interval=_LOCAL_LOGGING_INTERVAL_SEC) + self.forward_dag = None + if USE_RAY_COMPILED_DAG: + self.forward_dag = self._compiled_ray_dag() + def get_tokenizer_for_seq(self, sequence: Sequence): return self.tokenizer.get_lora_tokenizer(sequence.lora_request) @@ -350,7 +368,7 @@ def _init_cache(self) -> None: self._run_workers("warm_up_model") @classmethod - def from_engine_args(cls, engine_args: EngineArgs) -> "LLMEngine": + def from_engine_args(cls, engine_args: EngineArgs, usage_context: UsageContext=UsageContext.UNKNOWN_CONTEXT) -> "LLMEngine": """Creates an LLM engine from the engine arguments.""" # Create the engine configs. engine_configs = engine_args.create_engine_configs() @@ -360,7 +378,9 @@ def from_engine_args(cls, engine_args: EngineArgs) -> "LLMEngine": # Create the LLM engine. 
engine = cls(*engine_configs, placement_group, - log_stats=not engine_args.disable_log_stats) + log_stats=not engine_args.disable_log_stats, + usage_context = usage_context + ) return engine def encode_request( @@ -806,7 +826,8 @@ def step(self) -> List[RequestOutput]: "blocks_to_swap_in": scheduler_outputs.blocks_to_swap_in, "blocks_to_swap_out": scheduler_outputs.blocks_to_swap_out, "blocks_to_copy": scheduler_outputs.blocks_to_copy, - }) + }, + use_ray_compiled_dag=USE_RAY_COMPILED_DAG) # Only the driver worker returns the sampling results. output = all_outputs[0] @@ -966,6 +987,7 @@ def _run_workers( driver_args: Optional[List[Any]] = None, driver_kwargs: Optional[Dict[str, Any]] = None, max_concurrent_workers: Optional[int] = None, + use_ray_compiled_dag: bool = False, **kwargs, ) -> Any: """Runs the given method on all workers.""" @@ -974,11 +996,16 @@ def _run_workers( raise NotImplementedError( "max_concurrent_workers is not supported yet.") - # Start the ray workers first. - ray_worker_outputs = [ - worker.execute_method.remote(method, *args, **kwargs) - for worker in self.workers - ] + if use_ray_compiled_dag: + # Right now, compiled DAG can only accept a single + # input. TODO(sang): Fix it. + output_channels = self.forward_dag.execute(1) + else: + # Start the ray workers first. + ray_worker_outputs = [ + worker.execute_method.remote(method, *args, **kwargs) + for worker in self.workers + ] if driver_args is None: driver_args = args @@ -991,6 +1018,37 @@ def _run_workers( # Get the results of the ray workers. if self.workers: - ray_worker_outputs = ray.get(ray_worker_outputs) + if use_ray_compiled_dag: + try: + ray_worker_outputs = [ + pickle.loads(chan.begin_read()) + for chan in output_channels + ] + finally: + # Has to call end_read in order to reuse the DAG. + for chan in output_channels: + chan.end_read() + else: + ray_worker_outputs = ray.get(ray_worker_outputs) return [driver_worker_output] + ray_worker_outputs + + def _compiled_ray_dag(self): + import pkg_resources + required_version = "2.9" + current_version = pkg_resources.get_distribution("ray").version + if current_version < required_version: + raise ValueError(f"Ray version {required_version} or greater is " + f"required, but found {current_version}") + + from ray.dag import MultiOutputNode, InputNode + assert self.parallel_config.worker_use_ray + + # Right now, compiled DAG requires at least 1 arg. We send + # a dummy value for now. It will be fixed soon. + with InputNode() as input_data: + forward_dag = MultiOutputNode([ + worker.execute_model_compiled_dag_remote.bind(input_data) + for worker in self.workers + ]) + return forward_dag.experimental_compile() diff --git a/vllm/engine/ray_utils.py b/vllm/engine/ray_utils.py index afbc33ed19a0..bbcbbdfea2f0 100644 --- a/vllm/engine/ray_utils.py +++ b/vllm/engine/ray_utils.py @@ -1,3 +1,5 @@ +import pickle + from typing import Optional, List, Tuple, TYPE_CHECKING from vllm.config import ParallelConfig @@ -18,6 +20,11 @@ def __init__(self, init_cached_hf_modules=False) -> None: from transformers.dynamic_module_utils import init_hf_modules init_hf_modules() self.worker = None + # Since the compiled DAG runs a main execution + # in a different thread that calls cuda.set_device. + # The flag indicates is set_device is called on + # that thread. 
+ self.compiled_dag_cuda_device_set = False def init_worker(self, worker_init_fn): self.worker = worker_init_fn() @@ -40,6 +47,17 @@ def get_node_and_gpu_ids(self) -> Tuple[str, List[int]]: def set_cuda_visible_devices(self, device_ids) -> None: set_cuda_visible_devices(device_ids) + def execute_model_compiled_dag_remote(self, ignored): + """Used only when compiled DAG is enabled.""" + import torch + if not self.compiled_dag_cuda_device_set: + torch.cuda.set_device(self.worker.device) + self.compiled_dag_cuda_device_set = True + + output = self.worker.execute_model() + output = pickle.dumps(output) + return output + except ImportError as e: logger.warning(f"Failed to import Ray with {e!r}. " "For distributed inference, please install Ray with " diff --git a/vllm/entrypoints/api_server.py b/vllm/entrypoints/api_server.py index f7b8d258fae4..ae0df842bbe5 100644 --- a/vllm/entrypoints/api_server.py +++ b/vllm/entrypoints/api_server.py @@ -10,6 +10,7 @@ from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.sampling_params import SamplingParams from vllm.utils import random_uuid +from vllm.usage.usage_lib import UsageContext TIMEOUT_KEEP_ALIVE = 5 # seconds. app = FastAPI() @@ -85,9 +86,8 @@ async def stream_results() -> AsyncGenerator[bytes, None]: help="FastAPI root_path when app is behind a path based routing proxy") parser = AsyncEngineArgs.add_cli_args(parser) args = parser.parse_args() - engine_args = AsyncEngineArgs.from_cli_args(args) - engine = AsyncLLMEngine.from_engine_args(engine_args) + engine = AsyncLLMEngine.from_engine_args(engine_args, usage_context=UsageContext.API_SERVER) app.root_path = args.root_path uvicorn.run(app, diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index fc82018d18eb..52de89aa383d 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -9,7 +9,7 @@ from vllm.outputs import RequestOutput from vllm.sampling_params import SamplingParams from vllm.utils import Counter - +from vllm.usage.usage_lib import UsageContext class LLM: """An LLM for generating texts from given prompts and sampling parameters. 
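The entry-point changes in this patch all follow the same wiring: parse the CLI into engine args, then pass the entry point's UsageContext when the engine is constructed. A minimal sketch of that pattern for the standalone API server, using only the vllm names that appear in the diffs above (the argument parsing is abbreviated and illustrative):

# Sketch only: how an entry point tags the engine it creates with a
# UsageContext. The vllm imports and the from_engine_args keyword are the
# ones introduced in this patch; everything else is illustrative.
import argparse

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.usage.usage_lib import UsageContext

parser = argparse.ArgumentParser()
parser = AsyncEngineArgs.add_cli_args(parser)
args = parser.parse_args()

engine_args = AsyncEngineArgs.from_cli_args(args)
# Each entry point passes its own context so the usage report records where
# the engine was created (API_SERVER, OPENAI_API_SERVER, LLM, ...).
engine = AsyncLLMEngine.from_engine_args(
    engine_args, usage_context=UsageContext.API_SERVER)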
@@ -106,7 +106,7 @@ def __init__( disable_custom_all_reduce=disable_custom_all_reduce, **kwargs, ) - self.llm_engine = LLMEngine.from_engine_args(engine_args) + self.llm_engine = LLMEngine.from_engine_args(engine_args,usage_context=UsageContext.LLM) self.request_counter = Counter() def get_tokenizer( diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index deb0fddd643c..6b1720842180 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -23,7 +23,7 @@ from vllm.logger import init_logger from vllm.entrypoints.openai.serving_chat import OpenAIServingChat from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion - +from vllm.usage.usage_lib import UsageContext TIMEOUT_KEEP_ALIVE = 5 # seconds openai_serving_chat: OpenAIServingChat = None @@ -212,9 +212,8 @@ async def authentication(request: Request, call_next): served_model = args.served_model_name else: served_model = args.model - engine_args = AsyncEngineArgs.from_cli_args(args) - engine = AsyncLLMEngine.from_engine_args(engine_args) + engine = AsyncLLMEngine.from_engine_args(engine_args, usage_context=UsageContext.OPENAI_API_SERVER) openai_serving_chat = OpenAIServingChat(engine, served_model, args.response_role, args.chat_template) diff --git a/vllm/model_executor/layers/attention.py b/vllm/model_executor/layers/attention.py index 2ce9d60f08d8..0622a54db1bc 100644 --- a/vllm/model_executor/layers/attention.py +++ b/vllm/model_executor/layers/attention.py @@ -1,6 +1,7 @@ """Multi-head attention.""" from typing import List, Optional +import importlib import torch import torch.nn as nn from xformers import ops as xops @@ -58,6 +59,40 @@ def __init__( raise ValueError(f"head_size ({self.head_size}) is not supported. " f"Supported head sizes: {_SUPPORTED_HEAD_SIZES}.") + self.use_ref_attention = self.check_use_ref_attention() + + def check_use_ref_attention(self) -> bool: + if not is_hip(): + return False + # For ROCm, check whether flash attention is installed or not. + # if not, use_ref_attention needs to be True + return importlib.util.find_spec("flash_attn") is None + + def ref_masked_attention( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + ) -> torch.Tensor: + query = query.view(-1, self.num_heads, self.head_size) + key = key.view(-1, self.num_kv_heads, self.head_size) + value = value.view(-1, self.num_kv_heads, self.head_size) + + seq_len, _, _ = query.shape + attn_mask = torch.triu(torch.ones(seq_len, + seq_len, + dtype=query.dtype, + device=query.device), + diagonal=1) + attn_mask = attn_mask * torch.finfo(query.dtype).min + + attn_weights = self.scale * torch.einsum("qhd,khd->hqk", query, + key).float() + attn_weights = attn_weights + attn_mask.float() + attn_weights = torch.softmax(attn_weights, dim=-1).to(value.dtype) + out = torch.einsum("hqk,khd->qhd", attn_weights, value) + return out + def forward( self, query: torch.Tensor, @@ -137,6 +172,16 @@ def forward( self.alibi_slopes, self.num_kv_heads, batch_size, seq_len, query.dtype) + if self.use_ref_attention: + output = self.ref_masked_attention( + query, + key, + value, + ) + # Using view got RuntimeError: view size is not compatible with input tensor's size and stride + # (at least one dimension spans across two contiguous subspaces). Use reshape instead + return output.reshape(batch_size, seq_len, hidden_size) + # TODO(woosuk): Too many view operations. Let's try to reduce # them in the future for code readability. 
if self.alibi_slopes is None: diff --git a/vllm/model_executor/layers/quantization/awq.py b/vllm/model_executor/layers/quantization/awq.py index 681f95821eab..3e1c814dd233 100644 --- a/vllm/model_executor/layers/quantization/awq.py +++ b/vllm/model_executor/layers/quantization/awq.py @@ -145,8 +145,8 @@ def apply_weights(self, x: torch.Tensor, bias: Optional[torch.Tensor] = None) -> torch.Tensor: qweight = weights["qweight"] - qzeros = weights["qzeros"] scales = weights["scales"] + qzeros = weights["qzeros"] pack_factor = self.quant_config.pack_factor out_shape = (x.shape[:-1] + (qweight.shape[-1] * pack_factor, )) reshaped_x = x.reshape(-1, x.shape[-1]) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py new file mode 100644 index 000000000000..3fe762e3c544 --- /dev/null +++ b/vllm/usage/usage_lib.py @@ -0,0 +1,80 @@ +import requests +import os +import torch +import json +import platform +import sys +import pkg_resources +import requests +import datetime +from cloud_detect import provider +from typing import Optional +from enum import Enum +from pathlib import Path + + +_USAGE_STATS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'usage_stats.json') #File path to store usage data locally +_USAGE_STATS_ENABLED = None +_USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', 'https://stats.vllm.ai') +_USAGE_STATS_URL = "http://127.0.0.1:1234" #Placeholder for sending usage data to vector.dev http server + + +def is_usage_stats_enabled(): + """Determine whether or not we can send usage stats to the server. + The logic is as follows: + - By default, it should be enabled. + - Two environment variables can disable it: + - DO_NOT_TRACK=1 + - VLLM_NO_USAGE_STATS=1 + - A file in the home directory can disable it if it exists: + - $HOME/.config/vllm/do_not_track + """ + global _USAGE_STATS_ENABLED + if _USAGE_STATS_ENABLED is None: + do_not_track = os.environ.get('DO_NOT_TRACK', '0') == '1' + no_usage_stats = os.environ.get('VLLM_NO_USAGE_STATS', '0') == '1' + do_not_track_file = os.path.exists(os.path.expanduser('~/.config/vllm/do_not_track')) + + _USAGE_STATS_ENABLED = not (do_not_track or no_usage_stats or do_not_track_file) + return _USAGE_STATS_ENABLED + +def _get_current_timestamp_ns() -> int: + return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1e9) + +class UsageContext(Enum): + UNKNOWN_CONTEXT = "UNKNOWN_CONTEXT" + LLM = "LLM" + API_SERVER = "API_SERVER" + OPENAI_API_SERVER = "OPENAI_API_SERVER" + + +class UsageMessage: + def __init__(self) -> None: + self.gpu_name : Optional[str] = None + self.provider : Optional[str] = None + self.architecture : Optional[str] = None + self.platform : Optional[str] = None + self.model : Optional[str] = None + self.vllm_version : Optional[str] = None + self.context : Optional[str] = None + self.log_time : Optional[int] = None + + def report_usage(self, model: str, context: UsageContext) -> None: + self.context = context.value + self.gpu_name = torch.cuda.get_device_name() + self.provider = provider() + self.architecture = platform.machine() + self.platform = platform.platform() + self.vllm_version = pkg_resources.get_distribution("vllm").version + self.model = model + self.log_time = _get_current_timestamp_ns() + + def write_to_file(self): + with open(_USAGE_STATS_FILE, "w") as outfile: + json.dump(vars(self), outfile) + + def send_to_server(self): + headers = {'Content-type': 'application/json'} + payload = json.dumps(vars(self)) + response = requests.post(_USAGE_STATS_URL, data=payload, headers=headers) 
+usage_message = UsageMessage() \ No newline at end of file From f537692ccbde9df79be8f20cb6a09cf9d479f5e3 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Mon, 12 Feb 2024 22:21:19 -0800 Subject: [PATCH 09/47] Added Comments --- vllm/engine/llm_engine.py | 1 + vllm/usage/usage_lib.py | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index b7c8bb0aa536..c0f6795c8b92 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -63,6 +63,7 @@ class LLMEngine: placement_group: Ray placement group for distributed execution. Required for distributed execution. log_stats: Whether to log statistics. + usage_context: Specified entry point, used for usage info collection """ def __init__( diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 3fe762e3c544..2d100c6b0cc1 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -69,12 +69,16 @@ def report_usage(self, model: str, context: UsageContext) -> None: self.model = model self.log_time = _get_current_timestamp_ns() - def write_to_file(self): + def _write_to_file(self): with open(_USAGE_STATS_FILE, "w") as outfile: json.dump(vars(self), outfile) def send_to_server(self): headers = {'Content-type': 'application/json'} payload = json.dumps(vars(self)) - response = requests.post(_USAGE_STATS_URL, data=payload, headers=headers) + try: + response = requests.post(_USAGE_STATS_URL, data=payload, headers=headers) + except requests.exceptions.RequestException as e: + print("Usage Log Request Failed") + usage_message = UsageMessage() \ No newline at end of file From 0f1ba7f7f47bab6eadb65f058221149cc78aaa1b Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Mon, 12 Feb 2024 22:21:56 -0800 Subject: [PATCH 10/47] . --- vllm/engine/llm_engine.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index c0f6795c8b92..88ccaa866f3e 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -2,6 +2,7 @@ from collections import defaultdict import os import time +import pickle from typing import (TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union) From abc3948c829d60c11dc5619fffc895530c4b9edc Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 8 Feb 2024 11:44:52 -0800 Subject: [PATCH 11/47] Collect usage info on engine initialization --- requirements.txt | 1 + vllm/engine/arg_utils.py | 1 - vllm/engine/async_llm_engine.py | 9 ++- vllm/engine/llm_engine.py | 16 ++++- vllm/entrypoints/api_server.py | 4 +- vllm/entrypoints/llm.py | 4 +- vllm/entrypoints/openai/api_server.py | 5 +- vllm/usage/usage_lib.py | 84 +++++++++++++++++++++++++++ 8 files changed, 110 insertions(+), 14 deletions(-) create mode 100644 vllm/usage/usage_lib.py diff --git a/requirements.txt b/requirements.txt index 5684b2c29634..37be5c168752 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,7 @@ ray >= 2.9 sentencepiece # Required for LLaMA tokenizer. numpy torch == 2.1.2 +cloud-detect transformers >= 4.37.0 # Required for Qwen2 xformers == 0.0.23.post1 # Required for CUDA 12.1. 
fastapi diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index d5e63e25d6e8..7ed9ba738b5b 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -6,7 +6,6 @@ from vllm.config import (CacheConfig, DeviceConfig, ModelConfig, ParallelConfig, SchedulerConfig, LoRAConfig) - @dataclass class EngineArgs: """Arguments for vLLM engine.""" diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 7cba65460277..72eb2dd5ae5f 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -12,7 +12,7 @@ from vllm.logger import init_logger from vllm.outputs import RequestOutput from vllm.sampling_params import SamplingParams - +from vllm.usage.usage_lib import UsageContext logger = init_logger(__name__) @@ -613,7 +613,8 @@ async def get_model_config(self) -> ModelConfig: @classmethod def from_engine_args(cls, engine_args: AsyncEngineArgs, - start_engine_loop: bool = True) -> "AsyncLLMEngine": + start_engine_loop: bool = True, + usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT) -> "AsyncLLMEngine": """Creates an async LLM engine from the engine arguments.""" # Create the engine configs. engine_configs = engine_args.create_engine_configs() @@ -629,7 +630,9 @@ def from_engine_args(cls, log_requests=not engine_args.disable_log_requests, log_stats=not engine_args.disable_log_stats, max_log_len=engine_args.max_log_len, - start_engine_loop=start_engine_loop) + start_engine_loop=start_engine_loop, + usage_context=usage_context + ) return engine async def do_log_stats(self) -> None: diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 03a2b1157652..88ccaa866f3e 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -21,7 +21,8 @@ from vllm.transformers_utils.tokenizer import (detokenize_incrementally, TokenizerGroup) from vllm.utils import Counter, set_cuda_visible_devices, get_ip, get_open_port, get_distributed_init_method - +from vllm.usage.usage_lib import UsageContext, is_usage_stats_enabled, usage_message +from multiprocessing import Process if ray: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -63,6 +64,7 @@ class LLMEngine: placement_group: Ray placement group for distributed execution. Required for distributed execution. log_stats: Whether to log statistics. + usage_context: Specified entry point, used for usage info collection """ def __init__( @@ -75,6 +77,7 @@ def __init__( lora_config: Optional[LoRAConfig], placement_group: Optional["PlacementGroup"], log_stats: bool, + usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT ) -> None: logger.info( "Initializing an LLM engine with config: " @@ -109,6 +112,11 @@ def __init__( self._init_tokenizer() self.seq_counter = Counter() + #If usage stat is enabled, collect relevant info. + if is_usage_stats_enabled(): + usage_message.report_usage(model_config.model, usage_context) + p = Process(usage_message.send_to_server()) + # Create the parallel GPU workers. if self.parallel_config.worker_use_ray: # Disable Ray usage stats collection. @@ -360,7 +368,7 @@ def _init_cache(self) -> None: self._run_workers("warm_up_model") @classmethod - def from_engine_args(cls, engine_args: EngineArgs) -> "LLMEngine": + def from_engine_args(cls, engine_args: EngineArgs, usage_context: UsageContext=UsageContext.UNKNOWN_CONTEXT) -> "LLMEngine": """Creates an LLM engine from the engine arguments.""" # Create the engine configs. 
engine_configs = engine_args.create_engine_configs() @@ -370,7 +378,9 @@ def from_engine_args(cls, engine_args: EngineArgs) -> "LLMEngine": # Create the LLM engine. engine = cls(*engine_configs, placement_group, - log_stats=not engine_args.disable_log_stats) + log_stats=not engine_args.disable_log_stats, + usage_context = usage_context + ) return engine def encode_request( diff --git a/vllm/entrypoints/api_server.py b/vllm/entrypoints/api_server.py index f7b8d258fae4..ae0df842bbe5 100644 --- a/vllm/entrypoints/api_server.py +++ b/vllm/entrypoints/api_server.py @@ -10,6 +10,7 @@ from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.sampling_params import SamplingParams from vllm.utils import random_uuid +from vllm.usage.usage_lib import UsageContext TIMEOUT_KEEP_ALIVE = 5 # seconds. app = FastAPI() @@ -85,9 +86,8 @@ async def stream_results() -> AsyncGenerator[bytes, None]: help="FastAPI root_path when app is behind a path based routing proxy") parser = AsyncEngineArgs.add_cli_args(parser) args = parser.parse_args() - engine_args = AsyncEngineArgs.from_cli_args(args) - engine = AsyncLLMEngine.from_engine_args(engine_args) + engine = AsyncLLMEngine.from_engine_args(engine_args, usage_context=UsageContext.API_SERVER) app.root_path = args.root_path uvicorn.run(app, diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index fc82018d18eb..52de89aa383d 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -9,7 +9,7 @@ from vllm.outputs import RequestOutput from vllm.sampling_params import SamplingParams from vllm.utils import Counter - +from vllm.usage.usage_lib import UsageContext class LLM: """An LLM for generating texts from given prompts and sampling parameters. @@ -106,7 +106,7 @@ def __init__( disable_custom_all_reduce=disable_custom_all_reduce, **kwargs, ) - self.llm_engine = LLMEngine.from_engine_args(engine_args) + self.llm_engine = LLMEngine.from_engine_args(engine_args,usage_context=UsageContext.LLM) self.request_counter = Counter() def get_tokenizer( diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index deb0fddd643c..6b1720842180 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -23,7 +23,7 @@ from vllm.logger import init_logger from vllm.entrypoints.openai.serving_chat import OpenAIServingChat from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion - +from vllm.usage.usage_lib import UsageContext TIMEOUT_KEEP_ALIVE = 5 # seconds openai_serving_chat: OpenAIServingChat = None @@ -212,9 +212,8 @@ async def authentication(request: Request, call_next): served_model = args.served_model_name else: served_model = args.model - engine_args = AsyncEngineArgs.from_cli_args(args) - engine = AsyncLLMEngine.from_engine_args(engine_args) + engine = AsyncLLMEngine.from_engine_args(engine_args, usage_context=UsageContext.OPENAI_API_SERVER) openai_serving_chat = OpenAIServingChat(engine, served_model, args.response_role, args.chat_template) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py new file mode 100644 index 000000000000..2d100c6b0cc1 --- /dev/null +++ b/vllm/usage/usage_lib.py @@ -0,0 +1,84 @@ +import requests +import os +import torch +import json +import platform +import sys +import pkg_resources +import requests +import datetime +from cloud_detect import provider +from typing import Optional +from enum import Enum +from pathlib import Path + + +_USAGE_STATS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 
'usage_stats.json') #File path to store usage data locally +_USAGE_STATS_ENABLED = None +_USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', 'https://stats.vllm.ai') +_USAGE_STATS_URL = "http://127.0.0.1:1234" #Placeholder for sending usage data to vector.dev http server + + +def is_usage_stats_enabled(): + """Determine whether or not we can send usage stats to the server. + The logic is as follows: + - By default, it should be enabled. + - Two environment variables can disable it: + - DO_NOT_TRACK=1 + - VLLM_NO_USAGE_STATS=1 + - A file in the home directory can disable it if it exists: + - $HOME/.config/vllm/do_not_track + """ + global _USAGE_STATS_ENABLED + if _USAGE_STATS_ENABLED is None: + do_not_track = os.environ.get('DO_NOT_TRACK', '0') == '1' + no_usage_stats = os.environ.get('VLLM_NO_USAGE_STATS', '0') == '1' + do_not_track_file = os.path.exists(os.path.expanduser('~/.config/vllm/do_not_track')) + + _USAGE_STATS_ENABLED = not (do_not_track or no_usage_stats or do_not_track_file) + return _USAGE_STATS_ENABLED + +def _get_current_timestamp_ns() -> int: + return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1e9) + +class UsageContext(Enum): + UNKNOWN_CONTEXT = "UNKNOWN_CONTEXT" + LLM = "LLM" + API_SERVER = "API_SERVER" + OPENAI_API_SERVER = "OPENAI_API_SERVER" + + +class UsageMessage: + def __init__(self) -> None: + self.gpu_name : Optional[str] = None + self.provider : Optional[str] = None + self.architecture : Optional[str] = None + self.platform : Optional[str] = None + self.model : Optional[str] = None + self.vllm_version : Optional[str] = None + self.context : Optional[str] = None + self.log_time : Optional[int] = None + + def report_usage(self, model: str, context: UsageContext) -> None: + self.context = context.value + self.gpu_name = torch.cuda.get_device_name() + self.provider = provider() + self.architecture = platform.machine() + self.platform = platform.platform() + self.vllm_version = pkg_resources.get_distribution("vllm").version + self.model = model + self.log_time = _get_current_timestamp_ns() + + def _write_to_file(self): + with open(_USAGE_STATS_FILE, "w") as outfile: + json.dump(vars(self), outfile) + + def send_to_server(self): + headers = {'Content-type': 'application/json'} + payload = json.dumps(vars(self)) + try: + response = requests.post(_USAGE_STATS_URL, data=payload, headers=headers) + except requests.exceptions.RequestException as e: + print("Usage Log Request Failed") + +usage_message = UsageMessage() \ No newline at end of file From f84ccaa85a41a1765683a1b68b49032034f11616 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Mon, 12 Feb 2024 22:44:36 -0800 Subject: [PATCH 12/47] Write usage to local file for testing --- vllm/usage/usage_lib.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 2d100c6b0cc1..7fb10f3c04c0 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -74,6 +74,7 @@ def _write_to_file(self): json.dump(vars(self), outfile) def send_to_server(self): + self._write_to_file() headers = {'Content-type': 'application/json'} payload = json.dumps(vars(self)) try: From b08ba86428543975fce4877cefcc7d328e93bc1b Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Mon, 12 Feb 2024 22:57:51 -0800 Subject: [PATCH 13/47] Fixed Formatting --- vllm/engine/llm_engine.py | 2 +- vllm/usage/usage_lib.py | 7 ++----- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 88ccaa866f3e..229dd68a76bd 100644 --- 
a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -115,7 +115,7 @@ def __init__( #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): usage_message.report_usage(model_config.model, usage_context) - p = Process(usage_message.send_to_server()) + Process(usage_message.send_to_server()) # Create the parallel GPU workers. if self.parallel_config.worker_use_ray: diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 7fb10f3c04c0..7a48a91d887a 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -1,16 +1,13 @@ -import requests import os import torch import json import platform -import sys import pkg_resources import requests import datetime from cloud_detect import provider from typing import Optional from enum import Enum -from pathlib import Path _USAGE_STATS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'usage_stats.json') #File path to store usage data locally @@ -78,8 +75,8 @@ def send_to_server(self): headers = {'Content-type': 'application/json'} payload = json.dumps(vars(self)) try: - response = requests.post(_USAGE_STATS_URL, data=payload, headers=headers) - except requests.exceptions.RequestException as e: + requests.post(_USAGE_STATS_URL, data=payload, headers=headers) + except requests.exceptions.RequestException: print("Usage Log Request Failed") usage_message = UsageMessage() \ No newline at end of file From 73b689a00ad121ace7491d3952cb6221142284a2 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Tue, 13 Feb 2024 10:13:14 -0800 Subject: [PATCH 14/47] formatting changes --- vllm/engine/arg_utils.py | 1 + vllm/engine/async_llm_engine.py | 14 ++++++---- vllm/engine/llm_engine.py | 32 +++++++++++---------- vllm/entrypoints/api_server.py | 3 +- vllm/entrypoints/llm.py | 4 ++- vllm/entrypoints/openai/api_server.py | 4 ++- vllm/usage/usage_lib.py | 40 ++++++++++++++++----------- 7 files changed, 59 insertions(+), 39 deletions(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 7ed9ba738b5b..d5e63e25d6e8 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -6,6 +6,7 @@ from vllm.config import (CacheConfig, DeviceConfig, ModelConfig, ParallelConfig, SchedulerConfig, LoRAConfig) + @dataclass class EngineArgs: """Arguments for vLLM engine.""" diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 72eb2dd5ae5f..70890e6cfca4 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -13,6 +13,7 @@ from vllm.outputs import RequestOutput from vllm.sampling_params import SamplingParams from vllm.usage.usage_lib import UsageContext + logger = init_logger(__name__) @@ -611,10 +612,12 @@ async def get_model_config(self) -> ModelConfig: return self.engine.get_model_config() @classmethod - def from_engine_args(cls, - engine_args: AsyncEngineArgs, - start_engine_loop: bool = True, - usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT) -> "AsyncLLMEngine": + def from_engine_args( + cls, + engine_args: AsyncEngineArgs, + start_engine_loop: bool = True, + usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT + ) -> "AsyncLLMEngine": """Creates an async LLM engine from the engine arguments.""" # Create the engine configs. 
engine_configs = engine_args.create_engine_configs() @@ -631,8 +634,7 @@ def from_engine_args(cls, log_stats=not engine_args.disable_log_stats, max_log_len=engine_args.max_log_len, start_engine_loop=start_engine_loop, - usage_context=usage_context - ) + usage_context=usage_context) return engine async def do_log_stats(self) -> None: diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 229dd68a76bd..486bb7bb7ee2 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -68,16 +68,16 @@ class LLMEngine: """ def __init__( - self, - model_config: ModelConfig, - cache_config: CacheConfig, - parallel_config: ParallelConfig, - scheduler_config: SchedulerConfig, - device_config: DeviceConfig, - lora_config: Optional[LoRAConfig], - placement_group: Optional["PlacementGroup"], - log_stats: bool, - usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT + self, + model_config: ModelConfig, + cache_config: CacheConfig, + parallel_config: ParallelConfig, + scheduler_config: SchedulerConfig, + device_config: DeviceConfig, + lora_config: Optional[LoRAConfig], + placement_group: Optional["PlacementGroup"], + log_stats: bool, + usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT ) -> None: logger.info( "Initializing an LLM engine with config: " @@ -115,7 +115,8 @@ def __init__( #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): usage_message.report_usage(model_config.model, usage_context) - Process(usage_message.send_to_server()) + p = Process(usage_message.send_to_server()) + p.start() # Create the parallel GPU workers. if self.parallel_config.worker_use_ray: @@ -368,7 +369,11 @@ def _init_cache(self) -> None: self._run_workers("warm_up_model") @classmethod - def from_engine_args(cls, engine_args: EngineArgs, usage_context: UsageContext=UsageContext.UNKNOWN_CONTEXT) -> "LLMEngine": + def from_engine_args( + cls, + engine_args: EngineArgs, + usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT + ) -> "LLMEngine": """Creates an LLM engine from the engine arguments.""" # Create the engine configs. engine_configs = engine_args.create_engine_configs() @@ -379,8 +384,7 @@ def from_engine_args(cls, engine_args: EngineArgs, usage_context: UsageContext=U engine = cls(*engine_configs, placement_group, log_stats=not engine_args.disable_log_stats, - usage_context = usage_context - ) + usage_context=usage_context) return engine def encode_request( diff --git a/vllm/entrypoints/api_server.py b/vllm/entrypoints/api_server.py index ae0df842bbe5..c065241579a7 100644 --- a/vllm/entrypoints/api_server.py +++ b/vllm/entrypoints/api_server.py @@ -87,7 +87,8 @@ async def stream_results() -> AsyncGenerator[bytes, None]: parser = AsyncEngineArgs.add_cli_args(parser) args = parser.parse_args() engine_args = AsyncEngineArgs.from_cli_args(args) - engine = AsyncLLMEngine.from_engine_args(engine_args, usage_context=UsageContext.API_SERVER) + engine = AsyncLLMEngine.from_engine_args( + engine_args, usage_context=UsageContext.API_SERVER) app.root_path = args.root_path uvicorn.run(app, diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 52de89aa383d..41a4ac13bb35 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -11,6 +11,7 @@ from vllm.utils import Counter from vllm.usage.usage_lib import UsageContext + class LLM: """An LLM for generating texts from given prompts and sampling parameters. 
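A usage note for the opt-out switches checked by is_usage_stats_enabled() above: a deployment can disable reporting as long as it does so before any of these entry points constructs an engine, since the result is cached in _USAGE_STATS_ENABLED on first use. A minimal sketch, where only the environment-variable names and the marker-file path come from this patch:

# Sketch only: opting out of usage reporting before vLLM builds an engine.
# VLLM_NO_USAGE_STATS, DO_NOT_TRACK and the do_not_track path are the values
# checked by is_usage_stats_enabled(); the rest is illustrative.
import os
from pathlib import Path

os.environ["VLLM_NO_USAGE_STATS"] = "1"  # or: os.environ["DO_NOT_TRACK"] = "1"

# Persistent opt-out: create the marker file once per home directory.
marker = Path("~/.config/vllm/do_not_track").expanduser()
marker.parent.mkdir(parents=True, exist_ok=True)
marker.touch()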
@@ -106,7 +107,8 @@ def __init__( disable_custom_all_reduce=disable_custom_all_reduce, **kwargs, ) - self.llm_engine = LLMEngine.from_engine_args(engine_args,usage_context=UsageContext.LLM) + self.llm_engine = LLMEngine.from_engine_args( + engine_args, usage_context=UsageContext.LLM) self.request_counter = Counter() def get_tokenizer( diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 6b1720842180..36fa3a9bedd8 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -24,6 +24,7 @@ from vllm.entrypoints.openai.serving_chat import OpenAIServingChat from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion from vllm.usage.usage_lib import UsageContext + TIMEOUT_KEEP_ALIVE = 5 # seconds openai_serving_chat: OpenAIServingChat = None @@ -213,7 +214,8 @@ async def authentication(request: Request, call_next): else: served_model = args.model engine_args = AsyncEngineArgs.from_cli_args(args) - engine = AsyncLLMEngine.from_engine_args(engine_args, usage_context=UsageContext.OPENAI_API_SERVER) + engine = AsyncLLMEngine.from_engine_args( + engine_args, usage_context=UsageContext.OPENAI_API_SERVER) openai_serving_chat = OpenAIServingChat(engine, served_model, args.response_role, args.chat_template) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 7a48a91d887a..13e70dcfe4fb 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -9,11 +9,13 @@ from typing import Optional from enum import Enum - -_USAGE_STATS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'usage_stats.json') #File path to store usage data locally +_USAGE_STATS_FILE = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + 'usage_stats.json') #File path to store usage data locally _USAGE_STATS_ENABLED = None -_USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', 'https://stats.vllm.ai') -_USAGE_STATS_URL = "http://127.0.0.1:1234" #Placeholder for sending usage data to vector.dev http server +_USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', + 'https://stats.vllm.ai') +_USAGE_STATS_URL = "http://127.0.0.1:1234" #Placeholder for sending usage data to vector.dev http server def is_usage_stats_enabled(): @@ -30,14 +32,18 @@ def is_usage_stats_enabled(): if _USAGE_STATS_ENABLED is None: do_not_track = os.environ.get('DO_NOT_TRACK', '0') == '1' no_usage_stats = os.environ.get('VLLM_NO_USAGE_STATS', '0') == '1' - do_not_track_file = os.path.exists(os.path.expanduser('~/.config/vllm/do_not_track')) + do_not_track_file = os.path.exists( + os.path.expanduser('~/.config/vllm/do_not_track')) - _USAGE_STATS_ENABLED = not (do_not_track or no_usage_stats or do_not_track_file) + _USAGE_STATS_ENABLED = not (do_not_track or no_usage_stats + or do_not_track_file) return _USAGE_STATS_ENABLED + def _get_current_timestamp_ns() -> int: return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1e9) + class UsageContext(Enum): UNKNOWN_CONTEXT = "UNKNOWN_CONTEXT" LLM = "LLM" @@ -46,15 +52,16 @@ class UsageContext(Enum): class UsageMessage: + def __init__(self) -> None: - self.gpu_name : Optional[str] = None - self.provider : Optional[str] = None - self.architecture : Optional[str] = None - self.platform : Optional[str] = None - self.model : Optional[str] = None - self.vllm_version : Optional[str] = None - self.context : Optional[str] = None - self.log_time : Optional[int] = None + self.gpu_name: Optional[str] = None + self.provider: Optional[str] = None + 
self.architecture: Optional[str] = None + self.platform: Optional[str] = None + self.model: Optional[str] = None + self.vllm_version: Optional[str] = None + self.context: Optional[str] = None + self.log_time: Optional[int] = None def report_usage(self, model: str, context: UsageContext) -> None: self.context = context.value @@ -67,7 +74,7 @@ def report_usage(self, model: str, context: UsageContext) -> None: self.log_time = _get_current_timestamp_ns() def _write_to_file(self): - with open(_USAGE_STATS_FILE, "w") as outfile: + with open(_USAGE_STATS_FILE, "w") as outfile: json.dump(vars(self), outfile) def send_to_server(self): @@ -79,4 +86,5 @@ def send_to_server(self): except requests.exceptions.RequestException: print("Usage Log Request Failed") -usage_message = UsageMessage() \ No newline at end of file + +usage_message = UsageMessage() From 9c9a18826d244da566b214b80535b0fc52b947e8 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Tue, 13 Feb 2024 13:15:29 -0800 Subject: [PATCH 15/47] Minor bug fixed --- vllm/usage/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 vllm/usage/__init__.py diff --git a/vllm/usage/__init__.py b/vllm/usage/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 From d2f84cf544b5af46171e3b93654560c44d1111c9 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Tue, 13 Feb 2024 13:40:19 -0800 Subject: [PATCH 16/47] tmp --- requirements.txt | 1 + vllm/engine/llm_engine.py | 6 +++--- vllm/usage/usage_lib.py | 2 ++ 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 37be5c168752..de9a78bf04db 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,7 @@ sentencepiece # Required for LLaMA tokenizer. numpy torch == 2.1.2 cloud-detect +requests transformers >= 4.37.0 # Required for Qwen2 xformers == 0.0.23.post1 # Required for CUDA 12.1. fastapi diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 486bb7bb7ee2..45d94d28e373 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -22,7 +22,7 @@ TokenizerGroup) from vllm.utils import Counter, set_cuda_visible_devices, get_ip, get_open_port, get_distributed_init_method from vllm.usage.usage_lib import UsageContext, is_usage_stats_enabled, usage_message -from multiprocessing import Process +from threading import Thread if ray: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -115,8 +115,8 @@ def __init__( #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): usage_message.report_usage(model_config.model, usage_context) - p = Process(usage_message.send_to_server()) - p.start() + # t = Thread(usage_message.send_to_server()) + # t.start() # Create the parallel GPU workers. 
if self.parallel_config.worker_use_ray: diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 13e70dcfe4fb..c6e83dfa65b6 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -73,6 +73,8 @@ def report_usage(self, model: str, context: UsageContext) -> None: self.model = model self.log_time = _get_current_timestamp_ns() + + def _write_to_file(self): with open(_USAGE_STATS_FILE, "w") as outfile: json.dump(vars(self), outfile) From eb48061d45b29fbf1685459c8ad528d08a3aa311 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Tue, 13 Feb 2024 18:49:39 -0800 Subject: [PATCH 17/47] Fixed Bug --- requirements.txt | 1 - vllm/engine/llm_engine.py | 6 +++--- vllm/usage/usage_lib.py | 37 +++++++++++++++++++++++++++---------- 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/requirements.txt b/requirements.txt index 1f5e268234fb..134e7990fc3e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,6 @@ ray >= 2.9 sentencepiece # Required for LLaMA tokenizer. numpy torch == 2.1.2 -cloud-detect requests transformers >= 4.37.0 # Required for Qwen2 xformers == 0.0.23.post1 # Required for CUDA 12.1. diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index c0a1edc2a62b..a711f246075d 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -114,9 +114,9 @@ def __init__( #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): - usage_message.report_usage(model_config.model, usage_context) - # t = Thread(usage_message.send_to_server()) - # t.start() + t = Thread(target=usage_message.report_usage, + args=(model_config.model, usage_context)) + t.start() # Create the parallel GPU workers. if self.parallel_config.worker_use_ray: diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index c6e83dfa65b6..690c973fa709 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -5,7 +5,7 @@ import pkg_resources import requests import datetime -from cloud_detect import provider +from pathlib import Path from typing import Optional from enum import Enum @@ -44,6 +44,27 @@ def _get_current_timestamp_ns() -> int: return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1e9) +def _detect_cloud_provider() -> str: + # Try detecting through vendor file + vendor_files = [ + '/sys/class/dmi/id/product_version', '/sys/class/dmi/id/bios_vendor', + '/sys/class/dmi/id/product_name', + '/sys/class/dmi/id/chassis_asset_tag', '/sys/class/dmi/id/sys_vendor' + ] + for vendor_file in vendor_files: + path = Path(vendor_file) + if path.is_file(): + if 'amazon' in path.read_text().lower(): + return "AWS" + elif 'Microsoft Corporation' in path.read_text(): + return "AZURE" + elif 'Google' in path.read_text(): + return "GCP" + elif 'OracleCloud' in path.read_text(): + return "OCI" + return "UNKNOWN" + + class UsageContext(Enum): UNKNOWN_CONTEXT = "UNKNOWN_CONTEXT" LLM = "LLM" @@ -66,20 +87,12 @@ def __init__(self) -> None: def report_usage(self, model: str, context: UsageContext) -> None: self.context = context.value self.gpu_name = torch.cuda.get_device_name() - self.provider = provider() + self.provider = _detect_cloud_provider() self.architecture = platform.machine() self.platform = platform.platform() self.vllm_version = pkg_resources.get_distribution("vllm").version self.model = model self.log_time = _get_current_timestamp_ns() - - - - def _write_to_file(self): - with open(_USAGE_STATS_FILE, "w") as outfile: - json.dump(vars(self), outfile) - - def send_to_server(self): self._write_to_file() headers = 
{'Content-type': 'application/json'} payload = json.dumps(vars(self)) @@ -88,5 +101,9 @@ def send_to_server(self): except requests.exceptions.RequestException: print("Usage Log Request Failed") + def _write_to_file(self): + with open(_USAGE_STATS_FILE, "w") as outfile: + json.dump(vars(self), outfile) + usage_message = UsageMessage() From 0684c06dec78e121b9b53c92c49e2fca2f56e533 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Tue, 13 Feb 2024 22:30:13 -0800 Subject: [PATCH 18/47] Add Google Cloud Run service URL --- vllm/usage/usage_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 690c973fa709..a6b7becd39fa 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -15,7 +15,7 @@ _USAGE_STATS_ENABLED = None _USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', 'https://stats.vllm.ai') -_USAGE_STATS_URL = "http://127.0.0.1:1234" #Placeholder for sending usage data to vector.dev http server +_USAGE_STATS_URL = "https://vector-dev-server-uzyrqjjayq-uc.a.run.app" #Placeholder for sending usage data to vector.dev http server def is_usage_stats_enabled(): From 8e9890ed37e14d1d54bd2225d6f0d63b1000a302 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 15 Feb 2024 18:57:04 -0800 Subject: [PATCH 19/47] More GPU CPU Mem info --- requirements.txt | 1 + vllm/engine/llm_engine.py | 5 +---- vllm/usage/usage_lib.py | 21 +++++++++++++++++++-- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/requirements.txt b/requirements.txt index 134e7990fc3e..41f48ce3c6a7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,7 @@ sentencepiece # Required for LLaMA tokenizer. numpy torch == 2.1.2 requests +psutil transformers >= 4.37.0 # Required for Qwen2 xformers == 0.0.23.post1 # Required for CUDA 12.1. fastapi diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index a711f246075d..1b2d35fc71a5 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -22,7 +22,6 @@ TokenizerGroup) from vllm.utils import Counter, set_cuda_visible_devices, get_ip, get_open_port, get_distributed_init_method from vllm.usage.usage_lib import UsageContext, is_usage_stats_enabled, usage_message -from threading import Thread if ray: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -114,9 +113,7 @@ def __init__( #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): - t = Thread(target=usage_message.report_usage, - args=(model_config.model, usage_context)) - t.start() + usage_message.report_usage(model_config.model, usage_context) # Create the parallel GPU workers. 
if self.parallel_config.worker_use_ray: diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index a6b7becd39fa..3053057f2295 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -5,6 +5,8 @@ import pkg_resources import requests import datetime +import psutil +from threading import Thread from pathlib import Path from typing import Optional from enum import Enum @@ -75,7 +77,7 @@ class UsageContext(Enum): class UsageMessage: def __init__(self) -> None: - self.gpu_name: Optional[str] = None + self.gpu: Optional[dict] = None self.provider: Optional[str] = None self.architecture: Optional[str] = None self.platform: Optional[str] = None @@ -83,16 +85,31 @@ def __init__(self) -> None: self.vllm_version: Optional[str] = None self.context: Optional[str] = None self.log_time: Optional[int] = None + #Logical CPU count + self.num_cpu: Optional[int] = None + self.total_memory: Optional[int] = None def report_usage(self, model: str, context: UsageContext) -> None: + t = Thread(target=usage_message._report_usage, args=(model, context)) + t.start() + + def _report_usage(self, model: str, context: UsageContext) -> None: self.context = context.value - self.gpu_name = torch.cuda.get_device_name() + self.gpu = dict() + for i in range(torch.cuda.device_count()): + k = torch.cuda.get_device_properties(i).name + if k in self.gpu: + self.gpu[k] += 1 + else: + self.gpu[k] = 1 self.provider = _detect_cloud_provider() self.architecture = platform.machine() self.platform = platform.platform() self.vllm_version = pkg_resources.get_distribution("vllm").version self.model = model self.log_time = _get_current_timestamp_ns() + self.num_cpu = os.cpu_count() + self.total_memory = psutil.virtual_memory().total self._write_to_file() headers = {'Content-type': 'application/json'} payload = json.dumps(vars(self)) From d910b0549ed94f2ce49e9b8d8a1a5a3113e38824 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Mon, 26 Feb 2024 23:17:04 -0800 Subject: [PATCH 20/47] Added context constant --- vllm/engine/async_llm_engine.py | 2 +- vllm/engine/llm_engine.py | 4 ++-- vllm/entrypoints/llm.py | 2 +- vllm/usage/usage_lib.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 70890e6cfca4..3be23b7cd479 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -616,7 +616,7 @@ def from_engine_args( cls, engine_args: AsyncEngineArgs, start_engine_loop: bool = True, - usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT ) -> "AsyncLLMEngine": """Creates an async LLM engine from the engine arguments.""" # Create the engine configs. diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 85c56983d6b0..d19eba2c86a9 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -76,7 +76,7 @@ def __init__( lora_config: Optional[LoRAConfig], placement_group: Optional["PlacementGroup"], log_stats: bool, - usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT ) -> None: logger.info( "Initializing an LLM engine with config: " @@ -373,7 +373,7 @@ def _init_cache(self) -> None: def from_engine_args( cls, engine_args: EngineArgs, - usage_context: UsageContext = UsageContext.UNKNOWN_CONTEXT + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT ) -> "LLMEngine": """Creates an LLM engine from the engine arguments.""" # Create the engine configs. 
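The report_usage() change in the diff above moves data collection and the HTTP POST onto a background thread, so engine construction is not held up by a slow or unreachable stats endpoint, and a failed request only logs a message instead of raising. Reduced to a sketch (the threading/requests pattern mirrors the patch; the function below and its parameters are illustrative, not vLLM API):

# Sketch only: fire-and-forget reporting off the engine's start-up path.
import json
import threading

import requests


def report_in_background(payload: dict, url: str) -> None:
    def _send() -> None:
        try:
            requests.post(url,
                          data=json.dumps(payload),
                          headers={"Content-type": "application/json"},
                          timeout=10)
        except requests.exceptions.RequestException:
            # Telemetry must never take the engine down; just note the failure.
            print("Usage report failed")

    threading.Thread(target=_send).start()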
diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 41a4ac13bb35..616a899b076d 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -108,7 +108,7 @@ def __init__( **kwargs, ) self.llm_engine = LLMEngine.from_engine_args( - engine_args, usage_context=UsageContext.LLM) + engine_args, usage_context=UsageContext.LLM_CLASS) self.request_counter = Counter() def get_tokenizer( diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 3053057f2295..8a0ba810cbbd 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -69,10 +69,10 @@ def _detect_cloud_provider() -> str: class UsageContext(Enum): UNKNOWN_CONTEXT = "UNKNOWN_CONTEXT" - LLM = "LLM" + LLM_CLASS = "LLM_CLASS" API_SERVER = "API_SERVER" OPENAI_API_SERVER = "OPENAI_API_SERVER" - + ENGINE_CONTEXT = "ENGINE_CONTEXT" class UsageMessage: From 8cf264b455eeae70903dab242ffc84dac0dc94b6 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Mon, 26 Feb 2024 23:22:37 -0800 Subject: [PATCH 21/47] Formatting & CPU Info --- vllm/usage/usage_lib.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 8a0ba810cbbd..cdfad3d5d6cb 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -74,6 +74,7 @@ class UsageContext(Enum): OPENAI_API_SERVER = "OPENAI_API_SERVER" ENGINE_CONTEXT = "ENGINE_CONTEXT" + class UsageMessage: def __init__(self) -> None: @@ -87,6 +88,7 @@ def __init__(self) -> None: self.log_time: Optional[int] = None #Logical CPU count self.num_cpu: Optional[int] = None + self.cpu_type: Optional[str] = None self.total_memory: Optional[int] = None def report_usage(self, model: str, context: UsageContext) -> None: @@ -109,6 +111,7 @@ def _report_usage(self, model: str, context: UsageContext) -> None: self.model = model self.log_time = _get_current_timestamp_ns() self.num_cpu = os.cpu_count() + self.cpu_type = platform.processor() self.total_memory = psutil.virtual_memory().total self._write_to_file() headers = {'Content-type': 'application/json'} From 93b877306097b666df0c55597d29536fb6e783af Mon Sep 17 00:00:00 2001 From: yhu422 <92338430+yhu422@users.noreply.github.com> Date: Mon, 26 Feb 2024 23:24:28 -0800 Subject: [PATCH 22/47] Update vllm/usage/usage_lib.py Co-authored-by: Michael Goin --- vllm/usage/usage_lib.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index cdfad3d5d6cb..2ec5b075b8e6 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -53,17 +53,21 @@ def _detect_cloud_provider() -> str: '/sys/class/dmi/id/product_name', '/sys/class/dmi/id/chassis_asset_tag', '/sys/class/dmi/id/sys_vendor' ] + # Mapping of identifiable strings to cloud providers + cloud_identifiers = { + 'amazon': "AWS", + 'microsoft corporation': "AZURE", + 'google': "GCP", + 'oraclecloud': "OCI", + } + for vendor_file in vendor_files: path = Path(vendor_file) if path.is_file(): - if 'amazon' in path.read_text().lower(): - return "AWS" - elif 'Microsoft Corporation' in path.read_text(): - return "AZURE" - elif 'Google' in path.read_text(): - return "GCP" - elif 'OracleCloud' in path.read_text(): - return "OCI" + file_content = path.read_text().lower() + for identifier, provider in cloud_identifiers.items(): + if identifier in file_content: + return provider return "UNKNOWN" From fe39b8410ca962d986994f9babecc484e378e494 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Tue, 27 Feb 2024 00:03:34 -0800 Subject: [PATCH 23/47] Added CPU info, new stat 
file path --- requirements.txt | 1 + vllm/engine/llm_engine.py | 3 +-- vllm/usage/usage_lib.py | 20 +++++++++++++------- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/requirements.txt b/requirements.txt index 9d671285b25a..cc53efaf8406 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,6 +6,7 @@ numpy torch == 2.1.2 requests psutil +py-cpuinfo transformers >= 4.38.0 # Required for Gemma. xformers == 0.0.23.post1 # Required for CUDA 12.1. fastapi diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index d19eba2c86a9..ba59d7f7e2de 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -76,8 +76,7 @@ def __init__( lora_config: Optional[LoRAConfig], placement_group: Optional["PlacementGroup"], log_stats: bool, - usage_context: UsageContext = UsageContext.ENGINE_CONTEXT - ) -> None: + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT) -> None: logger.info( "Initializing an LLM engine with config: " f"model={model_config.model!r}, " diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 2ec5b075b8e6..225411b45080 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -6,17 +6,22 @@ import requests import datetime import psutil +import cpuinfo from threading import Thread from pathlib import Path from typing import Optional from enum import Enum +_xdg_config_home = os.getenv('XDG_CONFIG_HOME', + os.path.expanduser('~/.config')) +_vllm_internal_path = 'vllm/usage_stats.json' + _USAGE_STATS_FILE = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - 'usage_stats.json') #File path to store usage data locally + _xdg_config_home, + _vllm_internal_path) #File path to store usage data locally _USAGE_STATS_ENABLED = None -_USAGE_STATS_SEVER = os.environ.get('VLLM_USAGE_STATS_SERVER', - 'https://stats.vllm.ai') +_USAGE_STATS_SERVER = os.environ.get('VLLM_USAGE_STATS_SERVER', + 'https://stats.vllm.ai') _USAGE_STATS_URL = "https://vector-dev-server-uzyrqjjayq-uc.a.run.app" #Placeholder for sending usage data to vector.dev http server @@ -60,7 +65,7 @@ def _detect_cloud_provider() -> str: 'google': "GCP", 'oraclecloud': "OCI", } - + for vendor_file in vendor_files: path = Path(vendor_file) if path.is_file(): @@ -115,7 +120,7 @@ def _report_usage(self, model: str, context: UsageContext) -> None: self.model = model self.log_time = _get_current_timestamp_ns() self.num_cpu = os.cpu_count() - self.cpu_type = platform.processor() + self.cpu_type = cpuinfo.get_cpu_info()['brand_raw'] self.total_memory = psutil.virtual_memory().total self._write_to_file() headers = {'Content-type': 'application/json'} @@ -126,7 +131,8 @@ def _report_usage(self, model: str, context: UsageContext) -> None: print("Usage Log Request Failed") def _write_to_file(self): - with open(_USAGE_STATS_FILE, "w") as outfile: + os.makedirs(os.path.dirname(_USAGE_STATS_FILE), exist_ok=True) + with open(_USAGE_STATS_FILE, "w+") as outfile: json.dump(vars(self), outfile) From fc6e374064a78a479fedfb56ee23fdd3140432d4 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Tue, 27 Feb 2024 13:57:49 -0800 Subject: [PATCH 24/47] added gpu memory --- vllm/usage/usage_lib.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 225411b45080..5df73d256348 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -87,7 +87,7 @@ class UsageContext(Enum): class UsageMessage: def __init__(self) -> None: - self.gpu: Optional[dict] = None + self.gpu_list: Optional[dict] = None 
self.provider: Optional[str] = None self.architecture: Optional[str] = None self.platform: Optional[str] = None @@ -106,13 +106,12 @@ def report_usage(self, model: str, context: UsageContext) -> None: def _report_usage(self, model: str, context: UsageContext) -> None: self.context = context.value - self.gpu = dict() + self.gpu_list = [] for i in range(torch.cuda.device_count()): - k = torch.cuda.get_device_properties(i).name - if k in self.gpu: - self.gpu[k] += 1 - else: - self.gpu[k] = 1 + device_property = torch.cuda.get_device_properties(i) + name = device_property.name + memory = device_property.total_memory + self.gpu_list.append((name, memory)) self.provider = _detect_cloud_provider() self.architecture = platform.machine() self.platform = platform.platform() @@ -123,7 +122,7 @@ def _report_usage(self, model: str, context: UsageContext) -> None: self.cpu_type = cpuinfo.get_cpu_info()['brand_raw'] self.total_memory = psutil.virtual_memory().total self._write_to_file() - headers = {'Content-type': 'application/json'} + headers = {'Content-type': 'application/x-ndjson'} payload = json.dumps(vars(self)) try: requests.post(_USAGE_STATS_URL, data=payload, headers=headers) From ab23171a01d1e30592d6702775ce87b9df87a029 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Tue, 27 Feb 2024 16:14:51 -0800 Subject: [PATCH 25/47] added memory --- vllm/usage/usage_lib.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 5df73d256348..8d25bf031d1e 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -109,9 +109,9 @@ def _report_usage(self, model: str, context: UsageContext) -> None: self.gpu_list = [] for i in range(torch.cuda.device_count()): device_property = torch.cuda.get_device_properties(i) - name = device_property.name - memory = device_property.total_memory - self.gpu_list.append((name, memory)) + gpu_name = device_property.name + gpu_memory = device_property.total_memory + self.gpu_list.append({"name": gpu_name, "memory": gpu_memory}) self.provider = _detect_cloud_provider() self.architecture = platform.machine() self.platform = platform.platform() From 686c84a93252200bf0f7a747a35fff8c03c14bc2 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 29 Feb 2024 17:00:35 -0800 Subject: [PATCH 26/47] Distinguish production/testing usage, added custom domain --- .buildkite/test-template.j2 | 2 ++ vllm/usage/usage_lib.py | 7 +++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.buildkite/test-template.j2 b/.buildkite/test-template.j2 index 7c1cf2b5a9b3..aef1e4af7766 100644 --- a/.buildkite/test-template.j2 +++ b/.buildkite/test-template.j2 @@ -45,6 +45,8 @@ steps: nvidia.com/gpu: "{{ step.num_gpus or default_num_gpu }}" {% endif %} env: + - name: VLLM_USAGE_SOURCE + value: ci-test - name: HF_TOKEN valueFrom: secretKeyRef: diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 8d25bf031d1e..c04c35599577 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -16,13 +16,14 @@ os.path.expanduser('~/.config')) _vllm_internal_path = 'vllm/usage_stats.json' +os.environ["VLLM_USAGE_SOURCE"] = "production" + _USAGE_STATS_FILE = os.path.join( _xdg_config_home, _vllm_internal_path) #File path to store usage data locally _USAGE_STATS_ENABLED = None _USAGE_STATS_SERVER = os.environ.get('VLLM_USAGE_STATS_SERVER', 'https://stats.vllm.ai') -_USAGE_STATS_URL = "https://vector-dev-server-uzyrqjjayq-uc.a.run.app" #Placeholder for sending usage data to vector.dev http server def 
is_usage_stats_enabled(): @@ -99,6 +100,7 @@ def __init__(self) -> None: self.num_cpu: Optional[int] = None self.cpu_type: Optional[str] = None self.total_memory: Optional[int] = None + self.source: Optional[str] = None; def report_usage(self, model: str, context: UsageContext) -> None: t = Thread(target=usage_message._report_usage, args=(model, context)) @@ -121,11 +123,12 @@ def _report_usage(self, model: str, context: UsageContext) -> None: self.num_cpu = os.cpu_count() self.cpu_type = cpuinfo.get_cpu_info()['brand_raw'] self.total_memory = psutil.virtual_memory().total + self.source = os.environ["VLLM_USAGE_SOURCE"] self._write_to_file() headers = {'Content-type': 'application/x-ndjson'} payload = json.dumps(vars(self)) try: - requests.post(_USAGE_STATS_URL, data=payload, headers=headers) + requests.post(_USAGE_STATS_SERVER, data=payload, headers=headers) except requests.exceptions.RequestException: print("Usage Log Request Failed") From 877eb789a9f2aeaa891875c081f58bf887f3a11c Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 29 Feb 2024 17:01:20 -0800 Subject: [PATCH 27/47] formatting --- vllm/usage/usage_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index c04c35599577..f91bfaecf5c3 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -100,7 +100,7 @@ def __init__(self) -> None: self.num_cpu: Optional[int] = None self.cpu_type: Optional[str] = None self.total_memory: Optional[int] = None - self.source: Optional[str] = None; + self.source: Optional[str] = None def report_usage(self, model: str, context: UsageContext) -> None: t = Thread(target=usage_message._report_usage, args=(model, context)) From e54f15b17bc2a980aee85d820f6628486697d691 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Tue, 5 Mar 2024 10:06:01 -0800 Subject: [PATCH 28/47] test/prod distinction --- vllm/usage/usage_lib.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index f91bfaecf5c3..b915eeda0249 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -16,8 +16,6 @@ os.path.expanduser('~/.config')) _vllm_internal_path = 'vllm/usage_stats.json' -os.environ["VLLM_USAGE_SOURCE"] = "production" - _USAGE_STATS_FILE = os.path.join( _xdg_config_home, _vllm_internal_path) #File path to store usage data locally @@ -123,7 +121,7 @@ def _report_usage(self, model: str, context: UsageContext) -> None: self.num_cpu = os.cpu_count() self.cpu_type = cpuinfo.get_cpu_info()['brand_raw'] self.total_memory = psutil.virtual_memory().total - self.source = os.environ["VLLM_USAGE_SOURCE"] + self.source = os.environ.get("VLLM_USAGE_SOURCE", "production") self._write_to_file() headers = {'Content-type': 'application/x-ndjson'} payload = json.dumps(vars(self)) From 4e35b3b393960443ddcaa5b7dead1655a1c4e325 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Tue, 5 Mar 2024 11:27:13 -0800 Subject: [PATCH 29/47] Remove cpuinfo import --- requirements.txt | 1 - vllm/usage/usage_lib.py | 12 ++++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/requirements.txt b/requirements.txt index 01bf77aa3f76..2b0f59161e3f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,6 @@ numpy torch == 2.1.2 requests psutil -py-cpuinfo transformers >= 4.38.0 # Required for Gemma. xformers == 0.0.23.post1 # Required for CUDA 12.1. 
fastapi diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index b915eeda0249..1fbf1ef24a52 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -6,7 +6,6 @@ import requests import datetime import psutil -import cpuinfo from threading import Thread from pathlib import Path from typing import Optional @@ -119,16 +118,17 @@ def _report_usage(self, model: str, context: UsageContext) -> None: self.model = model self.log_time = _get_current_timestamp_ns() self.num_cpu = os.cpu_count() - self.cpu_type = cpuinfo.get_cpu_info()['brand_raw'] + #Best effort reading processor name + self.cpu_type = platform.processor() self.total_memory = psutil.virtual_memory().total self.source = os.environ.get("VLLM_USAGE_SOURCE", "production") self._write_to_file() headers = {'Content-type': 'application/x-ndjson'} payload = json.dumps(vars(self)) - try: - requests.post(_USAGE_STATS_SERVER, data=payload, headers=headers) - except requests.exceptions.RequestException: - print("Usage Log Request Failed") + # try: + # requests.post(_USAGE_STATS_SERVER, data=payload, headers=headers) + # except requests.exceptions.RequestException: + # print("Usage Log Request Failed") def _write_to_file(self): os.makedirs(os.path.dirname(_USAGE_STATS_FILE), exist_ok=True) From a1597fbef51fde19ad41b825a6471dda711d2fc4 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Tue, 5 Mar 2024 11:28:45 -0800 Subject: [PATCH 30/47] ruff --- vllm/usage/usage_lib.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 1fbf1ef24a52..334715d2253e 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -125,10 +125,10 @@ def _report_usage(self, model: str, context: UsageContext) -> None: self._write_to_file() headers = {'Content-type': 'application/x-ndjson'} payload = json.dumps(vars(self)) - # try: - # requests.post(_USAGE_STATS_SERVER, data=payload, headers=headers) - # except requests.exceptions.RequestException: - # print("Usage Log Request Failed") + try: + requests.post(_USAGE_STATS_SERVER, data=payload, headers=headers) + except requests.exceptions.RequestException: + print("Usage Log Request Failed") def _write_to_file(self): os.makedirs(os.path.dirname(_USAGE_STATS_FILE), exist_ok=True) From 84353d41d1eb692bdc2e94c1705c3bf5a3d829a4 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 14 Mar 2024 10:37:14 -0700 Subject: [PATCH 31/47] fixed merge --- vllm/engine/async_llm_engine.py | 12 ++++++++---- vllm/engine/llm_engine.py | 17 +++++++++-------- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index ca6afd8a7b55..d2180f8ae6d3 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -319,9 +319,12 @@ def __init__(self, self._errored_with: Optional[BaseException] = None @classmethod - def from_engine_args(cls, - engine_args: AsyncEngineArgs, - start_engine_loop: bool = True) -> "AsyncLLMEngine": + def from_engine_args( + cls, + engine_args: AsyncEngineArgs, + start_engine_loop: bool = True, + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT + ) -> "AsyncLLMEngine": """Creates an async LLM engine from the engine arguments.""" # Create the engine configs. 
engine_configs = engine_args.create_engine_configs() @@ -343,7 +346,8 @@ def from_engine_args(cls, log_requests=not engine_args.disable_log_requests, log_stats=not engine_args.disable_log_stats, max_log_len=engine_args.max_log_len, - start_engine_loop=start_engine_loop) + start_engine_loop=start_engine_loop, + usage_context=usage_context) return engine @property diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index f985465b17b5..5bd282824602 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -20,12 +20,8 @@ from vllm.transformers_utils.tokenizer import (detokenize_incrementally, TokenizerGroup) from vllm.utils import Counter -from vllm.usage.usage_lib import UsageContext, is_usage_stats_enabled, usage_message -if ray: - from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy - -if TYPE_CHECKING: - from ray.util.placement_group import PlacementGroup +from vllm.usage.usage_lib import (UsageContext, is_usage_stats_enabled, + usage_message) logger = init_logger(__name__) _LOCAL_LOGGING_INTERVAL_SEC = 5 @@ -126,7 +122,11 @@ def __init__( self.stat_logger.info("cache_config", self.cache_config) @classmethod - def from_engine_args(cls, engine_args: EngineArgs) -> "LLMEngine": + def from_engine_args( + cls, + engine_args: EngineArgs, + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT + ) -> "LLMEngine": """Creates an LLM engine from the engine arguments.""" # Create the engine configs. engine_configs = engine_args.create_engine_configs() @@ -146,7 +146,8 @@ def from_engine_args(cls, engine_args: EngineArgs) -> "LLMEngine": # Create the LLM engine. engine = cls(*engine_configs, executor_class=executor_class, - log_stats=not engine_args.disable_log_stats) + log_stats=not engine_args.disable_log_stats, + usage_context=usage_context) return engine def __reduce__(self): From f2e69fc7e967cdb7e16bcd1acfc8eafd32ed2e42 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 14 Mar 2024 13:52:30 -0700 Subject: [PATCH 32/47] Pass up model architecture info for GPUExecutor --- vllm/engine/llm_engine.py | 7 ++++--- vllm/executor/gpu_executor.py | 4 +++- vllm/executor/ray_gpu_executor.py | 1 + vllm/model_executor/__init__.py | 3 ++- vllm/model_executor/model_loader.py | 11 +++++++---- vllm/model_executor/neuron_model_loader.py | 10 ++++++---- vllm/model_executor/utils.py | 7 +++++++ vllm/worker/model_runner.py | 4 ++-- vllm/worker/neuron_worker.py | 3 +++ vllm/worker/worker.py | 2 ++ 10 files changed, 37 insertions(+), 15 deletions(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 5bd282824602..9cfbbc7ade23 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -101,13 +101,14 @@ def __init__( self._init_tokenizer() self.seq_counter = Counter() - #If usage stat is enabled, collect relevant info. - if is_usage_stats_enabled(): - usage_message.report_usage(model_config.model, usage_context) self.model_executor = executor_class(model_config, cache_config, parallel_config, scheduler_config, device_config, lora_config) + + #If usage stat is enabled, collect relevant info. + if is_usage_stats_enabled(): + usage_message.report_usage(self.model_executor.architecture, usage_context) # Create the scheduler. 
# NOTE: the cache_config here have been updated with the numbers of diff --git a/vllm/executor/gpu_executor.py b/vllm/executor/gpu_executor.py index 9019ee7763c7..7968e680c710 100644 --- a/vllm/executor/gpu_executor.py +++ b/vllm/executor/gpu_executor.py @@ -37,7 +37,8 @@ def __init__( self.parallel_config = parallel_config self.scheduler_config = scheduler_config self.device_config = device_config - + # Available after calling _init_worker() + self.architecture = None # Instantiate the worker and load the model to GPU. self._init_worker() @@ -75,6 +76,7 @@ def _init_worker(self): ) self.driver_worker.init_model() self.driver_worker.load_model() + self.architecture = self.driver_worker.architecture def _init_cache(self) -> None: """Profiles the memory usage and initializes the KV cache. diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py index 82a2b456895e..4edd3e6d5667 100644 --- a/vllm/executor/ray_gpu_executor.py +++ b/vllm/executor/ray_gpu_executor.py @@ -48,6 +48,7 @@ def __init__( device_config: DeviceConfig, lora_config: Optional[LoRAConfig], ) -> None: + self.architecture = None self.model_config = model_config self.cache_config = cache_config self.lora_config = lora_config diff --git a/vllm/model_executor/__init__.py b/vllm/model_executor/__init__.py index cd6dbde5f54c..227e995ef68a 100644 --- a/vllm/model_executor/__init__.py +++ b/vllm/model_executor/__init__.py @@ -1,10 +1,11 @@ from vllm.model_executor.input_metadata import InputMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm.model_executor.utils import set_random_seed, get_model +from vllm.model_executor.utils import set_random_seed, get_model, get_architecture __all__ = [ "InputMetadata", "get_model", + "get_architecture", "SamplingMetadata", "set_random_seed", ] diff --git a/vllm/model_executor/model_loader.py b/vllm/model_executor/model_loader.py index cb64d80c8147..51fa50086c18 100644 --- a/vllm/model_executor/model_loader.py +++ b/vllm/model_executor/model_loader.py @@ -1,6 +1,6 @@ """Utilities for selecting and loading models.""" import contextlib -from typing import Type +from typing import Type, Tuple import torch import torch.nn as nn @@ -20,7 +20,7 @@ def _set_default_torch_dtype(dtype: torch.dtype): torch.set_default_dtype(old_dtype) -def _get_model_architecture(model_config: ModelConfig) -> Type[nn.Module]: +def _get_model_architecture(model_config: ModelConfig) -> Tuple[Type[nn.Module],str]: architectures = getattr(model_config.hf_config, "architectures", []) # Special handling for quantized Mixtral. # FIXME(woosuk): This is a temporary hack. @@ -31,16 +31,19 @@ def _get_model_architecture(model_config: ModelConfig) -> Type[nn.Module]: for arch in architectures: model_cls = ModelRegistry.load_model_cls(arch) if model_cls is not None: - return model_cls + return (model_cls, arch) raise ValueError( f"Model architectures {architectures} are not supported for now. " f"Supported architectures: {ModelRegistry.get_supported_archs()}") +def get_architecture(model_config: ModelConfig) -> str: + return _get_model_architecture(model_config)[1] + def get_model(model_config: ModelConfig, device_config: DeviceConfig, **kwargs) -> nn.Module: lora_config = kwargs.get("lora_config", None) - model_class = _get_model_architecture(model_config) + model_class = _get_model_architecture(model_config)[0] # Get the (maybe quantized) linear method. 
linear_method = None diff --git a/vllm/model_executor/neuron_model_loader.py b/vllm/model_executor/neuron_model_loader.py index c434b270a556..441ea8da0297 100644 --- a/vllm/model_executor/neuron_model_loader.py +++ b/vllm/model_executor/neuron_model_loader.py @@ -1,5 +1,5 @@ """Utilities for selecting and loading models.""" -from typing import Type +from typing import Type, Tuple import torch import torch.nn as nn @@ -21,16 +21,18 @@ } -def _get_model_architecture(config: PretrainedConfig) -> Type[nn.Module]: +def _get_model_architecture(config: PretrainedConfig) -> Tuple[Type[nn.Module], str]: architectures = getattr(config, "architectures", []) for arch in architectures: model_cls = ModelRegistry.load_model_cls(arch) if model_cls is not None: - return model_cls + return (model_cls, arch) raise ValueError( f"Model architectures {architectures} are not supported for now. " f"Supported architectures: {ModelRegistry.get_supported_archs()}") +def get_architecture(model_config: ModelConfig) -> str: + return _get_model_architecture(model_config.hf_config)[1] def get_model(model_config: ModelConfig, device_config: DeviceConfig, **kwargs) -> nn.Module: @@ -40,7 +42,7 @@ def get_model(model_config: ModelConfig, device_config: DeviceConfig, parallel_config = kwargs.get("parallel_config") scheduler_config = kwargs.get("scheduler_config") - model_class = _get_model_architecture(model_config.hf_config) + model_class = _get_model_architecture(model_config.hf_config)[0] linear_method = None # Create a model instance. diff --git a/vllm/model_executor/utils.py b/vllm/model_executor/utils.py index 0113e3edf067..72deba286619 100644 --- a/vllm/model_executor/utils.py +++ b/vllm/model_executor/utils.py @@ -50,3 +50,10 @@ def get_model(model_config: ModelConfig, device_config: DeviceConfig, f"vllm.model_executor.{model_loader_module}") get_model_fn = imported_model_loader.get_model return get_model_fn(model_config, device_config, **kwargs) + +def get_architecture(model_config: ModelConfig, device_config: DeviceConfig) -> str: + model_loader_module = DEVICE_TO_MODEL_LOADER_MAP[device_config.device_type] + imported_model_loader = importlib.import_module( + f"vllm.model_executor.{model_loader_module}") + get_architecture_fn = imported_model_loader.get_architecture + return get_architecture_fn(model_config) \ No newline at end of file diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 7eac576e3f0f..02410de1da5e 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -9,7 +9,7 @@ from vllm.config import (DeviceConfig, ModelConfig, LoRAConfig, ParallelConfig, SchedulerConfig) from vllm.logger import init_logger -from vllm.model_executor import get_model, InputMetadata, SamplingMetadata +from vllm.model_executor import get_architecture, get_model, InputMetadata, SamplingMetadata from vllm.model_executor.parallel_utils import cupy_utils from vllm.model_executor.parallel_utils.communication_op import ( broadcast_tensor_dict) @@ -91,7 +91,7 @@ def load_model(self) -> None: lora_config=self.lora_config, parallel_config=self.parallel_config, scheduler_config=self.scheduler_config) - + self.architecture = get_architecture(self.model_config, self.device_config) self.model_memory_usage = m.consumed_memory logger.info(f"Loading model weights took " f"{self.model_memory_usage / float(2**30):.4f} GB") diff --git a/vllm/worker/neuron_worker.py b/vllm/worker/neuron_worker.py index 340c079600c7..6db8c8d1d13e 100644 --- a/vllm/worker/neuron_worker.py +++ b/vllm/worker/neuron_worker.py 
@@ -51,6 +51,8 @@ def __init__( device_config, lora_config=self.lora_config, is_driver_worker=is_driver_worker) + #Available after call to load_model() + self.architecture = None # Uninitialized cache engine. Will be initialized by # self.init_cache_engine(). self.cache_config = None @@ -70,6 +72,7 @@ def init_model(self) -> None: def load_model(self): self.model_runner.load_model() + self.architecture = self.model_runner.architecture @torch.inference_mode() def profile_num_available_blocks( diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index 0dcd4018afa5..db563f5028d8 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -61,6 +61,7 @@ def __init__( lora_config=self.lora_config, kv_cache_dtype=kv_cache_dtype, is_driver_worker=is_driver_worker) + self.architecture = None # Uninitialized cache engine. Will be initialized by # self.init_cache_engine(). self.cache_config = None @@ -97,6 +98,7 @@ def init_model(self, cupy_port: Optional[int] = None) -> None: def load_model(self): self.model_runner.load_model() + self.architecture = self.model_runner.architecture @torch.inference_mode() def profile_num_available_blocks( From 4e1996740b38f2d4fd5e3081520c0326020b59bc Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 14 Mar 2024 13:54:09 -0700 Subject: [PATCH 33/47] formatting --- vllm/engine/llm_engine.py | 6 +++--- vllm/model_executor/model_loader.py | 6 ++++-- vllm/model_executor/neuron_model_loader.py | 5 ++++- vllm/model_executor/utils.py | 6 ++++-- vllm/worker/model_runner.py | 3 ++- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 9cfbbc7ade23..fd17158958c8 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -101,14 +101,14 @@ def __init__( self._init_tokenizer() self.seq_counter = Counter() - self.model_executor = executor_class(model_config, cache_config, parallel_config, scheduler_config, device_config, lora_config) - + #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): - usage_message.report_usage(self.model_executor.architecture, usage_context) + usage_message.report_usage(self.model_executor.architecture, + usage_context) # Create the scheduler. # NOTE: the cache_config here have been updated with the numbers of diff --git a/vllm/model_executor/model_loader.py b/vllm/model_executor/model_loader.py index 51fa50086c18..ef793767539e 100644 --- a/vllm/model_executor/model_loader.py +++ b/vllm/model_executor/model_loader.py @@ -20,7 +20,8 @@ def _set_default_torch_dtype(dtype: torch.dtype): torch.set_default_dtype(old_dtype) -def _get_model_architecture(model_config: ModelConfig) -> Tuple[Type[nn.Module],str]: +def _get_model_architecture( + model_config: ModelConfig) -> Tuple[Type[nn.Module], str]: architectures = getattr(model_config.hf_config, "architectures", []) # Special handling for quantized Mixtral. # FIXME(woosuk): This is a temporary hack. @@ -36,9 +37,10 @@ def _get_model_architecture(model_config: ModelConfig) -> Tuple[Type[nn.Module], f"Model architectures {architectures} are not supported for now. 
" f"Supported architectures: {ModelRegistry.get_supported_archs()}") + def get_architecture(model_config: ModelConfig) -> str: return _get_model_architecture(model_config)[1] - + def get_model(model_config: ModelConfig, device_config: DeviceConfig, **kwargs) -> nn.Module: diff --git a/vllm/model_executor/neuron_model_loader.py b/vllm/model_executor/neuron_model_loader.py index 441ea8da0297..84057289a0ad 100644 --- a/vllm/model_executor/neuron_model_loader.py +++ b/vllm/model_executor/neuron_model_loader.py @@ -21,7 +21,8 @@ } -def _get_model_architecture(config: PretrainedConfig) -> Tuple[Type[nn.Module], str]: +def _get_model_architecture( + config: PretrainedConfig) -> Tuple[Type[nn.Module], str]: architectures = getattr(config, "architectures", []) for arch in architectures: model_cls = ModelRegistry.load_model_cls(arch) @@ -31,9 +32,11 @@ def _get_model_architecture(config: PretrainedConfig) -> Tuple[Type[nn.Module], f"Model architectures {architectures} are not supported for now. " f"Supported architectures: {ModelRegistry.get_supported_archs()}") + def get_architecture(model_config: ModelConfig) -> str: return _get_model_architecture(model_config.hf_config)[1] + def get_model(model_config: ModelConfig, device_config: DeviceConfig, **kwargs) -> nn.Module: from transformers_neuronx.config import (NeuronConfig, diff --git a/vllm/model_executor/utils.py b/vllm/model_executor/utils.py index 72deba286619..6f70555f6b9c 100644 --- a/vllm/model_executor/utils.py +++ b/vllm/model_executor/utils.py @@ -51,9 +51,11 @@ def get_model(model_config: ModelConfig, device_config: DeviceConfig, get_model_fn = imported_model_loader.get_model return get_model_fn(model_config, device_config, **kwargs) -def get_architecture(model_config: ModelConfig, device_config: DeviceConfig) -> str: + +def get_architecture(model_config: ModelConfig, + device_config: DeviceConfig) -> str: model_loader_module = DEVICE_TO_MODEL_LOADER_MAP[device_config.device_type] imported_model_loader = importlib.import_module( f"vllm.model_executor.{model_loader_module}") get_architecture_fn = imported_model_loader.get_architecture - return get_architecture_fn(model_config) \ No newline at end of file + return get_architecture_fn(model_config) diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 02410de1da5e..1725d14b1479 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -91,7 +91,8 @@ def load_model(self) -> None: lora_config=self.lora_config, parallel_config=self.parallel_config, scheduler_config=self.scheduler_config) - self.architecture = get_architecture(self.model_config, self.device_config) + self.architecture = get_architecture(self.model_config, + self.device_config) self.model_memory_usage = m.consumed_memory logger.info(f"Loading model weights took " f"{self.model_memory_usage / float(2**30):.4f} GB") From f327f3c60539a69036e96aab35e0238078613a30 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 14 Mar 2024 13:56:15 -0700 Subject: [PATCH 34/47] formatting --- vllm/model_executor/__init__.py | 3 ++- vllm/worker/model_runner.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/__init__.py b/vllm/model_executor/__init__.py index 227e995ef68a..e5e5db437b2e 100644 --- a/vllm/model_executor/__init__.py +++ b/vllm/model_executor/__init__.py @@ -1,6 +1,7 @@ from vllm.model_executor.input_metadata import InputMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm.model_executor.utils import set_random_seed, 
get_model, get_architecture +from vllm.model_executor.utils import (set_random_seed, get_model, + get_architecture) __all__ = [ "InputMetadata", diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 1725d14b1479..3a20c42065ec 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -9,7 +9,8 @@ from vllm.config import (DeviceConfig, ModelConfig, LoRAConfig, ParallelConfig, SchedulerConfig) from vllm.logger import init_logger -from vllm.model_executor import get_architecture, get_model, InputMetadata, SamplingMetadata +from vllm.model_executor import (get_architecture, get_model, InputMetadata, + SamplingMetadata) from vllm.model_executor.parallel_utils import cupy_utils from vllm.model_executor.parallel_utils.communication_op import ( broadcast_tensor_dict) From d9c8a44513df1b166c32052a441bb1ce4664b608 Mon Sep 17 00:00:00 2001 From: Yile Hu Date: Thu, 14 Mar 2024 15:27:32 -0700 Subject: [PATCH 35/47] Get architecture directly from configs --- vllm/engine/llm_engine.py | 5 +++-- vllm/executor/gpu_executor.py | 3 --- vllm/executor/ray_gpu_executor.py | 1 - vllm/worker/model_runner.py | 5 +---- vllm/worker/neuron_worker.py | 3 --- vllm/worker/worker.py | 2 -- 6 files changed, 4 insertions(+), 15 deletions(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index fd17158958c8..060026e41a9d 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -11,6 +11,7 @@ from vllm.engine.arg_utils import EngineArgs from vllm.executor.executor_base import ExecutorBase from vllm.engine.metrics import StatLogger, Stats +from vllm.model_executor import get_architecture from vllm.engine.ray_utils import initialize_ray_cluster from vllm.logger import init_logger from vllm.outputs import RequestOutput @@ -107,8 +108,8 @@ def __init__( #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): - usage_message.report_usage(self.model_executor.architecture, - usage_context) + usage_message.report_usage( + get_architecture(model_config, device_config), usage_context) # Create the scheduler. # NOTE: the cache_config here have been updated with the numbers of diff --git a/vllm/executor/gpu_executor.py b/vllm/executor/gpu_executor.py index 7968e680c710..719c96ded123 100644 --- a/vllm/executor/gpu_executor.py +++ b/vllm/executor/gpu_executor.py @@ -37,8 +37,6 @@ def __init__( self.parallel_config = parallel_config self.scheduler_config = scheduler_config self.device_config = device_config - # Available after calling _init_worker() - self.architecture = None # Instantiate the worker and load the model to GPU. self._init_worker() @@ -76,7 +74,6 @@ def _init_worker(self): ) self.driver_worker.init_model() self.driver_worker.load_model() - self.architecture = self.driver_worker.architecture def _init_cache(self) -> None: """Profiles the memory usage and initializes the KV cache. 
diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py index 4edd3e6d5667..82a2b456895e 100644 --- a/vllm/executor/ray_gpu_executor.py +++ b/vllm/executor/ray_gpu_executor.py @@ -48,7 +48,6 @@ def __init__( device_config: DeviceConfig, lora_config: Optional[LoRAConfig], ) -> None: - self.architecture = None self.model_config = model_config self.cache_config = cache_config self.lora_config = lora_config diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 3a20c42065ec..6fa172773399 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -9,8 +9,7 @@ from vllm.config import (DeviceConfig, ModelConfig, LoRAConfig, ParallelConfig, SchedulerConfig) from vllm.logger import init_logger -from vllm.model_executor import (get_architecture, get_model, InputMetadata, - SamplingMetadata) +from vllm.model_executor import get_model, InputMetadata, SamplingMetadata from vllm.model_executor.parallel_utils import cupy_utils from vllm.model_executor.parallel_utils.communication_op import ( broadcast_tensor_dict) @@ -92,8 +91,6 @@ def load_model(self) -> None: lora_config=self.lora_config, parallel_config=self.parallel_config, scheduler_config=self.scheduler_config) - self.architecture = get_architecture(self.model_config, - self.device_config) self.model_memory_usage = m.consumed_memory logger.info(f"Loading model weights took " f"{self.model_memory_usage / float(2**30):.4f} GB") diff --git a/vllm/worker/neuron_worker.py b/vllm/worker/neuron_worker.py index 6db8c8d1d13e..340c079600c7 100644 --- a/vllm/worker/neuron_worker.py +++ b/vllm/worker/neuron_worker.py @@ -51,8 +51,6 @@ def __init__( device_config, lora_config=self.lora_config, is_driver_worker=is_driver_worker) - #Available after call to load_model() - self.architecture = None # Uninitialized cache engine. Will be initialized by # self.init_cache_engine(). self.cache_config = None @@ -72,7 +70,6 @@ def init_model(self) -> None: def load_model(self): self.model_runner.load_model() - self.architecture = self.model_runner.architecture @torch.inference_mode() def profile_num_available_blocks( diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index db563f5028d8..0dcd4018afa5 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -61,7 +61,6 @@ def __init__( lora_config=self.lora_config, kv_cache_dtype=kv_cache_dtype, is_driver_worker=is_driver_worker) - self.architecture = None # Uninitialized cache engine. Will be initialized by # self.init_cache_engine(). self.cache_config = None @@ -98,7 +97,6 @@ def init_model(self, cupy_port: Optional[int] = None) -> None: def load_model(self): self.model_runner.load_model() - self.architecture = self.model_runner.architecture @torch.inference_mode() def profile_num_available_blocks( From f34259aa0d880a28cab3baa5b86b44b414e7681a Mon Sep 17 00:00:00 2001 From: simon-mo Date: Sun, 17 Mar 2024 00:21:08 +0000 Subject: [PATCH 36/47] edits round --- requirements.txt | 1 + vllm/engine/llm_engine.py | 49 +++++++--- vllm/executor/gpu_executor.py | 1 + vllm/usage/usage_lib.py | 162 +++++++++++++++++++++------------- vllm/worker/model_runner.py | 1 + 5 files changed, 143 insertions(+), 71 deletions(-) diff --git a/requirements.txt b/requirements.txt index 4089aa80db01..adbef420a588 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,6 +6,7 @@ numpy torch == 2.1.2 requests psutil +py-cpuinfo transformers >= 4.38.0 # Required for Gemma. xformers == 0.0.23.post1 # Required for CUDA 12.1. 
fastapi diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index bac0e40a46dc..d8239eb8fa25 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -59,16 +59,17 @@ class LLMEngine: """ def __init__( - self, - model_config: ModelConfig, - cache_config: CacheConfig, - parallel_config: ParallelConfig, - scheduler_config: SchedulerConfig, - device_config: DeviceConfig, - lora_config: Optional[LoRAConfig], - executor_class: Type[ExecutorBase], - log_stats: bool, - usage_context: UsageContext = UsageContext.ENGINE_CONTEXT) -> None: + self, + model_config: ModelConfig, + cache_config: CacheConfig, + parallel_config: ParallelConfig, + scheduler_config: SchedulerConfig, + device_config: DeviceConfig, + lora_config: Optional[LoRAConfig], + executor_class: Type[ExecutorBase], + log_stats: bool, + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, + ) -> None: logger.info( f"Initializing an LLM engine (v{vllm.__version__}) with config: " f"model={model_config.model!r}, " @@ -110,7 +111,33 @@ def __init__( #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): usage_message.report_usage( - get_architecture(model_config, device_config), usage_context) + get_architecture(model_config, device_config), + usage_context, + extra_kvs={ + # Common configuration + "dtype": + str(model_config.dtype), + "tensor_parallel_size": + parallel_config.tensor_parallel_size, + "block_size": + cache_config.block_size, + "gpu_memory_utilization": + cache_config.gpu_memory_utilization, + # Quantization + "quantization": + model_config.quantization, + "kv_cache_dtype": + cache_config.cache_dtype, + # Feature flags + "enable_lora": + bool(lora_config), + "enable_prefix_caching": + cache_config.enable_prefix_caching, + "enforce_eager": + model_config.enforce_eager, + "disable_custom_all_reduce": + parallel_config.disable_custom_all_reduce, + }) # Ping the tokenizer to ensure liveness if it runs in a # different process. diff --git a/vllm/executor/gpu_executor.py b/vllm/executor/gpu_executor.py index 719c96ded123..9019ee7763c7 100644 --- a/vllm/executor/gpu_executor.py +++ b/vllm/executor/gpu_executor.py @@ -37,6 +37,7 @@ def __init__( self.parallel_config = parallel_config self.scheduler_config = scheduler_config self.device_config = device_config + # Instantiate the worker and load the model to GPU. 
self._init_worker() diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 334715d2253e..843f88fff779 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -1,26 +1,26 @@ -import os -import torch +import datetime import json +import logging +import os import platform -import pkg_resources -import requests -import datetime -import psutil -from threading import Thread -from pathlib import Path -from typing import Optional from enum import Enum +from pathlib import Path +from threading import Thread +from typing import Dict, Optional -_xdg_config_home = os.getenv('XDG_CONFIG_HOME', - os.path.expanduser('~/.config')) -_vllm_internal_path = 'vllm/usage_stats.json' +import cpuinfo +import pkg_resources +import psutil +import requests +import torch -_USAGE_STATS_FILE = os.path.join( - _xdg_config_home, - _vllm_internal_path) #File path to store usage data locally +_config_home = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config")) +_USAGE_STATS_JSON_PATH = os.path.join(_config_home, "vllm/usage_stats.json") +_USAGE_STATS_DO_NOT_TRACK_PATH = os.path.join(_config_home, + "vllm/do_not_track") _USAGE_STATS_ENABLED = None -_USAGE_STATS_SERVER = os.environ.get('VLLM_USAGE_STATS_SERVER', - 'https://stats.vllm.ai') +_USAGE_STATS_SERVER = os.environ.get("VLLM_USAGE_STATS_SERVER", + "https://stats.vllm.ai") def is_usage_stats_enabled(): @@ -35,10 +35,9 @@ def is_usage_stats_enabled(): """ global _USAGE_STATS_ENABLED if _USAGE_STATS_ENABLED is None: - do_not_track = os.environ.get('DO_NOT_TRACK', '0') == '1' - no_usage_stats = os.environ.get('VLLM_NO_USAGE_STATS', '0') == '1' - do_not_track_file = os.path.exists( - os.path.expanduser('~/.config/vllm/do_not_track')) + do_not_track = os.environ.get("DO_NOT_TRACK", "0") == "1" + no_usage_stats = os.environ.get("VLLM_NO_USAGE_STATS", "0") == "1" + do_not_track_file = os.path.exists(_USAGE_STATS_DO_NOT_TRACK_PATH) _USAGE_STATS_ENABLED = not (do_not_track or no_usage_stats or do_not_track_file) @@ -52,16 +51,16 @@ def _get_current_timestamp_ns() -> int: def _detect_cloud_provider() -> str: # Try detecting through vendor file vendor_files = [ - '/sys/class/dmi/id/product_version', '/sys/class/dmi/id/bios_vendor', - '/sys/class/dmi/id/product_name', - '/sys/class/dmi/id/chassis_asset_tag', '/sys/class/dmi/id/sys_vendor' + "/sys/class/dmi/id/product_version", "/sys/class/dmi/id/bios_vendor", + "/sys/class/dmi/id/product_name", + "/sys/class/dmi/id/chassis_asset_tag", "/sys/class/dmi/id/sys_vendor" ] # Mapping of identifiable strings to cloud providers cloud_identifiers = { - 'amazon': "AWS", - 'microsoft corporation': "AZURE", - 'google': "GCP", - 'oraclecloud': "OCI", + "amazon": "AWS", + "microsoft corporation": "AZURE", + "google": "GCP", + "oraclecloud": "OCI", } for vendor_file in vendor_files: @@ -71,10 +70,19 @@ def _detect_cloud_provider() -> str: for identifier, provider in cloud_identifiers.items(): if identifier in file_content: return provider + + # Try detecting through environment variables + env_to_cloud_provider = { + "RUNPOD_DC_ID": "RUNPOD", + } + for env_var, provider in env_to_cloud_provider.items(): + if os.environ.get(env_var): + return provider + return "UNKNOWN" -class UsageContext(Enum): +class UsageContext(str, Enum): UNKNOWN_CONTEXT = "UNKNOWN_CONTEXT" LLM_CLASS = "LLM_CLASS" API_SERVER = "API_SERVER" @@ -83,57 +91,91 @@ class UsageContext(Enum): class UsageMessage: + """Collect platform information and send it to the usage stats server.""" def __init__(self) -> None: - self.gpu_list: Optional[dict] = 
None + # NOTE: vLLM's server _only_ support flat KV pair. + # Do not use nested fields. + + # Environment Information self.provider: Optional[str] = None + self.num_cpu: Optional[int] = None + self.cpu_type: Optional[str] = None + self.cpu_family_model_stepping: Optional[str] = None + self.total_memory: Optional[int] = None self.architecture: Optional[str] = None self.platform: Optional[str] = None - self.model: Optional[str] = None + self.gpu_count: Optional[int] = None + self.gpu_type: Optional[str] = None + self.gpu_memory_per_device: Optional[int] = None + + # vLLM Information + self.model_architecture: Optional[str] = None self.vllm_version: Optional[str] = None self.context: Optional[str] = None + + # Metadata self.log_time: Optional[int] = None - #Logical CPU count - self.num_cpu: Optional[int] = None - self.cpu_type: Optional[str] = None - self.total_memory: Optional[int] = None self.source: Optional[str] = None - def report_usage(self, model: str, context: UsageContext) -> None: - t = Thread(target=usage_message._report_usage, args=(model, context)) + def report_usage(self, + model_architecture: str, + usage_context: UsageContext, + extra_kvs: Dict[str, any] = None) -> None: + t = Thread(target=self._report_usage_once, + args=(model_architecture, usage_context, extra_kvs or {}), + daemon=True) t.start() - def _report_usage(self, model: str, context: UsageContext) -> None: - self.context = context.value - self.gpu_list = [] - for i in range(torch.cuda.device_count()): - device_property = torch.cuda.get_device_properties(i) - gpu_name = device_property.name - gpu_memory = device_property.total_memory - self.gpu_list.append({"name": gpu_name, "memory": gpu_memory}) + def _report_usage_once(self, model_architecture: str, + usage_context: UsageContext, + extra_kvs: Dict[str, any]) -> None: + # Platform information + if torch.cuda.is_available(): + device_property = torch.cuda.get_device_properties(0) + self.gpu_count = torch.cuda.device_count() + self.gpu_type = device_property.name + self.gpu_memory_per_device = device_property.total_memory self.provider = _detect_cloud_provider() self.architecture = platform.machine() self.platform = platform.platform() + self.total_memory = psutil.virtual_memory().total + + info = cpuinfo.get_cpu_info() + self.num_cpu = info.get("count", None) + self.cpu_type = info.get("brand_raw", "") + self.cpu_family_model_stepping = ",".join([ + str(info.get("family", "")), + str(info.get("model", "")), + str(info.get("stepping", "")) + ]) + + # vLLM information + self.context = usage_context.value self.vllm_version = pkg_resources.get_distribution("vllm").version - self.model = model + self.model_architecture = model_architecture + + # Metadata self.log_time = _get_current_timestamp_ns() - self.num_cpu = os.cpu_count() - #Best effort reading processor name - self.cpu_type = platform.processor() - self.total_memory = psutil.virtual_memory().total self.source = os.environ.get("VLLM_USAGE_SOURCE", "production") - self._write_to_file() - headers = {'Content-type': 'application/x-ndjson'} - payload = json.dumps(vars(self)) + + data = vars(self) + if extra_kvs: + data.update(extra_kvs) + + self._write_to_file(data) try: - requests.post(_USAGE_STATS_SERVER, data=payload, headers=headers) + requests.post(_USAGE_STATS_SERVER, json=data) except requests.exceptions.RequestException: - print("Usage Log Request Failed") - - def _write_to_file(self): - os.makedirs(os.path.dirname(_USAGE_STATS_FILE), exist_ok=True) - with open(_USAGE_STATS_FILE, "w+") as outfile: - 
json.dump(vars(self), outfile) + # silently ignore unless we are using debug log + logging.debug("Failed to send usage data to server") + + def _write_to_file(self, data): + os.makedirs(os.path.dirname(_USAGE_STATS_JSON_PATH), exist_ok=True) + Path(_USAGE_STATS_JSON_PATH).touch(exist_ok=True) + with open(_USAGE_STATS_JSON_PATH, "a") as f: + json.dump(data, f) + f.write("\n") usage_message = UsageMessage() diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 6fa172773399..7eac576e3f0f 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -91,6 +91,7 @@ def load_model(self) -> None: lora_config=self.lora_config, parallel_config=self.parallel_config, scheduler_config=self.scheduler_config) + self.model_memory_usage = m.consumed_memory logger.info(f"Loading model weights took " f"{self.model_memory_usage / float(2**30):.4f} GB") From 30df77c10cf92ce94c14e13e2c9613b380b8ceb7 Mon Sep 17 00:00:00 2001 From: simon-mo Date: Sun, 17 Mar 2024 07:13:45 +0000 Subject: [PATCH 37/47] ruff --- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index 0531e1f01d33..6f1f2faf54db 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,6 @@ import io import os import re -import shutil import subprocess import warnings from pathlib import Path From be91babf5fdce5b93fe6558ee0f2cca876d29ec8 Mon Sep 17 00:00:00 2001 From: simon-mo Date: Thu, 28 Mar 2024 21:34:25 +0000 Subject: [PATCH 38/47] fix format --- vllm/engine/async_llm_engine.py | 2 +- vllm/engine/llm_engine.py | 4 ++-- vllm/entrypoints/api_server.py | 2 +- vllm/entrypoints/llm.py | 2 +- vllm/entrypoints/openai/api_server.py | 2 +- vllm/model_executor/model_loader.py | 2 +- vllm/model_executor/neuron_model_loader.py | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index bbb8dcfcfd37..ece4a9aed12c 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -15,8 +15,8 @@ from vllm.lora.request import LoRARequest from vllm.outputs import RequestOutput from vllm.sampling_params import SamplingParams -from vllm.usage.usage_lib import UsageContext from vllm.sequence import MultiModalData +from vllm.usage.usage_lib import UsageContext logger = init_logger(__name__) ENGINE_ITERATION_TIMEOUT_S = int( diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 61f15acd71c8..d6fc9bd89b20 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -9,11 +9,11 @@ from vllm.core.scheduler import Scheduler, SchedulerOutputs from vllm.engine.arg_utils import EngineArgs from vllm.engine.metrics import StatLogger, Stats -from vllm.model_executor import get_architecture from vllm.engine.ray_utils import initialize_ray_cluster from vllm.executor.executor_base import ExecutorBase from vllm.logger import init_logger from vllm.lora.request import LoRARequest +from vllm.model_executor import get_architecture from vllm.outputs import RequestOutput from vllm.sampling_params import SamplingParams from vllm.sequence import (MultiModalData, SamplerOutput, Sequence, @@ -22,9 +22,9 @@ from vllm.transformers_utils.detokenizer import Detokenizer from vllm.transformers_utils.tokenizer_group import (BaseTokenizerGroup, get_tokenizer_group) -from vllm.utils import Counter from vllm.usage.usage_lib import (UsageContext, is_usage_stats_enabled, usage_message) +from vllm.utils import Counter logger = init_logger(__name__) _LOCAL_LOGGING_INTERVAL_SEC = 5 diff --git 
a/vllm/entrypoints/api_server.py b/vllm/entrypoints/api_server.py index 7a3f3b591b8d..2a47eae112c1 100644 --- a/vllm/entrypoints/api_server.py +++ b/vllm/entrypoints/api_server.py @@ -18,8 +18,8 @@ from vllm.engine.arg_utils import AsyncEngineArgs from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.sampling_params import SamplingParams -from vllm.utils import random_uuid from vllm.usage.usage_lib import UsageContext +from vllm.utils import random_uuid TIMEOUT_KEEP_ALIVE = 5 # seconds. app = FastAPI() diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index a06ca7b5739a..5777e8179a1c 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -10,8 +10,8 @@ from vllm.outputs import RequestOutput from vllm.sampling_params import SamplingParams from vllm.sequence import MultiModalData -from vllm.utils import Counter from vllm.usage.usage_lib import UsageContext +from vllm.utils import Counter class LLM: diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 3a3146a18e51..e550943c8872 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -21,8 +21,8 @@ CompletionRequest, ErrorResponse) from vllm.entrypoints.openai.serving_chat import OpenAIServingChat from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion -from vllm.usage.usage_lib import UsageContext from vllm.logger import init_logger +from vllm.usage.usage_lib import UsageContext TIMEOUT_KEEP_ALIVE = 5 # seconds diff --git a/vllm/model_executor/model_loader.py b/vllm/model_executor/model_loader.py index 8e345f8fe787..93f2867a84f0 100644 --- a/vllm/model_executor/model_loader.py +++ b/vllm/model_executor/model_loader.py @@ -1,6 +1,6 @@ """Utilities for selecting and loading models.""" import contextlib -from typing import Type, Tuple +from typing import Tuple, Type import torch import torch.nn as nn diff --git a/vllm/model_executor/neuron_model_loader.py b/vllm/model_executor/neuron_model_loader.py index 59568da15d4c..e71968d77804 100644 --- a/vllm/model_executor/neuron_model_loader.py +++ b/vllm/model_executor/neuron_model_loader.py @@ -1,7 +1,7 @@ """Utilities for selecting and loading neuron models.""" import importlib import os -from typing import Optional, Type, Tuple +from typing import Optional, Tuple, Type import torch import torch.nn as nn From 4f047435c5358bc02e30048222293b62c737b5f7 Mon Sep 17 00:00:00 2001 From: simon-mo Date: Thu, 28 Mar 2024 22:02:20 +0000 Subject: [PATCH 39/47] finish all code level functionality --- Dockerfile | 6 +++--- vllm/engine/llm_engine.py | 4 ++-- vllm/model_executor/model_loader.py | 4 ++-- vllm/usage/usage_lib.py | 28 +++++++++++++++++++++++++++- 4 files changed, 34 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index 20bbff34b7fc..d9ed7ff19e47 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,8 +38,6 @@ RUN --mount=type=cache,target=/root/.cache/pip \ # copy input files COPY csrc csrc COPY setup.py setup.py -COPY cmake cmake -COPY CMakeLists.txt CMakeLists.txt COPY requirements.txt requirements.txt COPY pyproject.toml pyproject.toml COPY vllm/__init__.py vllm/__init__.py @@ -97,7 +95,7 @@ RUN --mount=type=cache,target=/root/.cache/pip VLLM_USE_PRECOMPILED=1 pip instal #################### RUNTIME BASE IMAGE #################### # We used base cuda image because pytorch installs its own cuda libraries. 
-# However pynccl depends on cuda libraries so we had to switch to the runtime image +# However cupy depends on cuda libraries so we had to switch to the runtime image # In the future it would be nice to get a container with pytorch and cuda without duplicating cuda FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS vllm-base @@ -127,5 +125,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \ COPY --from=build /workspace/vllm/*.so /workspace/vllm/ COPY vllm vllm +ENV VLLM_USAGE_SOURCE production-docker-image + ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"] #################### OPENAI API SERVER #################### diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index d6fc9bd89b20..ef3287e562b4 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -13,7 +13,7 @@ from vllm.executor.executor_base import ExecutorBase from vllm.logger import init_logger from vllm.lora.request import LoRARequest -from vllm.model_executor import get_architecture +from vllm.model_executor.model_loader import get_architecture_class_name from vllm.outputs import RequestOutput from vllm.sampling_params import SamplingParams from vllm.sequence import (MultiModalData, SamplerOutput, Sequence, @@ -116,7 +116,7 @@ def __init__( #If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): usage_message.report_usage( - get_architecture(model_config, device_config), + get_architecture_class_name(model_config), usage_context, extra_kvs={ # Common configuration diff --git a/vllm/model_executor/model_loader.py b/vllm/model_executor/model_loader.py index 93f2867a84f0..2745dbd89ab0 100644 --- a/vllm/model_executor/model_loader.py +++ b/vllm/model_executor/model_loader.py @@ -43,7 +43,7 @@ def _get_model_architecture( f"Supported architectures: {ModelRegistry.get_supported_archs()}") -def get_architecture(model_config: ModelConfig) -> str: +def get_architecture_class_name(model_config: ModelConfig) -> str: return _get_model_architecture(model_config)[1] @@ -51,7 +51,7 @@ def get_model(model_config: ModelConfig, device_config: DeviceConfig, **kwargs) -> nn.Module: lora_config = kwargs.get("lora_config", None) vision_language_config = kwargs.get("vision_language_config", None) - model_class = _get_model_architecture(model_config) + model_class = _get_model_architecture(model_config)[0] # Get the (maybe quantized) linear method. linear_method = None diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 843f88fff779..3c0240b0234f 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -7,6 +7,8 @@ from pathlib import Path from threading import Thread from typing import Dict, Optional +import time +from uuid import uuid4 import cpuinfo import pkg_resources @@ -97,6 +99,8 @@ def __init__(self) -> None: # NOTE: vLLM's server _only_ support flat KV pair. # Do not use nested fields. 
+ self.uuid = str(uuid4()) + # Environment Information self.provider: Optional[str] = None self.num_cpu: Optional[int] = None @@ -122,11 +126,17 @@ def report_usage(self, model_architecture: str, usage_context: UsageContext, extra_kvs: Dict[str, any] = None) -> None: - t = Thread(target=self._report_usage_once, + t = Thread(target=self._report_usage_worker, args=(model_architecture, usage_context, extra_kvs or {}), daemon=True) t.start() + def _report_usage_worker(self, model_architecture: str, + usage_context: UsageContext, + extra_kvs: Dict[str, any]) -> None: + self._report_usage_once(model_architecture, usage_context, extra_kvs) + self._report_continous_usage() + def _report_usage_once(self, model_architecture: str, usage_context: UsageContext, extra_kvs: Dict[str, any]) -> None: @@ -164,6 +174,22 @@ def _report_usage_once(self, model_architecture: str, data.update(extra_kvs) self._write_to_file(data) + self._send_to_server(data) + + def _report_continous_usage(self): + """Report usage every 10 minutes. + + This helps us to collect more data points for uptime of vLLM usages. + This function can also help send over performance metrics over time. + """ + while True: + time.sleep(600) + data = {"uuid": self.uuid, "log_time": _get_current_timestamp_ns()} + + self._write_to_file(data) + self._send_to_server(data) + + def _send_to_server(self, data): try: requests.post(_USAGE_STATS_SERVER, json=data) except requests.exceptions.RequestException: From f4bf86263ca5cda84e387db1873573f32ffeb974 Mon Sep 17 00:00:00 2001 From: simon-mo Date: Thu, 28 Mar 2024 22:34:37 +0000 Subject: [PATCH 40/47] add wip doc --- docs/source/serving/usage_stats.md | 32 ++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 docs/source/serving/usage_stats.md diff --git a/docs/source/serving/usage_stats.md b/docs/source/serving/usage_stats.md new file mode 100644 index 000000000000..1c2bbbd22152 --- /dev/null +++ b/docs/source/serving/usage_stats.md @@ -0,0 +1,32 @@ +# Usage Stats Collection + +```json +{ + "uuid": "fbe880e9-084d-4cab-a395-8984c50f1109", + "provider": "GCP", + "num_cpu": 24, + "cpu_type": "Intel(R) Xeon(R) CPU @ 2.20GHz", + "cpu_family_model_stepping": "6,85,7", + "total_memory": 101261135872, + "architecture": "x86_64", + "platform": "Linux-5.10.0-28-cloud-amd64-x86_64-with-glibc2.31", + "gpu_count": 2, + "gpu_type": "NVIDIA L4", + "gpu_memory_per_device": 23580639232, + "model_architecture": "OPTForCausalLM", + "vllm_version": "0.3.2+cu123", + "context": "LLM_CLASS", + "log_time": 1711663373492490000, + "source": "production", + "dtype": "torch.float16", + "tensor_parallel_size": 1, + "block_size": 16, + "gpu_memory_utilization": 0.9, + "quantization": null, + "kv_cache_dtype": "auto", + "enable_lora": false, + "enable_prefix_caching": false, + "enforce_eager": false, + "disable_custom_all_reduce": true +} +``` \ No newline at end of file From 200678835bb73fbbe1f0bc06a2d1017d92fed2e5 Mon Sep 17 00:00:00 2001 From: simon-mo Date: Thu, 28 Mar 2024 22:48:45 +0000 Subject: [PATCH 41/47] revert some fixes --- Dockerfile | 2 +- vllm/model_executor/neuron_model_loader.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index d9ed7ff19e47..122f00f7b490 100644 --- a/Dockerfile +++ b/Dockerfile @@ -95,7 +95,7 @@ RUN --mount=type=cache,target=/root/.cache/pip VLLM_USE_PRECOMPILED=1 pip instal #################### RUNTIME BASE IMAGE #################### # We used base cuda image because pytorch installs its own cuda libraries. 
-# However cupy depends on cuda libraries so we had to switch to the runtime image +# However nccl depends on cuda libraries so we had to switch to the runtime image # In the future it would be nice to get a container with pytorch and cuda without duplicating cuda FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS vllm-base diff --git a/vllm/model_executor/neuron_model_loader.py b/vllm/model_executor/neuron_model_loader.py index e71968d77804..43d17ad373b8 100644 --- a/vllm/model_executor/neuron_model_loader.py +++ b/vllm/model_executor/neuron_model_loader.py @@ -1,7 +1,7 @@ """Utilities for selecting and loading neuron models.""" import importlib import os -from typing import Optional, Tuple, Type +from typing import Optional, Type import torch import torch.nn as nn @@ -96,8 +96,7 @@ def load_weights(self, model_name_or_path: str, **kwargs): self.model.to_neuron() -def _get_model_architecture( - config: PretrainedConfig) -> Tuple[Type[nn.Module], str]: +def _get_model_architecture(config: PretrainedConfig) -> Type[nn.Module]: architectures = getattr(config, "architectures", []) for arch in architectures: if arch in _NEURON_SUPPORTED_MODELS: From db715c8183f9509329f67d9c6ea8033746ae51cf Mon Sep 17 00:00:00 2001 From: simon-mo Date: Thu, 28 Mar 2024 22:50:46 +0000 Subject: [PATCH 42/47] more fixes --- Dockerfile | 2 +- vllm/engine/async_llm_engine.py | 22 ++++++++++++---------- vllm/engine/llm_engine.py | 12 +++++++----- 3 files changed, 20 insertions(+), 16 deletions(-) diff --git a/Dockerfile b/Dockerfile index 122f00f7b490..8e9e46a7aba8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -95,7 +95,7 @@ RUN --mount=type=cache,target=/root/.cache/pip VLLM_USE_PRECOMPILED=1 pip instal #################### RUNTIME BASE IMAGE #################### # We used base cuda image because pytorch installs its own cuda libraries. -# However nccl depends on cuda libraries so we had to switch to the runtime image +# However pynccl depends on cuda libraries so we had to switch to the runtime image # In the future it would be nice to get a container with pytorch and cuda without duplicating cuda FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS vllm-base diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index ece4a9aed12c..2e6f5d69a042 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -324,7 +324,7 @@ def from_engine_args( cls, engine_args: AsyncEngineArgs, start_engine_loop: bool = True, - usage_context: UsageContext = UsageContext.ENGINE_CONTEXT + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, ) -> "AsyncLLMEngine": """Creates an async LLM engine from the engine arguments.""" # Create the engine configs. @@ -345,15 +345,17 @@ def from_engine_args( from vllm.executor.gpu_executor import GPUExecutorAsync executor_class = GPUExecutorAsync # Create the async LLM engine. 
- engine = cls(parallel_config.worker_use_ray, - engine_args.engine_use_ray, - *engine_configs, - executor_class, - log_requests=not engine_args.disable_log_requests, - log_stats=not engine_args.disable_log_stats, - max_log_len=engine_args.max_log_len, - start_engine_loop=start_engine_loop, - usage_context=usage_context) + engine = cls( + parallel_config.worker_use_ray, + engine_args.engine_use_ray, + *engine_configs, + executor_class, + log_requests=not engine_args.disable_log_requests, + log_stats=not engine_args.disable_log_stats, + max_log_len=engine_args.max_log_len, + start_engine_loop=start_engine_loop, + usage_context=usage_context, + ) return engine @property diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index ef3287e562b4..581a0c5dccd2 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -164,7 +164,7 @@ def __init__( def from_engine_args( cls, engine_args: EngineArgs, - usage_context: UsageContext = UsageContext.ENGINE_CONTEXT + usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, ) -> "LLMEngine": """Creates an LLM engine from the engine arguments.""" # Create the engine configs. @@ -187,10 +187,12 @@ def from_engine_args( executor_class = GPUExecutor # Create the LLM engine. - engine = cls(*engine_configs, - executor_class=executor_class, - log_stats=not engine_args.disable_log_stats, - usage_context=usage_context) + engine = cls( + *engine_configs, + executor_class=executor_class, + log_stats=not engine_args.disable_log_stats, + usage_context=usage_context, + ) return engine def __reduce__(self): From 2c1e5573b3af9b0d4ab1d34130425e0037015fb9 Mon Sep 17 00:00:00 2001 From: simon-mo Date: Thu, 28 Mar 2024 23:05:17 +0000 Subject: [PATCH 43/47] finish doc, readability pass --- docs/source/serving/usage_stats.md | 28 +++++++++++++++++++++++++++- vllm/engine/llm_engine.py | 4 +++- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/docs/source/serving/usage_stats.md b/docs/source/serving/usage_stats.md index 1c2bbbd22152..fa4c6668460c 100644 --- a/docs/source/serving/usage_stats.md +++ b/docs/source/serving/usage_stats.md @@ -1,5 +1,13 @@ # Usage Stats Collection +vLLM collects usage data by default. This data is used to help engineers working on the project to better understand which hardware and model configuration is widely used, so we can prioritize our attention to the workload that matters. The data is collected is anonymous, transparent, and does not contain any sensitive information. The collected data is also going to be publically released so that the community can benefit from the insights. + +## What data is collected? + +You can see the up to date list of data collected by vLLM in the [usage_lib.py](https://github.com/vllm-project/vllm/blob/main/vllm/usage/usage_lib.py). + +Here is an example as of v0.4.0: + ```json { "uuid": "fbe880e9-084d-4cab-a395-8984c50f1109", @@ -29,4 +37,22 @@ "enforce_eager": false, "disable_custom_all_reduce": true } -``` \ No newline at end of file +``` + +You can preview the data being collected by running the following command: + +```bash +tail ~/.config/vllm/usage_stats.json +``` + +## Opt-out of Usage Stats Collection + +You can opt-out the collection through either the existence of environment variable (`VLLM_NO_USAGE_STATS` or `DO_NOT_TRACK`) +or the existence of the file `~/.config/vllm/do_not_track`. 
+ +```bash +# any of the following way can disable the usage stats collection +export VLLM_NO_USAGE_STATS=1 +export DO_NOT_TRACK=1 +mkdir -p ~/.config/vllm && touch ~/.config/vllm/do_not_track +``` diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 581a0c5dccd2..3a9e20f42b39 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -113,7 +113,7 @@ def __init__( device_config, lora_config, vision_language_config) - #If usage stat is enabled, collect relevant info. + # If usage stat is enabled, collect relevant info. if is_usage_stats_enabled(): usage_message.report_usage( get_architecture_class_name(model_config), @@ -128,11 +128,13 @@ def __init__( cache_config.block_size, "gpu_memory_utilization": cache_config.gpu_memory_utilization, + # Quantization "quantization": model_config.quantization, "kv_cache_dtype": cache_config.cache_dtype, + # Feature flags "enable_lora": bool(lora_config), From 42e66b8e4692bdb534fe5b8b5c81ff160d6c31eb Mon Sep 17 00:00:00 2001 From: simon-mo Date: Thu, 28 Mar 2024 23:07:09 +0000 Subject: [PATCH 44/47] edit pass --- docs/source/serving/usage_stats.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/docs/source/serving/usage_stats.md b/docs/source/serving/usage_stats.md index fa4c6668460c..a1e4b1c38aca 100644 --- a/docs/source/serving/usage_stats.md +++ b/docs/source/serving/usage_stats.md @@ -1,6 +1,6 @@ # Usage Stats Collection -vLLM collects usage data by default. This data is used to help engineers working on the project to better understand which hardware and model configuration is widely used, so we can prioritize our attention to the workload that matters. The data is collected is anonymous, transparent, and does not contain any sensitive information. The collected data is also going to be publically released so that the community can benefit from the insights. +vLLM collects anonymous usage data by default to help the engineering team better understand which hardware and model configurations are widely used. This data allows them to prioritize their efforts on the most common workloads. The collected data is transparent, does not contain any sensitive information, and will be publicly released for the community's benefit. ## What data is collected? @@ -39,7 +39,7 @@ Here is an example as of v0.4.0: } ``` -You can preview the data being collected by running the following command: +You can preview the collected data by running the following command: ```bash tail ~/.config/vllm/usage_stats.json @@ -47,11 +47,10 @@ tail ~/.config/vllm/usage_stats.json ## Opt-out of Usage Stats Collection -You can opt-out the collection through either the existence of environment variable (`VLLM_NO_USAGE_STATS` or `DO_NOT_TRACK`) -or the existence of the file `~/.config/vllm/do_not_track`. 
+You can opt-out of usage stats collection by setting the VLLM_NO_USAGE_STATS or DO_NOT_TRACK environment variable, or by creating a ~/.config/vllm/do_not_track file: ```bash -# any of the following way can disable the usage stats collection +# Any of the following methods can disable usage stats collection export VLLM_NO_USAGE_STATS=1 export DO_NOT_TRACK=1 mkdir -p ~/.config/vllm && touch ~/.config/vllm/do_not_track From a4e57426ecf0f7b0447a61cb90e2987fac1b1149 Mon Sep 17 00:00:00 2001 From: simon-mo Date: Thu, 28 Mar 2024 23:22:53 +0000 Subject: [PATCH 45/47] fix doc and isort --- docs/source/index.rst | 1 + vllm/usage/usage_lib.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/source/index.rst b/docs/source/index.rst index 72081588b1bc..5196ef062dc1 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -73,6 +73,7 @@ Documentation serving/deploying_with_docker serving/distributed_serving serving/metrics + serving/usage_stats serving/integrations .. toctree:: diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 3c0240b0234f..8248595ee363 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -3,11 +3,11 @@ import logging import os import platform +import time from enum import Enum from pathlib import Path from threading import Thread from typing import Dict, Optional -import time from uuid import uuid4 import cpuinfo From ba63b44b264d136a04165b89cc78cea86fbbfdf8 Mon Sep 17 00:00:00 2001 From: simon-mo Date: Fri, 29 Mar 2024 03:17:37 +0000 Subject: [PATCH 46/47] bad merge --- Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Dockerfile b/Dockerfile index 8e9e46a7aba8..48da2994c858 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,6 +38,8 @@ RUN --mount=type=cache,target=/root/.cache/pip \ # copy input files COPY csrc csrc COPY setup.py setup.py +COPY cmake cmake +COPY CMakeLists.txt CMakeLists.txt COPY requirements.txt requirements.txt COPY pyproject.toml pyproject.toml COPY vllm/__init__.py vllm/__init__.py From 58fb78de95aa81e3b26de2feac0629a684c09b75 Mon Sep 17 00:00:00 2001 From: simon-mo Date: Fri, 29 Mar 2024 03:34:45 +0000 Subject: [PATCH 47/47] add to amd req txt --- requirements-neuron.txt | 3 +++ requirements-rocm.txt | 2 ++ 2 files changed, 5 insertions(+) diff --git a/requirements-neuron.txt b/requirements-neuron.txt index 858472c20ca8..6828bd4fd1fc 100644 --- a/requirements-neuron.txt +++ b/requirements-neuron.txt @@ -7,3 +7,6 @@ fastapi uvicorn[standard] pydantic >= 2.0 # Required for OpenAI server. prometheus_client >= 0.18.0 +requests +psutil +py-cpuinfo \ No newline at end of file diff --git a/requirements-rocm.txt b/requirements-rocm.txt index 6acf70695cef..cea1183edc0d 100644 --- a/requirements-rocm.txt +++ b/requirements-rocm.txt @@ -2,6 +2,8 @@ cmake>=3.21 ninja # For faster builds. typing-extensions>=4.8.0 starlette +requests +py-cpuinfo psutil ray >= 2.9 sentencepiece # Required for LLaMA tokenizer.
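
Taken together, the patches above leave `vllm/usage/usage_lib.py` with three moving parts: an opt-out check, a one-time environment report, and a 10-minute heartbeat. The standalone sketch below condenses that behaviour for readers of the series. It is illustrative rather than a copy of the module: the helper names (`usage_stats_enabled`, `start_reporting`, `_record`) are invented for the example, the hardware probes (GPU, CPU, memory, cloud provider) are omitted so it runs anywhere, and only the opt-out variables, file locations, payload keys, and heartbeat interval are taken from the diffs.

```python
import json
import os
import time
import uuid
from threading import Thread

import requests

# Paths, env vars, and the 10-minute interval mirror the patch series;
# the endpoint value and all helper names are illustrative only.
_DO_NOT_TRACK_FILE = os.path.expanduser("~/.config/vllm/do_not_track")
_USAGE_STATS_FILE = os.path.expanduser("~/.config/vllm/usage_stats.json")
_USAGE_STATS_SERVER = "https://stats.vllm.ai"
_HEARTBEAT_INTERVAL_S = 600


def usage_stats_enabled() -> bool:
    """Opt-out check: either env var or the marker file disables collection."""
    if os.environ.get("DO_NOT_TRACK", "0") == "1":
        return False
    if os.environ.get("VLLM_NO_USAGE_STATS", "0") == "1":
        return False
    return not os.path.exists(_DO_NOT_TRACK_FILE)


def _record(data: dict) -> None:
    """Append the record locally, then POST it on a best-effort basis."""
    os.makedirs(os.path.dirname(_USAGE_STATS_FILE), exist_ok=True)
    with open(_USAGE_STATS_FILE, "a") as f:
        json.dump(data, f)
        f.write("\n")
    try:
        requests.post(_USAGE_STATS_SERVER, json=data, timeout=5)
    except requests.exceptions.RequestException:
        pass  # reporting must never interfere with serving


def _report_worker(run_id: str, first_payload: dict) -> None:
    """Send one full report, then a minimal heartbeat every 10 minutes."""
    _record({"uuid": run_id, "log_time": time.time_ns(), **first_payload})
    while True:
        time.sleep(_HEARTBEAT_INTERVAL_S)
        _record({"uuid": run_id, "log_time": time.time_ns()})


def start_reporting(model_architecture: str, context: str) -> None:
    """Kick off reporting in a daemon thread if the user has not opted out."""
    if not usage_stats_enabled():
        return
    payload = {
        "model_architecture": model_architecture,  # e.g. "OPTForCausalLM"
        "context": context,                        # e.g. "LLM_CLASS"
        "source": os.environ.get("VLLM_USAGE_SOURCE", "unknown"),
    }
    Thread(target=_report_worker,
           args=(str(uuid.uuid4()), payload),
           daemon=True).start()


if __name__ == "__main__":
    start_reporting("OPTForCausalLM", "LLM_CLASS")
    time.sleep(2)  # give the daemon thread a moment to emit the first record
```

In the real module the first report also carries the hardware and configuration fields shown in the JSON example (GPU type and memory, CPU info, provider, dtype, parallelism, feature flags), which is why patch 47 adds `requests`, `psutil`, and `py-cpuinfo` to the Neuron requirements and `requests` and `py-cpuinfo` to the ROCm requirements.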