-
Notifications
You must be signed in to change notification settings - Fork 521
[Docs]: update metric names for clarity and consistency #1822
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 1 commit
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -36,9 +36,9 @@ vllm_num_requests_running{model_name="meta-llama/Llama-2-7b-chat-hf"} 2.0 | |
| # HELP vllm_num_requests_waiting Number of requests waiting to be processed. | ||
| # TYPE vllm_num_requests_waiting gauge | ||
| vllm_num_requests_waiting{model_name="meta-llama/Llama-2-7b-chat-hf"} 3.0 | ||
| # HELP vllm_gpu_cache_usage_perc GPU KV-cache usage. 1.0 means 100 percent usage. | ||
| # TYPE vllm_gpu_cache_usage_perc gauge | ||
| vllm_gpu_cache_usage_perc 0.75 | ||
| # HELP vllm_kv_cache_usage_perc GPU KV-cache usage. 1.0 means 100 percent usage. | ||
| # TYPE vllm_kv_cache_usage_perc gauge | ||
| vllm_kv_cache_usage_perc 0.75 | ||
| # HELP vllm_time_to_first_token_seconds Histogram of time to first token in seconds. | ||
| # TYPE vllm_time_to_first_token_seconds histogram | ||
| vllm_time_to_first_token_seconds_bucket{model_name="meta-llama/Llama-2-7b-chat-hf",le="0.001"} 0.0 | ||
|
|
@@ -100,7 +100,7 @@ func setupMockMetrics() { | |
| MetricSource: PodRawMetrics, | ||
| MetricType: MetricType{Raw: Gauge}, | ||
| EngineMetricsNameMapping: map[string]string{ | ||
| "vllm": "vllm_gpu_cache_usage_perc", | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Please remove this part. We need a fallback mechanism; if we make this change, the value won't be available in versions prior to v0.10.0. This will be supported in PR #1814
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. @googs1025 Addressed your comment, PTAL again |
||
| "vllm": "vllm_kv_cache_usage_perc", | ||
| "sglang": "sglang_cache_usage", | ||
| }, | ||
| Description: "Cache usage percentage", | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -39,7 +39,7 @@ const ( | |||||
| AvgTPOT5mPod = "avg_tpot_pod_5m" | ||||||
| AvgPromptToksPerReq = "avg_prompt_toks_per_req" | ||||||
| AvgGenerationToksPerReq = "avg_generation_toks_per_req" | ||||||
| GPUCacheUsagePerc = "gpu_cache_usage_perc" | ||||||
| GPUCacheUsagePerc = "kv_cache_usage_perc" | ||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. For consistency with the new metric name
Suggested change
|
||||||
| GPUBusyTimeRatio = "gpu_busy_time_ratio" | ||||||
| CPUCacheUsagePerc = "cpu_cache_usage_perc" | ||||||
| EngineUtilization = "engine_utilization" | ||||||
|
|
@@ -304,7 +304,7 @@ var ( | |||||
| Raw: Counter, | ||||||
| }, | ||||||
| EngineMetricsNameMapping: map[string]string{ | ||||||
| "vllm": "vllm:gpu_cache_usage_perc", | ||||||
| "vllm": "vllm:kv_cache_usage_perc", | ||||||
| "sglang": "sglang:token_usage", // Based on https://github.com/sgl-project/sglang/issues/5979 | ||||||
| "xllm": "kv_cache_utilization", | ||||||
| }, | ||||||
|
|
||||||
Uh oh!
There was an error while loading. Please reload this page.