Commit ef978fe

Port metrics from aioprometheus to prometheus_client (vllm-project#2730)

1 parent f7c1234 commit ef978fe

File tree

9 files changed: +133, -87 lines
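The heart of the change is an API shift: aioprometheus metrics take a label dict on every call (e.g. counter.add(labels, n)), while prometheus_client metrics declare their label names up front and bind label values through .labels(). A minimal sketch of the new style, reusing one metric name from this commit; the isolated registry and the example value are illustrative, not part of the diff:

# Old style (aioprometheus): labels passed as a dict on each call, e.g.
#   counter_prompt_tokens.add(labels, n)
#
# New style (prometheus_client): label names fixed at construction,
# label values bound per call via .labels().
from prometheus_client import CollectorRegistry, Counter

registry = CollectorRegistry()  # separate registry keeps this sketch self-contained
counter_prompt_tokens = Counter(
    name="vllm:prompt_tokens_total",
    documentation="Number of prefill tokens processed.",
    labelnames=["model_name"],
    registry=registry)

counter_prompt_tokens.labels(model_name="facebook/opt-125m").inc(128)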

docs/source/conf.py

Lines changed: 1 addition & 1 deletion
@@ -72,7 +72,7 @@
 
 # Mock out external dependencies here.
 autodoc_mock_imports = [
-    "torch", "transformers", "psutil", "aioprometheus", "sentencepiece",
+    "torch", "transformers", "psutil", "prometheus_client", "sentencepiece",
     "vllm.cuda_utils", "vllm._C"
 ]
 

requirements-neuron.txt

Lines changed: 1 addition & 1 deletion
@@ -6,4 +6,4 @@ neuronx-cc
 fastapi
 uvicorn[standard]
 pydantic >= 2.0 # Required for OpenAI server.
-aioprometheus[starlette]
+prometheus_client

requirements-rocm.txt

Lines changed: 1 addition & 1 deletion
@@ -10,4 +10,4 @@ transformers >= 4.38.0 # Required for Gemma.
 fastapi
 uvicorn[standard]
 pydantic >= 2.0 # Required for OpenAI server.
-aioprometheus[starlette]
+prometheus_client

requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@ xformers == 0.0.23.post1 # Required for CUDA 12.1.
 fastapi
 uvicorn[standard]
 pydantic >= 2.0 # Required for OpenAI server.
-aioprometheus[starlette]
+prometheus_client
 pynvml == 11.5.0
 triton >= 2.1.0
 cupy-cuda12x == 12.1.0 # Required for CUDA graphs. CUDA 11.8 users should install cupy-cuda11x instead.

tests/conftest.py

Lines changed: 2 additions & 0 deletions
@@ -165,6 +165,7 @@ def __init__(
         dtype: str = "half",
         disable_log_stats: bool = True,
         tensor_parallel_size: int = 1,
+        **kwargs,
     ) -> None:
         self.model = LLM(
             model=model_name,
@@ -174,6 +175,7 @@ def __init__(
             swap_space=0,
             disable_log_stats=disable_log_stats,
             tensor_parallel_size=tensor_parallel_size,
+            **kwargs,
         )
 
     def generate(

tests/metrics/test_metrics.py

Lines changed: 14 additions & 11 deletions
@@ -1,5 +1,4 @@
 import pytest
-import vllm.engine.metrics
 
 MODELS = [
     "facebook/opt-125m",
@@ -16,10 +15,10 @@ def test_metric_counter_prompt_tokens(
     dtype: str,
     max_tokens: int,
 ) -> None:
-    # Reset metric
-    vllm.engine.metrics.counter_prompt_tokens.set_value({}, 0)
-
-    vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
+    vllm_model = vllm_runner(model,
+                             dtype=dtype,
+                             disable_log_stats=False,
+                             gpu_memory_utilization=0.4)
     tokenizer = vllm_model.model.get_tokenizer()
     prompt_token_counts = [len(tokenizer.encode(p)) for p in example_prompts]
     # This test needs at least 2 prompts in a batch of different lengths to verify their token count is correct despite padding.
@@ -29,7 +28,9 @@ def test_metric_counter_prompt_tokens(
     vllm_prompt_token_count = sum(prompt_token_counts)
 
     _ = vllm_model.generate_greedy(example_prompts, max_tokens)
-    metric_count = vllm.engine.metrics.counter_prompt_tokens.get_value({})
+    stat_logger = vllm_model.model.llm_engine.stat_logger
+    metric_count = stat_logger.metrics.counter_prompt_tokens.labels(
+        **stat_logger.labels)._value.get()
 
     assert vllm_prompt_token_count == metric_count, (
         f"prompt token count: {vllm_prompt_token_count!r}\nmetric: {metric_count!r}"
@@ -46,13 +47,15 @@ def test_metric_counter_generation_tokens(
     dtype: str,
     max_tokens: int,
 ) -> None:
-    # Reset metric
-    vllm.engine.metrics.counter_generation_tokens.set_value({}, 0)
-
-    vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
+    vllm_model = vllm_runner(model,
+                             dtype=dtype,
+                             disable_log_stats=False,
+                             gpu_memory_utilization=0.4)
     vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens)
     tokenizer = vllm_model.model.get_tokenizer()
-    metric_count = vllm.engine.metrics.counter_generation_tokens.get_value({})
+    stat_logger = vllm_model.model.llm_engine.stat_logger
+    metric_count = stat_logger.metrics.counter_generation_tokens.labels(
+        **stat_logger.labels)._value.get()
     vllm_generation_count = 0
     for i in range(len(example_prompts)):
         vllm_output_ids, vllm_output_str = vllm_outputs[i]
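For context on the test changes: prometheus_client exposes no public getter on a Counter, so the updated tests read the labelled child's internal _value object through the engine's stat_logger. A small self-contained sketch of that read pattern; the registry, label values, and count here are illustrative:

from prometheus_client import CollectorRegistry, Counter

registry = CollectorRegistry()  # isolated registry for the sketch
counter = Counter(
    name="vllm:generation_tokens_total",
    documentation="Number of generation tokens processed.",
    labelnames=["model_name"],
    registry=registry)

labels = {"model_name": "facebook/opt-125m"}
counter.labels(**labels).inc(42)

# What the updated test does: read the labelled child's internal value.
assert counter.labels(**labels)._value.get() == 42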

vllm/engine/llm_engine.py

Lines changed: 2 additions & 1 deletion
@@ -128,7 +128,8 @@ def __init__(
         # Metric Logging.
         if self.log_stats:
             self.stat_logger = StatLogger(
-                local_interval=_LOCAL_LOGGING_INTERVAL_SEC)
+                local_interval=_LOCAL_LOGGING_INTERVAL_SEC,
+                labels=dict(model_name=model_config.model))
 
         self.forward_dag = None
         if USE_RAY_COMPILED_DAG:
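This is the only call site that changes on the engine side: the label values are fixed once here, and StatLogger derives the Prometheus label names from the dict keys. A rough usage sketch, assuming a vLLM build that includes this commit; the interval and model name are placeholders:

from vllm.engine.metrics import StatLogger

stat_logger = StatLogger(
    local_interval=5.0,  # placeholder for _LOCAL_LOGGING_INTERVAL_SEC
    labels=dict(model_name="facebook/opt-125m"))

# labels.keys() become the Prometheus labelnames; the dict itself is
# splatted into every .labels(**self.labels) call when stats are logged.
print(stat_logger.labels)  # {'model_name': 'facebook/opt-125m'}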

vllm/engine/metrics.py

Lines changed: 107 additions & 63 deletions
@@ -1,66 +1,94 @@
 from vllm.logger import init_logger
-from aioprometheus import Counter, Gauge, Histogram
+from prometheus_client import Counter, Gauge, Histogram, REGISTRY, disable_created_metrics
 
 import time
 import numpy as np
-from typing import List
+from typing import Dict, List
 from dataclasses import dataclass
 
 logger = init_logger(__name__)
 
-labels = {}
-
-
-def add_global_metrics_labels(**kwargs):
-    labels.update(kwargs)
-
+disable_created_metrics()
 
 # The begin-* and end* here are used by the documentation generator
 # to extract the metrics definitions.
 
+
 # begin-metrics-definitions
-gauge_avg_prompt_throughput = Gauge("vllm:avg_prompt_throughput_toks_per_s",
-                                    "Average prefill throughput in tokens/s.")
-gauge_avg_generation_throughput = Gauge(
-    "vllm:avg_generation_throughput_toks_per_s",
-    "Average generation throughput in tokens/s.")
-counter_prompt_tokens = Counter("vllm:prompt_tokens_total",
-                                "Number of prefill tokens processed.")
-counter_generation_tokens = Counter("vllm:generation_tokens_total",
-                                    "Number of generation tokens processed.")
-
-gauge_scheduler_running = Gauge(
-    "vllm:num_requests_running",
-    "Number of requests currently running on GPU.")
-gauge_scheduler_swapped = Gauge("vllm:num_requests_swapped",
-                                "Number of requests swapped to CPU.")
-gauge_scheduler_waiting = Gauge("vllm:num_requests_waiting",
-                                "Number of requests waiting to be processed.")
-
-gauge_gpu_cache_usage = Gauge(
-    "vllm:gpu_cache_usage_perc",
-    "GPU KV-cache usage. 1 means 100 percent usage.")
-gauge_cpu_cache_usage = Gauge(
-    "vllm:cpu_cache_usage_perc",
-    "CPU KV-cache usage. 1 means 100 percent usage.")
-
-histogram_time_to_first_token = Histogram(
-    "vllm:time_to_first_token_seconds",
-    "Histogram of time to first token in seconds.",
-    buckets=[
-        0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.25, 0.5, 0.75, 1.0,
-        2.5, 5.0, 7.5, 10.0
-    ])
-histogram_time_per_output_tokens = Histogram(
-    "vllm:time_per_output_token_seconds",
-    "Histogram of time per output token in seconds.",
-    buckets=[
-        0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75, 1.0, 2.5
-    ])
-histogram_e2e_request_latency = Histogram(
-    "vllm:e2e_request_latency_seconds",
-    "Histogram of end to end request latency in seconds.",
-    buckets=[1.0, 2.5, 5.0, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, 60.0])
+class Metrics:
+
+    def __init__(self, labelnames: List[str]):
+        # Unregister any existing vLLM collectors
+        for collector in list(REGISTRY._collector_to_names):
+            if hasattr(collector, "_name") and "vllm" in collector._name:
+                REGISTRY.unregister(collector)
+
+        # System stats
+        self.gauge_scheduler_running = Gauge(
+            name="vllm:num_requests_running",
+            documentation="Number of requests currently running on GPU.",
+            labelnames=labelnames)
+        self.gauge_scheduler_swapped = Gauge(
+            name="vllm:num_requests_swapped",
+            documentation="Number of requests swapped to CPU.",
+            labelnames=labelnames)
+        self.gauge_scheduler_waiting = Gauge(
+            name="vllm:num_requests_waiting",
+            documentation="Number of requests waiting to be processed.",
+            labelnames=labelnames)
+        self.gauge_gpu_cache_usage = Gauge(
+            name="vllm:gpu_cache_usage_perc",
+            documentation="GPU KV-cache usage. 1 means 100 percent usage.",
+            labelnames=labelnames)
+        self.gauge_cpu_cache_usage = Gauge(
+            name="vllm:cpu_cache_usage_perc",
+            documentation="CPU KV-cache usage. 1 means 100 percent usage.",
+            labelnames=labelnames)
+
+        # Raw stats from last model iteration
+        self.counter_prompt_tokens = Counter(
+            name="vllm:prompt_tokens_total",
+            documentation="Number of prefill tokens processed.",
+            labelnames=labelnames)
+        self.counter_generation_tokens = Counter(
+            name="vllm:generation_tokens_total",
+            documentation="Number of generation tokens processed.",
+            labelnames=labelnames)
+        self.histogram_time_to_first_token = Histogram(
+            name="vllm:time_to_first_token_seconds",
+            documentation="Histogram of time to first token in seconds.",
+            labelnames=labelnames,
+            buckets=[
+                0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.25, 0.5,
+                0.75, 1.0, 2.5, 5.0, 7.5, 10.0
+            ])
+        self.histogram_time_per_output_token = Histogram(
+            name="vllm:time_per_output_token_seconds",
+            documentation="Histogram of time per output token in seconds.",
+            labelnames=labelnames,
+            buckets=[
+                0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75,
+                1.0, 2.5
+            ])
+        self.histogram_e2e_request_latency = Histogram(
+            name="vllm:e2e_request_latency_seconds",
+            documentation="Histogram of end to end request latency in seconds.",
+            labelnames=labelnames,
+            buckets=[1.0, 2.5, 5.0, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, 60.0])
+
+        # Legacy metrics
+        self.gauge_avg_prompt_throughput = Gauge(
+            name="vllm:avg_prompt_throughput_toks_per_s",
+            documentation="Average prefill throughput in tokens/s.",
+            labelnames=labelnames,
+        )
+        self.gauge_avg_generation_throughput = Gauge(
+            name="vllm:avg_generation_throughput_toks_per_s",
+            documentation="Average generation throughput in tokens/s.",
+            labelnames=labelnames,
+        )
+
+
 # end-metrics-definitions
 
 
@@ -87,7 +115,7 @@ class Stats:
 class StatLogger:
     """StatLogger is used LLMEngine to log to Promethus and Stdout."""
 
-    def __init__(self, local_interval: float) -> None:
+    def __init__(self, local_interval: float, labels: Dict[str, str]) -> None:
         # Metadata for logging locally.
         self.last_local_log = time.monotonic()
         self.local_interval = local_interval
@@ -96,6 +124,10 @@ def __init__(self, local_interval: float) -> None:
         self.num_prompt_tokens: List[int] = []
         self.num_generation_tokens: List[int] = []
 
+        # Prometheus metrics
+        self.labels = labels
+        self.metrics = Metrics(labelnames=list(labels.keys()))
+
     def _get_throughput(self, tracked_stats: List[int], now: float) -> float:
         return float(np.sum(tracked_stats) / (now - self.last_local_log))
 
@@ -105,23 +137,33 @@ def _local_interval_elapsed(self, now: float) -> bool:
 
     def _log_prometheus(self, stats: Stats) -> None:
         # Set system stat gauges.
-        gauge_scheduler_running.set(labels, stats.num_running)
-        gauge_scheduler_swapped.set(labels, stats.num_swapped)
-        gauge_scheduler_waiting.set(labels, stats.num_waiting)
-        gauge_gpu_cache_usage.set(labels, stats.gpu_cache_usage)
-        gauge_cpu_cache_usage.set(labels, stats.cpu_cache_usage)
+        self.metrics.gauge_scheduler_running.labels(**self.labels).set(
+            stats.num_running)
+        self.metrics.gauge_scheduler_swapped.labels(**self.labels).set(
+            stats.num_swapped)
+        self.metrics.gauge_scheduler_waiting.labels(**self.labels).set(
+            stats.num_waiting)
+        self.metrics.gauge_gpu_cache_usage.labels(**self.labels).set(
+            stats.gpu_cache_usage)
+        self.metrics.gauge_cpu_cache_usage.labels(**self.labels).set(
+            stats.cpu_cache_usage)
 
         # Add to token counters.
-        counter_prompt_tokens.add(labels, stats.num_prompt_tokens)
-        counter_generation_tokens.add(labels, stats.num_generation_tokens)
+        self.metrics.counter_prompt_tokens.labels(**self.labels).inc(
+            stats.num_prompt_tokens)
+        self.metrics.counter_generation_tokens.labels(**self.labels).inc(
+            stats.num_generation_tokens)
 
         # Observe request level latencies in histograms.
         for ttft in stats.time_to_first_tokens:
-            histogram_time_to_first_token.observe(labels, ttft)
+            self.metrics.histogram_time_to_first_token.labels(
+                **self.labels).observe(ttft)
         for tpot in stats.time_per_output_tokens:
-            histogram_time_per_output_tokens.observe(labels, tpot)
+            self.metrics.histogram_time_per_output_token.labels(
+                **self.labels).observe(tpot)
         for e2e in stats.time_e2e_requests:
-            histogram_e2e_request_latency.observe(labels, e2e)
+            self.metrics.histogram_e2e_request_latency.labels(
+                **self.labels).observe(e2e)
 
     def _log_prometheus_interval(self, prompt_throughput: float,
                                  generation_throughput: float) -> None:
@@ -130,8 +172,10 @@ def _log_prometheus_interval(self, prompt_throughput: float,
         # Moving forward, we should use counters like counter_prompt_tokens, counter_generation_tokens
        # Which log raw data and calculate summaries using rate() on the grafana/prometheus side.
         # See https://github.com/vllm-project/vllm/pull/2316#discussion_r1464204666
-        gauge_avg_prompt_throughput.set(labels, prompt_throughput)
-        gauge_avg_generation_throughput.set(labels, generation_throughput)
+        self.metrics.gauge_avg_prompt_throughput.labels(
+            **self.labels).set(prompt_throughput)
+        self.metrics.gauge_avg_generation_throughput.labels(
+            **self.labels).set(generation_throughput)
 
     def log(self, stats: Stats) -> None:
         """Called by LLMEngine.

vllm/entrypoints/openai/api_server.py

Lines changed: 4 additions & 8 deletions
@@ -6,8 +6,7 @@
 import importlib
 import inspect
 
-from aioprometheus import MetricsMiddleware
-from aioprometheus.asgi.starlette import metrics
+from prometheus_client import make_asgi_app
 import fastapi
 import uvicorn
 from http import HTTPStatus
@@ -18,7 +17,6 @@
 
 from vllm.engine.arg_utils import AsyncEngineArgs
 from vllm.engine.async_llm_engine import AsyncLLMEngine
-from vllm.engine.metrics import add_global_metrics_labels
 from vllm.entrypoints.openai.protocol import CompletionRequest, ChatCompletionRequest, ErrorResponse
 from vllm.logger import init_logger
 from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
@@ -141,8 +139,9 @@ def parse_args():
     return parser.parse_args()
 
 
-app.add_middleware(MetricsMiddleware)  # Trace HTTP server metrics
-app.add_route("/metrics", metrics)  # Exposes HTTP metrics
+# Add prometheus asgi middleware to route /metrics requests
+metrics_app = make_asgi_app()
+app.mount("/metrics", metrics_app)
 
 
 @app.exception_handler(RequestValidationError)
@@ -242,9 +241,6 @@ async def authentication(request: Request, call_next):
     openai_serving_completion = OpenAIServingCompletion(
         engine, served_model, args.lora_modules)
 
-    # Register labels for metrics
-    add_global_metrics_labels(model_name=engine_args.model)
-
     app.root_path = args.root_path
     uvicorn.run(app,
                 host=args.host,
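The /metrics route is now served by prometheus_client's ASGI app mounted onto the FastAPI application, replacing the aioprometheus middleware and route. A minimal standalone app using the same pattern; the file name, extra route, and port are illustrative:

# minimal_metrics_app.py -- illustrative, not part of the vLLM codebase
import fastapi
import uvicorn
from prometheus_client import make_asgi_app

app = fastapi.FastAPI()

# Mount the prometheus_client ASGI app so GET /metrics serves the
# default registry in the Prometheus text exposition format.
app.mount("/metrics", make_asgi_app())


@app.get("/ping")
async def ping():
    return {"status": "ok"}


if __name__ == "__main__":
    # e.g. curl http://localhost:8000/metrics
    uvicorn.run(app, host="0.0.0.0", port=8000)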
