diff --git a/docs/source/conf.py b/docs/source/conf.py
index adbe67b21a0c8..5a45c6f9d1e0a 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -72,7 +72,7 @@

 # Mock out external dependencies here.
 autodoc_mock_imports = [
-    "torch", "transformers", "psutil", "aioprometheus", "sentencepiece",
+    "torch", "transformers", "psutil", "prometheus_client", "sentencepiece",
     "vllm.cuda_utils", "vllm._C"
 ]

diff --git a/requirements-neuron.txt b/requirements-neuron.txt
index 3f30ed08f037d..36e629add664d 100644
--- a/requirements-neuron.txt
+++ b/requirements-neuron.txt
@@ -6,4 +6,4 @@ neuronx-cc
 fastapi
 uvicorn[standard]
 pydantic >= 2.0  # Required for OpenAI server.
-aioprometheus[starlette]
+prometheus_client
diff --git a/requirements-rocm.txt b/requirements-rocm.txt
index 42b89ae84aa45..e759ba7d028d9 100644
--- a/requirements-rocm.txt
+++ b/requirements-rocm.txt
@@ -10,4 +10,4 @@ transformers >= 4.38.0  # Required for Gemma.
 fastapi
 uvicorn[standard]
 pydantic >= 2.0  # Required for OpenAI server.
-aioprometheus[starlette]
+prometheus_client
diff --git a/requirements.txt b/requirements.txt
index de08bd29beaf9..de93ba6354cda 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,7 +9,7 @@ xformers == 0.0.23.post1  # Required for CUDA 12.1.
 fastapi
 uvicorn[standard]
 pydantic >= 2.0  # Required for OpenAI server.
-aioprometheus[starlette]
+prometheus_client
 pynvml == 11.5.0
 triton >= 2.1.0
 cupy-cuda12x == 12.1.0  # Required for CUDA graphs. CUDA 11.8 users should install cupy-cuda11x instead.
diff --git a/tests/conftest.py b/tests/conftest.py
index 6af9b36b6febe..30a3df89d9f12 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -165,6 +165,7 @@ def __init__(
         dtype: str = "half",
         disable_log_stats: bool = True,
         tensor_parallel_size: int = 1,
+        **kwargs,
     ) -> None:
         self.model = LLM(
             model=model_name,
@@ -174,6 +175,7 @@
             swap_space=0,
             disable_log_stats=disable_log_stats,
             tensor_parallel_size=tensor_parallel_size,
+            **kwargs,
         )

     def generate(
diff --git a/tests/entrypoints/test_openai_server.py b/tests/entrypoints/test_openai_server.py
index 3a359502c39d5..29d0e6fd537d5 100644
--- a/tests/entrypoints/test_openai_server.py
+++ b/tests/entrypoints/test_openai_server.py
@@ -155,15 +155,18 @@ async def test_single_chat_session(server, client: openai.AsyncOpenAI,
     }]

     # test single completion
-    chat_completion = await client.chat.completions.create(
-        model=model_name,
-        messages=messages,
-        max_tokens=10,
-    )
+    chat_completion = await client.chat.completions.create(model=model_name,
+                                                           messages=messages,
+                                                           max_tokens=10,
+                                                           logprobs=True,
+                                                           top_logprobs=10)
     assert chat_completion.id is not None
     assert chat_completion.choices is not None and len(
         chat_completion.choices) == 1
     assert chat_completion.choices[0].message is not None
+    assert chat_completion.choices[0].logprobs is not None
+    assert chat_completion.choices[0].logprobs.top_logprobs is not None
+    assert len(chat_completion.choices[0].logprobs.top_logprobs[0]) == 10
     message = chat_completion.choices[0].message
     assert message.content is not None and len(message.content) >= 10
     assert message.role == "assistant"
@@ -198,13 +201,11 @@ async def test_completion_streaming(server, client: openai.AsyncOpenAI,
     single_output = single_completion.choices[0].text
     single_usage = single_completion.usage

-    stream = await client.completions.create(
-        model=model_name,
-        prompt=prompt,
-        max_tokens=5,
-        temperature=0.0,
-        stream=True,
-    )
+    stream = await client.completions.create(model=model_name,
+                                             prompt=prompt,
+                                             max_tokens=5,
+                                             temperature=0.0,
+                                             stream=True)
     chunks = []
     async for chunk in stream:
         chunks.append(chunk.choices[0].text)
diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py
index fe09aa8237f24..410bdfa5c69e2 100644
--- a/tests/metrics/test_metrics.py
+++ b/tests/metrics/test_metrics.py
@@ -1,5 +1,4 @@
 import pytest
-import vllm.engine.metrics

 MODELS = [
     "facebook/opt-125m",
@@ -16,10 +15,10 @@ def test_metric_counter_prompt_tokens(
     dtype: str,
     max_tokens: int,
 ) -> None:
-    # Reset metric
-    vllm.engine.metrics.counter_prompt_tokens.set_value({}, 0)
-
-    vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
+    vllm_model = vllm_runner(model,
+                             dtype=dtype,
+                             disable_log_stats=False,
+                             gpu_memory_utilization=0.4)
     tokenizer = vllm_model.model.get_tokenizer()
     prompt_token_counts = [len(tokenizer.encode(p)) for p in example_prompts]
     # This test needs at least 2 prompts in a batch of different lengths to verify their token count is correct despite padding.
@@ -29,7 +28,9 @@
     vllm_prompt_token_count = sum(prompt_token_counts)

     _ = vllm_model.generate_greedy(example_prompts, max_tokens)
-    metric_count = vllm.engine.metrics.counter_prompt_tokens.get_value({})
+    stat_logger = vllm_model.model.llm_engine.stat_logger
+    metric_count = stat_logger.metrics.counter_prompt_tokens.labels(
+        **stat_logger.labels)._value.get()

     assert vllm_prompt_token_count == metric_count, (
         f"prompt token count: {vllm_prompt_token_count!r}\nmetric: {metric_count!r}"
@@ -46,13 +47,15 @@ def test_metric_counter_generation_tokens(
     dtype: str,
     max_tokens: int,
 ) -> None:
-    # Reset metric
-    vllm.engine.metrics.counter_generation_tokens.set_value({}, 0)
-
-    vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
+    vllm_model = vllm_runner(model,
+                             dtype=dtype,
+                             disable_log_stats=False,
+                             gpu_memory_utilization=0.4)
     vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens)
     tokenizer = vllm_model.model.get_tokenizer()
-    metric_count = vllm.engine.metrics.counter_generation_tokens.get_value({})
+    stat_logger = vllm_model.model.llm_engine.stat_logger
+    metric_count = stat_logger.metrics.counter_generation_tokens.labels(
+        **stat_logger.labels)._value.get()
     vllm_generation_count = 0
     for i in range(len(example_prompts)):
         vllm_output_ids, vllm_output_str = vllm_outputs[i]
diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py
index c12486b43853a..c7fce7e287d9d 100644
--- a/vllm/engine/llm_engine.py
+++ b/vllm/engine/llm_engine.py
@@ -128,7 +128,8 @@ def __init__(
         # Metric Logging.
         if self.log_stats:
             self.stat_logger = StatLogger(
-                local_interval=_LOCAL_LOGGING_INTERVAL_SEC)
+                local_interval=_LOCAL_LOGGING_INTERVAL_SEC,
+                labels=dict(model_name=model_config.model))

         self.forward_dag = None
         if USE_RAY_COMPILED_DAG:
diff --git a/vllm/engine/metrics.py b/vllm/engine/metrics.py
index e613b9f551b2f..83e66a9372272 100644
--- a/vllm/engine/metrics.py
+++ b/vllm/engine/metrics.py
@@ -1,66 +1,94 @@
 from vllm.logger import init_logger
-from aioprometheus import Counter, Gauge, Histogram
+from prometheus_client import Counter, Gauge, Histogram, REGISTRY, disable_created_metrics

 import time
 import numpy as np
-from typing import List
+from typing import Dict, List
 from dataclasses import dataclass

 logger = init_logger(__name__)

-labels = {}
-
-
-def add_global_metrics_labels(**kwargs):
-    labels.update(kwargs)
-
+disable_created_metrics()

 # The begin-* and end* here are used by the documentation generator
 # to extract the metrics definitions.
+
 # begin-metrics-definitions
-gauge_avg_prompt_throughput = Gauge("vllm:avg_prompt_throughput_toks_per_s",
-                                    "Average prefill throughput in tokens/s.")
-gauge_avg_generation_throughput = Gauge(
-    "vllm:avg_generation_throughput_toks_per_s",
-    "Average generation throughput in tokens/s.")
-counter_prompt_tokens = Counter("vllm:prompt_tokens_total",
-                                "Number of prefill tokens processed.")
-counter_generation_tokens = Counter("vllm:generation_tokens_total",
-                                    "Number of generation tokens processed.")
-
-gauge_scheduler_running = Gauge(
-    "vllm:num_requests_running",
-    "Number of requests currently running on GPU.")
-gauge_scheduler_swapped = Gauge("vllm:num_requests_swapped",
-                                "Number of requests swapped to CPU.")
-gauge_scheduler_waiting = Gauge("vllm:num_requests_waiting",
-                                "Number of requests waiting to be processed.")
-
-gauge_gpu_cache_usage = Gauge(
-    "vllm:gpu_cache_usage_perc",
-    "GPU KV-cache usage. 1 means 100 percent usage.")
-gauge_cpu_cache_usage = Gauge(
-    "vllm:cpu_cache_usage_perc",
-    "CPU KV-cache usage. 1 means 100 percent usage.")
-
-histogram_time_to_first_token = Histogram(
-    "vllm:time_to_first_token_seconds",
-    "Histogram of time to first token in seconds.",
-    buckets=[
-        0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.25, 0.5, 0.75, 1.0,
-        2.5, 5.0, 7.5, 10.0
-    ])
-histogram_time_per_output_tokens = Histogram(
-    "vllm:time_per_output_token_seconds",
-    "Histogram of time per output token in seconds.",
-    buckets=[
-        0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75, 1.0, 2.5
-    ])
-histogram_e2e_request_latency = Histogram(
-    "vllm:e2e_request_latency_seconds",
-    "Histogram of end to end request latency in seconds.",
-    buckets=[1.0, 2.5, 5.0, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, 60.0])
+class Metrics:
+
+    def __init__(self, labelnames: List[str]):
+        # Unregister any existing vLLM collectors
+        for collector in list(REGISTRY._collector_to_names):
+            if hasattr(collector, "_name") and "vllm" in collector._name:
+                REGISTRY.unregister(collector)
+
+        # System stats
+        self.gauge_scheduler_running = Gauge(
+            name="vllm:num_requests_running",
+            documentation="Number of requests currently running on GPU.",
+            labelnames=labelnames)
+        self.gauge_scheduler_swapped = Gauge(
+            name="vllm:num_requests_swapped",
+            documentation="Number of requests swapped to CPU.",
+            labelnames=labelnames)
+        self.gauge_scheduler_waiting = Gauge(
+            name="vllm:num_requests_waiting",
+            documentation="Number of requests waiting to be processed.",
+            labelnames=labelnames)
+        self.gauge_gpu_cache_usage = Gauge(
+            name="vllm:gpu_cache_usage_perc",
+            documentation="GPU KV-cache usage. 1 means 100 percent usage.",
+            labelnames=labelnames)
+        self.gauge_cpu_cache_usage = Gauge(
+            name="vllm:cpu_cache_usage_perc",
+            documentation="CPU KV-cache usage. 1 means 100 percent usage.",
+            labelnames=labelnames)
+
+        # Raw stats from last model iteration
+        self.counter_prompt_tokens = Counter(
+            name="vllm:prompt_tokens_total",
+            documentation="Number of prefill tokens processed.",
+            labelnames=labelnames)
+        self.counter_generation_tokens = Counter(
+            name="vllm:generation_tokens_total",
+            documentation="Number of generation tokens processed.",
+            labelnames=labelnames)
+        self.histogram_time_to_first_token = Histogram(
+            name="vllm:time_to_first_token_seconds",
+            documentation="Histogram of time to first token in seconds.",
+            labelnames=labelnames,
+            buckets=[
+                0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.25, 0.5,
+                0.75, 1.0, 2.5, 5.0, 7.5, 10.0
+            ])
+        self.histogram_time_per_output_token = Histogram(
+            name="vllm:time_per_output_token_seconds",
+            documentation="Histogram of time per output token in seconds.",
+            labelnames=labelnames,
+            buckets=[
+                0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75,
+                1.0, 2.5
+            ])
+        self.histogram_e2e_request_latency = Histogram(
+            name="vllm:e2e_request_latency_seconds",
+            documentation="Histogram of end to end request latency in seconds.",
+            labelnames=labelnames,
+            buckets=[1.0, 2.5, 5.0, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, 60.0])
+
+        # Legacy metrics
+        self.gauge_avg_prompt_throughput = Gauge(
+            name="vllm:avg_prompt_throughput_toks_per_s",
+            documentation="Average prefill throughput in tokens/s.",
+            labelnames=labelnames,
+        )
+        self.gauge_avg_generation_throughput = Gauge(
+            name="vllm:avg_generation_throughput_toks_per_s",
+            documentation="Average generation throughput in tokens/s.",
+            labelnames=labelnames,
+        )
+
+
 # end-metrics-definitions
@@ -87,7 +115,7 @@ class Stats:
 class StatLogger:
     """StatLogger is used LLMEngine to log to Promethus and Stdout."""

-    def __init__(self, local_interval: float) -> None:
+    def __init__(self, local_interval: float, labels: Dict[str, str]) -> None:
         # Metadata for logging locally.
         self.last_local_log = time.monotonic()
         self.local_interval = local_interval
@@ -96,6 +124,10 @@ def __init__(self, local_interval: float) -> None:
         self.num_prompt_tokens: List[int] = []
         self.num_generation_tokens: List[int] = []

+        # Prometheus metrics
+        self.labels = labels
+        self.metrics = Metrics(labelnames=list(labels.keys()))
+
     def _get_throughput(self, tracked_stats: List[int], now: float) -> float:
         return float(np.sum(tracked_stats) / (now - self.last_local_log))

@@ -105,23 +137,33 @@ def _local_interval_elapsed(self, now: float) -> bool:

     def _log_prometheus(self, stats: Stats) -> None:
         # Set system stat gauges.
-        gauge_scheduler_running.set(labels, stats.num_running)
-        gauge_scheduler_swapped.set(labels, stats.num_swapped)
-        gauge_scheduler_waiting.set(labels, stats.num_waiting)
-        gauge_gpu_cache_usage.set(labels, stats.gpu_cache_usage)
-        gauge_cpu_cache_usage.set(labels, stats.cpu_cache_usage)
+        self.metrics.gauge_scheduler_running.labels(**self.labels).set(
+            stats.num_running)
+        self.metrics.gauge_scheduler_swapped.labels(**self.labels).set(
+            stats.num_swapped)
+        self.metrics.gauge_scheduler_waiting.labels(**self.labels).set(
+            stats.num_waiting)
+        self.metrics.gauge_gpu_cache_usage.labels(**self.labels).set(
+            stats.gpu_cache_usage)
+        self.metrics.gauge_cpu_cache_usage.labels(**self.labels).set(
+            stats.cpu_cache_usage)

         # Add to token counters.
-        counter_prompt_tokens.add(labels, stats.num_prompt_tokens)
-        counter_generation_tokens.add(labels, stats.num_generation_tokens)
+        self.metrics.counter_prompt_tokens.labels(**self.labels).inc(
+            stats.num_prompt_tokens)
+        self.metrics.counter_generation_tokens.labels(**self.labels).inc(
+            stats.num_generation_tokens)

         # Observe request level latencies in histograms.
         for ttft in stats.time_to_first_tokens:
-            histogram_time_to_first_token.observe(labels, ttft)
+            self.metrics.histogram_time_to_first_token.labels(
+                **self.labels).observe(ttft)
         for tpot in stats.time_per_output_tokens:
-            histogram_time_per_output_tokens.observe(labels, tpot)
+            self.metrics.histogram_time_per_output_token.labels(
+                **self.labels).observe(tpot)
         for e2e in stats.time_e2e_requests:
-            histogram_e2e_request_latency.observe(labels, e2e)
+            self.metrics.histogram_e2e_request_latency.labels(
+                **self.labels).observe(e2e)

     def _log_prometheus_interval(self, prompt_throughput: float,
                                  generation_throughput: float) -> None:
@@ -130,8 +172,10 @@ def _log_prometheus_interval(self, prompt_throughput: float,
         # Moving forward, we should use counters like counter_prompt_tokens, counter_generation_tokens
         # Which log raw data and calculate summaries using rate() on the grafana/prometheus side.
         # See https://github.com/vllm-project/vllm/pull/2316#discussion_r1464204666
-        gauge_avg_prompt_throughput.set(labels, prompt_throughput)
-        gauge_avg_generation_throughput.set(labels, generation_throughput)
+        self.metrics.gauge_avg_prompt_throughput.labels(
+            **self.labels).set(prompt_throughput)
+        self.metrics.gauge_avg_generation_throughput.labels(
+            **self.labels).set(generation_throughput)

     def log(self, stats: Stats) -> None:
         """Called by LLMEngine.
diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py
index a217605452e3a..b2f040114a078 100644
--- a/vllm/entrypoints/openai/api_server.py
+++ b/vllm/entrypoints/openai/api_server.py
@@ -6,8 +6,7 @@
 import importlib
 import inspect

-from aioprometheus import MetricsMiddleware
-from aioprometheus.asgi.starlette import metrics
+from prometheus_client import make_asgi_app
 import fastapi
 import uvicorn
 from http import HTTPStatus
@@ -18,7 +17,6 @@
 from vllm.engine.arg_utils import AsyncEngineArgs
 from vllm.engine.async_llm_engine import AsyncLLMEngine
-from vllm.engine.metrics import add_global_metrics_labels
 from vllm.entrypoints.openai.protocol import CompletionRequest, ChatCompletionRequest, ErrorResponse
 from vllm.logger import init_logger
 from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
@@ -141,8 +139,9 @@ def parse_args():
     return parser.parse_args()


-app.add_middleware(MetricsMiddleware)  # Trace HTTP server metrics
-app.add_route("/metrics", metrics)  # Exposes HTTP metrics
+# Add prometheus asgi middleware to route /metrics requests
+metrics_app = make_asgi_app()
+app.mount("/metrics", metrics_app)


 @app.exception_handler(RequestValidationError)
@@ -242,9 +241,6 @@ async def authentication(request: Request, call_next):
     openai_serving_completion = OpenAIServingCompletion(
         engine, served_model, args.lora_modules)

-    # Register labels for metrics
-    add_global_metrics_labels(model_name=engine_args.model)
-
     app.root_path = args.root_path
     uvicorn.run(app,
                 host=args.host,
diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py
index 7c2aa707775ff..f57a2fb775783 100644
--- a/vllm/entrypoints/openai/protocol.py
+++ b/vllm/entrypoints/openai/protocol.py
@@ -63,6 +63,8 @@ class ChatCompletionRequest(BaseModel):
     seed: Optional[int] = None
     stop: Optional[Union[str, List[str]]] = Field(default_factory=list)
     stream: Optional[bool] = False
+    logprobs: Optional[bool] = False
+    top_logprobs: Optional[int] = None
     presence_penalty: Optional[float] = 0.0
     frequency_penalty: Optional[float] = 0.0
     logit_bias: Optional[Dict[str, float]] = None
@@ -84,6 +86,8 @@ class ChatCompletionRequest(BaseModel):
     length_penalty: Optional[float] = 1.0

     def to_sampling_params(self) -> SamplingParams:
+        if self.logprobs and not self.top_logprobs:
+            raise ValueError("Top logprobs must be set when logprobs is.")
         return SamplingParams(
             n=self.n,
             presence_penalty=self.presence_penalty,
@@ -96,6 +100,8 @@ def to_sampling_params(self) -> SamplingParams:
             stop=self.stop,
             stop_token_ids=self.stop_token_ids,
             max_tokens=self.max_tokens,
+            logprobs=self.top_logprobs if self.logprobs else None,
+            prompt_logprobs=self.top_logprobs if self.echo else None,
             best_of=self.best_of,
             top_k=self.top_k,
             ignore_eos=self.ignore_eos,
@@ -216,6 +222,7 @@ class ChatMessage(BaseModel):
 class ChatCompletionResponseChoice(BaseModel):
     index: int
     message: ChatMessage
+    logprobs: Optional[LogProbs] = None
     finish_reason: Optional[Literal["stop", "length"]] = None


@@ -236,6 +243,7 @@ class DeltaMessage(BaseModel):
 class ChatCompletionResponseStreamChoice(BaseModel):
     index: int
     delta: DeltaMessage
+    logprobs: Optional[LogProbs] = None
     finish_reason: Optional[Literal["stop", "length"]] = None
diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py
index 850797ae4b9b6..dd152583c2329 100644
--- a/vllm/entrypoints/openai/serving_chat.py
+++ b/vllm/entrypoints/openai/serving_chat.py
@@ -101,7 +101,10 @@ async def chat_completion_stream_generator(
         role = self.get_chat_request_role(request)
         for i in range(request.n):
             choice_data = ChatCompletionResponseStreamChoice(
-                index=i, delta=DeltaMessage(role=role), finish_reason=None)
+                index=i,
+                delta=DeltaMessage(role=role),
+                logprobs=None,
+                finish_reason=None)
             chunk = ChatCompletionStreamResponse(id=request_id,
                                                  object=chunk_object_type,
                                                  created=created_time,
@@ -118,6 +121,7 @@ async def chat_completion_stream_generator(
                     "content") and request.messages[-1].get(
                         "role") == role:
                 last_msg_content = request.messages[-1]["content"]
+
             if last_msg_content:
                 for i in range(request.n):
                     choice_data = ChatCompletionResponseStreamChoice(
@@ -129,6 +133,7 @@ async def chat_completion_stream_generator(
                         object=chunk_object_type,
                         created=created_time,
                         choices=[choice_data],
+                        logprobs=None,
                         model=model_name)
                     data = chunk.model_dump_json(exclude_unset=True)
                     yield f"data: {data}\n\n"
@@ -145,15 +150,29 @@ async def chat_completion_stream_generator(
                 if finish_reason_sent[i]:
                     continue

+                delta_token_ids = output.token_ids[previous_num_tokens[i]:]
+                top_logprobs = output.logprobs[
+                    previous_num_tokens[i]:] if output.logprobs else None
+
+                if request.logprobs:
+                    logprobs = self._create_logprobs(
+                        token_ids=delta_token_ids,
+                        top_logprobs=top_logprobs,
+                        num_output_top_logprobs=request.logprobs,
+                        initial_text_offset=len(previous_texts[i]),
+                    )
+                else:
+                    logprobs = None
+
                 delta_text = output.text[len(previous_texts[i]):]
                 previous_texts[i] = output.text
                 previous_num_tokens[i] = len(output.token_ids)
-
                 if output.finish_reason is None:
                     # Send token-by-token response for each request.n
                     choice_data = ChatCompletionResponseStreamChoice(
                         index=i,
                         delta=DeltaMessage(content=delta_text),
+                        logprobs=logprobs,
                         finish_reason=None)
                     chunk = ChatCompletionStreamResponse(
                         id=request_id,
@@ -174,6 +193,7 @@ async def chat_completion_stream_generator(
                     choice_data = ChatCompletionResponseStreamChoice(
                         index=i,
                         delta=DeltaMessage(content=delta_text),
+                        logprobs=logprobs,
                         finish_reason=output.finish_reason)
                     chunk = ChatCompletionStreamResponse(
                         id=request_id,
@@ -208,11 +228,25 @@ async def chat_completion_full_generator(
         assert final_res is not None

         choices = []
+
         role = self.get_chat_request_role(request)
         for output in final_res.outputs:
+            token_ids = output.token_ids
+            top_logprobs = output.logprobs
+
+            if request.logprobs:
+                logprobs = self._create_logprobs(
+                    token_ids=token_ids,
+                    top_logprobs=top_logprobs,
+                    num_output_top_logprobs=request.logprobs,
+                )
+            else:
+                logprobs = None
+
             choice_data = ChatCompletionResponseChoice(
                 index=output.index,
                 message=ChatMessage(role=role, content=output.text),
+                logprobs=logprobs,
                 finish_reason=output.finish_reason,
             )
             choices.append(choice_data)
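The snippet below is a minimal standalone sketch, not part of the patch, of the prometheus_client pattern this diff adopts: each metric is declared once with labelnames, updated through a labelled child (as the new StatLogger does), and exposed by mounting make_asgi_app(), mirroring the api_server.py change. The demo_* metric names and the label value are invented for illustration.

```python
# Standalone sketch of the labelled-metric pattern used by the patch.
# The demo_* names and label values are illustrative, not part of vLLM.
from fastapi import FastAPI
from prometheus_client import Counter, Gauge, make_asgi_app

app = FastAPI()

# Declare each metric once, parameterized by its label names.
demo_prompt_tokens = Counter(
    name="demo:prompt_tokens_total",
    documentation="Number of prefill tokens processed.",
    labelnames=["model_name"])
demo_gpu_cache_usage = Gauge(
    name="demo:gpu_cache_usage_perc",
    documentation="GPU KV-cache usage. 1 means 100 percent usage.",
    labelnames=["model_name"])

# Update metrics through a labelled child, as StatLogger does via
# metric.labels(**self.labels).
labels = {"model_name": "facebook/opt-125m"}
demo_prompt_tokens.labels(**labels).inc(128)
demo_gpu_cache_usage.labels(**labels).set(0.42)

# Reading a counter back through the internal child value is what the
# updated tests/metrics/test_metrics.py relies on.
assert demo_prompt_tokens.labels(**labels)._value.get() == 128

# Expose everything on /metrics by mounting the prometheus_client ASGI app.
app.mount("/metrics", make_asgi_app())
```

With the server running, a GET on /metrics returns the Prometheus text exposition format, which replaces the aioprometheus middleware and route removed above.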