Fix vllm api_server v1/models error (#12867)
hzjane authored Feb 21, 2025
1 parent 8077850 commit 3ea5389
Showing 1 changed file with 7 additions and 3 deletions.
@@ -278,6 +278,10 @@ def base(request: Request) -> OpenAIServing:
     return tokenization(request)
 
 
+def models(request: Request) -> OpenAIServingModels:
+    return request.app.state.openai_serving_models
+
+
 def chat(request: Request) -> Optional[OpenAIServingChat]:
     return request.app.state.openai_serving_chat
 
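Note (not part of the diff): the new models() accessor mirrors the existing base(), chat(), and tokenization() helpers in that it pulls a pre-built handler off the FastAPI application state, so /v1/models no longer depends on whichever serving handler base() happens to return. The standalone sketch below only illustrates that app.state pattern; DummyServingModels is a hypothetical stand-in for OpenAIServingModels, and the wiring shown is an assumption, not code from this repository.

    # Illustrative sketch only: attach a serving-models handler to app.state
    # at startup and retrieve it per request, as the accessor in the diff does.
    from fastapi import FastAPI, Request
    from fastapi.responses import JSONResponse

    app = FastAPI()

    class DummyServingModels:  # hypothetical stand-in for OpenAIServingModels
        async def show_available_models(self):
            return {"object": "list", "data": [{"id": "my-model"}]}

    # Done once at startup; every request can then reach the handler
    # through request.app.state, exactly like the accessor in the diff.
    app.state.openai_serving_models = DummyServingModels()

    def models(request: Request) -> DummyServingModels:
        return request.app.state.openai_serving_models

    @app.get("/v1/models")
    async def show_available_models(raw_request: Request):
        handler = models(raw_request)
        result = await handler.show_available_models()
        return JSONResponse(content=result)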
@@ -345,10 +349,10 @@ async def detokenize(request: DetokenizeRequest, raw_request: Request):
 
 @router.get("/v1/models")
 async def show_available_models(raw_request: Request):
-    handler = base(raw_request)
+    handler = models(raw_request)
 
-    models = await handler.show_available_models()
-    return JSONResponse(content=models.model_dump())
+    models_ = await handler.show_available_models()
+    return JSONResponse(content=models_.model_dump())
 
 
 @router.get("/version")
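Once the server is running, the fixed endpoint can be exercised with any OpenAI-compatible client. A minimal sketch, assuming the server listens on http://localhost:8000 and the openai Python package (v1+) is installed; the base_url and api_key values are placeholders:

    # List models through the OpenAI-compatible /v1/models endpoint.
    from openai import OpenAI

    client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")  # placeholder credentials
    for model in client.models.list():
        print(model.id)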
