Skip to content

Commit 20133cf

Browse files
[Frontend] enable custom logging for the uvicorn server (OpenAI API server) (#18403)
Signed-off-by: François Paupier <francois.paupier@gmail.com> Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com>
1 parent ebb1ec9 commit 20133cf

File tree

2 files changed

+27
-2
lines changed

2 files changed

+27
-2
lines changed

vllm/entrypoints/openai/api_server.py

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
import gc
66
import importlib
77
import inspect
8+
import json
89
import multiprocessing
910
import os
1011
import signal
@@ -16,7 +17,6 @@
1617
from contextlib import asynccontextmanager
1718
from functools import partial
1819
from http import HTTPStatus
19-
from json import JSONDecodeError
2020
from typing import Annotated, Any, Optional
2121

2222
import prometheus_client
@@ -930,7 +930,7 @@ async def invocations(raw_request: Request):
930930
"""
931931
try:
932932
body = await raw_request.json()
933-
except JSONDecodeError as e:
933+
except json.JSONDecodeError as e:
934934
raise HTTPException(status_code=HTTPStatus.BAD_REQUEST.value,
935935
detail=f"JSON decode error: {e}") from e
936936

@@ -1003,6 +1003,18 @@ async def unload_lora_adapter(request: UnloadLoRAAdapterRequest,
10031003
return Response(status_code=200, content=response)
10041004

10051005

1006+
def load_log_config(log_config_file: Optional[str]) -> Optional[dict]:
    """Load a logging configuration dict from a JSON file.

    Args:
        log_config_file: Path to a JSON logging-config file, or a
            falsy value (``None`` / empty string) to skip loading.

    Returns:
        The parsed configuration dict, or ``None`` when no path was
        supplied or the file could not be read or parsed (a warning
        is logged in that case — loading is best-effort by design).
    """
    # Guard clause: nothing to do without a path.
    if not log_config_file:
        return None
    try:
        with open(log_config_file) as handle:
            config = json.load(handle)
    except Exception as e:
        # Best-effort: any read/parse failure is reported, not raised.
        logger.warning("Failed to load log config from file %s: error %s",
                       log_config_file, e)
        return None
    return config
1016+
1017+
10061018
def build_app(args: Namespace) -> FastAPI:
10071019
if args.disable_fastapi_docs:
10081020
app = FastAPI(openapi_url=None,
@@ -1324,6 +1336,11 @@ async def run_server_worker(listen_address,
13241336

13251337
server_index = client_config.get("client_index", 0) if client_config else 0
13261338

1339+
# Load logging config for uvicorn if specified
1340+
log_config = load_log_config(args.log_config_file)
1341+
if log_config is not None:
1342+
uvicorn_kwargs['log_config'] = log_config
1343+
13271344
async with build_async_engine_client(args, client_config) as engine_client:
13281345
app = build_app(args)
13291346

vllm/entrypoints/openai/cli_args.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
from collections.abc import Sequence
1212
from typing import Optional, Union, get_args
1313

14+
import vllm.envs as envs
1415
from vllm.engine.arg_utils import AsyncEngineArgs, optional_type
1516
from vllm.entrypoints.chat_utils import (ChatTemplateContentFormatOption,
1617
validate_chat_template)
@@ -243,6 +244,13 @@ def make_arg_parser(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
243244
" into OpenAI API format, the name register in this plugin can be used "
244245
"in ``--tool-call-parser``.")
245246

247+
parser.add_argument(
248+
"--log-config-file",
249+
type=str,
250+
default=envs.VLLM_LOGGING_CONFIG_PATH,
251+
help="Path to logging config JSON file for both vllm and uvicorn",
252+
)
253+
246254
parser = AsyncEngineArgs.add_cli_args(parser)
247255

248256
parser.add_argument('--max-log-len',

0 commit comments

Comments
 (0)