Commit e7774f0

Lint
1 parent e7b7fc7

1 file changed: +17 -19 lines

llama_cpp/server/app.py

@@ -1,49 +1,47 @@
 from __future__ import annotations
 
 import os
+import contextlib
 import json
 import typing
-import contextlib
-
-from threading import Lock
 from functools import partial
-from typing import Iterator, List, Optional, Union, Dict
+from threading import Lock
 
-import llama_cpp
+from typing import Dict, Iterator, List, Optional, Union
 
 import anyio
 from anyio.streams.memory import MemoryObjectSendStream
-from starlette.concurrency import run_in_threadpool, iterate_in_threadpool
-from fastapi import Depends, FastAPI, APIRouter, Request, HTTPException, status, Body
+from fastapi import APIRouter, Body, Depends, FastAPI, HTTPException, Request, status
 from fastapi.middleware import Middleware
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.security import HTTPBearer
 from sse_starlette.sse import EventSourceResponse
-from starlette_context.plugins import RequestIdPlugin  # type: ignore
+from starlette.concurrency import iterate_in_threadpool, run_in_threadpool
 from starlette_context.middleware import RawContextMiddleware
-
+from starlette_context.plugins import RequestIdPlugin  # type: ignore
+
+import llama_cpp
+from llama_cpp.server.errors import RouteErrorHandler
 from llama_cpp.server.model import (
     LlamaProxy,
 )
 from llama_cpp.server.settings import (
     ConfigFileSettings,
-    Settings,
     ModelSettings,
     ServerSettings,
+    Settings,
 )
 from llama_cpp.server.types import (
+    CreateChatCompletionRequest,
     CreateCompletionRequest,
     CreateEmbeddingRequest,
-    CreateChatCompletionRequest,
+    DetokenizeInputRequest,
+    DetokenizeInputResponse,
     ModelList,
+    TokenizeInputCountResponse,
     TokenizeInputRequest,
     TokenizeInputResponse,
-    TokenizeInputCountResponse,
-    DetokenizeInputRequest,
-    DetokenizeInputResponse,
 )
-from llama_cpp.server.errors import RouteErrorHandler
-
 
 router = APIRouter(route_class=RouteErrorHandler)
 
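For context, the import reshuffle above matches the conventional three-group ordering that sorters such as isort (or Ruff's import rules) enforce; the commit message says only "Lint", so naming the tool is an assumption. A minimal sketch of the convention:

# Group 1: standard-library imports, alphabetized within the group.
import contextlib
import json

# Group 2: third-party packages.
import anyio
from fastapi import APIRouter

# Group 3: first-party code (here, llama_cpp itself).
import llama_cpp
from llama_cpp.server.errors import RouteErrorHandler

# Names inside a "from" import are sorted as well:
from typing import Dict, Iterator, List, Optional, Union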
@@ -150,7 +148,7 @@ def create_app(
     set_llama_proxy(model_settings=model_settings)
 
     if server_settings.disable_ping_events:
-        set_ping_message_factory(lambda: bytes())
+        set_ping_message_factory(lambda: b"")
 
     return app
 
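This hunk is behavior-preserving: `bytes()` with no arguments and the literal `b""` construct the same empty bytes object, and linters generally prefer the literal form. A quick self-contained check (my illustration, not part of the commit):

# bytes() == b"": both are the empty bytes object, so the replaced
# lambda returns exactly the same ping payload as before.
assert bytes() == b""

def ping_message_factory() -> bytes:
    # Post-change form: an empty payload suppresses the SSE ping body.
    return b""

assert ping_message_factory() == bytes()  # old and new forms agree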
@@ -248,7 +246,7 @@ async def authenticate(
                 "schema": {
                     "type": "string",
                     "title": "Server Side Streaming response, when stream=True. "
-                    + "See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format",  # noqa: E501
+                    + "See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format",
                     "example": """data: {... see CreateCompletionResponse ...} \\n\\n data: ... \\n\\n ... data: [DONE]""",
                 }
             },
@@ -386,7 +384,7 @@ async def create_embedding(
                 "schema": {
                     "type": "string",
                     "title": "Server Side Streaming response, when stream=True"
-                    + "See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format",  # noqa: E501
+                    + "See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format",
                     "example": """data: {... see CreateChatCompletionResponse ...} \\n\\n data: ... \\n\\n ... data: [DONE]""",
                 }
             },

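The last two hunks change nothing but a `# noqa: E501` trailer. `E501` is the pycodestyle/flake8 line-too-long check, and a trailing `# noqa: E501` suppresses it for that single line; dropping the marker suggests the project's lint configuration no longer flags these long URLs (an inference; the commit itself doesn't say). For illustration:

# With the marker, flake8-compatible linters skip the length check on
# this one line; without it, the line must fit the configured limit.
SSE_FORMAT_DOCS = "https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format"  # noqa: E501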