Skip to content

Commit a55649c

Browse files
authored
Lint
1 parent d980fa9 commit a55649c

File tree

1 file changed

+30
-33
lines changed

1 file changed

+30
-33
lines changed

llama_cpp/llama.py

Lines changed: 30 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,59 +1,56 @@
11
from __future__ import annotations
22

3+
import contextlib
4+
import ctypes
5+
import fnmatch
6+
import json
7+
import multiprocessing
38
import os
49
import sys
5-
import uuid
610
import time
7-
import json
8-
import ctypes
911
import typing
10-
import fnmatch
12+
import uuid
1113
import warnings
12-
import contextlib
13-
import multiprocessing
14+
from collections import deque
15+
from pathlib import Path
1416

1517
from typing import (
1618
Any,
19+
Callable,
20+
Deque,
21+
Dict,
22+
Generator,
23+
Iterator,
1724
List,
1825
Literal,
1926
Optional,
20-
Union,
21-
Generator,
2227
Sequence,
23-
Iterator,
24-
Deque,
25-
Callable,
26-
Dict,
27-
)
28-
from collections import deque
29-
from pathlib import Path
30-
31-
32-
from .llama_types import *
33-
from .llama_grammar import LlamaGrammar
34-
from .llama_cache import (
35-
BaseLlamaCache, # type: ignore
28+
Union,
3629
)
37-
from .llama_tokenizer import BaseLlamaTokenizer, LlamaTokenizer
38-
import llama_cpp.llama_cpp as llama_cpp
39-
import llama_cpp.llama_chat_format as llama_chat_format
40-
41-
from llama_cpp.llama_speculative import LlamaDraftModel
4230

4331
import numpy as np
4432
import numpy.typing as npt
4533

34+
from llama_cpp import llama_chat_format, llama_cpp
35+
from llama_cpp.llama_speculative import LlamaDraftModel
36+
4637
from ._internals import (
47-
_LlamaModel, # type: ignore
48-
_LlamaContext, # type: ignore
4938
_LlamaBatch, # type: ignore
50-
_LlamaTokenDataArray, # type: ignore
51-
_LlamaSamplingParams, # type: ignore
39+
_LlamaContext, # type: ignore
40+
_LlamaModel, # type: ignore
5241
_LlamaSamplingContext, # type: ignore
42+
_LlamaSamplingParams, # type: ignore
43+
_LlamaTokenDataArray, # type: ignore
5344
_normalize_embedding, # type: ignore
5445
)
5546
from ._logger import set_verbose
5647
from ._utils import suppress_stdout_stderr
48+
from .llama_cache import (
49+
BaseLlamaCache, # type: ignore
50+
)
51+
from .llama_grammar import LlamaGrammar
52+
from .llama_tokenizer import BaseLlamaTokenizer, LlamaTokenizer
53+
from .llama_types import *
5754

5855

5956
class Llama:
@@ -1036,7 +1033,7 @@ def _create_completion(
10361033
assert self._ctx is not None
10371034
assert suffix is None or suffix.__class__ is str
10381035

1039-
completion_id: str = f"cmpl-{str(uuid.uuid4())}"
1036+
completion_id: str = f"cmpl-{uuid.uuid4()!s}"
10401037
created: int = int(time.time())
10411038
bos_token_id: int = self.token_bos()
10421039
cls_token_id: int = self._model.token_cls()
@@ -2127,7 +2124,7 @@ def from_pretrained(
21272124
local_dir_use_symlinks: Union[bool, Literal["auto"]] = "auto",
21282125
cache_dir: Optional[Union[str, os.PathLike[str]]] = None,
21292126
**kwargs: Any,
2130-
) -> "Llama":
2127+
) -> Llama:
21312128
"""Create a Llama model from a pretrained model name or path.
21322129
This method requires the huggingface-hub package.
21332130
You can install it with `pip install huggingface-hub`.
@@ -2142,7 +2139,7 @@ def from_pretrained(
21422139
Returns:
21432140
A Llama model."""
21442141
try:
2145-
from huggingface_hub import hf_hub_download, HfFileSystem
2142+
from huggingface_hub import HfFileSystem, hf_hub_download
21462143
from huggingface_hub.utils import validate_repo_id
21472144
except ImportError:
21482145
raise ImportError(

0 commit comments

Comments (0)