5 files changed: +27 −415 lines changed
  LocalLab - A lightweight AI inference server for running LLMs locally
  """

- __version__ = "0.4.40"
+ __version__ = "0.4.41"

  # Only import what's necessary initially, lazy-load the rest
  from .logger import get_logger
@@ -47,7 +47,18 @@ def __init__(self):
         self.tokenizer = None
         self.current_model = None
         self._loading = False
-        self._last_use = time.time()
+        self._last_use = time.time()  # Initialize _last_use
+        self.response_cache = {}  # Add cache dictionary
+
+    @property
+    def last_used(self) -> float:
+        """Get the timestamp of last model use"""
+        return self._last_use
+
+    @last_used.setter
+    def last_used(self, value: float):
+        """Set the timestamp of last model use"""
+        self._last_use = value


     def _get_quantization_config(self) -> Optional[Dict[str, Any]]:
         """Get quantization configuration based on settings"""