Commit ddf51d4

(fix) litellm utils test
1 parent 91010cd commit ddf51d4

File tree

1 file changed (+65, -48 lines)

litellm/__init__.py

Lines changed: 65 additions & 48 deletions
@@ -135,41 +135,41 @@
 require_auth_for_metrics_endpoint: Optional[bool] = False
 argilla_batch_size: Optional[int] = None
 datadog_use_v1: Optional[bool] = False  # if you want to use v1 datadog logged payload.
-gcs_pub_sub_use_v1: Optional[
-    bool
-] = False  # if you want to use v1 gcs pubsub logged payload
-generic_api_use_v1: Optional[
-    bool
-] = False  # if you want to use v1 generic api logged payload
+gcs_pub_sub_use_v1: Optional[bool] = (
+    False  # if you want to use v1 gcs pubsub logged payload
+)
+generic_api_use_v1: Optional[bool] = (
+    False  # if you want to use v1 generic api logged payload
+)
 argilla_transformation_object: Optional[Dict[str, Any]] = None
-_async_input_callback: List[
-    Union[str, Callable, CustomLogger]
-] = []  # internal variable - async custom callbacks are routed here.
-_async_success_callback: List[
-    Union[str, Callable, CustomLogger]
-] = []  # internal variable - async custom callbacks are routed here.
-_async_failure_callback: List[
-    Union[str, Callable, CustomLogger]
-] = []  # internal variable - async custom callbacks are routed here.
+_async_input_callback: List[Union[str, Callable, CustomLogger]] = (
+    []
+)  # internal variable - async custom callbacks are routed here.
+_async_success_callback: List[Union[str, Callable, CustomLogger]] = (
+    []
+)  # internal variable - async custom callbacks are routed here.
+_async_failure_callback: List[Union[str, Callable, CustomLogger]] = (
+    []
+)  # internal variable - async custom callbacks are routed here.
 pre_call_rules: List[Callable] = []
 post_call_rules: List[Callable] = []
 turn_off_message_logging: Optional[bool] = False
 log_raw_request_response: bool = False
 redact_messages_in_exceptions: Optional[bool] = False
 redact_user_api_key_info: Optional[bool] = False
 filter_invalid_headers: Optional[bool] = False
-add_user_information_to_llm_headers: Optional[
-    bool
-] = None  # adds user_id, team_id, token hash (params from StandardLoggingMetadata) to request headers
+add_user_information_to_llm_headers: Optional[bool] = (
+    None  # adds user_id, team_id, token hash (params from StandardLoggingMetadata) to request headers
+)
 store_audit_logs = False  # Enterprise feature, allow users to see audit logs
 ### end of callbacks #############

-email: Optional[
-    str
-] = None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
-token: Optional[
-    str
-] = None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+email: Optional[str] = (
+    None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
+token: Optional[str] = (
+    None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
 telemetry = True
 max_tokens: int = DEFAULT_MAX_TOKENS  # OpenAI Defaults
 drop_params = bool(os.getenv("LITELLM_DROP_PARAMS", False))
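Note on the `_async_*_callback` lists reflowed above: these are internal lists that litellm fills when you register an async custom callback on the public settings. A minimal sketch of such a logger, based on the documented CustomLogger interface (treat the exact import path and method signature as assumptions if your version differs):

import litellm
from litellm.integrations.custom_logger import CustomLogger


class MyAsyncLogger(CustomLogger):
    # called after a successful async call; instances registered below
    # get routed into litellm._async_success_callback internally
    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        print("model:", kwargs.get("model"), "duration:", end_time - start_time)


litellm.callbacks = [MyAsyncLogger()]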
@@ -253,20 +253,24 @@
 enable_caching_on_provider_specific_optional_params: bool = (
     False  # feature-flag for caching on optional params - e.g. 'top_k'
 )
-caching: bool = False  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
-caching_with_models: bool = False  # # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
-cache: Optional[
-    Cache
-] = None  # cache object <- use this - https://docs.litellm.ai/docs/caching
+caching: bool = (
+    False  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
+caching_with_models: bool = (
+    False  # # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
+cache: Optional[Cache] = (
+    None  # cache object <- use this - https://docs.litellm.ai/docs/caching
+)
 default_in_memory_ttl: Optional[float] = None
 default_redis_ttl: Optional[float] = None
 default_redis_batch_cache_expiry: Optional[float] = None
 model_alias_map: Dict[str, str] = {}
 model_group_alias_map: Dict[str, str] = {}
 max_budget: float = 0.0  # set the max budget across all providers
-budget_duration: Optional[
-    str
-] = None  # proxy only - resets budget after fixed duration. You can set duration as seconds ("30s"), minutes ("30m"), hours ("30h"), days ("30d").
+budget_duration: Optional[str] = (
+    None  # proxy only - resets budget after fixed duration. You can set duration as seconds ("30s"), minutes ("30m"), hours ("30h"), days ("30d").
+)
 default_soft_budget: float = (
     DEFAULT_SOFT_BUDGET  # by default all litellm proxy keys have a soft budget of 50.0
 )
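The `cache`, `max_budget`, and `budget_duration` attributes touched in this hunk are the user-facing knobs. A rough usage sketch, assuming `Cache` is re-exported at the package root (as the `Optional[Cache]` annotation here suggests) and that constructing it without arguments gives an in-memory cache:

import litellm
from litellm import Cache

litellm.cache = Cache()  # cache object - https://docs.litellm.ai/docs/caching
litellm.max_budget = 10.0        # max spend across all providers
litellm.budget_duration = "30d"  # proxy only - reset the budget every 30 days

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
    caching=True,  # opt this call into the configured cache
)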
@@ -275,11 +279,15 @@

 _current_cost = 0.0  # private variable, used if max budget is set
 error_logs: Dict = {}
-add_function_to_prompt: bool = False  # if function calling not supported by api, append function call details to system prompt
+add_function_to_prompt: bool = (
+    False  # if function calling not supported by api, append function call details to system prompt
+)
 client_session: Optional[httpx.Client] = None
 aclient_session: Optional[httpx.AsyncClient] = None
 model_fallbacks: Optional[List] = None  # Deprecated for 'litellm.fallbacks'
-model_cost_map_url: str = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
+model_cost_map_url: str = (
+    "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
+)
 suppress_debug_info = False
 dynamodb_table_name: Optional[str] = None
 s3_callback_params: Optional[Dict] = None
@@ -306,9 +314,13 @@


 ######## Networking Settings ########
-use_aiohttp_transport: bool = True  # Older variable, aiohttp is now the default. use disable_aiohttp_transport instead.
+use_aiohttp_transport: bool = (
+    True  # Older variable, aiohttp is now the default. use disable_aiohttp_transport instead.
+)
 disable_aiohttp_transport: bool = False  # Set this to true to use httpx instead
-force_ipv4: bool = False  # when True, litellm will force ipv4 for all LLM requests. Some users have seen httpx ConnectionError when using ipv6.
+force_ipv4: bool = (
+    False  # when True, litellm will force ipv4 for all LLM requests. Some users have seen httpx ConnectionError when using ipv6.
+)
 module_level_aclient = AsyncHTTPHandler(
     timeout=request_timeout, client_alias="module level aclient"
 )
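The networking flags reflowed above are plain module-level toggles; set them before the first request. A small illustration (the values here are examples, not recommendations):

import litellm

# use httpx instead of the default aiohttp transport
litellm.disable_aiohttp_transport = True

# force IPv4 if IPv6 resolution causes httpx ConnectionError in your environment
litellm.force_ipv4 = True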
@@ -322,13 +334,13 @@
 context_window_fallbacks: Optional[List] = None
 content_policy_fallbacks: Optional[List] = None
 allowed_fails: int = 3
-num_retries_per_request: Optional[
-    int
-] = None  # for the request overall (incl. fallbacks + model retries)
+num_retries_per_request: Optional[int] = (
+    None  # for the request overall (incl. fallbacks + model retries)
+)
 ####### SECRET MANAGERS #####################
-secret_manager_client: Optional[
-    Any
-] = None  # list of instantiated key management clients - e.g. azure kv, infisical, etc.
+secret_manager_client: Optional[Any] = (
+    None  # list of instantiated key management clients - e.g. azure kv, infisical, etc.
+)
 _google_kms_resource_name: Optional[str] = None
 _key_management_system: Optional[KeyManagementSystem] = None
 _key_management_settings: KeyManagementSettings = KeyManagementSettings()
@@ -456,6 +468,7 @@ def identify(event_details):
 nscale_models: List = []
 nebius_models: List = []
 nebius_embedding_models: List = []
+deepgram_models: List = []


 def is_bedrock_pricing_only_model(key: str) -> bool:
@@ -627,6 +640,8 @@ def add_known_models():
             snowflake_models.append(key)
         elif value.get("litellm_provider") == "featherless_ai":
             featherless_ai_models.append(key)
+        elif value.get("litellm_provider") == "deepgram":
+            deepgram_models.append(key)


 add_known_models()
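The new `deepgram` branch follows the same pattern as the other providers in `add_known_models()`: walk the model-cost map and bucket each model name by its `litellm_provider` field. A simplified, standalone sketch of that bucketing logic (the real function appends to per-provider module lists such as `deepgram_models`; the miniature map below is hypothetical):

from collections import defaultdict
from typing import Dict, List

# hypothetical miniature of litellm's model-cost map
model_cost: Dict[str, dict] = {
    "deepgram/nova-2": {"litellm_provider": "deepgram"},
    "gpt-4o": {"litellm_provider": "openai"},
}

models_by_provider: Dict[str, List[str]] = defaultdict(list)
for key, value in model_cost.items():
    provider = value.get("litellm_provider")
    if provider:  # e.g. "deepgram" -> ["deepgram/nova-2"]
        models_by_provider[provider].append(key)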
@@ -708,6 +723,7 @@ def add_known_models():
     + llama_models
     + featherless_ai_models
     + nscale_models
+    + deepgram_models
 )

 model_list_set = set(model_list)
@@ -771,6 +787,7 @@ def add_known_models():
     "meta_llama": llama_models,
     "nscale": nscale_models,
     "featherless_ai": featherless_ai_models,
+    "deepgram": deepgram_models,
 }

 # mapping for those models which have larger equivalents
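With the provider entry added above, deepgram models become discoverable like any other provider's. A hedged lookup sketch, assuming the dict shown in this hunk is the one exposed as `litellm.models_by_provider`:

import litellm

# list every model litellm knows for the provider added in this commit
for model in litellm.models_by_provider.get("deepgram", []):
    print(model)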
@@ -1141,12 +1158,12 @@ def add_known_models():
 from .types.utils import GenericStreamingChunk

 custom_provider_map: List[CustomLLMItem] = []
-_custom_providers: List[
-    str
-] = []  # internal helper util, used to track names of custom providers
-disable_hf_tokenizer_download: Optional[
-    bool
-] = None  # disable huggingface tokenizer download. Defaults to openai clk100
+_custom_providers: List[str] = (
+    []
+)  # internal helper util, used to track names of custom providers
+disable_hf_tokenizer_download: Optional[bool] = (
+    None  # disable huggingface tokenizer download. Defaults to openai clk100
+)
 global_disable_no_log_param: bool = False

 ### PASSTHROUGH ###
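`custom_provider_map`, reformatted in the last hunk, is where external handlers get wired in, and `_custom_providers` is the internal list that tracks their names. A minimal registration sketch following the documented CustomLLM pattern (the `mock_response` keeps it runnable without credentials; treat exact import paths and the provider name as assumptions for your version):

import litellm
from litellm import CustomLLM, completion


class MyCustomLLM(CustomLLM):
    def completion(self, *args, **kwargs) -> litellm.ModelResponse:
        # return a canned response so the sketch runs without an API key
        return completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "ping"}],
            mock_response="pong",
        )


# provider names registered this way are tracked internally via _custom_providers
litellm.custom_provider_map = [
    {"provider": "my-custom-llm", "custom_handler": MyCustomLLM()}
]

resp = completion(
    model="my-custom-llm/anything",
    messages=[{"role": "user", "content": "hi"}],
)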
