@@ -135,41 +135,41 @@
 require_auth_for_metrics_endpoint: Optional[bool] = False
 argilla_batch_size: Optional[int] = None
 datadog_use_v1: Optional[bool] = False  # if you want to use v1 datadog logged payload.
-gcs_pub_sub_use_v1: Optional[
-    bool
-] = False  # if you want to use v1 gcs pubsub logged payload
-generic_api_use_v1: Optional[
-    bool
-] = False  # if you want to use v1 generic api logged payload
+gcs_pub_sub_use_v1: Optional[bool] = (
+    False  # if you want to use v1 gcs pubsub logged payload
+)
+generic_api_use_v1: Optional[bool] = (
+    False  # if you want to use v1 generic api logged payload
+)
 argilla_transformation_object: Optional[Dict[str, Any]] = None
-_async_input_callback: List[
-    Union[str, Callable, CustomLogger]
-] = []  # internal variable - async custom callbacks are routed here.
-_async_success_callback: List[
-    Union[str, Callable, CustomLogger]
-] = []  # internal variable - async custom callbacks are routed here.
-_async_failure_callback: List[
-    Union[str, Callable, CustomLogger]
-] = []  # internal variable - async custom callbacks are routed here.
+_async_input_callback: List[Union[str, Callable, CustomLogger]] = (
+    []
+)  # internal variable - async custom callbacks are routed here.
+_async_success_callback: List[Union[str, Callable, CustomLogger]] = (
+    []
+)  # internal variable - async custom callbacks are routed here.
+_async_failure_callback: List[Union[str, Callable, CustomLogger]] = (
+    []
+)  # internal variable - async custom callbacks are routed here.
 pre_call_rules: List[Callable] = []
 post_call_rules: List[Callable] = []
 turn_off_message_logging: Optional[bool] = False
 log_raw_request_response: bool = False
 redact_messages_in_exceptions: Optional[bool] = False
 redact_user_api_key_info: Optional[bool] = False
 filter_invalid_headers: Optional[bool] = False
-add_user_information_to_llm_headers: Optional[
-    bool
-] = None  # adds user_id, team_id, token hash (params from StandardLoggingMetadata) to request headers
+add_user_information_to_llm_headers: Optional[bool] = (
+    None  # adds user_id, team_id, token hash (params from StandardLoggingMetadata) to request headers
+)
 store_audit_logs = False  # Enterprise feature, allow users to see audit logs
 ### end of callbacks #############

-email: Optional[
-    str
-] = None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
-token: Optional[
-    str
-] = None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+email: Optional[str] = (
+    None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
+token: Optional[str] = (
+    None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
 telemetry = True
 max_tokens: int = DEFAULT_MAX_TOKENS  # OpenAI Defaults
 drop_params = bool(os.getenv("LITELLM_DROP_PARAMS", False))
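These are module-level feature flags: user code configures litellm by assigning to them before making calls. A minimal usage sketch with flags from this hunk:

```python
import litellm

# redact prompt/response content from logging callbacks and from raised exceptions
litellm.turn_off_message_logging = True
litellm.redact_messages_in_exceptions = True

# silently drop OpenAI-style params a provider doesn't support
litellm.drop_params = True
```

Note the `drop_params` default: `bool(os.getenv("LITELLM_DROP_PARAMS", False))` is truthy for any non-empty string, so even `LITELLM_DROP_PARAMS=False` in the environment enables it.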
@@ -253,20 +253,24 @@
 enable_caching_on_provider_specific_optional_params: bool = (
     False  # feature-flag for caching on optional params - e.g. 'top_k'
 )
-caching: bool = False  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
-caching_with_models: bool = False  # # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
-cache: Optional[
-    Cache
-] = None  # cache object <- use this - https://docs.litellm.ai/docs/caching
+caching: bool = (
+    False  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
+caching_with_models: bool = (
+    False  # # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
+cache: Optional[Cache] = (
+    None  # cache object <- use this - https://docs.litellm.ai/docs/caching
+)
 default_in_memory_ttl: Optional[float] = None
 default_redis_ttl: Optional[float] = None
 default_redis_batch_cache_expiry: Optional[float] = None
 model_alias_map: Dict[str, str] = {}
 model_group_alias_map: Dict[str, str] = {}
 max_budget: float = 0.0  # set the max budget across all providers
-budget_duration: Optional[
-    str
-] = None  # proxy only - resets budget after fixed duration. You can set duration as seconds ("30s"), minutes ("30m"), hours ("30h"), days ("30d").
+budget_duration: Optional[str] = (
+    None  # proxy only - resets budget after fixed duration. You can set duration as seconds ("30s"), minutes ("30m"), hours ("30h"), days ("30d").
+)
 default_soft_budget: float = (
     DEFAULT_SOFT_BUDGET  # by default all litellm proxy keys have a soft budget of 50.0
 )
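`cache` is the switch that enables response caching; `Cache` is already in scope at module level (it appears in the annotation above). A minimal sketch, assuming the default in-memory backend (Redis and other backends take extra constructor kwargs, see https://docs.litellm.ai/docs/caching):

```python
import litellm

litellm.cache = litellm.Cache()  # identical completion() calls now return cached responses
litellm.default_in_memory_ttl = 120.0  # optional: expire in-memory entries after 120s
```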
@@ -275,11 +279,15 @@

 _current_cost = 0.0  # private variable, used if max budget is set
 error_logs: Dict = {}
-add_function_to_prompt: bool = False  # if function calling not supported by api, append function call details to system prompt
+add_function_to_prompt: bool = (
+    False  # if function calling not supported by api, append function call details to system prompt
+)
 client_session: Optional[httpx.Client] = None
 aclient_session: Optional[httpx.AsyncClient] = None
 model_fallbacks: Optional[List] = None  # Deprecated for 'litellm.fallbacks'
-model_cost_map_url: str = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
+model_cost_map_url: str = (
+    "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
+)
 suppress_debug_info = False
 dynamodb_table_name: Optional[str] = None
 s3_callback_params: Optional[Dict] = None
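`model_cost_map_url` points at the JSON registry that backs `litellm.model_cost`, the dict that `add_known_models()` (further down in this diff) iterates over. A small sketch of reading it (`model_cost` is the real module attribute; the key is illustrative):

```python
import litellm

# per-token pricing and limits come straight from the downloaded cost map
info = litellm.model_cost.get("gpt-4o", {})
print(info.get("max_input_tokens"), info.get("input_cost_per_token"))
```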
@@ -306,9 +314,13 @@


 ######## Networking Settings ########
-use_aiohttp_transport: bool = True  # Older variable, aiohttp is now the default. use disable_aiohttp_transport instead.
+use_aiohttp_transport: bool = (
+    True  # Older variable, aiohttp is now the default. use disable_aiohttp_transport instead.
+)
 disable_aiohttp_transport: bool = False  # Set this to true to use httpx instead
-force_ipv4: bool = False  # when True, litellm will force ipv4 for all LLM requests. Some users have seen httpx ConnectionError when using ipv6.
+force_ipv4: bool = (
+    False  # when True, litellm will force ipv4 for all LLM requests. Some users have seen httpx ConnectionError when using ipv6.
+)
 module_level_aclient = AsyncHTTPHandler(
     timeout=request_timeout, client_alias="module level aclient"
 )
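Both transport toggles are runtime escape hatches rather than constructor options; a minimal sketch:

```python
import litellm

litellm.disable_aiohttp_transport = True  # use httpx instead of the default aiohttp transport
litellm.force_ipv4 = True  # work around httpx ConnectionError seen on some IPv6 networks
```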
@@ -322,13 +334,13 @@
 context_window_fallbacks: Optional[List] = None
 content_policy_fallbacks: Optional[List] = None
 allowed_fails: int = 3
-num_retries_per_request: Optional[
-    int
-] = None  # for the request overall (incl. fallbacks + model retries)
+num_retries_per_request: Optional[int] = (
+    None  # for the request overall (incl. fallbacks + model retries)
+)
 ####### SECRET MANAGERS #####################
-secret_manager_client: Optional[
-    Any
-] = None  # list of instantiated key management clients - e.g. azure kv, infisical, etc.
+secret_manager_client: Optional[Any] = (
+    None  # list of instantiated key management clients - e.g. azure kv, infisical, etc.
+)
 _google_kms_resource_name: Optional[str] = None
 _key_management_system: Optional[KeyManagementSystem] = None
 _key_management_settings: KeyManagementSettings = KeyManagementSettings()
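The fallback lists take router-style mappings from a model to its fallback candidates, and `num_retries_per_request` caps attempts across fallbacks and retries combined. A minimal sketch (model names illustrative, format per the proxy's `litellm_settings` docs):

```python
import litellm

# on a context-window error for gpt-4o-mini, retry the request on gpt-4o
litellm.context_window_fallbacks = [{"gpt-4o-mini": ["gpt-4o"]}]
litellm.num_retries_per_request = 3  # overall cap, fallbacks + model retries included
```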
@@ -456,6 +468,7 @@ def identify(event_details):
 nscale_models: List = []
 nebius_models: List = []
 nebius_embedding_models: List = []
+deepgram_models: List = []


 def is_bedrock_pricing_only_model(key: str) -> bool:
@@ -627,6 +640,8 @@ def add_known_models():
             snowflake_models.append(key)
         elif value.get("litellm_provider") == "featherless_ai":
             featherless_ai_models.append(key)
+        elif value.get("litellm_provider") == "deepgram":
+            deepgram_models.append(key)


 add_known_models()
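The new branch keys off the `litellm_provider` field of each cost-map entry, so a registry entry shaped like the following (key and fields illustrative, not an actual excerpt from the JSON) is what routes a model into `deepgram_models`:

```python
# illustrative model_prices_and_context_window.json entry
{
    "deepgram/nova-2": {
        "litellm_provider": "deepgram",
        "mode": "audio_transcription",
    }
}
```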
@@ -708,6 +723,7 @@ def add_known_models():
     + llama_models
     + featherless_ai_models
     + nscale_models
+    + deepgram_models
 )

 model_list_set = set(model_list)
@@ -771,6 +787,7 @@ def add_known_models():
     "meta_llama": llama_models,
     "nscale": nscale_models,
     "featherless_ai": featherless_ai_models,
+    "deepgram": deepgram_models,
 }

 # mapping for those models which have larger equivalents
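With the `deepgram` key in the provider → model-list map (`models_by_provider` in litellm's `__init__.py`, assuming that name from the surrounding file), the new models become discoverable the same way as every other provider's:

```python
import litellm

print(litellm.models_by_provider["deepgram"])  # all models registered under the new provider
```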
@@ -1141,12 +1158,12 @@ def add_known_models():
 from .types.utils import GenericStreamingChunk

 custom_provider_map: List[CustomLLMItem] = []
-_custom_providers: List[
-    str
-] = []  # internal helper util, used to track names of custom providers
-disable_hf_tokenizer_download: Optional[
-    bool
-] = None  # disable huggingface tokenizer download. Defaults to openai clk100
+_custom_providers: List[str] = (
+    []
+)  # internal helper util, used to track names of custom providers
+disable_hf_tokenizer_download: Optional[bool] = (
+    None  # disable huggingface tokenizer download. Defaults to openai clk100
+)
 global_disable_no_log_param: bool = False

 ### PASSTHROUGH ###
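`custom_provider_map` is the public hook for plugging in a user-defined provider: each entry pairs a provider name with a `CustomLLM` handler instance, and the provider name then works as a model prefix. A minimal sketch following the documented pattern (https://docs.litellm.ai/docs/providers/custom_llm_server); the handler here just returns a mocked response:

```python
import litellm
from litellm import CustomLLM, completion

class MyCustomLLM(CustomLLM):
    def completion(self, *args, **kwargs) -> litellm.ModelResponse:
        # stand-in for a real upstream call
        return litellm.completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello world"}],
            mock_response="Hi!",
        )

litellm.custom_provider_map = [
    {"provider": "my-custom-llm", "custom_handler": MyCustomLLM()}
]

resp = completion(
    model="my-custom-llm/my-model",
    messages=[{"role": "user", "content": "Hello world!"}],
)
```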