@@ -13,6 +13,7 @@ import { makeRequestOptionsFromResolvedModel } from "../lib/makeRequestOptions.j
 import type { InferenceProviderMappingEntry, InferenceProviderOrPolicy, InferenceTask, RequestArgs } from "../types.js";
 import { templates } from "./templates.exported.js";
 import { getLogger } from "../lib/logger.js";
+import { HF_ROUTER_AUTO_ENDPOINT } from "../config.js";
 
 export type InferenceSnippetOptions = {
 	streaming?: boolean;
@@ -37,7 +38,7 @@ const CLIENTS: Record<InferenceSnippetLanguage, Client[]> = {
 
 const CLIENTS_AUTO_POLICY: Partial<Record<InferenceSnippetLanguage, Client[]>> = {
	js: ["huggingface.js"],
-	python: ["huggingface_hub"],
+	python: ["huggingface_hub", "openai"],
 };
 
 type InputPreparationFn = (model: ModelDataMinimal, opts?: Record<string, unknown>) => object;
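
This hunk lets Python snippets generated under the `"auto"` provider policy use the `openai` client in addition to `huggingface_hub`. Below is a minimal sketch of how such a policy table might gate client selection; `clientsFor` and the extra client names (`"fetch"`, `"requests"`, `"curl"`) are hypothetical illustrations, not part of this diff:

```ts
// Hypothetical types mirroring the ones used in the diff.
type InferenceSnippetLanguage = "js" | "python" | "sh";
type Client = string;

// Illustrative full client list; only the shape matters here.
const CLIENTS: Record<InferenceSnippetLanguage, Client[]> = {
	js: ["huggingface.js", "fetch"],
	python: ["huggingface_hub", "openai", "requests"],
	sh: ["curl"],
};

// Restricted list applied when the provider is "auto", as in the diff.
const CLIENTS_AUTO_POLICY: Partial<Record<InferenceSnippetLanguage, Client[]>> = {
	js: ["huggingface.js"],
	python: ["huggingface_hub", "openai"], // "openai" is the addition from this hunk
};

// Hypothetical helper: pick which clients to render snippets for.
function clientsFor(language: InferenceSnippetLanguage, provider: string): Client[] {
	// Under "auto", fall back to the restricted policy list when one exists.
	return provider === "auto" ? (CLIENTS_AUTO_POLICY[language] ?? CLIENTS[language]) : CLIENTS[language];
}

console.log(clientsFor("python", "auto")); // ["huggingface_hub", "openai"]
```
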
@@ -179,7 +180,7 @@ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPrepar
 		{
 			accessToken: accessTokenOrPlaceholder,
 			provider,
-			endpointUrl: opts?.endpointUrl,
+			endpointUrl: opts?.endpointUrl ?? (provider === "auto" ? HF_ROUTER_AUTO_ENDPOINT : undefined),
 			...inputs,
 		} as RequestArgs,
 		inferenceProviderMapping,
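
This hunk changes endpoint resolution so that, when the caller supplies no explicit `endpointUrl` and the provider is `"auto"`, generated snippets target the Hugging Face router endpoint imported above. Below is a minimal sketch of that fallback, assuming `HF_ROUTER_AUTO_ENDPOINT` is the router base URL (the concrete value and the `resolveEndpointUrl` helper are illustrative, not the library's API):

```ts
// Illustrative stand-in for the constant imported from "../config.js".
const HF_ROUTER_AUTO_ENDPOINT = "https://router.huggingface.co/v1";

// Hypothetical helper mirroring the `??` fallback added in this hunk.
function resolveEndpointUrl(provider: string, endpointUrl?: string): string | undefined {
	// An explicit endpointUrl always wins; otherwise "auto" routes through the
	// HF router, and any other provider leaves the endpoint undefined as before.
	return endpointUrl ?? (provider === "auto" ? HF_ROUTER_AUTO_ENDPOINT : undefined);
}

console.log(resolveEndpointUrl("auto")); // "https://router.huggingface.co/v1"
console.log(resolveEndpointUrl("auto", "https://my-endpoint")); // "https://my-endpoint"
console.log(resolveEndpointUrl("replicate")); // undefined
```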