diff --git a/packages/inference/README.md b/packages/inference/README.md index 0ea60b2be7..bd9c358451 100644 --- a/packages/inference/README.md +++ b/packages/inference/README.md @@ -47,6 +47,7 @@ Your access token should be kept private. If you need to protect it in front-end You can send inference requests to third-party providers with the inference client. Currently, we support the following providers: +- [BagelNet](https://bagel.net) - [Fal.ai](https://fal.ai) - [Featherless AI](https://featherless.ai) - [Fireworks AI](https://fireworks.ai) @@ -82,6 +83,7 @@ When authenticated with a Hugging Face access token, the request is routed throu When authenticated with a third-party provider key, the request is made directly against that provider's inference API. Only a subset of models are supported when requesting third-party providers. You can check the list of supported models per pipeline tasks here: +- [BagelNet supported models](https://huggingface.co/api/partners/bagelnet/models) - [Fal.ai supported models](https://huggingface.co/api/partners/fal-ai/models) - [Featherless AI supported models](https://huggingface.co/api/partners/featherless-ai/models) - [Fireworks AI supported models](https://huggingface.co/api/partners/fireworks-ai/models) diff --git a/packages/inference/src/lib/getProviderHelper.ts b/packages/inference/src/lib/getProviderHelper.ts index 147fe08ec5..dd6429bc17 100644 --- a/packages/inference/src/lib/getProviderHelper.ts +++ b/packages/inference/src/lib/getProviderHelper.ts @@ -1,3 +1,4 @@ +import * as BagelNet from "../providers/bagelnet.js"; import * as BlackForestLabs from "../providers/black-forest-labs.js"; import * as Cerebras from "../providers/cerebras.js"; import * as Cohere from "../providers/cohere.js"; @@ -52,6 +53,9 @@ import type { InferenceProvider, InferenceProviderOrPolicy, InferenceTask } from import { InferenceClientInputError } from "../errors.js"; export const PROVIDERS: Record<InferenceProvider, Partial<Record<InferenceTask, TaskProviderHelper>>> = { + bagelnet: { + conversational: new 
BagelNet.BagelNetConversational(), + }, "black-forest-labs": { "text-to-image": new BlackForestLabs.BlackForestLabsTextToImageTask(), }, diff --git a/packages/inference/src/providers/bagelnet.ts b/packages/inference/src/providers/bagelnet.ts new file mode 100644 index 0000000000..b54dd74983 --- /dev/null +++ b/packages/inference/src/providers/bagelnet.ts @@ -0,0 +1,25 @@ +import { BaseConversationalTask } from "./providerHelper.js"; + +export class BagelNetConversational extends BaseConversationalTask { + constructor() { + super("bagelnet", "https://api.bagel.net", false); + } + + override makeRoute(): string { + return "/v1/chat/completions"; + } + + override preparePayload(params: any) { + return { + model: params.model, + messages: params.messages, + max_tokens: params.max_tokens, + temperature: params.temperature, + stream: params.stream ?? false, + }; + } + + override getResponse(r: any) { + return r; + } +} \ No newline at end of file diff --git a/packages/inference/src/providers/consts.ts b/packages/inference/src/providers/consts.ts index 995161970a..1fb7ebebd8 100644 --- a/packages/inference/src/providers/consts.ts +++ b/packages/inference/src/providers/consts.ts @@ -18,6 +18,7 @@ export const HARDCODED_MODEL_INFERENCE_MAPPING: Record< * Example: * "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct", */ + bagelnet: {}, "black-forest-labs": {}, cerebras: {}, cohere: {}, diff --git a/packages/inference/src/types.ts b/packages/inference/src/types.ts index 5d6be233d8..6daf0d39cb 100644 --- a/packages/inference/src/types.ts +++ b/packages/inference/src/types.ts @@ -45,6 +45,7 @@ export interface Options { export type InferenceTask = Exclude<PipelineType, "other"> | "conversational"; export const INFERENCE_PROVIDERS = [ + "bagelnet", "black-forest-labs", "cerebras", "cohere",