1 change: 1 addition & 0 deletions packages/types/src/global-settings.ts
@@ -207,6 +207,7 @@ export const SECRET_STATE_KEYS = [
"doubaoApiKey",
"moonshotApiKey",
"mistralApiKey",
"minimaxApiKey",
"unboundApiKey",
"requestyApiKey",
"xaiApiKey",
17 changes: 17 additions & 0 deletions packages/types/src/provider-settings.ts
@@ -24,6 +24,7 @@ import {
vscodeLlmModels,
xaiModels,
internationalZAiModels,
minimaxModels,
} from "./providers/index.js"

/**
@@ -131,6 +132,7 @@ export const providerNames = [
"groq",
"mistral",
"moonshot",
"minimax",
"openai-native",
"qwen-code",
"roo",
@@ -327,6 +329,13 @@ const moonshotSchema = apiModelIdProviderModelSchema.extend({
moonshotApiKey: z.string().optional(),
})

const minimaxSchema = apiModelIdProviderModelSchema.extend({
minimaxBaseUrl: z
.union([z.literal("https://api.minimax.io/v1"), z.literal("https://api.minimaxi.com/v1")])
.optional(),
minimaxApiKey: z.string().optional(),
})

const unboundSchema = baseProviderSettingsSchema.extend({
unboundApiKey: z.string().optional(),
unboundModelId: z.string().optional(),
@@ -435,6 +444,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
deepInfraSchema.merge(z.object({ apiProvider: z.literal("deepinfra") })),
doubaoSchema.merge(z.object({ apiProvider: z.literal("doubao") })),
moonshotSchema.merge(z.object({ apiProvider: z.literal("moonshot") })),
minimaxSchema.merge(z.object({ apiProvider: z.literal("minimax") })),
unboundSchema.merge(z.object({ apiProvider: z.literal("unbound") })),
requestySchema.merge(z.object({ apiProvider: z.literal("requesty") })),
humanRelaySchema.merge(z.object({ apiProvider: z.literal("human-relay") })),
@@ -476,6 +486,7 @@ export const providerSettingsSchema = z.object({
...deepInfraSchema.shape,
...doubaoSchema.shape,
...moonshotSchema.shape,
...minimaxSchema.shape,
...unboundSchema.shape,
...requestySchema.shape,
...humanRelaySchema.shape,
@@ -560,6 +571,7 @@ export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
"gemini-cli": "apiModelId",
mistral: "apiModelId",
moonshot: "apiModelId",
minimax: "apiModelId",
deepseek: "apiModelId",
deepinfra: "deepInfraModelId",
doubao: "apiModelId",
@@ -671,6 +683,11 @@ export const MODELS_BY_PROVIDER: Record<
label: "Moonshot",
models: Object.keys(moonshotModels),
},
minimax: {
id: "minimax",
label: "MiniMax",
models: Object.keys(minimaxModels),
},
"openai-native": {
id: "openai-native",
label: "OpenAI",
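For reviewers, a minimal sketch of what the new minimaxSchema entries accept, assuming the exported providerSettingsSchemaDiscriminated is called directly: minimaxBaseUrl is constrained to the two literal endpoints, and both fields are optional.

import { providerSettingsSchemaDiscriminated } from "@roo-code/types"

// Accepted: apiProvider "minimax" with one of the two allowed base URLs.
providerSettingsSchemaDiscriminated.parse({
	apiProvider: "minimax",
	minimaxApiKey: "sk-...",
	minimaxBaseUrl: "https://api.minimaxi.com/v1",
})

// Rejected: any other base URL fails the literal union and throws a ZodError.
providerSettingsSchemaDiscriminated.parse({
	apiProvider: "minimax",
	minimaxBaseUrl: "https://example.com/v1",
})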
1 change: 1 addition & 0 deletions packages/types/src/providers/index.ts
@@ -30,3 +30,4 @@ export * from "./xai.js"
export * from "./vercel-ai-gateway.js"
export * from "./zai.js"
export * from "./deepinfra.js"
export * from "./minimax.js"
22 changes: 22 additions & 0 deletions packages/types/src/providers/minimax.ts
@@ -0,0 +1,22 @@
import type { ModelInfo } from "../model.js"

// MiniMax
// https://www.minimax.io/platform/document/text_api_intro
// https://www.minimax.io/platform/document/pricing
export type MinimaxModelId = keyof typeof minimaxModels
export const minimaxDefaultModelId: MinimaxModelId = "MiniMax-M2"

export const minimaxModels = {
"MiniMax-M2": {
maxTokens: 128_000,
contextWindow: 192_000,
supportsImages: false,
supportsPromptCache: false,
inputPrice: 0.3,
outputPrice: 1.2,
cacheWritesPrice: 0,
cacheReadsPrice: 0,
},
} as const satisfies Record<string, ModelInfo>

export const MINIMAX_DEFAULT_TEMPERATURE = 1.0
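
The runtime handler (src/api/providers/minimax.ts) is not expanded in this view. Judging from the spec file below, it wraps the OpenAI SDK the same way the other OpenAI-compatible providers do. A minimal sketch, with every detail inferred from the tests rather than from the actual implementation:

import OpenAI from "openai"
import { minimaxDefaultModelId, minimaxModels } from "@roo-code/types"

// Hypothetical reconstruction; constructor options and defaults are taken
// from minimax.spec.ts, not from the real handler.
class MiniMaxHandlerSketch {
	private client: OpenAI

	constructor(private options: { minimaxApiKey?: string; minimaxBaseUrl?: string; apiModelId?: string }) {
		this.client = new OpenAI({
			// The international endpoint is the default when no base URL is given.
			baseURL: options.minimaxBaseUrl ?? "https://api.minimax.io/v1",
			apiKey: options.minimaxApiKey,
		})
	}

	getModel() {
		const id = (this.options.apiModelId ?? minimaxDefaultModelId) as keyof typeof minimaxModels
		return { id, info: minimaxModels[id] }
	}
}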
3 changes: 3 additions & 0 deletions src/api/index.ts
@@ -40,6 +40,7 @@ import {
FeatherlessHandler,
VercelAiGatewayHandler,
DeepInfraHandler,
MiniMaxHandler,
} from "./providers"
import { NativeOllamaHandler } from "./providers/native-ollama"

@@ -165,6 +166,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
return new FeatherlessHandler(options)
case "vercel-ai-gateway":
return new VercelAiGatewayHandler(options)
case "minimax":
return new MiniMaxHandler(options)
default:
apiProvider satisfies "gemini-cli" | undefined
return new AnthropicHandler(options)
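Once this case is wired in, selecting MiniMax is driven entirely by the apiProvider field on the settings object. A usage sketch (the import path is assumed; only fields added in this PR are used):

import { buildApiHandler } from "../api" // path assumed

const handler = buildApiHandler({
	apiProvider: "minimax",
	apiModelId: "MiniMax-M2",
	minimaxApiKey: process.env.MINIMAX_API_KEY,
	minimaxBaseUrl: "https://api.minimax.io/v1",
})
// Returns a MiniMaxHandler; handler.getModel().id === "MiniMax-M2".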
277 changes: 277 additions & 0 deletions src/api/providers/__tests__/minimax.spec.ts
@@ -0,0 +1,277 @@
// npx vitest run src/api/providers/__tests__/minimax.spec.ts

vitest.mock("vscode", () => ({
workspace: {
getConfiguration: vitest.fn().mockReturnValue({
get: vitest.fn().mockReturnValue(600), // Default timeout in seconds
}),
},
}))

import OpenAI from "openai"
import { Anthropic } from "@anthropic-ai/sdk"

import { type MinimaxModelId, minimaxDefaultModelId, minimaxModels } from "@roo-code/types"

import { MiniMaxHandler } from "../minimax"

vitest.mock("openai", () => {
const createMock = vitest.fn()
return {
default: vitest.fn(() => ({ chat: { completions: { create: createMock } } })),
}
})

describe("MiniMaxHandler", () => {
let handler: MiniMaxHandler
let mockCreate: any

beforeEach(() => {
vitest.clearAllMocks()
mockCreate = (OpenAI as unknown as any)().chat.completions.create
})

describe("International MiniMax (default)", () => {
beforeEach(() => {
handler = new MiniMaxHandler({
minimaxApiKey: "test-minimax-api-key",
minimaxBaseUrl: "https://api.minimax.io/v1",
})
})

it("should use the correct international MiniMax base URL by default", () => {
new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" })
expect(OpenAI).toHaveBeenCalledWith(
expect.objectContaining({
baseURL: "https://api.minimax.io/v1",
}),
)
})

it("should use the provided API key", () => {
const minimaxApiKey = "test-minimax-api-key"
new MiniMaxHandler({ minimaxApiKey })
expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: minimaxApiKey }))
})

it("should return default model when no model is specified", () => {
const model = handler.getModel()
expect(model.id).toBe(minimaxDefaultModelId)
expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId])
})

it("should return specified model when valid model is provided", () => {
const testModelId: MinimaxModelId = "MiniMax-M2"
const handlerWithModel = new MiniMaxHandler({
apiModelId: testModelId,
minimaxApiKey: "test-minimax-api-key",
})
const model = handlerWithModel.getModel()
expect(model.id).toBe(testModelId)
expect(model.info).toEqual(minimaxModels[testModelId])
})

it("should return MiniMax-M2 model with correct configuration", () => {
const testModelId: MinimaxModelId = "MiniMax-M2"
const handlerWithModel = new MiniMaxHandler({
apiModelId: testModelId,
minimaxApiKey: "test-minimax-api-key",
})
const model = handlerWithModel.getModel()
expect(model.id).toBe(testModelId)
expect(model.info).toEqual(minimaxModels[testModelId])
expect(model.info.contextWindow).toBe(192_000)
expect(model.info.maxTokens).toBe(128_000)
expect(model.info.supportsPromptCache).toBe(false)
})
})

describe("China MiniMax", () => {
beforeEach(() => {
handler = new MiniMaxHandler({
minimaxApiKey: "test-minimax-api-key",
minimaxBaseUrl: "https://api.minimaxi.com/v1",
})
})

it("should use the correct China MiniMax base URL", () => {
new MiniMaxHandler({
minimaxApiKey: "test-minimax-api-key",
minimaxBaseUrl: "https://api.minimaxi.com/v1",
})
expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ baseURL: "https://api.minimaxi.com/v1" }))
})

it("should use the provided API key for China", () => {
const minimaxApiKey = "test-minimax-api-key"
new MiniMaxHandler({ minimaxApiKey, minimaxBaseUrl: "https://api.minimaxi.com/v1" })
expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: minimaxApiKey }))
})

it("should return default model when no model is specified", () => {
const model = handler.getModel()
expect(model.id).toBe(minimaxDefaultModelId)
expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId])
})
})

describe("Default behavior", () => {
it("should default to international base URL when none is specified", () => {
const handlerDefault = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" })
expect(OpenAI).toHaveBeenCalledWith(
expect.objectContaining({
baseURL: "https://api.minimax.io/v1",
}),
)

const model = handlerDefault.getModel()
expect(model.id).toBe(minimaxDefaultModelId)
expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId])
})

it("should default to MiniMax-M2 model", () => {
const handlerDefault = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" })
const model = handlerDefault.getModel()
expect(model.id).toBe("MiniMax-M2")
})
})

describe("API Methods", () => {
beforeEach(() => {
handler = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" })
})

it("completePrompt method should return text from MiniMax API", async () => {
const expectedResponse = "This is a test response from MiniMax"
mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: expectedResponse } }] })
const result = await handler.completePrompt("test prompt")
expect(result).toBe(expectedResponse)
})

it("should handle errors in completePrompt", async () => {
const errorMessage = "MiniMax API error"
mockCreate.mockRejectedValueOnce(new Error(errorMessage))
await expect(handler.completePrompt("test prompt")).rejects.toThrow()
})

it("createMessage should yield text content from stream", async () => {
const testContent = "This is test content from MiniMax stream"

mockCreate.mockImplementationOnce(() => {
return {
[Symbol.asyncIterator]: () => ({
next: vitest
.fn()
.mockResolvedValueOnce({
done: false,
value: { choices: [{ delta: { content: testContent } }] },
})
.mockResolvedValueOnce({ done: true }),
}),
}
})

const stream = handler.createMessage("system prompt", [])
const firstChunk = await stream.next()

expect(firstChunk.done).toBe(false)
expect(firstChunk.value).toEqual({ type: "text", text: testContent })
})

it("createMessage should yield usage data from stream", async () => {
mockCreate.mockImplementationOnce(() => {
return {
[Symbol.asyncIterator]: () => ({
next: vitest
.fn()
.mockResolvedValueOnce({
done: false,
value: {
choices: [{ delta: {} }],
usage: { prompt_tokens: 10, completion_tokens: 20 },
},
})
.mockResolvedValueOnce({ done: true }),
}),
}
})

const stream = handler.createMessage("system prompt", [])
const firstChunk = await stream.next()

expect(firstChunk.done).toBe(false)
expect(firstChunk.value).toEqual({ type: "usage", inputTokens: 10, outputTokens: 20 })
})

it("createMessage should pass correct parameters to MiniMax client", async () => {
const modelId: MinimaxModelId = "MiniMax-M2"
const modelInfo = minimaxModels[modelId]
const handlerWithModel = new MiniMaxHandler({
apiModelId: modelId,
minimaxApiKey: "test-minimax-api-key",
})

mockCreate.mockImplementationOnce(() => {
return {
[Symbol.asyncIterator]: () => ({
async next() {
return { done: true }
},
}),
}
})

const systemPrompt = "Test system prompt for MiniMax"
const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message for MiniMax" }]

const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages)
await messageGenerator.next()

expect(mockCreate).toHaveBeenCalledWith(
expect.objectContaining({
model: modelId,
max_tokens: modelInfo.maxTokens,
temperature: 1,
messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
stream: true,
stream_options: { include_usage: true },
}),
undefined,
)
})

it("should use temperature 1 by default", async () => {
mockCreate.mockImplementationOnce(() => {
return {
[Symbol.asyncIterator]: () => ({
async next() {
return { done: true }
},
}),
}
})

const messageGenerator = handler.createMessage("test", [])
await messageGenerator.next()

expect(mockCreate).toHaveBeenCalledWith(
expect.objectContaining({
temperature: 1,
}),
undefined,
)
})
})

describe("Model Configuration", () => {
it("should correctly configure MiniMax-M2 model properties", () => {
const model = minimaxModels["MiniMax-M2"]
expect(model.maxTokens).toBe(128_000)
expect(model.contextWindow).toBe(192_000)
expect(model.supportsImages).toBe(false)
expect(model.supportsPromptCache).toBe(false)
expect(model.inputPrice).toBe(0.3)
expect(model.outputPrice).toBe(1.2)
})
})
})