From 9a151629483a6dc7ab2d1ab1fe3fec3b171004ab Mon Sep 17 00:00:00 2001
From: David Frizelle
Date: Tue, 15 Apr 2025 22:30:30 +1000
Subject: [PATCH] Add GPT-4.1 models to OpenAI ChatModel enum

Signed-off-by: David Frizelle
---
 .../ai/openai/api/OpenAiApi.java | 28 +++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/models/spring-ai-openai/src/main/java/org/springframework/ai/openai/api/OpenAiApi.java b/models/spring-ai-openai/src/main/java/org/springframework/ai/openai/api/OpenAiApi.java
index 433f59d9697..bf376cfdd7c 100644
--- a/models/spring-ai-openai/src/main/java/org/springframework/ai/openai/api/OpenAiApi.java
+++ b/models/spring-ai-openai/src/main/java/org/springframework/ai/openai/api/OpenAiApi.java
@@ -347,6 +347,34 @@ public enum ChatModel implements ChatModelDescription {
 		 */
 		O3_MINI("o3-mini"),
 
+		/**
+		 * GPT-4.1 is our flagship model for complex tasks. It is well suited for
+		 * problem solving across domains.
+		 * <p>
+		 * Context window: 1,047,576 tokens. Max output tokens: 32,768 tokens. The
+		 * knowledge cutoff for GPT-4.1 models is June 2024.
+		 * <p>
+		 */
+		GPT_4_1("gpt-4.1"),
+
+		/**
+		 * GPT-4.1-mini provides a balance of intelligence, speed, and cost.
+		 * <p>
+		 * Context window: 1,047,576 tokens. Max output tokens: 32,768 tokens. The
+		 * knowledge cutoff for GPT-4.1 models is June 2024.
+		 * <p>
+		 */
+		GPT_4_1_MINI("gpt-4.1-mini"),
+
+		/**
+		 * GPT-4.1-nano is our fastest, most cost-effective GPT-4.1 model.
+		 * <p>
+		 * Context window: 1,047,576 tokens. Max output tokens: 32,768 tokens. The
+		 * knowledge cutoff for GPT-4.1 models is June 2024.
+		 * <p>
+		 */
+		GPT_4_1_NANO("gpt-4.1-nano"),
+
 		/**
 		 * GPT-4o ("omni") is our versatile, high-intelligence flagship model. It
 		 * accepts both text and image inputs and produces text outputs (including