Skip to content

Commit 1b1daa7

Browse files
committed
Add GEMINI_PRO_1_5_PRO and GEMINI_PRO_1_5_FLASH and update gemini Maven BOM to 26.39.0
Also fix a few Ollama docs and code formatting issues.
1 parent 549c480 commit 1b1daa7

File tree

10 files changed

+43
-21
lines changed

10 files changed

+43
-21
lines changed

models/spring-ai-ollama/src/main/java/org/springframework/ai/ollama/OllamaChatClient.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,7 @@ public OllamaChatClient withModel(String model) {
8787
/**
8888
* @deprecated Use {@link OllamaOptions} constructor instead.
8989
*/
90+
@Deprecated
9091
public OllamaChatClient withDefaultOptions(OllamaOptions options) {
9192
this.defaultOptions = options;
9293
return this;

models/spring-ai-ollama/src/main/java/org/springframework/ai/ollama/OllamaEmbeddingClient.java

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,11 @@ public OllamaEmbeddingClient(OllamaApi ollamaApi) {
6666
this.ollamaApi = ollamaApi;
6767
}
6868

69+
public OllamaEmbeddingClient(OllamaApi ollamaApi, OllamaOptions defaultOptions) {
70+
this.ollamaApi = ollamaApi;
71+
this.defaultOptions = defaultOptions;
72+
}
73+
6974
/**
7075
* @deprecated Use {@link OllamaOptions#setModel} instead.
7176
*/
@@ -75,6 +80,10 @@ public OllamaEmbeddingClient withModel(String model) {
7580
return this;
7681
}
7782

83+
/**
84+
* @deprecated Use {@link OllamaOptions} constructor instead.
85+
*/
86+
@Deprecated
7887
public OllamaEmbeddingClient withDefaultOptions(OllamaOptions options) {
7988
this.defaultOptions = options;
8089
return this;

models/spring-ai-ollama/src/test/java/org/springframework/ai/ollama/OllamaChatClientMultimodalIT.java

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,8 @@
2525
import org.junit.jupiter.api.Test;
2626
import org.testcontainers.junit.jupiter.Container;
2727
import org.testcontainers.junit.jupiter.Testcontainers;
28+
import org.testcontainers.ollama.OllamaContainer;
2829

29-
import org.springframework.ai.chat.ChatResponse;
3030
import org.springframework.ai.chat.messages.Media;
3131
import org.springframework.ai.chat.messages.UserMessage;
3232
import org.springframework.ai.chat.prompt.Prompt;
@@ -38,7 +38,6 @@
3838
import org.springframework.context.annotation.Bean;
3939
import org.springframework.core.io.ClassPathResource;
4040
import org.springframework.util.MimeTypeUtils;
41-
import org.testcontainers.ollama.OllamaContainer;
4241

4342
import static org.assertj.core.api.Assertions.assertThat;
4443

models/spring-ai-ollama/src/test/java/org/springframework/ai/ollama/OllamaChatRequestTests.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
*/
3131
public class OllamaChatRequestTests {
3232

33-
OllamaChatClient client = new OllamaChatClient(new OllamaApi()).withDefaultOptions(
33+
OllamaChatClient client = new OllamaChatClient(new OllamaApi(),
3434
new OllamaOptions().withModel("MODEL_NAME").withTopK(99).withTemperature(66.6f).withNumGPU(1));
3535

3636
@Test
@@ -105,8 +105,8 @@ public void createRequestWithPromptOptionsModelOverride() {
105105
@Test
106106
public void createRequestWithDefaultOptionsModelOverride() {
107107

108-
OllamaChatClient client2 = new OllamaChatClient(new OllamaApi())
109-
.withDefaultOptions(new OllamaOptions().withModel("DEFAULT_OPTIONS_MODEL"));
108+
OllamaChatClient client2 = new OllamaChatClient(new OllamaApi(),
109+
new OllamaOptions().withModel("DEFAULT_OPTIONS_MODEL"));
110110

111111
var request = client2.ollamaChatRequest(new Prompt("Test message content"), true);
112112

models/spring-ai-vertex-ai-gemini/src/main/java/org/springframework/ai/vertexai/gemini/VertexAiGeminiChatClient.java

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,11 @@ public enum ChatModel {
9999

100100
GEMINI_PRO_VISION("gemini-pro-vision"),
101101

102-
GEMINI_PRO("gemini-pro");
102+
GEMINI_PRO("gemini-pro"),
103+
104+
GEMINI_PRO_1_5_PRO("gemini-1.5-pro-preview-0514"),
105+
106+
GEMINI_PRO_1_5_FLASH("gemini-1.5-flash-preview-0514");
103107

104108
ChatModel(String value) {
105109
this.value = value;

models/spring-ai-vertex-ai-gemini/src/test/java/org/springframework/ai/vertexai/gemini/function/VertexAiGeminiChatClientFunctionCallingIT.java

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ public void afterEach() {
6868
}
6969

7070
@Test
71-
@Disabled("Google Vertex AI degraded support for parallel function calls")
71+
// @Disabled("Google Vertex AI degraded support for parallel function calls")
7272
public void functionCallExplicitOpenApiSchema() {
7373

7474
UserMessage userMessage = new UserMessage(
@@ -98,7 +98,8 @@ public void functionCallExplicitOpenApiSchema() {
9898
""";
9999

100100
var promptOptions = VertexAiGeminiChatOptions.builder()
101-
.withModel(VertexAiGeminiChatClient.ChatModel.GEMINI_PRO.getValue())
101+
// .withModel(VertexAiGeminiChatClient.ChatModel.GEMINI_PRO.getValue())
102+
.withModel(VertexAiGeminiChatClient.ChatModel.GEMINI_PRO_1_5_PRO.getValue())
102103
.withFunctionCallbacks(List.of(FunctionCallbackWrapper.builder(new MockWeatherService())
103104
.withName("get_current_weather")
104105
.withDescription("Get the current weather in a given location")
@@ -125,7 +126,8 @@ public void functionCallTestInferredOpenApiSchema() {
125126
List<Message> messages = new ArrayList<>(List.of(userMessage));
126127

127128
var promptOptions = VertexAiGeminiChatOptions.builder()
128-
.withModel(VertexAiGeminiChatClient.ChatModel.GEMINI_PRO.getValue())
129+
.withModel(VertexAiGeminiChatClient.ChatModel.GEMINI_PRO_1_5_PRO.getValue())
130+
// .withModel(VertexAiGeminiChatClient.ChatModel.GEMINI_PRO.getValue())
129131
.withFunctionCallbacks(List.of(
130132
FunctionCallbackWrapper.builder(new MockWeatherService())
131133
.withSchemaType(SchemaType.OPEN_API_SCHEMA)

pom.xml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -129,7 +129,7 @@
129129
<jackson.version>2.16.1</jackson.version>
130130
<djl.version>0.26.0</djl.version>
131131
<onnxruntime.version>1.17.0</onnxruntime.version>
132-
<com.google.cloud.version>26.37.0</com.google.cloud.version>
132+
<com.google.cloud.version>26.39.0</com.google.cloud.version>
133133
<qdrant.version>1.9.1</qdrant.version>
134134
<spring-retry.version>2.0.5</spring-retry.version>
135135
<ibm.sdk.version>9.20.0</ibm.sdk.version>

spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/ollama-chat.adoc

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -249,8 +249,8 @@ Next, create an `OllamaChatClient` instance and use it to text generations reque
249249
----
250250
var ollamaApi = new OllamaApi();
251251
252-
var chatClient = new OllamaChatClient(ollamaApi).withModel(MODEL)
253-
.withDefaultOptions(OllamaOptions.create()
252+
var chatClient = new OllamaChatClient(ollamaApi,
253+
OllamaOptions.create()
254254
.withModel(OllamaOptions.DEFAULT_MODEL)
255255
.withTemperature(0.9f));
256256
@@ -274,6 +274,8 @@ image::ollama-chat-completion-api.jpg[OllamaApi Chat Completion API Diagram, 800
274274

275275
Here is a simple snippet showing how to use the API programmatically:
276276

277+
NOTE: The `OllamaApi` is a low-level API and is not recommended for direct use. Use the `OllamaChatClient` instead.
278+
277279
[source,java]
278280
----
279281
OllamaApi ollamaApi =

spring-ai-spring-boot-autoconfigure/src/main/java/org/springframework/ai/autoconfigure/ollama/OllamaAutoConfiguration.java

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -57,9 +57,7 @@ public OllamaApi ollamaApi(OllamaConnectionDetails connectionDetails, RestClient
5757
@ConditionalOnProperty(prefix = OllamaChatProperties.CONFIG_PREFIX, name = "enabled", havingValue = "true",
5858
matchIfMissing = true)
5959
public OllamaChatClient ollamaChatClient(OllamaApi ollamaApi, OllamaChatProperties properties) {
60-
61-
return new OllamaChatClient(ollamaApi).withModel(properties.getModel())
62-
.withDefaultOptions(properties.getOptions());
60+
return new OllamaChatClient(ollamaApi, properties.getOptions());
6361
}
6462

6563
@Bean
@@ -68,8 +66,7 @@ public OllamaChatClient ollamaChatClient(OllamaApi ollamaApi, OllamaChatProperti
6866
matchIfMissing = true)
6967
public OllamaEmbeddingClient ollamaEmbeddingClient(OllamaApi ollamaApi, OllamaEmbeddingProperties properties) {
7068

71-
return new OllamaEmbeddingClient(ollamaApi).withModel(properties.getModel())
72-
.withDefaultOptions(properties.getOptions());
69+
return new OllamaEmbeddingClient(ollamaApi, properties.getOptions());
7370
}
7471

7572
private static class PropertiesOllamaConnectionDetails implements OllamaConnectionDetails {

spring-ai-spring-boot-autoconfigure/src/test/java/org/springframework/ai/autoconfigure/vertexai/gemini/tool/FunctionCallWithFunctionBeanIT.java

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -54,9 +54,10 @@ class FunctionCallWithFunctionBeanIT {
5454
@Test
5555
void functionCallTest() {
5656

57-
contextRunner
58-
.withPropertyValues("spring.ai.vertex.ai.gemini.chat.options.model="
59-
+ VertexAiGeminiChatClient.ChatModel.GEMINI_PRO.getValue())
57+
contextRunner.withPropertyValues("spring.ai.vertex.ai.gemini.chat.options.model="
58+
// + VertexAiGeminiChatClient.ChatModel.GEMINI_PRO.getValue())
59+
+ VertexAiGeminiChatClient.ChatModel.GEMINI_PRO_1_5_PRO.getValue())
60+
// + VertexAiGeminiChatClient.ChatModel.GEMINI_PRO_1_5_FLASH.getValue())
6061
.run(context -> {
6162

6263
VertexAiGeminiChatClient chatClient = context.getBean(VertexAiGeminiChatClient.class);
@@ -67,15 +68,22 @@ void functionCallTest() {
6768
If the information was not fetched call the function again. Repeat at most 3 times.
6869
""");
6970
var userMessage = new UserMessage(
70-
"What's the weather like in San Francisco, Paris and in Tokyo (Japan)?");
71+
// "What's the weather like in San Francisco, Paris and in Tokyo?
72+
// Please let me know how many function calls you've preformed.");
73+
"What's the weather like in San Francisco, Paris and in Tokyo?");
7174

7275
ChatResponse response = chatClient.call(new Prompt(List.of(systemMessage, userMessage),
7376
VertexAiGeminiChatOptions.builder().withFunction("weatherFunction").build()));
77+
// ChatResponse response = chatClient.call(new
78+
// Prompt(List.of(userMessage),
79+
// VertexAiGeminiChatOptions.builder().withFunction("weatherFunction").build()));
7480

7581
logger.info("Response: {}", response);
7682

7783
assertThat(response.getResult().getOutput().getContent()).contains("30", "10", "15");
7884

85+
Thread.sleep(10000);
86+
7987
response = chatClient.call(new Prompt(List.of(systemMessage, userMessage),
8088
VertexAiGeminiChatOptions.builder().withFunction("weatherFunction3").build()));
8189

0 commit comments

Comments
 (0)