Skip to content

Commit 2b5ecde

Browse files
committed
Updated
1 parent ed2b92e commit 2b5ecde

File tree

5 files changed

+180
-237
lines changed

5 files changed

+180
-237
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ The options are as follows:
196196
| `llm.WithTemperature(float64)` | Yes | Yes | Yes | - | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. |
197197
| `llm.WithTopP(float64)` | Yes | Yes | Yes | - | Nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. |
198198
| `llm.WithTopK(uint64)` | Yes | Yes | No | - | Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. |
199-
| `llm.WithMaxTokens(uint64)` | - | Yes | Yes | - | The maximum number of tokens to generate in the response. |
199+
| `llm.WithMaxTokens(uint64)` | No | Yes | Yes | - | The maximum number of tokens to generate in the response. |
200200
| `llm.WithStream(func(llm.Completion))` | Can be enabled when tools are not used | Yes | Yes | - | Stream the response to a function. |
201201
| `llm.WithToolChoice(string, string, ...)` | No | Yes | Use `auto`, `any`, `none`, `required` or a function name. Only the first argument is used. | - | The tool to use for the model. |
202202
| `llm.WithToolKit(llm.ToolKit)` | Cannot be combined with streaming | Yes | Yes | - | The set of tools to use. |

pkg/ollama/chat_test.go

Lines changed: 121 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,12 +2,16 @@ package ollama_test
22

33
import (
44
"context"
5+
"fmt"
6+
"os"
7+
"strings"
58
"testing"
69

710
// Packages
811

912
llm "github.com/mutablelogic/go-llm"
1013
ollama "github.com/mutablelogic/go-llm/pkg/ollama"
14+
tool "github.com/mutablelogic/go-llm/pkg/tool"
1115
assert "github.com/stretchr/testify/assert"
1216
)
1317

@@ -65,4 +69,121 @@ func Test_chat_001(t *testing.T) {
6569
}
6670
t.Log(response)
6771
})
72+
73+
t.Run("System", func(t *testing.T) {
74+
assert := assert.New(t)
75+
response, err := client.Chat(context.TODO(), model.UserPrompt("why is the sky blue?"), llm.WithSystemPrompt("reply as if you are shakespeare"))
76+
if !assert.NoError(err) {
77+
t.FailNow()
78+
}
79+
t.Log(response)
80+
})
81+
82+
t.Run("Seed", func(t *testing.T) {
83+
assert := assert.New(t)
84+
response, err := client.Chat(context.TODO(), model.UserPrompt("why is the sky blue?"), llm.WithSeed(1234))
85+
if !assert.NoError(err) {
86+
t.FailNow()
87+
}
88+
t.Log(response)
89+
})
90+
91+
t.Run("Format", func(t *testing.T) {
92+
assert := assert.New(t)
93+
response, err := client.Chat(context.TODO(), model.UserPrompt("why is the sky blue? Reply in JSON format"), llm.WithFormat("json"))
94+
if !assert.NoError(err) {
95+
t.FailNow()
96+
}
97+
t.Log(response)
98+
})
99+
100+
t.Run("PresencePenalty", func(t *testing.T) {
101+
assert := assert.New(t)
102+
response, err := client.Chat(context.TODO(), model.UserPrompt("why is the sky blue?"), llm.WithPresencePenalty(-1.0))
103+
if !assert.NoError(err) {
104+
t.FailNow()
105+
}
106+
t.Log(response)
107+
})
108+
109+
t.Run("FrequencyPenalty", func(t *testing.T) {
110+
assert := assert.New(t)
111+
response, err := client.Chat(context.TODO(), model.UserPrompt("why is the sky blue?"), llm.WithFrequencyPenalty(1.0))
112+
if !assert.NoError(err) {
113+
t.FailNow()
114+
}
115+
t.Log(response)
116+
})
117+
}
118+
119+
func Test_chat_002(t *testing.T) {
120+
assert := assert.New(t)
121+
model, err := client.PullModel(context.TODO(), "llava:7b")
122+
if !assert.NoError(err) {
123+
t.FailNow()
124+
}
125+
assert.NotNil(model)
126+
127+
f, err := os.Open("testdata/guggenheim.jpg")
128+
if !assert.NoError(err) {
129+
t.FailNow()
130+
}
131+
defer f.Close()
132+
133+
// Describe an image
134+
r, err := client.Chat(context.TODO(), model.UserPrompt("Provide a short caption for this image", llm.WithAttachment(f)))
135+
if assert.NoError(err) {
136+
assert.Equal("assistant", r.Role())
137+
assert.Equal(1, r.Num())
138+
assert.NotEmpty(r.Text(0))
139+
t.Log(r.Text(0))
140+
}
141+
}
142+
143+
func Test_chat_003(t *testing.T) {
144+
assert := assert.New(t)
145+
model, err := client.PullModel(context.TODO(), "llama3.2")
146+
if !assert.NoError(err) {
147+
t.FailNow()
148+
}
149+
assert.NotNil(model)
150+
151+
toolkit := tool.NewToolKit()
152+
toolkit.Register(&weather{})
153+
154+
// Get the weather for a city
155+
r, err := client.Chat(context.TODO(), model.UserPrompt("What is the weather in the capital city of germany?"), llm.WithToolKit(toolkit))
156+
if assert.NoError(err) {
157+
assert.Equal("assistant", r.Role())
158+
assert.Equal(1, r.Num())
159+
160+
calls := r.ToolCalls(0)
161+
assert.NotEmpty(calls)
162+
163+
var w weather
164+
assert.NoError(calls[0].Decode(&w))
165+
assert.Equal("berlin", strings.ToLower(w.City))
166+
}
167+
}
168+
169+
type weather struct {
170+
City string `json:"city" help:"The city to get the weather for"`
171+
}
172+
173+
func (weather) Name() string {
174+
return "weather_in_city"
175+
}
176+
177+
func (weather) Description() string {
178+
return "Get the weather for a city"
179+
}
180+
181+
func (w weather) Run(ctx context.Context) (any, error) {
182+
var result struct {
183+
City string `json:"city"`
184+
Weather string `json:"weather"`
185+
}
186+
result.City = w.City
187+
result.Weather = fmt.Sprintf("The weather in %q is sunny and warm", w.City)
188+
return result, nil
68189
}

pkg/ollama/chat_test.go_old

Lines changed: 0 additions & 146 deletions
This file was deleted.

pkg/ollama/session_test.go

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
package ollama_test
2+
3+
import (
4+
"context"
5+
"testing"
6+
7+
// Packages
8+
llm "github.com/mutablelogic/go-llm"
9+
tool "github.com/mutablelogic/go-llm/pkg/tool"
10+
assert "github.com/stretchr/testify/assert"
11+
)
12+
13+
func Test_session_001(t *testing.T) {
14+
assert := assert.New(t)
15+
model, err := client.PullModel(context.TODO(), "llama3.2")
16+
if !assert.NoError(err) {
17+
t.FailNow()
18+
}
19+
assert.NotNil(model)
20+
21+
session := model.Context()
22+
if assert.NotNil(session) {
23+
err := session.FromUser(context.TODO(), "Hello, how are you?")
24+
assert.NoError(err)
25+
t.Log(session)
26+
}
27+
}
28+
29+
func Test_session_002(t *testing.T) {
30+
assert := assert.New(t)
31+
model, err := client.PullModel(context.TODO(), "llama3.2")
32+
if !assert.NoError(err) {
33+
t.FailNow()
34+
}
35+
assert.NotNil(model)
36+
37+
toolkit := tool.NewToolKit()
38+
toolkit.Register(&weather{})
39+
40+
session := model.Context(llm.WithToolKit(toolkit))
41+
if !assert.NotNil(session) {
42+
t.FailNow()
43+
}
44+
45+
assert.NoError(session.FromUser(context.TODO(), "What is the weather like in London today?"))
46+
calls := session.ToolCalls(0)
47+
if assert.Len(calls, 1) {
48+
assert.Equal("weather_in_city", calls[0].Name())
49+
50+
result, err := toolkit.Run(context.TODO(), calls...)
51+
assert.NoError(err)
52+
assert.Len(result, 1)
53+
54+
assert.NoError(session.FromTool(context.TODO(), result...))
55+
}
56+
57+
t.Log(session)
58+
}

0 commit comments

Comments
 (0)