@@ -1,14 +1,5 @@
 import OpenAI, { type ClientOptions } from "openai";
 import { zodResponseFormat } from "openai/helpers/zod";
-import type { LLMCache } from "../../lib/cache/LLMCache";
-import { validateZodSchema } from "../../lib/utils";
-import {
-  type ChatCompletionOptions,
-  type ChatMessage,
-  LLMClient,
-} from "../../lib/llm/LLMClient";
-import type { LogLine } from "../../types/log";
-import type { AvailableModel } from "../../types/model";
 import type {
   ChatCompletion,
   ChatCompletionAssistantMessageParam,
@@ -19,23 +10,28 @@ import type {
   ChatCompletionSystemMessageParam,
   ChatCompletionUserMessageParam,
 } from "openai/resources/chat";
+import type { LLMCache } from "../../lib/cache/LLMCache";
+import {
+  type ChatMessage,
+  CreateChatCompletionOptions,
+  LLMClient,
+} from "../../lib/llm/LLMClient";
+import { validateZodSchema } from "../../lib/utils";
+import type { AvailableModel } from "../../types/model";
 
 export class OllamaClient extends LLMClient {
   public type = "ollama" as const;
   private client: OpenAI;
   private cache: LLMCache | undefined;
-  public logger: (message: LogLine) => void;
   private enableCaching: boolean;
   public clientOptions: ClientOptions;
 
   constructor({
-    logger,
     enableCaching = false,
     cache = undefined,
     modelName = "llama3.2",
     clientOptions,
   }: {
-    logger?: (message: LogLine) => void;
     enableCaching?: boolean;
     cache?: LLMCache;
     modelName?: string;
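For reference, with the hunks above the logger is no longer wired in at construction time. A minimal instantiation sketch under the new constructor shape (the import path is hypothetical; the baseURL is simply the default visible later in this diff):

import { OllamaClient } from "./OllamaClient"; // hypothetical path

const llm = new OllamaClient({
  modelName: "llama3.2",
  enableCaching: false,
  clientOptions: { baseURL: "http://localhost:11434/v1" }, // default the client falls back to per the diff
});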
@@ -47,16 +43,16 @@ export class OllamaClient extends LLMClient {
       baseURL: clientOptions?.baseURL || "http://localhost:11434/v1",
       apiKey: "ollama",
     });
-    this.logger = logger;
     this.cache = cache;
     this.enableCaching = enableCaching;
     this.modelName = modelName as AvailableModel;
   }
 
-  async createChatCompletion<T = ChatCompletion>(
-    options: ChatCompletionOptions,
+  async createChatCompletion<T = ChatCompletion>({
+    options,
     retries = 3,
-  ): Promise<T> {
+    logger,
+  }: CreateChatCompletionOptions): Promise<T> {
     const { image, requestId, ...optionsWithoutImageAndRequestId } = options;
 
     // TODO: Implement vision support
@@ -66,7 +62,7 @@ export class OllamaClient extends LLMClient {
       );
     }
 
-    this.logger({
+    logger({
       category: "ollama",
       message: "creating chat completion",
       level: 1,
@@ -122,7 +118,7 @@ export class OllamaClient extends LLMClient {
       );
 
       if (cachedResponse) {
-        this.logger({
+        logger({
          category: "llm_cache",
          message: "LLM cache hit - returning cached response",
          level: 1,
@@ -140,7 +136,7 @@ export class OllamaClient extends LLMClient {
        return cachedResponse;
      }
 
-     this.logger({
+     logger({
        category: "llm_cache",
        message: "LLM cache miss - no cached response found",
        level: 1,
@@ -168,7 +164,7 @@ export class OllamaClient extends LLMClient {
       model: this.modelName,
     };
 
-    this.logger({
+    logger({
       category: "ollama",
       message: "creating chat completion",
       level: 1,
@@ -257,7 +253,7 @@ export class OllamaClient extends LLMClient {
 
     const response = await this.client.chat.completions.create(body);
 
-    this.logger({
+    logger({
       category: "ollama",
       message: "response",
       level: 1,
@@ -279,7 +275,11 @@ export class OllamaClient extends LLMClient {
 
       if (!validateZodSchema(options.response_model.schema, parsedData)) {
         if (retries > 0) {
-          return this.createChatCompletion(options, retries - 1);
+          return this.createChatCompletion({
+            options,
+            logger,
+            retries: retries - 1,
+          });
         }
 
         throw new Error("Invalid response schema");
@@ -299,7 +299,7 @@ export class OllamaClient extends LLMClient {
     }
 
     if (this.enableCaching) {
-      this.logger({
+      logger({
        category: "llm_cache",
        message: "caching response",
        level: 1,
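Taken together, these hunks move the logger out of the class: createChatCompletion now receives a single CreateChatCompletionOptions object carrying options, retries, and the per-call logger instead of reading this.logger. A minimal call sketch under the new signature (the message shape, requestId value, and logger body are illustrative assumptions, not taken from this diff):

const completion = await llm.createChatCompletion({
  options: {
    messages: [{ role: "user", content: "Say hello" }], // assumed ChatMessage shape
    requestId: "example-request",                       // the diff only destructures this field
  },
  logger: (line) => console.log(line.message),          // logger is now supplied per call
  retries: 3,
});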