@@ -21,7 +21,7 @@ type Response struct {
 	Reason       string  `json:"stop_reason,omitempty"`
 	StopSequence *string `json:"stop_sequence,omitempty"`
 	Message
-	Metrics  `json:"usage,omitempty"`
+	*Metrics `json:"usage,omitempty"`
 }
 
 // Metrics
@@ -43,30 +43,43 @@ func (r Response) String() string {
 	return string(data)
 }
 
+func (m Metrics) String() string {
+	data, err := json.MarshalIndent(m, "", "  ")
+	if err != nil {
+		return err.Error()
+	}
+	return string(data)
+}
+
 ///////////////////////////////////////////////////////////////////////////////
 // PUBLIC METHODS
 
 type reqMessages struct {
-	Model         string       `json:"model"`
-	MaxTokens     uint64       `json:"max_tokens,omitempty"`
-	Metadata      *optmetadata `json:"metadata,omitempty"`
-	StopSequences []string     `json:"stop_sequences,omitempty"`
-	Stream        bool         `json:"stream,omitempty"`
-	System        string       `json:"system,omitempty"`
-	Temperature   float64      `json:"temperature,omitempty"`
-	TopK          uint64       `json:"top_k,omitempty"`
-	TopP          float64      `json:"top_p,omitempty"`
-	Messages      []*Message   `json:"messages"`
-	Tools         []llm.Tool   `json:"tools,omitempty"`
-	ToolChoice    any          `json:"tool_choice,omitempty"`
+	Model         string           `json:"model"`
+	MaxTokens     uint64           `json:"max_tokens,omitempty"`
+	Metadata      *optmetadata     `json:"metadata,omitempty"`
+	StopSequences []string         `json:"stop_sequences,omitempty"`
+	Stream        bool             `json:"stream,omitempty"`
+	System        string           `json:"system,omitempty"`
+	Temperature   float64          `json:"temperature,omitempty"`
+	TopK          uint64           `json:"top_k,omitempty"`
+	TopP          float64          `json:"top_p,omitempty"`
+	Tools         []llm.Tool       `json:"tools,omitempty"`
+	ToolChoice    any              `json:"tool_choice,omitempty"`
+	Messages      []llm.Completion `json:"messages"`
 }
 
+// Send a completion request with a single prompt, and return the next completion
 func (model *model) Completion(ctx context.Context, prompt string, opts ...llm.Opt) (llm.Completion, error) {
-	// TODO
-	return nil, llm.ErrNotImplemented
+	message, err := messagefactory{}.UserPrompt(prompt, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return model.Chat(ctx, []llm.Completion{message}, opts...)
 }
 
-func (anthropic *Client) Messages(ctx context.Context, context llm.Context, opts ...llm.Opt) (*Response, error) {
+// Send a completion request with multiple completions, and return the next completion
+func (model *model) Chat(ctx context.Context, completions []llm.Completion, opts ...llm.Opt) (llm.Completion, error) {
 	// Apply options
 	opt, err := llm.ApplyOpts(opts...)
 	if err != nil {
@@ -75,28 +88,30 @@ func (anthropic *Client) Messages(ctx context.Context, context llm.Context, opts
 
 	// Request
 	req, err := client.NewJSONRequest(reqMessages{
-		Model:     context.(*session).model.Name(),
-		MaxTokens: optMaxTokens(context.(*session).model, opt),
+		Model:         model.Name(),
+		MaxTokens:     optMaxTokens(model, opt),
 		Metadata:      optMetadata(opt),
 		StopSequences: optStopSequences(opt),
 		Stream:        optStream(opt),
 		System:        optSystemPrompt(opt),
 		Temperature:   optTemperature(opt),
 		TopK:          optTopK(opt),
 		TopP:          optTopP(opt),
-		Messages:      context.(*session).seq,
-		Tools:         optTools(anthropic, opt),
+		Tools:         optTools(model.Client, opt),
 		ToolChoice:    optToolChoice(opt),
+		Messages:      completions,
 	})
 	if err != nil {
 		return nil, err
 	}
 
-	// Stream
+	// Response options
 	var response Response
 	reqopts := []client.RequestOpt{
 		client.OptPath("messages"),
 	}
+
+	// Streaming
 	if optStream(opt) {
 		reqopts = append(reqopts, client.OptTextStreamCallback(func(evt client.TextStreamEvent) error {
 			if err := streamEvent(&response, evt); err != nil {
@@ -110,7 +125,7 @@ func (anthropic *Client) Messages(ctx context.Context, context llm.Context, opts
 	}
 
 	// Response
-	if err := anthropic.DoWithContext(ctx, req, &response, reqopts...); err != nil {
+	if err := model.DoWithContext(ctx, req, &response, reqopts...); err != nil {
 		return nil, err
 	}
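Two notes on the change, plus a usage sketch.

The switch from an embedded Metrics to *Metrics in Response makes the omitempty tag effective: encoding/json never treats a struct value as empty, so the old field always serialized a usage object, whereas a nil pointer is now dropped from the output.

Below is a minimal sketch of how the new surface is called. The import path, and the assumption that the private model type satisfies a public llm.Model interface exposing these two methods, are inferred from identifiers in the diff rather than stated in it:

package example

import (
	"context"
	"fmt"
	"log"

	llm "github.com/mutablelogic/go-llm" // import path assumed
)

// demo exercises the single-prompt entry point added in this commit.
// llm.Model is assumed to be the public interface that the private model
// type satisfies, exposing the Completion and Chat methods shown above.
// Per the diff, Completion wraps the prompt in a user message via
// messagefactory{}.UserPrompt and delegates to Chat with a one-element
// []llm.Completion slice; call Chat directly when managing a multi-turn
// history yourself.
func demo(ctx context.Context, model llm.Model) {
	reply, err := model.Completion(ctx, "Why is the sky blue?")
	if err != nil {
		log.Fatal(err)
	}
	// Response implements String() (see above), so the reply prints as JSON
	fmt.Println(reply)
}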