File tree Expand file tree Collapse file tree 1 file changed +3
-2
lines changed Expand file tree Collapse file tree 1 file changed +3
-2
lines changed Original file line number Diff line number Diff line change @@ -248,7 +248,8 @@ func (r *ChatCompletionResponseFormatJSONSchema) UnmarshalJSON(data []byte) erro
248
248
return nil
249
249
}
250
250
251
- // ChatCompletionRequestExtensions contains third-party OpenAI API extensions (e.g., vendor-specific implementations like vLLM).
251
+ // ChatCompletionRequestExtensions contains third-party OpenAI API extensions
252
+ // (e.g., vendor-specific implementations like vLLM).
252
253
type ChatCompletionRequestExtensions struct {
253
254
// GuidedChoice is a vLLM-specific extension that restricts the model's output
254
255
// to one of the predefined string choices provided in this field. This feature
@@ -264,7 +265,7 @@ type ChatCompletionRequest struct {
264
265
Messages []ChatCompletionMessage `json:"messages"`
265
266
// MaxTokens The maximum number of tokens that can be generated in the chat completion.
266
267
// This value can be used to control costs for text generated via API.
267
- // This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
268
+ // Deprecated: use MaxCompletionTokens. Not compatible with o1-series models.
268
269
// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
269
270
MaxTokens int `json:"max_tokens,omitempty"`
270
271
// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
You can’t perform that action at this time.
0 commit comments