Skip to content

Commit c4273cb

Browse files
authored
fix(chat): shorten comment to pass linter (#1050)
1 parent 4f87294 commit c4273cb

File tree

1 file changed

+3
-2
lines changed

1 file changed

+3
-2
lines changed

chat.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -248,7 +248,8 @@ func (r *ChatCompletionResponseFormatJSONSchema) UnmarshalJSON(data []byte) erro
248248
return nil
249249
}
250250

251-
// ChatCompletionRequestExtensions contains third-party OpenAI API extensions (e.g., vendor-specific implementations like vLLM).
251+
// ChatCompletionRequestExtensions contains third-party OpenAI API extensions
252+
// (e.g., vendor-specific implementations like vLLM).
252253
type ChatCompletionRequestExtensions struct {
253254
// GuidedChoice is a vLLM-specific extension that restricts the model's output
254255
// to one of the predefined string choices provided in this field. This feature
@@ -264,7 +265,7 @@ type ChatCompletionRequest struct {
264265
Messages []ChatCompletionMessage `json:"messages"`
265266
// MaxTokens The maximum number of tokens that can be generated in the chat completion.
266267
// This value can be used to control costs for text generated via API.
267-
// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
268+
// Deprecated: use MaxCompletionTokens. Not compatible with o1-series models.
268269
// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
269270
MaxTokens int `json:"max_tokens,omitempty"`
270271
// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,

0 commit comments

Comments (0)