|
228 | 228 | ],
|
229 | 229 | "additionalProperties": false,
|
230 | 230 | "description": "Learn about [audio inputs](https://platform.openai.com/docs/guides/audio)."
|
| 231 | + }, |
| 232 | + { |
| 233 | + "type": "object", |
| 234 | + "properties": { |
| 235 | + "file": { |
| 236 | + "type": "object", |
| 237 | + "properties": { |
| 238 | + "file_data": { |
| 239 | + "type": "string", |
| 240 | + "description": "The base64 encoded file data, used when passing the file to the model as a string." |
| 241 | + }, |
| 242 | + "file_id": { |
| 243 | + "type": "string", |
| 244 | + "description": "The ID of an uploaded file to use as input." |
| 245 | + }, |
| 246 | + "filename": { |
| 247 | + "type": "string", |
| 248 | + "description": "The name of the file, used when passing the file to the model as a string." |
| 249 | + } |
| 250 | + }, |
| 251 | + "additionalProperties": false |
| 252 | + }, |
| 253 | + "type": { |
| 254 | + "type": "string", |
| 255 | + "const": "file", |
| 256 | + "description": "The type of the content part. Always `file`." |
| 257 | + } |
| 258 | + }, |
| 259 | + "required": [ |
| 260 | + "file", |
| 261 | + "type" |
| 262 | + ], |
| 263 | + "additionalProperties": false, |
| 264 | + "description": "Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text generation." |
231 | 265 | }
|
232 | 266 | ],
|
233 | 267 | "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)."
|
|
538 | 572 | "type": "string",
|
539 | 573 | "enum": [
|
540 | 574 | "wav",
|
| 575 | + "aac", |
541 | 576 | "mp3",
|
542 | 577 | "flac",
|
543 | 578 | "opus",
|
|
546 | 581 | "description": "Specifies the output audio format. Must be one of `wav`, `aac`, `mp3`, `flac`, `opus`, or `pcm16`."
|
547 | 582 | },
|
548 | 583 | "voice": {
|
549 |
| - "type": "string", |
550 |
| - "enum": [ |
551 |
| - "alloy", |
552 |
| - "ash", |
553 |
| - "ballad", |
554 |
| - "coral", |
555 |
| - "echo", |
556 |
| - "sage", |
557 |
| - "shimmer", |
558 |
| - "verse" |
| 584 | + "anyOf": [ |
| 585 | + { |
| 586 | + "type": "string" |
| 587 | + }, |
| 588 | + { |
| 589 | + "type": "string", |
| 590 | + "enum": [ |
| 591 | + "alloy", |
| 592 | + "ash", |
| 593 | + "ballad", |
| 594 | + "coral", |
| 595 | + "echo", |
| 596 | + "fable", |
| 597 | + "onyx", |
| 598 | + "nova", |
| 599 | + "sage", |
| 600 | + "shimmer", |
| 601 | + "verse" |
| 602 | + ] |
| 603 | + } |
559 | 604 | ],
|
560 |
| - "description": "The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive)." |
| 605 | + "description": "The voice the model uses to respond. Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, `shimmer`, and `verse`." |
561 | 606 | }
|
562 | 607 | },
|
563 | 608 | "required": [
|
|
568 | 613 | "description": "Parameters for audio output. Required when audio output is requested with `modalities: [\"audio\"]`. [Learn more](https://platform.openai.com/docs/guides/audio)."
|
569 | 614 | },
|
570 | 615 | "reasoningEffort": {
|
571 |
| - "type": "string", |
572 |
| - "enum": [ |
573 |
| - "low", |
574 |
| - "medium", |
575 |
| - "high" |
| 616 | + "anyOf": [ |
| 617 | + { |
| 618 | + "type": [ |
| 619 | + "string", |
| 620 | + "null" |
| 621 | + ], |
| 622 | + "enum": [ |
| 623 | + "low", |
| 624 | + "medium", |
| 625 | + "high", |
| 626 | + null |
| 627 | + ], |
| 628 | + "description": "**o-series models only**\n\nConstrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `low`, `medium`, and `high`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response." |
| 629 | + }, |
| 630 | + { |
| 631 | + "type": "null" |
| 632 | + } |
576 | 633 | ],
|
577 | 634 | "description": "Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."
|
578 | 635 | },
|
|
0 commit comments