
Commit f8a360a

Change max_tokens type to Option<u32> (#233)
Authored by 64bit
1 parent 5c9c817

File tree: 14 files changed, +20 -20 lines
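Why the wider type matters, in a minimal sketch (not part of the commit; the model name and token count are illustrative): the builder setters now accept u32, so a request can ask for more than 65,535 generated tokens, which the old u16 field could not represent.

use async_openai::types::{ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // 100_000 does not fit in u16 (max 65_535); with the u32 setter it compiles and round-trips.
    let request = CreateChatCompletionRequestArgs::default()
        .model("gpt-3.5-turbo") // illustrative model name
        .max_tokens(100_000_u32)
        .messages([ChatCompletionRequestUserMessageArgs::default()
            .content("Summarize the release notes.")
            .build()?
            .into()])
        .build()?;
    assert_eq!(request.max_tokens, Some(100_000));
    Ok(())
}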

async-openai/src/lib.rs
Lines changed: 1 addition & 1 deletion

@@ -58,7 +58,7 @@
 //! let request = CreateCompletionRequestArgs::default()
 //!     .model("gpt-3.5-turbo-instruct")
 //!     .prompt("Tell me the recipe of alfredo pasta")
-//!     .max_tokens(40_u16)
+//!     .max_tokens(40_u32)
 //!     .build()
 //!     .unwrap();
 //!

async-openai/src/types/chat.rs
Lines changed: 1 addition & 1 deletion

@@ -420,7 +420,7 @@ pub struct CreateChatCompletionRequest {
     ///
     /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub max_tokens: Option<u16>,
+    pub max_tokens: Option<u32>,

     /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
     #[serde(skip_serializing_if = "Option::is_none")]
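Because the field keeps #[serde(skip_serializing_if = "Option::is_none")], only the integer width changes on the wire: an unset max_tokens is still omitted from the request body, and a set value serializes as a plain JSON number. A hedged sketch of that behavior (assumes serde_json is available; values are illustrative):

use async_openai::types::{ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let request = CreateChatCompletionRequestArgs::default()
        .model("gpt-3.5-turbo") // illustrative model name
        .max_tokens(70_000_u32) // above u16::MAX, representable now that the field is u32
        .messages([ChatCompletionRequestUserMessageArgs::default()
            .content("hello")
            .build()?
            .into()])
        .build()?;

    // The value is sent as a plain JSON number; had max_tokens been left unset,
    // `skip_serializing_if = "Option::is_none"` would drop the key entirely.
    let body = serde_json::to_string(&request)?;
    assert!(body.contains("\"max_tokens\":70000"));
    Ok(())
}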

async-openai/src/types/completion.rs
Lines changed: 1 addition & 1 deletion

@@ -33,7 +33,7 @@ pub struct CreateCompletionRequest {
     ///
     /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub max_tokens: Option<u16>,
+    pub max_tokens: Option<u32>,

     /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
     ///
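The doc comment above states the practical constraint on the value: prompt tokens plus max_tokens must fit within the model's context length. A small sketch of that budget arithmetic (the context length and prompt size are assumed numbers, not taken from the commit):

/// Remaining generation budget once the prompt has been counted.
fn remaining_budget(context_length: u32, prompt_tokens: u32) -> u32 {
    context_length.saturating_sub(prompt_tokens)
}

fn main() {
    // Assumed: a 16_385-token context window and a 1_200-token prompt.
    let max_tokens = remaining_budget(16_385, 1_200);
    assert_eq!(max_tokens, 15_185); // safe upper bound for max_tokens
}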

async-openai/tests/boxed_future.rs
Lines changed: 1 addition & 1 deletion

@@ -32,7 +32,7 @@ async fn boxed_future_test() {
         .prompt("does 2 and 2 add to four? (yes/no):\n")
         .stream(true)
         .logprobs(3)
-        .max_tokens(64_u16)
+        .max_tokens(64_u32)
         .build()
         .unwrap();

examples/azure-openai-service/src/main.rs
Lines changed: 2 additions & 2 deletions

@@ -11,7 +11,7 @@ use async_openai::{

 async fn chat_completion_example(client: &Client<AzureConfig>) -> Result<(), Box<dyn Error>> {
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo")
         .messages([
             ChatCompletionRequestSystemMessageArgs::default()

@@ -44,7 +44,7 @@ async fn chat_completion_example(client: &Client<AzureConfig>) -> Result<(), Box
     // .n(1)
     // .prompt("Tell me a short bedtime story about Optimus Prime and Bumblebee in Sir David Attenborough voice")
     // .stream(true)
-    // .max_tokens(512_u16)
+    // .max_tokens(512_u32)
     // .build()?;

     // let mut stream = client.completions().create_stream(request).await?;

examples/chat-stream/src/main.rs
Lines changed: 1 addition & 1 deletion

@@ -11,7 +11,7 @@ async fn main() -> Result<(), Box<dyn Error>> {

     let request = CreateChatCompletionRequestArgs::default()
         .model("gpt-3.5-turbo")
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content("Write a marketing blog praising and introducing Rust library async-openai")
             .build()?

examples/chat/src/main.rs
Lines changed: 1 addition & 1 deletion

@@ -13,7 +13,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new();

     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo")
         .messages([
             ChatCompletionRequestSystemMessageArgs::default()

examples/completions-stream/src/main.rs
Lines changed: 1 addition & 1 deletion

@@ -10,7 +10,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
         .n(1)
         .prompt("Tell me a bedtime story about Optimus Prime and Bumblebee")
         .stream(true)
-        .max_tokens(1024_u16)
+        .max_tokens(1024_u32)
         .build()?;

     let mut stream = client.completions().create_stream(request).await?;

examples/completions/src/main.rs
Lines changed: 2 additions & 2 deletions

@@ -10,7 +10,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let request = CreateCompletionRequestArgs::default()
         .model("gpt-3.5-turbo-instruct")
         .prompt("Tell me a joke about the universe")
-        .max_tokens(40_u16)
+        .max_tokens(40_u32)
         .build()?;

     let response = client.completions().create(request).await?;

@@ -27,7 +27,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
             "How old is the human civilization?",
             "How old is the Earth?",
         ])
-        .max_tokens(40_u16)
+        .max_tokens(40_u32)
         .build()?;

     let response = client.completions().create(request).await?;

examples/function-call-stream/src/main.rs
Lines changed: 2 additions & 2 deletions

@@ -19,7 +19,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new();

     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo-0613")
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content("What's the weather like in Boston?")

@@ -110,7 +110,7 @@ async fn call_fn(
     ];

     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo-0613")
         .messages(message)
         .build()?;
