From f68cc3af1908a3224a4cf7e53e033cb8c88cca82 Mon Sep 17 00:00:00 2001
From: Paul Hendricks
Date: Mon, 30 Jun 2025 14:17:54 -0400
Subject: [PATCH] bug: changing top_logprobs from u32 to u8 to be consistent
 with chat

---
 async-openai/src/types/responses.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/async-openai/src/types/responses.rs b/async-openai/src/types/responses.rs
index 4e0eeec7..3a50faf9 100644
--- a/async-openai/src/types/responses.rs
+++ b/async-openai/src/types/responses.rs
@@ -281,7 +281,7 @@ pub struct CreateResponse {
     /// An integer between 0 and 20 specifying the number of most likely tokens to return
     /// at each token position, each with an associated log probability.
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub top_logprobs: Option<u32>, // TODO add validation of range
+    pub top_logprobs: Option<u8>, // TODO add validation of range
 
     /// An alternative to sampling with temperature, called nucleus sampling,
     /// where the model considers the results of the tokens with top_p probability