diff --git a/async-openai/src/lib.rs b/async-openai/src/lib.rs
index b862f824..7218ac45 100644
--- a/async-openai/src/lib.rs
+++ b/async-openai/src/lib.rs
@@ -58,7 +58,7 @@
 //!     let request = CreateCompletionRequestArgs::default()
 //!         .model("gpt-3.5-turbo-instruct")
 //!         .prompt("Tell me the recipe of alfredo pasta")
-//!         .max_tokens(40_u16)
+//!         .max_tokens(40_u32)
 //!         .build()
 //!         .unwrap();
 //!
diff --git a/async-openai/src/types/chat.rs b/async-openai/src/types/chat.rs
index 44fad140..2dc65d16 100644
--- a/async-openai/src/types/chat.rs
+++ b/async-openai/src/types/chat.rs
@@ -420,7 +420,7 @@ pub struct CreateChatCompletionRequest {
     ///
     /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub max_tokens: Option<u16>,
+    pub max_tokens: Option<u32>,
 
     /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
     #[serde(skip_serializing_if = "Option::is_none")]
diff --git a/async-openai/src/types/completion.rs b/async-openai/src/types/completion.rs
index 901ccd50..eea5b7e6 100644
--- a/async-openai/src/types/completion.rs
+++ b/async-openai/src/types/completion.rs
@@ -33,7 +33,7 @@ pub struct CreateCompletionRequest {
     ///
     /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub max_tokens: Option<u16>,
+    pub max_tokens: Option<u32>,
 
     /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
     ///
diff --git a/async-openai/tests/boxed_future.rs b/async-openai/tests/boxed_future.rs
index 9eae1105..fa03914a 100644
--- a/async-openai/tests/boxed_future.rs
+++ b/async-openai/tests/boxed_future.rs
@@ -32,7 +32,7 @@ async fn boxed_future_test() {
         .prompt("does 2 and 2 add to four? (yes/no):\n")
         .stream(true)
         .logprobs(3)
-        .max_tokens(64_u16)
+        .max_tokens(64_u32)
         .build()
         .unwrap();
 
diff --git a/examples/azure-openai-service/src/main.rs b/examples/azure-openai-service/src/main.rs
index c798c865..8137cc18 100644
--- a/examples/azure-openai-service/src/main.rs
+++ b/examples/azure-openai-service/src/main.rs
@@ -11,7 +11,7 @@ use async_openai::{
 
 async fn chat_completion_example(client: &Client<AzureConfig>) -> Result<(), Box<dyn Error>> {
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo")
         .messages([
             ChatCompletionRequestSystemMessageArgs::default()
@@ -44,7 +44,7 @@ async fn chat_completion_example(client: &Client<AzureConfig>) -> Result<(), Box<dyn Error>> {
 //         .n(1)
 //         .prompt("Tell me a short bedtime story about Optimus Prime and Bumblebee in Sir David Attenborough voice")
 //         .stream(true)
-//         .max_tokens(512_u16)
+//         .max_tokens(512_u32)
 //         .build()?;
 
 //     let mut stream = client.completions().create_stream(request).await?;
diff --git a/examples/chat-stream/src/main.rs b/examples/chat-stream/src/main.rs
index 0d7904cf..2241e9ee 100644
--- a/examples/chat-stream/src/main.rs
+++ b/examples/chat-stream/src/main.rs
@@ -11,7 +11,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
 
     let request = CreateChatCompletionRequestArgs::default()
         .model("gpt-3.5-turbo")
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content("Write a marketing blog praising and introducing Rust library async-openai")
             .build()?
diff --git a/examples/chat/src/main.rs b/examples/chat/src/main.rs
index 7b9f27c4..4ca389ef 100644
--- a/examples/chat/src/main.rs
+++ b/examples/chat/src/main.rs
@@ -13,7 +13,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new();
 
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo")
         .messages([
             ChatCompletionRequestSystemMessageArgs::default()
diff --git a/examples/completions-stream/src/main.rs b/examples/completions-stream/src/main.rs
index d75d72f1..70b9f5bc 100644
--- a/examples/completions-stream/src/main.rs
+++ b/examples/completions-stream/src/main.rs
@@ -10,7 +10,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
         .n(1)
         .prompt("Tell me a bedtime story about Optimus Prime and Bumblebee")
         .stream(true)
-        .max_tokens(1024_u16)
+        .max_tokens(1024_u32)
         .build()?;
 
     let mut stream = client.completions().create_stream(request).await?;
diff --git a/examples/completions/src/main.rs b/examples/completions/src/main.rs
index 25049ead..d54b4837 100644
--- a/examples/completions/src/main.rs
+++ b/examples/completions/src/main.rs
@@ -10,7 +10,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let request = CreateCompletionRequestArgs::default()
         .model("gpt-3.5-turbo-instruct")
         .prompt("Tell me a joke about the universe")
-        .max_tokens(40_u16)
+        .max_tokens(40_u32)
         .build()?;
 
     let response = client.completions().create(request).await?;
@@ -27,7 +27,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
             "How old is the human civilization?",
             "How old is the Earth?",
         ])
-        .max_tokens(40_u16)
+        .max_tokens(40_u32)
         .build()?;
 
     let response = client.completions().create(request).await?;
diff --git a/examples/function-call-stream/src/main.rs b/examples/function-call-stream/src/main.rs
index 2dee4f6e..75f2e637 100644
--- a/examples/function-call-stream/src/main.rs
+++ b/examples/function-call-stream/src/main.rs
@@ -19,7 +19,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new();
 
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo-0613")
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content("What's the weather like in Boston?")
@@ -110,7 +110,7 @@ async fn call_fn(
     ];
 
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo-0613")
         .messages(message)
         .build()?;
diff --git a/examples/function-call/src/main.rs b/examples/function-call/src/main.rs
index e1862cdb..76d63fc3 100644
--- a/examples/function-call/src/main.rs
+++ b/examples/function-call/src/main.rs
@@ -24,7 +24,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new();
 
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo-0613")
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content("What's the weather like in Boston?")
@@ -85,7 +85,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     println!("{}", serde_json::to_string(&message).unwrap());
 
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo-0613")
         .messages(message)
         .build()?;
diff --git a/examples/tool-call-stream/src/main.rs b/examples/tool-call-stream/src/main.rs
index ec421846..732d9814 100644
--- a/examples/tool-call-stream/src/main.rs
+++ b/examples/tool-call-stream/src/main.rs
@@ -22,7 +22,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let user_prompt = "What's the weather like in Boston and Atlanta?";
 
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-4-1106-preview")
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content(user_prompt)
@@ -170,7 +170,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     messages.extend(tool_messages);
 
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-4-1106-preview")
         .messages(messages)
         .build()
diff --git a/examples/tool-call/src/main.rs b/examples/tool-call/src/main.rs
index 7a3598fc..003dd975 100644
--- a/examples/tool-call/src/main.rs
+++ b/examples/tool-call/src/main.rs
@@ -19,7 +19,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let user_prompt = "What's the weather like in Boston and Atlanta?";
 
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-4-1106-preview")
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content(user_prompt)
@@ -110,7 +110,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     messages.extend(tool_messages);
 
     let subsequent_request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-4-1106-preview")
         .messages(messages)
         .build()
diff --git a/examples/vision-chat/src/main.rs b/examples/vision-chat/src/main.rs
index 4b95958c..3fbf001a 100644
--- a/examples/vision-chat/src/main.rs
+++ b/examples/vision-chat/src/main.rs
@@ -18,7 +18,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
 
     let request = CreateChatCompletionRequestArgs::default()
         .model("gpt-4-vision-preview")
-        .max_tokens(300_u16)
+        .max_tokens(300_u32)
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content(vec![
                 ChatCompletionRequestMessageContentPartTextArgs::default()