
Commit

Change max_tokens type to Option<u32> (#233)
MakotoE authored Jun 11, 2024
1 parent 5c9c817 commit f8a360a
Showing 14 changed files with 20 additions and 20 deletions.
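
Note: the commit message does not state a motivation, but the effect of the type change is plain: Option<u16> caps max_tokens at u16::MAX (65,535), while Option<u32> leaves ample headroom for larger token budgets. A tiny standalone snippet, purely illustrative of the ranges involved (the 128,000 figure is an assumed example budget, not something taken from this commit):

fn main() {
    // Upper bound of the old field type: 65,535 tokens.
    let old_cap = u16::MAX as u32;
    // Assumed example budget a caller might want to request; it does not
    // fit in a u16 but is easily representable as a u32.
    let example_budget: u32 = 128_000;
    assert!(example_budget > old_cap);
    println!("old cap = {old_cap}, example budget = {example_budget}");
}
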
2 changes: 1 addition & 1 deletion async-openai/src/lib.rs
@@ -58,7 +58,7 @@
 //! let request = CreateCompletionRequestArgs::default()
 //!     .model("gpt-3.5-turbo-instruct")
 //!     .prompt("Tell me the recipe of alfredo pasta")
-//!     .max_tokens(40_u16)
+//!     .max_tokens(40_u32)
 //!     .build()
 //!     .unwrap();
 //!
2 changes: 1 addition & 1 deletion async-openai/src/types/chat.rs
@@ -420,7 +420,7 @@ pub struct CreateChatCompletionRequest {
     ///
     /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub max_tokens: Option<u16>,
+    pub max_tokens: Option<u32>,

     /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
     #[serde(skip_serializing_if = "Option::is_none")]
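
With the request struct field widened, callers can pass max_tokens values that do not fit in a u16. A minimal sketch assuming the builder and client calls shown elsewhere in this diff (the model name and the 70,000-token budget are illustrative; the chosen model must actually accept such a limit):

use async_openai::{
    types::{ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs},
    Client,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new();

    let request = CreateChatCompletionRequestArgs::default()
        // 70_000 exceeds u16::MAX, so this literal only type-checks
        // against the new Option<u32> field.
        .max_tokens(70_000_u32)
        .model("gpt-4-1106-preview")
        .messages([ChatCompletionRequestUserMessageArgs::default()
            .content("Write a marketing blog praising and introducing Rust library async-openai")
            .build()?
            .into()])
        .build()?;

    let response = client.chat().create(request).await?;
    for choice in response.choices {
        println!("{:?}", choice.message.content);
    }
    Ok(())
}
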
2 changes: 1 addition & 1 deletion async-openai/src/types/completion.rs
@@ -33,7 +33,7 @@ pub struct CreateCompletionRequest {
     ///
     /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub max_tokens: Option<u16>,
+    pub max_tokens: Option<u32>,

     /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
     ///
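
The #[serde(skip_serializing_if = "Option::is_none")] attribute carries over unchanged, so an unset max_tokens is still omitted from the request body and a set value serializes as a plain JSON number. A standalone sketch with a stand-in struct (not the crate's actual request type) to show that behavior:

use serde::Serialize;

// Stand-in mirroring only the two fields needed to show how the
// widened Option<u32> field serializes.
#[derive(Serialize)]
struct RequestSketch {
    model: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    max_tokens: Option<u32>,
}

fn main() {
    let unset = RequestSketch {
        model: "gpt-3.5-turbo-instruct".into(),
        max_tokens: None,
    };
    let set = RequestSketch {
        model: "gpt-3.5-turbo-instruct".into(),
        max_tokens: Some(70_000),
    };

    // None: the field is omitted from the JSON entirely.
    assert_eq!(
        serde_json::to_string(&unset).unwrap(),
        r#"{"model":"gpt-3.5-turbo-instruct"}"#
    );
    // Some(n): serialized as a number, now anywhere in u32 range.
    assert_eq!(
        serde_json::to_string(&set).unwrap(),
        r#"{"model":"gpt-3.5-turbo-instruct","max_tokens":70000}"#
    );
    println!("both assertions hold");
}
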
2 changes: 1 addition & 1 deletion async-openai/tests/boxed_future.rs
@@ -32,7 +32,7 @@ async fn boxed_future_test() {
         .prompt("does 2 and 2 add to four? (yes/no):\n")
         .stream(true)
         .logprobs(3)
-        .max_tokens(64_u16)
+        .max_tokens(64_u32)
         .build()
         .unwrap();

4 changes: 2 additions & 2 deletions examples/azure-openai-service/src/main.rs
@@ -11,7 +11,7 @@ use async_openai::{

 async fn chat_completion_example(client: &Client<AzureConfig>) -> Result<(), Box<dyn Error>> {
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo")
         .messages([
             ChatCompletionRequestSystemMessageArgs::default()
@@ -44,7 +44,7 @@ async fn chat_completion_example(client: &Client<AzureConfig>) -> Result<(), Box<dyn Error>> {
     // .n(1)
     // .prompt("Tell me a short bedtime story about Optimus Prime and Bumblebee in Sir David Attenborough voice")
     // .stream(true)
-    // .max_tokens(512_u16)
+    // .max_tokens(512_u32)
     // .build()?;

     // let mut stream = client.completions().create_stream(request).await?;
2 changes: 1 addition & 1 deletion examples/chat-stream/src/main.rs
@@ -11,7 +11,7 @@ async fn main() -> Result<(), Box<dyn Error>> {

     let request = CreateChatCompletionRequestArgs::default()
         .model("gpt-3.5-turbo")
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content("Write a marketing blog praising and introducing Rust library async-openai")
             .build()?
2 changes: 1 addition & 1 deletion examples/chat/src/main.rs
@@ -13,7 +13,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new();

     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo")
         .messages([
             ChatCompletionRequestSystemMessageArgs::default()
2 changes: 1 addition & 1 deletion examples/completions-stream/src/main.rs
@@ -10,7 +10,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
         .n(1)
         .prompt("Tell me a bedtime story about Optimus Prime and Bumblebee")
         .stream(true)
-        .max_tokens(1024_u16)
+        .max_tokens(1024_u32)
         .build()?;

     let mut stream = client.completions().create_stream(request).await?;
4 changes: 2 additions & 2 deletions examples/completions/src/main.rs
@@ -10,7 +10,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let request = CreateCompletionRequestArgs::default()
         .model("gpt-3.5-turbo-instruct")
         .prompt("Tell me a joke about the universe")
-        .max_tokens(40_u16)
+        .max_tokens(40_u32)
         .build()?;

     let response = client.completions().create(request).await?;
@@ -27,7 +27,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
             "How old is the human civilization?",
             "How old is the Earth?",
         ])
-        .max_tokens(40_u16)
+        .max_tokens(40_u32)
         .build()?;

     let response = client.completions().create(request).await?;
4 changes: 2 additions & 2 deletions examples/function-call-stream/src/main.rs
@@ -19,7 +19,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new();

     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo-0613")
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content("What's the weather like in Boston?")
@@ -110,7 +110,7 @@ async fn call_fn(
     ];

     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo-0613")
         .messages(message)
         .build()?;
4 changes: 2 additions & 2 deletions examples/function-call/src/main.rs
@@ -24,7 +24,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new();

     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo-0613")
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content("What's the weather like in Boston?")
@@ -85,7 +85,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     println!("{}", serde_json::to_string(&message).unwrap());

     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-3.5-turbo-0613")
         .messages(message)
         .build()?;
4 changes: 2 additions & 2 deletions examples/tool-call-stream/src/main.rs
@@ -22,7 +22,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     let user_prompt = "What's the weather like in Boston and Atlanta?";

     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-4-1106-preview")
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content(user_prompt)
@@ -170,7 +170,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     messages.extend(tool_messages);

     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-4-1106-preview")
         .messages(messages)
         .build()
4 changes: 2 additions & 2 deletions examples/tool-call/src/main.rs
@@ -19,7 +19,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     let user_prompt = "What's the weather like in Boston and Atlanta?";

     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-4-1106-preview")
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content(user_prompt)
@@ -110,7 +110,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     messages.extend(tool_messages);

     let subsequent_request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
+        .max_tokens(512u32)
         .model("gpt-4-1106-preview")
         .messages(messages)
         .build()
2 changes: 1 addition & 1 deletion examples/vision-chat/src/main.rs
@@ -18,7 +18,7 @@ async fn main() -> Result<(), Box<dyn Error>> {

     let request = CreateChatCompletionRequestArgs::default()
         .model("gpt-4-vision-preview")
-        .max_tokens(300_u16)
+        .max_tokens(300_u32)
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content(vec![
                 ChatCompletionRequestMessageContentPartTextArgs::default()
