From 206178665e498451b7a6f4a7550f9d79ca1c0606 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Date: Mon, 24 Jun 2024 11:59:08 -0400 Subject: [PATCH] Apply suggestions from code review Co-authored-by: rachelsaunders <52258509+rachelsaunders@users.noreply.github.com> --- .../google/ai/client/generative/samples/chat.kt | 6 +++--- .../ai/client/generative/samples/count_tokens.kt | 14 +++++++------- .../ai/client/generative/samples/java/chat.java | 10 +++++----- .../generative/samples/java/count_tokens.java | 14 +++++++------- .../samples/java/model_configuration.java | 2 +- 5 files changed, 23 insertions(+), 23 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt index 020e0c17..f116c5a0 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt @@ -51,7 +51,7 @@ suspend fun chatStreaming() { // Use streaming with multi-turn conversations (like chat) val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -70,7 +70,7 @@ suspend fun chatStreaming() { } suspend fun chatStreamingWithImages(context: Context) { - // [START chat_with-images_streaming] + // [START chat_streaming_with_images] // Use streaming with multi-turn conversations (like chat) val generativeModel = GenerativeModel( @@ -96,5 +96,5 @@ suspend fun chatStreamingWithImages(context: Context) { } chat.sendMessageStream(inputContent).collect { chunk -> print(chunk.text) } - // [END chat_with-images_streaming] + // [END chat_streaming_with_images] } diff --git 
a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt index 6efd4cb6..543a15d1 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt @@ -24,10 +24,10 @@ import com.google.ai.client.generativeai.type.content import com.google.ai.sample.R suspend fun tokensTextOnly() { - // [START tokens_text-only] + // [START tokens_text_only] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -35,14 +35,14 @@ suspend fun tokensTextOnly() { // For text-only input val (totalTokens) = generativeModel.countTokens("Write a story about a magic backpack.") print(totalTokens) - // [END tokens_text-only] + // [END tokens_text_only] } suspend fun tokensChat() { // [START tokens_chat] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -64,10 +64,10 @@ suspend fun tokensChat() { } suspend fun tokensMultimodalImageInline(context: Context) { - // [START tokens_multimodal-image_inline] + // [START tokens_multimodal_image_inline] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration 
variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -83,5 +83,5 @@ suspend fun tokensMultimodalImageInline(context: Context) { val (totalTokens) = generativeModel.countTokens(multiModalContent) print(totalTokens) - // [START tokens_multimodal-image_inline] + // [END tokens_multimodal_image_inline] } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java index d6e43470..3af32ef2 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -37,7 +37,7 @@ class Chat { void chat() { // [START chat] - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -94,7 +94,7 @@ public void onFailure(Throwable t) { void chatStreaming() { // [START chat_streaming] - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -159,8 +159,8 @@ public void onError(Throwable t) {} } void chatStreamingWithImages(Context context) { - // [START chat_with-images_streaming] - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // [START chat_streaming_with_images] + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -223,6 +223,6 @@ public void onSubscribe(Subscription s) { public void onError(Throwable t) {} // [END_EXCLUDE] }); - // [END chat_with-images_streaming] + // [END chat_streaming_with_images] } } diff --git 
a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index 52871875..d0d89ced 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -34,8 +34,8 @@ class CountTokens { void tokensTextOnly() { - // [START tokens_text-only] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // [START tokens_text_only] + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -68,12 +68,12 @@ public void onFailure(Throwable t) { } }, executor); - // [END tokens_text-only] + // [END tokens_text_only] } void tokensChat() { // [START tokens_chat] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -127,8 +127,8 @@ public void onFailure(Throwable t) { } void tokensMultimodalImageInline(Context context) { - // [START tokens_multimodal-image_inline] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // [START tokens_multimodal_image_inline] + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -170,7 +170,7 @@ public void onFailure(Throwable t) { } }, executor); - // [END tokens_multimodal-image_inline] + // [END tokens_multimodal_image_inline] } } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java index c551f99d..7a197bce 100644 --- 
a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java @@ -31,7 +31,7 @@ void configureModel() { GenerationConfig generationConfig = configBuilder.build(); - // The Gemini 1.5 models are versatile and work with most use cases + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel("gemini-1.5-flash", BuildConfig.apiKey, generationConfig);