From 6e2faa77dc516f935e108ed98f28ee81bf1dd630 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Tue, 11 Jun 2024 13:50:03 -0400 Subject: [PATCH 01/43] Initial sample code --- .../samples/java/text_generation.java | 272 ++++++++++++++++++ .../generative/samples/text_generation.kt | 151 ++++++++++ 2 files changed, 423 insertions(+) create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java new file mode 100644 index 00000000..1802dc66 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java @@ -0,0 +1,272 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples; + +class TextGeneration { + void TextGenTextOnlyPrompt () { + // [START text_gen_text-only-prompt] + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + + Content content = new Content.Builder() + .addText("Write a story about a magic backpack.") + .build(); + + Executor executor; // = ... 
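+        // One way to supply this Executor (illustrative only): Futures.addCallback below runs its
+        // callback on whatever Executor you pass, for example Executors.newSingleThreadExecutor()
+        // for a background thread, or ContextCompat.getMainExecutor(context) for the main thread,
+        // where `context` is an Android Context your app already has.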
+ + ListenableFuture response = model.generateContent(content); + Futures.addCallback(response, new FutureCallback() { + @Override + public void onSuccess(GenerateContentResponse result) { + String resultText = result.getText(); + System.out.println(resultText); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, executor); + // [END text_gen_text-only-prompt] + } + + void TextGenTextOnlyPromptStreaming () { + // [START text_gen_text-only-prompt_streaming] + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + + Content content = new Content.Builder() + .addText("Write a story about a magic backpack.") + .build(); + + Publisher streamingResponse = + model.generateContentStream(content); + + StringBuilder outputContent = new StringBuilder(); + + streamingResponse.subscribe(new Subscriber() { + @Override + public void onNext(GenerateContentResponse generateContentResponse) { + String chunk = generateContentResponse.getText(); + outputContent.append(chunk); + } + + @Override + public void onComplete() { + System.out.println(outputContent); + } + + @Override + public void onError(Throwable t) { + t.printStackTrace(); + } + + @Override + public void onSubscribe(Subscription s) { + s.request(Long.MAX_VALUE); + } + }); + // [END text_gen_text-only-prompt_streaming] + } + + void TextGenMultimodalOneImagePrompt () { + // [START text_gen_multimodal-one-image-prompt] + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + + Bitmap image; // = ... + + Content content = new Content.Builder() + .addText("What's different between these pictures?") + .addImage(image) + .build(); + + + Executor executor; // = ... + + ListenableFuture response = model.generateContent(content); + Futures.addCallback(response, new FutureCallback() { + @Override + public void onSuccess(GenerateContentResponse result) { + String resultText = result.getText(); + System.out.println(resultText); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, executor); + // [END text_gen_multimodal-one-image-prompt] + } + + + void TextGenMultimodalOneImagePromptStreaming () { + // [START text_gen_multimodal-one-image-prompt_streaming] + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + + Bitmap image1; // = ... + Bitmap image2; // = ... + + Content content = new Content.Builder() + .addText("What's different between these pictures?") + .addImage(image1) + .addImage(image2) + .build(); + + + Executor executor; // = ... 
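+        // The Bitmap placeholders above are illustrative; for example, they could be decoded with
+        // BitmapFactory.decodeResource(getResources(), R.drawable.image1), where the resource
+        // names are hypothetical and depend on your app.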
+ + Publisher streamingResponse = + model.generateContentStream(content); + + StringBuilder outputContent = new StringBuilder(); + + streamingResponse.subscribe(new Subscriber() { + @Override + public void onNext(GenerateContentResponse generateContentResponse) { + String chunk = generateContentResponse.getText(); + outputContent.append(chunk); + } + + @Override + public void onComplete() { + System.out.println(outputContent); + } + + @Override + public void onError(Throwable t) { + t.printStackTrace(); + } + + @Override + public void onSubscribe(Subscription s) { + s.request(Long.MAX_VALUE); + } + }); + // [END text_gen_multimodal-one-image-prompt_streaming] + } + + + void TextGenMultimodalMultiImagePrompt () { + // [START text_gen_multimodal-multi-image-prompt] + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + + Bitmap image1; // = ... + Bitmap image2; // = ... + + Content content = new Content.Builder() + .addText("What's different between these pictures?") + .addImage(image1) + .addImage(image2) + .build(); + + + Executor executor; // = ... + + ListenableFuture response = model.generateContent(content); + Futures.addCallback(response, new FutureCallback() { + @Override + public void onSuccess(GenerateContentResponse result) { + String resultText = result.getText(); + System.out.println(resultText); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, executor); + // [END text_gen_multimodal-multi-image-prompt] + } + + void TextGenMultimodalMultiImagePromptStreaming () { + // [START text_gen_multimodal-multi-image-prompt_streaming] + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + + Bitmap image1; // = ... + Bitmap image2; // = ... + + Content content = new Content.Builder() + .addText("What's different between these pictures?") + .addImage(image1) + .addImage(image2) + .build(); + + + Executor executor; // = ... 
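+        // The streaming call below returns a reactive-streams Publisher; because onSubscribe
+        // requests Long.MAX_VALUE, every partial response is delivered to onNext as it arrives.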
+ + Publisher streamingResponse = + model.generateContentStream(content); + + StringBuilder outputContent = new StringBuilder(); + + streamingResponse.subscribe(new Subscriber() { + @Override + public void onNext(GenerateContentResponse generateContentResponse) { + String chunk = generateContentResponse.getText(); + outputContent.append(chunk); + } + + @Override + public void onComplete() { + System.out.println(outputContent); + } + + @Override + public void onError(Throwable t) { + t.printStackTrace(); + } + + @Override + public void onSubscribe(Subscription s) { + s.request(Long.MAX_VALUE); + } + }); + // [END text_gen_multimodal-multi-image-prompt_streaming] + } + void TextGenMultimodalVideoPrompt () { + // [START text_gen_multimodal-video-prompt] + // TODO + // [END text_gen_multimodal-video-prompt] + } + + void TextGenMultimodalVideoPromptStreaming () { + // [START text_gen_multimodal-video-prompt_streaming] + // TODO + // [END text_gen_multimodal-video-prompt_streaming] + } +} diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt new file mode 100644 index 00000000..7d8258f6 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt @@ -0,0 +1,151 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples + +suspend fun textGenTextOnlyPrompt () { + // [START text_gen_text-only-prompt] + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + val prompt = "Write a story about a magic backpack." + val response = generativeModel.generateContent(prompt) + print(response.text) + // [END text_gen_text-only-prompt] +} + +suspend fun textGenTextOnlyPromptStreaming () { + // [START text_gen_text-only-prompt_streaming] + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + val prompt = "Write a story about a magic backpack." 
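+    // generateContentStream returns a kotlinx.coroutines Flow, so collect must be called from a
+    // coroutine – note that these sample functions are already marked suspend.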
+ // Use streaming with text-only input + generativeModel.generateContentStream(prompt).collect { chunk -> + print(chunk.text) + } + + // [END text_gen_text-only-prompt_streaming] +} + +suspend fun textGenMultimodalOneImagePrompt () { + // [START text_gen_multimodal-one-image-prompt] + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + val image: Bitmap // = ... + + val inputContent = content { + image(image) + text("What's in this picture?") + } + + val response = generativeModel.generateContent(inputContent) + print(response.text) + // [END text_gen_multimodal-one-image-prompt] +} + +suspend fun textGenMultimodalOneImagePromptStreaming () { + // [START text_gen_multimodal-one-image-prompt_streaming] + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + val image1: Bitmap // = ... + + val inputContent = content { + image(image1) + text("What's in this picture?") + } + + generativeModel.generateContentStream(prompt).collect { chunk -> + print(chunk.text) + } + // [END text_gen_multimodal-one-image-prompt_streaming] +} + +suspend fun textGenMultimodalMultiImagePrompt () { + // [START text_gen_multimodal-multi-image-prompt] + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + val image1: Bitmap // = ... + val image2: Bitmap // = ... + + val inputContent = content { + image(image1) + image(image2) + text("What's different between these pictures?") + } + + val response = generativeModel.generateContent(inputContent) + print(response.text) + + // [END text_gen_multimodal-multi-image-prompt] +} + +suspend fun textGenMultimodalMultiImagePromptStreaming () { + // [START text_gen_multimodal-multi-image-prompt_streaming] + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + val image1: Bitmap // = ... + val image2: Bitmap // = ... 
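+    // The Bitmap values above are placeholders; for example, they might come from
+    // BitmapFactory.decodeResource(context.resources, R.drawable.image1), where the resource
+    // names are only illustrative.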
+ + val inputContent = content { + image(image1) + image(image2) + text("What's different between these pictures?") + } + + generativeModel.generateContentStream(prompt).collect { chunk -> + print(chunk.text) + } + // [END text_gen_multimodal-multi-image-prompt_streaming] +} + +suspend fun textGenMultimodalVideoPrompt () { + // [START text_gen_multimodal-video-prompt] + // TODO + // [END text_gen_multimodal-video-prompt] +} + +suspend fun textGenMultimodalVideoPromptStreaming () { + // [START text_gen_multimodal-video-prompt_streaming] + // TODO + // [END text_gen_multimodal-video-prompt_streaming] +} From 4623ffb62222a4138d3bcaadd981a1fd2de00cdf Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Tue, 11 Jun 2024 13:51:24 -0400 Subject: [PATCH 02/43] Add empty readme --- samples/README.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 samples/README.md diff --git a/samples/README.md b/samples/README.md new file mode 100644 index 00000000..304360ca --- /dev/null +++ b/samples/README.md @@ -0,0 +1 @@ +Readme From 7a4404454d6a1dc8f1fb316ed76d9fc90ff8f817 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Wed, 12 Jun 2024 11:11:59 -0400 Subject: [PATCH 03/43] Add chat samples --- .../ai/client/generative/samples/chat.kt | 91 ++++++++ .../client/generative/samples/java/chat.java | 196 ++++++++++++++++++ 2 files changed, 287 insertions(+) create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/chat.kt create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt new file mode 100644 index 00000000..fc2bbf25 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt @@ -0,0 +1,91 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples + +suspend fun chat() { + // [START chat] + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + val chat = generativeModel.startChat( + history = listOf( + content(role = "user") { text("Hello, I have 2 dogs in my house.") }, + content(role = "model") { text("Great to meet you. 
What would you like to know?") } + ) + ) + + val response = chat.sendMessage("How many paws are in my house?") + print(response.text) + // [END chat] +} + +suspend fun chatStreaming() { + // [START chat_streaming] + // Use streaming with multi-turn conversations (like chat) + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + val chat = generativeModel.startChat( + history = listOf( + content(role = "user") { text("Hello, I have 2 dogs in my house.") }, + content(role = "model") { text("Great to meet you. What would you like to know?") } + ) + ) + + val chat = generativeModel.startChat() + chat.sendMessageStream("How many paws are in my house?").collect { chunk -> + print(chunk.text) + } + // [END chat_streaming] +} + +suspend fun chatStreamingWithImages() { + // [START chat_with-images_streaming] + // Use streaming with multi-turn conversations (like chat) + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + val chat = generativeModel.startChat( + history = listOf( + content(role = "user") { text("Hello, I have 2 dogs in my house.") }, + content(role = "model") { text("Great to meet you. What would you like to know?") } + ) + ) + + val chat = generativeModel.startChat() + val image: Bitmap // = ... + + val inputContent = content { + image(image) + text("This is a picture of them, what breed are they?") + } + + chat.sendMessageStream(inputContent).collect { chunk -> + print(chunk.text) + } + // [END chat_with-images_streaming] +} diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java new file mode 100644 index 00000000..e014e544 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -0,0 +1,196 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.google.ai.client.generative.samples; + +class Chat { + void chat() { + // [START chat] + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + + // (optional) Create previous chat history for context + Content.Builder userContentBuilder = new Content.Builder(); + userContentBuilder.setRole("user"); + userContentBuilder.addText("Hello, I have 2 dogs in my house."); + Content userContent = userContentBuilder.build(); + + Content.Builder modelContentBuilder = new Content.Builder(); + modelContentBuilder.setRole("model"); + modelContentBuilder.addText("Great to meet you. What would you like to know?"); + Content modelContent = userContentBuilder.build(); + + List history = Arrays.asList(userContent, modelContent); + + // Initialize the chat + ChatFutures chat = model.startChat(history); + + // Create a new user message + Content.Builder userMessageBuilder = new Content.Builder(); + userMessageBuilder.setRole("user"); + userMessageBuilder.addText("How many paws are in my house?"); + Content userMessage = userMessageBuilder.build(); + + Executor executor; // = ... + + // Send the message + ListenableFuture response = chat.sendMessage(userMessage); + + Futures.addCallback(response, new FutureCallback() { + @Override + public void onSuccess(GenerateContentResponse result) { + String resultText = result.getText(); + System.out.println(resultText); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, executor); + // [END chat] + } + + void chatStreaming() { + // [START chat_streaming] + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + + // (optional) Create previous chat history for context + Content.Builder userContentBuilder = new Content.Builder(); + userContentBuilder.setRole("user"); + userContentBuilder.addText("Hello, I have 2 dogs in my house."); + Content userContent = userContentBuilder.build(); + + Content.Builder modelContentBuilder = new Content.Builder(); + modelContentBuilder.setRole("model"); + modelContentBuilder.addText("Great to meet you. What would you like to know?"); + Content modelContent = userContentBuilder.build(); + + List history = Arrays.asList(userContent, modelContent); + + // Initialize the chat + ChatFutures chat = model.startChat(history); + + // Create a new user message + Content.Builder userMessageBuilder = new Content.Builder(); + userMessageBuilder.setRole("user"); + userMessageBuilder.addText("How many paws are in my house?"); + Content userMessage = userMessageBuilder.build(); + + Executor executor; // = ... 
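+    // Note: this Executor is only needed for the callback-based Futures API (as in chat() above);
+    // the reactive-streams Publisher used below delivers results through its Subscriber instead.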
+ + // Use streaming with text-only input + Publisher streamingResponse = + model.generateContentStream(inputContent); + + StringBuilder outputContent = new StringBuilder(); + + streamingResponse.subscribe(new Subscriber() { + @Override + public void onNext(GenerateContentResponse generateContentResponse) { + String chunk = generateContentResponse.getText(); + outputContent.append(chunk); + } + + @Override + public void onComplete() { + System.out.println(outputContent); + } + + @Override + public void onSubscribe(Subscription s) { + s.request(Long.MAX_VALUE); + } + + // ... other methods omitted for brevity + }); + + // [END chat_streaming] + } + + void chatStreamingWithImages() { + // [START chat_with-images_streaming] + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + + // (optional) Create previous chat history for context + Content.Builder userContentBuilder = new Content.Builder(); + userContentBuilder.setRole("user"); + userContentBuilder.addText("Hello, I have 2 dogs in my house."); + Content userContent = userContentBuilder.build(); + + Content.Builder modelContentBuilder = new Content.Builder(); + modelContentBuilder.setRole("model"); + modelContentBuilder.addText("Great to meet you. What would you like to know?"); + Content modelContent = userContentBuilder.build(); + + List history = Arrays.asList(userContent, modelContent); + + // Initialize the chat + ChatFutures chat = model.startChat(history); + + // Create a new user message + Bitmap image; // = ... + + Content content = new Content.Builder() + .addText("What's different between these pictures?") + .build(); + + Content.Builder userMessageBuilder = new Content.Builder(); + userMessageBuilder.setRole("user"); + userMessageBuilder.addImage(image); + userMessageBuilder.addText("This is a picture of them, what breed are they?"); + Content userMessage = userMessageBuilder.build(); + + Executor executor; // = ... + + // Use streaming with text-only input + Publisher streamingResponse = + model.generateContentStream(inputContent); + + StringBuilder outputContent = new StringBuilder(); + + streamingResponse.subscribe(new Subscriber() { + @Override + public void onNext(GenerateContentResponse generateContentResponse) { + String chunk = generateContentResponse.getText(); + outputContent.append(chunk); + } + + @Override + public void onComplete() { + System.out.println(outputContent); + } + + @Override + public void onSubscribe(Subscription s) { + s.request(Long.MAX_VALUE); + } + + // ... other methods omitted for brevity + }); + // [END chat_with-images_streaming] + } + +} From 17ed102725c60d719d41c053c71d95e0a82b8303 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Wed, 12 Jun 2024 12:09:07 -0400 Subject: [PATCH 04/43] Initial token count samples. 
Java samples are still missing --- .../client/generative/samples/count_tokens.kt | 76 +++++++++++++++++++ .../generative/samples/java/count_tokens.java | 21 +++++ 2 files changed, 97 insertions(+) create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt new file mode 100644 index 00000000..02d41d8a --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt @@ -0,0 +1,76 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples + +fun tokensTextOnly () { + // [START tokens_text-only] + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + // For text-only input + val (totalTokens) = generativeModel.countTokens("Write a story about a magic backpack.") + print(totalTokens) + // [END tokens_text-only] +} + +fun tokensChat () { + // [START tokens_chat] + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + val chat = generativeModel.startChat( + history = listOf( + content(role = "user") { text("Hello, I have 2 dogs in my house.") }, + content(role = "model") { text("Great to meet you. What would you like to know?") } + ) + ) + + val history = chat.history + val messageContent = content { text("This is the message I intend to send")} + val (totalTokens) = generativeModel.countTokens(*history.toTypedArray(), messageContent) + print(totalTokens) + // [END tokens_chat] +} + +fun tokensMultimodalImageInline () { + // [START tokens_multimodal-image_inline] + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey + ) + + val image1: Bitmap // = ... + val image2: Bitmap // = ... 
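+    // countTokens accepts the same content { } input as generateContent, so the total printed
+    // below should account for the image parts as well as the text (exact accounting is
+    // model-dependent).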
+ + val multiModalContent = content { + image(image1) + image(image2) + text("What's the difference between these pictures?") + } + + val (totalTokens) = generativeModel.countTokens(multiModalContent) + print(totalTokens) + // [START tokens_multimodal-image_inline] +} diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java new file mode 100644 index 00000000..7575d758 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -0,0 +1,21 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples; + +class CountTokens { + void tokensTextOnly (){} + void tokensChat (){} + void tokensMultimodalImageInline (){} +} From 66050a2de07d21cefc00db998f8b1d610a773a07 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Wed, 12 Jun 2024 12:09:27 -0400 Subject: [PATCH 05/43] Fix duplicated `startChat` declarations --- .../main/java/com/google/ai/client/generative/samples/chat.kt | 2 -- 1 file changed, 2 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt index fc2bbf25..1b8dca9b 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt @@ -52,7 +52,6 @@ suspend fun chatStreaming() { ) ) - val chat = generativeModel.startChat() chat.sendMessageStream("How many paws are in my house?").collect { chunk -> print(chunk.text) } @@ -76,7 +75,6 @@ suspend fun chatStreamingWithImages() { ) ) - val chat = generativeModel.startChat() val image: Bitmap // = ... 
val inputContent = content { From 90384df67ae7a97d9f53ee8d2c81f38d01600eff Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 10:40:38 -0400 Subject: [PATCH 06/43] Add java countToken samples --- .../generative/samples/java/count_tokens.java | 130 +++++++++++++++++- 1 file changed, 127 insertions(+), 3 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index 7575d758..3525a5e9 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -15,7 +15,131 @@ package com.google.ai.client.generative.samples; class CountTokens { - void tokensTextOnly (){} - void tokensChat (){} - void tokensMultimodalImageInline (){} + void tokensTextOnly() { + // [START tokens_text-only] + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + + Content inputContent = new Content.Builder() + .addText("Write a story about a magic backpack.") + .build(); + + Executor executor; // = ... + + // For text-only input + ListenableFuture countTokensResponse = model.countTokens(inputContent); + + Futures.addCallback(countTokensResponse, new FutureCallback() { + @Override + public void onSuccess(CountTokensResponse result) { + int totalTokens = result.getTotalTokens(); + System.out.println("TotalTokens = " + totalTokens); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, executor); + // [END tokens_text-only] + } + + void tokensChat (){ + // [START tokens_chat] + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + + // (optional) Create previous chat history for context + Content.Builder userContentBuilder = new Content.Builder(); + userContentBuilder.setRole("user"); + userContentBuilder.addText("Hello, I have 2 dogs in my house."); + Content userContent = userContentBuilder.build(); + + Content.Builder modelContentBuilder = new Content.Builder(); + modelContentBuilder.setRole("model"); + modelContentBuilder.addText("Great to meet you. 
What would you like to know?"); + Content modelContent = userContentBuilder.build(); + + List history = Arrays.asList(userContent, modelContent); + + // Initialize the chat + ChatFutures chat = model.startChat(history); + + List history = chat.getChat().getHistory(); + + Content messageContent = new Content.Builder() + .addText("This is the message I intend to send") + .build(); + + Collections.addAll(history, messageContent); + + ListenableFuture countTokensResponse = model.countTokens(history.toArray(new Content[0])); + Futures.addCallback(response, new FutureCallback() { + @Override + public void onSuccess(CountTokenResponse result) { + System.out.println(result); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, executor); + // [END tokens_chat] + + } + + + void tokensMultimodalImageInline () { + Content text = new Content.Builder() + .addText("Write a story about a magic backpack.") + .build(); + + Executor executor = // ... + + // For text-only input + ListenableFuture countTokensResponse = model.countTokens(text); + + Futures.addCallback(countTokensResponse, new FutureCallback() { + @Override + public void onSuccess(CountTokensResponse result) { + int totalTokens = result.getTotalTokens(); + System.out.println("TotalTokens = " + totalTokens); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, executor); + + // For text-and-image input + Bitmap image1 = // ... + Bitmap image2 = // ... + + Content multiModalContent = new Content.Builder() + .addImage(image1) + .addImage(image2) + .addText("What's different between these pictures?") + .build(); + + ListenableFuture countTokensResponse = model.countTokens(multiModalContent); + + // For multi-turn conversations (like chat) + List history = chat.getChat().getHistory(); + + Content messageContent = new Content.Builder() + .addText("This is the message I intend to send") + .build(); + + Collections.addAll(history, messageContent); + + ListenableFuture countTokensResponse = model.countTokens(history.toArray(new Content[0])); + } } From 9ecc7e732a19f4683bac7e9aedf234f35908c638 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 10:44:17 -0400 Subject: [PATCH 07/43] Add function calling files. No actual snippets yet. --- .../generative/samples/function_calling.kt | 17 +++++++++++++++++ .../samples/java/function_calling.java | 19 +++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt b/samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt new file mode 100644 index 00000000..5001a4d7 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt @@ -0,0 +1,17 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples + +// TODO diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java new file mode 100644 index 00000000..f173313c --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java @@ -0,0 +1,19 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples; + +class FunctionCalling { + // TODO +} From cfb13310e03821264b3905a659b22786bf030fd5 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 10:49:20 -0400 Subject: [PATCH 08/43] Use right package name for java snippets --- .../java/com/google/ai/client/generative/samples/java/chat.java | 2 +- .../google/ai/client/generative/samples/java/count_tokens.java | 2 +- .../ai/client/generative/samples/java/function_calling.java | 2 +- .../ai/client/generative/samples/java/text_generation.java | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java index e014e544..69f40add 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package com.google.ai.client.generative.samples; +package com.google.ai.client.generative.samples.java; class Chat { void chat() { diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index 3525a5e9..d19ccf79 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package com.google.ai.client.generative.samples; +package com.google.ai.client.generative.samples.java; class CountTokens { void tokensTextOnly() { diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java index f173313c..ba151a5e 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package com.google.ai.client.generative.samples; +package com.google.ai.client.generative.samples.java; class FunctionCalling { // TODO diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java index 1802dc66..77e62326 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package com.google.ai.client.generative.samples; +package com.google.ai.client.generative.samples.java; class TextGeneration { void TextGenTextOnlyPrompt () { From d83cd0c219f2055cf5c44210ebd0770470755184 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 10:49:54 -0400 Subject: [PATCH 09/43] Add controlled generation files No snippets yet --- .../samples/controlled_generation.kt | 17 +++++++++++++++++ .../samples/java/controlled_generation.java | 19 +++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt new file mode 100644 index 00000000..5001a4d7 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt @@ -0,0 +1,17 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.google.ai.client.generative.samples + +// TODO diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java new file mode 100644 index 00000000..718feb46 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java @@ -0,0 +1,19 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples.java; + +class ControlledGeneration { + // TODO +} From f03f26c3bed15bf0c04e52cb026d7977c0191216 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 10:57:18 -0400 Subject: [PATCH 10/43] Add safety settings files --- .../samples/java/safety_settings.java | 55 +++++++++++++++++++ .../generative/samples/safety_settings.kt | 43 +++++++++++++++ 2 files changed, 98 insertions(+) create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java new file mode 100644 index 00000000..90ef99c6 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java @@ -0,0 +1,55 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.google.ai.client.generative.samples.java; + +class SafetySettings { + void safetySettings() { + // [START safety-settings] + SafetySetting harassmentSafety = new SafetySetting(HarmCategory.HARASSMENT, + BlockThreshold.ONLY_HIGH); + + // The Gemini 1.5 models are versatile and work with most use cases + GenerativeModel gm = new GenerativeModel( + "gemini-1.5-flash", + BuildConfig.apiKey, + null, // generation config is optional + Collections.singletonList(harassmentSafety) + ); + + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + // [END safety-settings] + } + + void SafetySettingsMulti() { + // [START safety-settings_multi] + SafetySetting harassmentSafety = new SafetySetting(HarmCategory.HARASSMENT, + BlockThreshold.ONLY_HIGH); + + SafetySetting hateSpeechSafety = new SafetySetting(HarmCategory.HATE_SPEECH, + BlockThreshold.MEDIUM_AND_ABOVE); + + // The Gemini 1.5 models are versatile and work with most use cases + GenerativeModel gm = new GenerativeModel( + "gemini-1.5-flash", + BuildConfig.apiKey, + null, // generation config is optional + Arrays.asList(harassmentSafety, hateSpeechSafety) + ); + + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + // [END safety-settings_multi] + } + +} diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt new file mode 100644 index 00000000..b44d1a41 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt @@ -0,0 +1,43 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.google.ai.client.generative.samples + +suspend fun safetySettings() { + // [START safety-settings] + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with most use cases + modelName = "gemini-1.5-flash", + apiKey = BuildConfig.apiKey, + safetySettings = listOf( + SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH) + ) + ) + // [END safety-settings] +} + +suspend fun SafetySettingsMulti() { + // [START safety-settings_multi] + val harassmentSafety = SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH) + + val hateSpeechSafety = SafetySetting(HarmCategory.HATE_SPEECH, BlockThreshold.MEDIUM_AND_ABOVE) + + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with most use cases + modelName = "gemini-1.5-flash", + apiKey = BuildConfig.apiKey, + safetySettings = listOf(harassmentSafety, hateSpeechSafety) + ) + // [END safety-settings_multi] +} From 0d4a534142337edad023ed6d6a34664a5892607e Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 11:12:19 -0400 Subject: [PATCH 11/43] Add model configuration snippets --- .../samples/java/model_configuration.java | 40 +++++++++++++++++++ .../generative/samples/model_configuration.kt | 34 ++++++++++++++++ 2 files changed, 74 insertions(+) create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java new file mode 100644 index 00000000..3d868558 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java @@ -0,0 +1,40 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.google.ai.client.generative.samples.java; + +class ConfigureModel { + void configureModel() { + // [START configure_model] + GenerationConfig.Builder configBuilder = new GenerationConfig.Builder(); + configBuilder.temperature = 0.9f; + configBuilder.topK = 16; + configBuilder.topP = 0.1f; + configBuilder.maxOutputTokens = 200; + configBuilder.stopSequences = Arrays.asList("red"); + + GenerationConfig generationConfig = configBuilder.build(); + + // The Gemini 1.5 models are versatile and work with most use cases + GenerativeModel gm = new GenerativeModel( + "gemini-1.5-flash", + BuildConfig.apiKey, + generationConfig + ); + + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + // [END configure_model] + } + +} diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt new file mode 100644 index 00000000..8c5601e6 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt @@ -0,0 +1,34 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples + +suspend fun configureModel() { + // [START configure_model] + val config = generationConfig { + temperature = 0.9f + topK = 16 + topP = 0.1f + maxOutputTokens = 200 + stopSequences = listOf("red") + } + + val generativeModel = GenerativeModel( + // The Gemini 1.5 models are versatile and work with most use cases + modelName = "gemini-1.5-flash", + apiKey = BuildConfig.apiKey, + generationConfig = config + ) + // [END configure_model] +} From 8dda71fb6e39d1b2d35ccb158e2f51bebe233d67 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 11:13:24 -0400 Subject: [PATCH 12/43] Add system instructions samples --- .../samples/java/system_instructions.java | 33 +++++++++++++++++++ .../generative/samples/system_instructions.kt | 25 ++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java new file mode 100644 index 00000000..2aed3733 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples.java; + +class SystemInstructions { + void systemInstructions() { + // [START system_instructions] + GenerativeModel model = new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + /* apiKey */ BuildConfig.apiKey, + /* generationConfig (optional) */ null, + /* safetySettings (optional) */ null, + /* requestOptions (optional) */ new RequestOptions(), + /* tools (optional) */ null, + /* toolsConfig (optional) */ null, + /* systemInstruction (optional) */ new Content.Builder().addText("You are a cat. Your name is Neko.").build() + ); + // [END system_instructions] + } + +} diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt b/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt new file mode 100644 index 00000000..5a1963e5 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt @@ -0,0 +1,25 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples + +suspend fun systemInstructions() { + // [START system_instructions] + val generativeModel = GenerativeModel( + modelName = "gemini-1.5-flash", + apiKey = BuildConfig.apiKey, + systemInstruction = content { text("You are a cat. 
Your name is Neko.") }, + ) + // [END system_instructions] +} From b9d0999a14980165ff9b5e224968920533136e55 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 11:31:51 -0400 Subject: [PATCH 13/43] Fix format Using google-java-format tool --- .../client/generative/samples/java/chat.java | 136 ++++--- .../samples/java/controlled_generation.java | 2 +- .../generative/samples/java/count_tokens.java | 151 ++++---- .../samples/java/function_calling.java | 2 +- .../samples/java/model_configuration.java | 34 +- .../samples/java/safety_settings.java | 73 ++-- .../samples/java/system_instructions.java | 31 +- .../samples/java/text_generation.java | 354 ++++++++++-------- 8 files changed, 414 insertions(+), 369 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java index 69f40add..94f5a1ab 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -18,9 +18,12 @@ class Chat { void chat() { // [START chat] // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) - GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - /* apiKey */ BuildConfig.apiKey); + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); GenerativeModelFutures model = GenerativeModelFutures.from(gm); // (optional) Create previous chat history for context @@ -50,27 +53,33 @@ void chat() { // Send the message ListenableFuture response = chat.sendMessage(userMessage); - Futures.addCallback(response, new FutureCallback() { - @Override - public void onSuccess(GenerateContentResponse result) { - String resultText = result.getText(); - System.out.println(resultText); - } - - @Override - public void onFailure(Throwable t) { - t.printStackTrace(); - } - }, executor); + Futures.addCallback( + response, + new FutureCallback() { + @Override + public void onSuccess(GenerateContentResponse result) { + String resultText = result.getText(); + System.out.println(resultText); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, + executor); // [END chat] } void chatStreaming() { // [START chat_streaming] // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) - GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - /* apiKey */ BuildConfig.apiKey); + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); GenerativeModelFutures model = GenerativeModelFutures.from(gm); // (optional) Create previous chat history for context @@ -99,29 +108,30 @@ void chatStreaming() { // Use streaming with text-only input Publisher streamingResponse = - model.generateContentStream(inputContent); + model.generateContentStream(inputContent); StringBuilder outputContent = new StringBuilder(); - streamingResponse.subscribe(new Subscriber() { - @Override - public void 
onNext(GenerateContentResponse generateContentResponse) { - String chunk = generateContentResponse.getText(); - outputContent.append(chunk); - } + streamingResponse.subscribe( + new Subscriber() { + @Override + public void onNext(GenerateContentResponse generateContentResponse) { + String chunk = generateContentResponse.getText(); + outputContent.append(chunk); + } - @Override - public void onComplete() { - System.out.println(outputContent); - } + @Override + public void onComplete() { + System.out.println(outputContent); + } - @Override - public void onSubscribe(Subscription s) { - s.request(Long.MAX_VALUE); - } + @Override + public void onSubscribe(Subscription s) { + s.request(Long.MAX_VALUE); + } - // ... other methods omitted for brevity - }); + // ... other methods omitted for brevity + }); // [END chat_streaming] } @@ -129,9 +139,12 @@ public void onSubscribe(Subscription s) { void chatStreamingWithImages() { // [START chat_with-images_streaming] // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) - GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - /* apiKey */ BuildConfig.apiKey); + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); GenerativeModelFutures model = GenerativeModelFutures.from(gm); // (optional) Create previous chat history for context @@ -153,9 +166,8 @@ void chatStreamingWithImages() { // Create a new user message Bitmap image; // = ... - Content content = new Content.Builder() - .addText("What's different between these pictures?") - .build(); + Content content = + new Content.Builder().addText("What's different between these pictures?").build(); Content.Builder userMessageBuilder = new Content.Builder(); userMessageBuilder.setRole("user"); @@ -167,30 +179,30 @@ void chatStreamingWithImages() { // Use streaming with text-only input Publisher streamingResponse = - model.generateContentStream(inputContent); + model.generateContentStream(inputContent); StringBuilder outputContent = new StringBuilder(); - streamingResponse.subscribe(new Subscriber() { - @Override - public void onNext(GenerateContentResponse generateContentResponse) { - String chunk = generateContentResponse.getText(); - outputContent.append(chunk); - } - - @Override - public void onComplete() { - System.out.println(outputContent); - } - - @Override - public void onSubscribe(Subscription s) { - s.request(Long.MAX_VALUE); - } - - // ... other methods omitted for brevity - }); + streamingResponse.subscribe( + new Subscriber() { + @Override + public void onNext(GenerateContentResponse generateContentResponse) { + String chunk = generateContentResponse.getText(); + outputContent.append(chunk); + } + + @Override + public void onComplete() { + System.out.println(outputContent); + } + + @Override + public void onSubscribe(Subscription s) { + s.request(Long.MAX_VALUE); + } + + // ... 
other methods omitted for brevity + }); // [END chat_with-images_streaming] } - } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java index 718feb46..733ec8ee 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java @@ -15,5 +15,5 @@ package com.google.ai.client.generative.samples.java; class ControlledGeneration { - // TODO + // TODO } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index d19ccf79..fde82b79 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -18,41 +18,49 @@ class CountTokens { void tokensTextOnly() { // [START tokens_text-only] // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - /* apiKey */ BuildConfig.apiKey); + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); GenerativeModelFutures model = GenerativeModelFutures.from(gm); - Content inputContent = new Content.Builder() - .addText("Write a story about a magic backpack.") - .build(); + Content inputContent = + new Content.Builder().addText("Write a story about a magic backpack.").build(); Executor executor; // = ... 
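+    // The Executor chooses which thread runs the FutureCallback passed to
+    // Futures.addCallback below; any java.util.concurrent.Executor works here,
+    // for example Executors.newSingleThreadExecutor().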
// For text-only input ListenableFuture countTokensResponse = model.countTokens(inputContent); - Futures.addCallback(countTokensResponse, new FutureCallback() { - @Override - public void onSuccess(CountTokensResponse result) { - int totalTokens = result.getTotalTokens(); - System.out.println("TotalTokens = " + totalTokens); - } - - @Override - public void onFailure(Throwable t) { - t.printStackTrace(); - } - }, executor); + Futures.addCallback( + countTokensResponse, + new FutureCallback() { + @Override + public void onSuccess(CountTokensResponse result) { + int totalTokens = result.getTotalTokens(); + System.out.println("TotalTokens = " + totalTokens); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, + executor); // [END tokens_text-only] } - void tokensChat (){ + void tokensChat() { // [START tokens_chat] // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - /* apiKey */ BuildConfig.apiKey); + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); GenerativeModelFutures model = GenerativeModelFutures.from(gm); // (optional) Create previous chat history for context @@ -73,73 +81,78 @@ void tokensChat (){ List history = chat.getChat().getHistory(); - Content messageContent = new Content.Builder() - .addText("This is the message I intend to send") - .build(); + Content messageContent = + new Content.Builder().addText("This is the message I intend to send").build(); Collections.addAll(history, messageContent); - ListenableFuture countTokensResponse = model.countTokens(history.toArray(new Content[0])); - Futures.addCallback(response, new FutureCallback() { - @Override - public void onSuccess(CountTokenResponse result) { - System.out.println(result); - } - - @Override - public void onFailure(Throwable t) { - t.printStackTrace(); - } - }, executor); + ListenableFuture countTokensResponse = + model.countTokens(history.toArray(new Content[0])); + Futures.addCallback( + response, + new FutureCallback() { + @Override + public void onSuccess(CountTokenResponse result) { + System.out.println(result); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, + executor); // [END tokens_chat] } - - void tokensMultimodalImageInline () { - Content text = new Content.Builder() - .addText("Write a story about a magic backpack.") - .build(); + void tokensMultimodalImageInline() { + Content text = new Content.Builder().addText("Write a story about a magic backpack.").build(); Executor executor = // ... 
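+    // The sections below count tokens for the same three input shapes used with
+    // generateContent(): text-only, text-and-image (multimodal), and multi-turn chat history.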
- // For text-only input - ListenableFuture countTokensResponse = model.countTokens(text); - - Futures.addCallback(countTokensResponse, new FutureCallback() { - @Override - public void onSuccess(CountTokensResponse result) { - int totalTokens = result.getTotalTokens(); - System.out.println("TotalTokens = " + totalTokens); - } - - @Override - public void onFailure(Throwable t) { - t.printStackTrace(); - } - }, executor); + // For text-only input + ListenableFuture < CountTokensResponse > countTokensResponse = model.countTokens(text); + + Futures.addCallback( + countTokensResponse, + new FutureCallback() { + @Override + public void onSuccess(CountTokensResponse result) { + int totalTokens = result.getTotalTokens(); + System.out.println("TotalTokens = " + totalTokens); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, + executor); // For text-and-image input - Bitmap image1 = // ... - Bitmap image2 = // ... + Bitmap image1; // = ... + Bitmap image2; // = ... - Content multiModalContent = new Content.Builder() - .addImage(image1) - .addImage(image2) - .addText("What's different between these pictures?") - .build(); + Content multiModalContent = + new Content.Builder() + .addImage(image1) + .addImage(image2) + .addText("What's different between these pictures?") + .build(); - ListenableFuture countTokensResponse = model.countTokens(multiModalContent); + ListenableFuture countTokensResponse = + model.countTokens(multiModalContent); // For multi-turn conversations (like chat) List history = chat.getChat().getHistory(); - Content messageContent = new Content.Builder() - .addText("This is the message I intend to send") - .build(); + Content messageContent = + new Content.Builder().addText("This is the message I intend to send").build(); Collections.addAll(history, messageContent); - ListenableFuture countTokensResponse = model.countTokens(history.toArray(new Content[0])); + ListenableFuture countTokensResponse = + model.countTokens(history.toArray(new Content[0])); } } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java index ba151a5e..f3dfcdea 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java @@ -15,5 +15,5 @@ package com.google.ai.client.generative.samples.java; class FunctionCalling { - // TODO + // TODO } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java index 3d868558..65e9540c 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java @@ -15,26 +15,22 @@ package com.google.ai.client.generative.samples.java; class ConfigureModel { - void configureModel() { - // [START configure_model] - GenerationConfig.Builder configBuilder = new GenerationConfig.Builder(); - configBuilder.temperature = 0.9f; - configBuilder.topK = 16; - configBuilder.topP = 0.1f; - configBuilder.maxOutputTokens = 200; - configBuilder.stopSequences = Arrays.asList("red"); + void configureModel() { + // [START configure_model] + GenerationConfig.Builder configBuilder = new GenerationConfig.Builder(); + 
configBuilder.temperature = 0.9f; + configBuilder.topK = 16; + configBuilder.topP = 0.1f; + configBuilder.maxOutputTokens = 200; + configBuilder.stopSequences = Arrays.asList("red"); - GenerationConfig generationConfig = configBuilder.build(); + GenerationConfig generationConfig = configBuilder.build(); - // The Gemini 1.5 models are versatile and work with most use cases - GenerativeModel gm = new GenerativeModel( - "gemini-1.5-flash", - BuildConfig.apiKey, - generationConfig - ); - - GenerativeModelFutures model = GenerativeModelFutures.from(gm); - // [END configure_model] - } + // The Gemini 1.5 models are versatile and work with most use cases + GenerativeModel gm = + new GenerativeModel("gemini-1.5-flash", BuildConfig.apiKey, generationConfig); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + // [END configure_model] + } } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java index 90ef99c6..04600d2d 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java @@ -15,41 +15,40 @@ package com.google.ai.client.generative.samples.java; class SafetySettings { - void safetySettings() { - // [START safety-settings] - SafetySetting harassmentSafety = new SafetySetting(HarmCategory.HARASSMENT, - BlockThreshold.ONLY_HIGH); - - // The Gemini 1.5 models are versatile and work with most use cases - GenerativeModel gm = new GenerativeModel( - "gemini-1.5-flash", - BuildConfig.apiKey, - null, // generation config is optional - Collections.singletonList(harassmentSafety) - ); - - GenerativeModelFutures model = GenerativeModelFutures.from(gm); - // [END safety-settings] - } - - void SafetySettingsMulti() { - // [START safety-settings_multi] - SafetySetting harassmentSafety = new SafetySetting(HarmCategory.HARASSMENT, - BlockThreshold.ONLY_HIGH); - - SafetySetting hateSpeechSafety = new SafetySetting(HarmCategory.HATE_SPEECH, - BlockThreshold.MEDIUM_AND_ABOVE); - - // The Gemini 1.5 models are versatile and work with most use cases - GenerativeModel gm = new GenerativeModel( - "gemini-1.5-flash", - BuildConfig.apiKey, - null, // generation config is optional - Arrays.asList(harassmentSafety, hateSpeechSafety) - ); - - GenerativeModelFutures model = GenerativeModelFutures.from(gm); - // [END safety-settings_multi] - } - + void safetySettings() { + // [START safety-settings] + SafetySetting harassmentSafety = + new SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH); + + // The Gemini 1.5 models are versatile and work with most use cases + GenerativeModel gm = + new GenerativeModel( + "gemini-1.5-flash", + BuildConfig.apiKey, + null, // generation config is optional + Collections.singletonList(harassmentSafety)); + + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + // [END safety-settings] + } + + void SafetySettingsMulti() { + // [START safety-settings_multi] + SafetySetting harassmentSafety = + new SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH); + + SafetySetting hateSpeechSafety = + new SafetySetting(HarmCategory.HATE_SPEECH, BlockThreshold.MEDIUM_AND_ABOVE); + + // The Gemini 1.5 models are versatile and work with most use cases + GenerativeModel gm = + new GenerativeModel( + "gemini-1.5-flash", + BuildConfig.apiKey, + null, // generation config is optional + 
Arrays.asList(harassmentSafety, hateSpeechSafety)); + + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + // [END safety-settings_multi] + } } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java index 2aed3733..2b987d5a 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java @@ -15,19 +15,20 @@ package com.google.ai.client.generative.samples.java; class SystemInstructions { - void systemInstructions() { - // [START system_instructions] - GenerativeModel model = new GenerativeModel( - /* modelName */ "gemini-1.5-flash", - /* apiKey */ BuildConfig.apiKey, - /* generationConfig (optional) */ null, - /* safetySettings (optional) */ null, - /* requestOptions (optional) */ new RequestOptions(), - /* tools (optional) */ null, - /* toolsConfig (optional) */ null, - /* systemInstruction (optional) */ new Content.Builder().addText("You are a cat. Your name is Neko.").build() - ); - // [END system_instructions] - } - + void systemInstructions() { + // [START system_instructions] + GenerativeModel model = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + /* apiKey */ BuildConfig.apiKey, + /* generationConfig (optional) */ null, + /* safetySettings (optional) */ null, + /* requestOptions (optional) */ new RequestOptions(), + /* tools (optional) */ null, + /* toolsConfig (optional) */ null, + /* systemInstruction (optional) */ new Content.Builder() + .addText("You are a cat. Your name is Neko.") + .build()); + // [END system_instructions] + } } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java index 77e62326..43f0d05e 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java @@ -15,256 +15,280 @@ package com.google.ai.client.generative.samples.java; class TextGeneration { - void TextGenTextOnlyPrompt () { + void TextGenTextOnlyPrompt() { // [START text_gen_text-only-prompt] // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - /* apiKey */ BuildConfig.apiKey); + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); GenerativeModelFutures model = GenerativeModelFutures.from(gm); - Content content = new Content.Builder() - .addText("Write a story about a magic backpack.") - .build(); + Content content = + new Content.Builder().addText("Write a story about a magic backpack.").build(); Executor executor; // = ... 
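+    // generateContent() returns a ListenableFuture; the callback below prints
+    // result.getText() on success and the stack trace on failure, running on the
+    // supplied executor.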
ListenableFuture response = model.generateContent(content); - Futures.addCallback(response, new FutureCallback() { - @Override - public void onSuccess(GenerateContentResponse result) { - String resultText = result.getText(); - System.out.println(resultText); - } - - @Override - public void onFailure(Throwable t) { - t.printStackTrace(); - } - }, executor); + Futures.addCallback( + response, + new FutureCallback() { + @Override + public void onSuccess(GenerateContentResponse result) { + String resultText = result.getText(); + System.out.println(resultText); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, + executor); // [END text_gen_text-only-prompt] } - void TextGenTextOnlyPromptStreaming () { + void TextGenTextOnlyPromptStreaming() { // [START text_gen_text-only-prompt_streaming] // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - /* apiKey */ BuildConfig.apiKey); + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); GenerativeModelFutures model = GenerativeModelFutures.from(gm); - Content content = new Content.Builder() - .addText("Write a story about a magic backpack.") - .build(); + Content content = + new Content.Builder().addText("Write a story about a magic backpack.").build(); - Publisher streamingResponse = - model.generateContentStream(content); + Publisher streamingResponse = model.generateContentStream(content); StringBuilder outputContent = new StringBuilder(); - streamingResponse.subscribe(new Subscriber() { - @Override - public void onNext(GenerateContentResponse generateContentResponse) { - String chunk = generateContentResponse.getText(); - outputContent.append(chunk); - } - - @Override - public void onComplete() { - System.out.println(outputContent); - } - - @Override - public void onError(Throwable t) { - t.printStackTrace(); - } - - @Override - public void onSubscribe(Subscription s) { - s.request(Long.MAX_VALUE); - } - }); + streamingResponse.subscribe( + new Subscriber() { + @Override + public void onNext(GenerateContentResponse generateContentResponse) { + String chunk = generateContentResponse.getText(); + outputContent.append(chunk); + } + + @Override + public void onComplete() { + System.out.println(outputContent); + } + + @Override + public void onError(Throwable t) { + t.printStackTrace(); + } + + @Override + public void onSubscribe(Subscription s) { + s.request(Long.MAX_VALUE); + } + }); // [END text_gen_text-only-prompt_streaming] } - void TextGenMultimodalOneImagePrompt () { + void TextGenMultimodalOneImagePrompt() { // [START text_gen_multimodal-one-image-prompt] // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - /* apiKey */ BuildConfig.apiKey); + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); GenerativeModelFutures model = GenerativeModelFutures.from(gm); Bitmap image; 
// = ... - Content content = new Content.Builder() - .addText("What's different between these pictures?") - .addImage(image) - .build(); - + Content content = + new Content.Builder() + .addText("What's different between these pictures?") + .addImage(image) + .build(); Executor executor; // = ... ListenableFuture response = model.generateContent(content); - Futures.addCallback(response, new FutureCallback() { - @Override - public void onSuccess(GenerateContentResponse result) { - String resultText = result.getText(); - System.out.println(resultText); - } - - @Override - public void onFailure(Throwable t) { - t.printStackTrace(); - } - }, executor); + Futures.addCallback( + response, + new FutureCallback() { + @Override + public void onSuccess(GenerateContentResponse result) { + String resultText = result.getText(); + System.out.println(resultText); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, + executor); // [END text_gen_multimodal-one-image-prompt] } - - void TextGenMultimodalOneImagePromptStreaming () { + void TextGenMultimodalOneImagePromptStreaming() { // [START text_gen_multimodal-one-image-prompt_streaming] // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - /* apiKey */ BuildConfig.apiKey); + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); GenerativeModelFutures model = GenerativeModelFutures.from(gm); Bitmap image1; // = ... Bitmap image2; // = ... - Content content = new Content.Builder() - .addText("What's different between these pictures?") - .addImage(image1) - .addImage(image2) - .build(); - + Content content = + new Content.Builder() + .addText("What's different between these pictures?") + .addImage(image1) + .addImage(image2) + .build(); Executor executor; // = ... 
- Publisher streamingResponse = - model.generateContentStream(content); + Publisher streamingResponse = model.generateContentStream(content); StringBuilder outputContent = new StringBuilder(); - streamingResponse.subscribe(new Subscriber() { - @Override - public void onNext(GenerateContentResponse generateContentResponse) { - String chunk = generateContentResponse.getText(); - outputContent.append(chunk); - } - - @Override - public void onComplete() { - System.out.println(outputContent); - } - - @Override - public void onError(Throwable t) { - t.printStackTrace(); - } - - @Override - public void onSubscribe(Subscription s) { - s.request(Long.MAX_VALUE); - } - }); + streamingResponse.subscribe( + new Subscriber() { + @Override + public void onNext(GenerateContentResponse generateContentResponse) { + String chunk = generateContentResponse.getText(); + outputContent.append(chunk); + } + + @Override + public void onComplete() { + System.out.println(outputContent); + } + + @Override + public void onError(Throwable t) { + t.printStackTrace(); + } + + @Override + public void onSubscribe(Subscription s) { + s.request(Long.MAX_VALUE); + } + }); // [END text_gen_multimodal-one-image-prompt_streaming] } - - void TextGenMultimodalMultiImagePrompt () { + void TextGenMultimodalMultiImagePrompt() { // [START text_gen_multimodal-multi-image-prompt] // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - /* apiKey */ BuildConfig.apiKey); + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); GenerativeModelFutures model = GenerativeModelFutures.from(gm); Bitmap image1; // = ... Bitmap image2; // = ... - Content content = new Content.Builder() - .addText("What's different between these pictures?") - .addImage(image1) - .addImage(image2) - .build(); - + Content content = + new Content.Builder() + .addText("What's different between these pictures?") + .addImage(image1) + .addImage(image2) + .build(); Executor executor; // = ... 
ListenableFuture response = model.generateContent(content); - Futures.addCallback(response, new FutureCallback() { - @Override - public void onSuccess(GenerateContentResponse result) { - String resultText = result.getText(); - System.out.println(resultText); - } - - @Override - public void onFailure(Throwable t) { - t.printStackTrace(); - } - }, executor); + Futures.addCallback( + response, + new FutureCallback() { + @Override + public void onSuccess(GenerateContentResponse result) { + String resultText = result.getText(); + System.out.println(resultText); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, + executor); // [END text_gen_multimodal-multi-image-prompt] } - void TextGenMultimodalMultiImagePromptStreaming () { + void TextGenMultimodalMultiImagePromptStreaming() { // [START text_gen_multimodal-multi-image-prompt_streaming] // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - GenerativeModel gm = new GenerativeModel(/* modelName */ "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - /* apiKey */ BuildConfig.apiKey); + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); GenerativeModelFutures model = GenerativeModelFutures.from(gm); Bitmap image1; // = ... Bitmap image2; // = ... - Content content = new Content.Builder() - .addText("What's different between these pictures?") - .addImage(image1) - .addImage(image2) - .build(); - + Content content = + new Content.Builder() + .addText("What's different between these pictures?") + .addImage(image1) + .addImage(image2) + .build(); Executor executor; // = ... 
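+    // generateContentStream() returns a Reactive Streams Publisher; the Subscriber below
+    // appends each streamed chunk in onNext() and requests Long.MAX_VALUE items in
+    // onSubscribe() so the full response is delivered.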
- Publisher streamingResponse = - model.generateContentStream(content); + Publisher streamingResponse = model.generateContentStream(content); StringBuilder outputContent = new StringBuilder(); - streamingResponse.subscribe(new Subscriber() { - @Override - public void onNext(GenerateContentResponse generateContentResponse) { - String chunk = generateContentResponse.getText(); - outputContent.append(chunk); - } - - @Override - public void onComplete() { - System.out.println(outputContent); - } - - @Override - public void onError(Throwable t) { - t.printStackTrace(); - } - - @Override - public void onSubscribe(Subscription s) { - s.request(Long.MAX_VALUE); - } - }); + streamingResponse.subscribe( + new Subscriber() { + @Override + public void onNext(GenerateContentResponse generateContentResponse) { + String chunk = generateContentResponse.getText(); + outputContent.append(chunk); + } + + @Override + public void onComplete() { + System.out.println(outputContent); + } + + @Override + public void onError(Throwable t) { + t.printStackTrace(); + } + + @Override + public void onSubscribe(Subscription s) { + s.request(Long.MAX_VALUE); + } + }); // [END text_gen_multimodal-multi-image-prompt_streaming] } - void TextGenMultimodalVideoPrompt () { + + void TextGenMultimodalVideoPrompt() { // [START text_gen_multimodal-video-prompt] // TODO // [END text_gen_multimodal-video-prompt] } - void TextGenMultimodalVideoPromptStreaming () { + void TextGenMultimodalVideoPromptStreaming() { // [START text_gen_multimodal-video-prompt_streaming] // TODO // [END text_gen_multimodal-video-prompt_streaming] From 3b8953eb2a329b8e81d40a0e6f51c955395c4e5c Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 13:38:02 -0400 Subject: [PATCH 14/43] Fix count tokens sample --- .../ai/client/generative/samples/java/count_tokens.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index fde82b79..b7d94c69 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -109,10 +109,10 @@ public void onFailure(Throwable t) { void tokensMultimodalImageInline() { Content text = new Content.Builder().addText("Write a story about a magic backpack.").build(); - Executor executor = // ... + Executor executor; // = ... 
- // For text-only input - ListenableFuture < CountTokensResponse > countTokensResponse = model.countTokens(text); + // For text-only input + ListenableFuture < CountTokensResponse > countTokensResponse = model.countTokens(text); Futures.addCallback( countTokensResponse, From 2f4a694eef819516128ba56b42ebafecaf7a87fb Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 14:26:54 -0400 Subject: [PATCH 15/43] Add dummy BuidConfig files --- .../generative/samples/BuildConfig.java | 19 +++++++++++++++++++ .../generative/samples/java/BuildConfig.java | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/BuildConfig.java create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/BuildConfig.java b/samples/src/main/java/com/google/ai/client/generative/samples/BuildConfig.java new file mode 100644 index 00000000..4d32f537 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/BuildConfig.java @@ -0,0 +1,19 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples; + +public class BuildConfig { + public static String apiKey = "invalidApiKey"; +} diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java new file mode 100644 index 00000000..3704de25 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java @@ -0,0 +1,19 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.google.ai.client.generative.samples.java; + +public class BuildConfig { + public static String apiKey = "invalidApiKey"; +} From a902099c150e04c0500e295f2eebfd27e939add7 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 14:27:23 -0400 Subject: [PATCH 16/43] Add missing imports in kotlin sample files --- .../com/google/ai/client/generative/samples/chat.kt | 4 ++++ .../ai/client/generative/samples/count_tokens.kt | 10 +++++++--- .../google/ai/client/generative/samples/java/chat.java | 2 ++ .../client/generative/samples/model_configuration.kt | 3 +++ .../ai/client/generative/samples/safety_settings.kt | 5 +++++ .../client/generative/samples/system_instructions.kt | 3 +++ .../ai/client/generative/samples/text_generation.kt | 8 ++++++-- 7 files changed, 30 insertions(+), 5 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt index 1b8dca9b..472cdc3b 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt @@ -14,6 +14,10 @@ package com.google.ai.client.generative.samples +import android.graphics.Bitmap +import com.google.ai.client.generativeai.GenerativeModel +import com.google.ai.client.generativeai.type.content + suspend fun chat() { // [START chat] val generativeModel = GenerativeModel( diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt index 02d41d8a..53adad72 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt @@ -14,7 +14,11 @@ package com.google.ai.client.generative.samples -fun tokensTextOnly () { +import android.graphics.Bitmap +import com.google.ai.client.generativeai.GenerativeModel +import com.google.ai.client.generativeai.type.content + +suspend fun tokensTextOnly () { // [START tokens_text-only] val generativeModel = GenerativeModel( // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) @@ -29,7 +33,7 @@ fun tokensTextOnly () { // [END tokens_text-only] } -fun tokensChat () { +suspend fun tokensChat () { // [START tokens_chat] val generativeModel = GenerativeModel( // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) @@ -52,7 +56,7 @@ fun tokensChat () { // [END tokens_chat] } -fun tokensMultimodalImageInline () { +suspend fun tokensMultimodalImageInline () { // [START tokens_multimodal-image_inline] val generativeModel = GenerativeModel( // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java index 94f5a1ab..fc6b4f71 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -14,6 +14,8 @@ package com.google.ai.client.generative.samples.java; +import com.google.ai.client.generativeai.GenerativeModel; + class Chat { void chat() { // [START chat] diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt 
b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt index 8c5601e6..72e37412 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt @@ -14,6 +14,9 @@ package com.google.ai.client.generative.samples +import com.google.ai.client.generativeai.GenerativeModel +import com.google.ai.client.generativeai.type.generationConfig + suspend fun configureModel() { // [START configure_model] val config = generationConfig { diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt index b44d1a41..6ee5fae0 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt @@ -14,6 +14,11 @@ package com.google.ai.client.generative.samples +import com.google.ai.client.generativeai.GenerativeModel +import com.google.ai.client.generativeai.type.BlockThreshold +import com.google.ai.client.generativeai.type.HarmCategory +import com.google.ai.client.generativeai.type.SafetySetting + suspend fun safetySettings() { // [START safety-settings] val generativeModel = GenerativeModel( diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt b/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt index 5a1963e5..c4f8d6e8 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt @@ -14,6 +14,9 @@ package com.google.ai.client.generative.samples +import com.google.ai.client.generativeai.GenerativeModel +import com.google.ai.client.generativeai.type.content + suspend fun systemInstructions() { // [START system_instructions] val generativeModel = GenerativeModel( diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt index 7d8258f6..545b34c8 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt @@ -14,6 +14,10 @@ package com.google.ai.client.generative.samples +import android.graphics.Bitmap +import com.google.ai.client.generativeai.GenerativeModel +import com.google.ai.client.generativeai.type.content + suspend fun textGenTextOnlyPrompt () { // [START text_gen_text-only-prompt] val generativeModel = GenerativeModel( @@ -84,7 +88,7 @@ suspend fun textGenMultimodalOneImagePromptStreaming () { text("What's in this picture?") } - generativeModel.generateContentStream(prompt).collect { chunk -> + generativeModel.generateContentStream(inputContent).collect { chunk -> print(chunk.text) } // [END text_gen_multimodal-one-image-prompt_streaming] @@ -132,7 +136,7 @@ suspend fun textGenMultimodalMultiImagePromptStreaming () { text("What's different between these pictures?") } - generativeModel.generateContentStream(prompt).collect { chunk -> + generativeModel.generateContentStream(inputContent).collect { chunk -> print(chunk.text) } // [END text_gen_multimodal-multi-image-prompt_streaming] From 500ba0fd9f59f5d3efd0e0facb95805efcae95f0 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 
2024 14:54:09 -0400 Subject: [PATCH 17/43] Java fix imports --- .../generative/samples/java/BuildConfig.java | 2 +- .../client/generative/samples/java/chat.java | 27 +++++- .../generative/samples/java/count_tokens.java | 85 +++++++++++-------- .../samples/java/safety_settings.java | 8 ++ .../samples/java/system_instructions.java | 4 + .../samples/java/text_generation.java | 14 ++- 6 files changed, 99 insertions(+), 41 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java index 3704de25..c9f8a48b 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java @@ -15,5 +15,5 @@ package com.google.ai.client.generative.samples.java; public class BuildConfig { - public static String apiKey = "invalidApiKey"; + public static String apiKey = "invalidApiKey"; } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java index fc6b4f71..3954f2e5 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -14,7 +14,25 @@ package com.google.ai.client.generative.samples.java; +import android.graphics.Bitmap; + import com.google.ai.client.generativeai.GenerativeModel; +import com.google.ai.client.generativeai.java.ChatFutures; +import com.google.ai.client.generativeai.java.GenerativeModelFutures; +import com.google.ai.client.generativeai.type.Content; +import com.google.ai.client.generativeai.type.GenerateContentResponse; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; + +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; class Chat { void chat() { @@ -50,7 +68,8 @@ void chat() { userMessageBuilder.addText("How many paws are in my house?"); Content userMessage = userMessageBuilder.build(); - Executor executor; // = ... + // TODO COMMENT + Executor executor = Executors.newSingleThreadExecutor(); // Send the message ListenableFuture response = chat.sendMessage(userMessage); @@ -106,7 +125,8 @@ void chatStreaming() { userMessageBuilder.addText("How many paws are in my house?"); Content userMessage = userMessageBuilder.build(); - Executor executor; // = ... + // TODO COMMENT + Executor executor = Executors.newSingleThreadExecutor(); // Use streaming with text-only input Publisher streamingResponse = @@ -177,7 +197,8 @@ void chatStreamingWithImages() { userMessageBuilder.addText("This is a picture of them, what breed are they?"); Content userMessage = userMessageBuilder.build(); - Executor executor; // = ... 
+ // TODO COMMENT + Executor executor = Executors.newSingleThreadExecutor(); // Use streaming with text-only input Publisher streamingResponse = diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index b7d94c69..25c071d0 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -14,6 +14,21 @@ package com.google.ai.client.generative.samples.java; +import android.graphics.Bitmap; +import com.google.ai.client.generativeai.GenerativeModel; +import com.google.ai.client.generativeai.java.ChatFutures; +import com.google.ai.client.generativeai.java.GenerativeModelFutures; +import com.google.ai.client.generativeai.type.Content; +import com.google.ai.client.generativeai.type.CountTokensResponse; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; + class CountTokens { void tokensTextOnly() { // [START tokens_text-only] @@ -29,7 +44,8 @@ void tokensTextOnly() { Content inputContent = new Content.Builder().addText("Write a story about a magic backpack.").build(); - Executor executor; // = ... + // TODO COMMENT + Executor executor = Executors.newSingleThreadExecutor(); // For text-only input ListenableFuture countTokensResponse = model.countTokens(inputContent); @@ -79,20 +95,20 @@ void tokensChat() { // Initialize the chat ChatFutures chat = model.startChat(history); - List history = chat.getChat().getHistory(); - Content messageContent = new Content.Builder().addText("This is the message I intend to send").build(); Collections.addAll(history, messageContent); + // TODO COMMENT + Executor executor = Executors.newSingleThreadExecutor(); + ListenableFuture countTokensResponse = model.countTokens(history.toArray(new Content[0])); - Futures.addCallback( - response, - new FutureCallback() { + Futures.addCallback(countTokensResponse, + new FutureCallback() { @Override - public void onSuccess(CountTokenResponse result) { + public void onSuccess(CountTokensResponse result) { System.out.println(result); } @@ -107,28 +123,18 @@ public void onFailure(Throwable t) { } void tokensMultimodalImageInline() { + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + GenerativeModel gm = + new GenerativeModel( + /* modelName */ "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" + // above) + /* apiKey */ BuildConfig.apiKey); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); Content text = new Content.Builder().addText("Write a story about a magic backpack.").build(); - Executor executor; // = ... 
- - // For text-only input - ListenableFuture < CountTokensResponse > countTokensResponse = model.countTokens(text); - - Futures.addCallback( - countTokensResponse, - new FutureCallback() { - @Override - public void onSuccess(CountTokensResponse result) { - int totalTokens = result.getTotalTokens(); - System.out.println("TotalTokens = " + totalTokens); - } - - @Override - public void onFailure(Throwable t) { - t.printStackTrace(); - } - }, - executor); + // TODO COMMENT + Executor executor = Executors.newSingleThreadExecutor(); // For text-and-image input Bitmap image1; // = ... @@ -144,15 +150,22 @@ public void onFailure(Throwable t) { ListenableFuture countTokensResponse = model.countTokens(multiModalContent); - // For multi-turn conversations (like chat) - List history = chat.getChat().getHistory(); - - Content messageContent = - new Content.Builder().addText("This is the message I intend to send").build(); + Futures.addCallback( + countTokensResponse, + new FutureCallback() { + @Override + public void onSuccess(CountTokensResponse result) { + int totalTokens = result.getTotalTokens(); + System.out.println("TotalTokens = " + totalTokens); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, + executor); + // [END tokens_text-only] - Collections.addAll(history, messageContent); - - ListenableFuture countTokensResponse = - model.countTokens(history.toArray(new Content[0])); } } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java index 04600d2d..d93894c4 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java @@ -14,6 +14,14 @@ package com.google.ai.client.generative.samples.java; +import com.google.ai.client.generativeai.GenerativeModel; +import com.google.ai.client.generativeai.java.GenerativeModelFutures; +import com.google.ai.client.generativeai.type.BlockThreshold; +import com.google.ai.client.generativeai.type.HarmCategory; +import com.google.ai.client.generativeai.type.SafetySetting; +import java.util.Arrays; +import java.util.Collections; + class SafetySettings { void safetySettings() { // [START safety-settings] diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java index 2b987d5a..654144e3 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java @@ -14,6 +14,10 @@ package com.google.ai.client.generative.samples.java; +import com.google.ai.client.generativeai.GenerativeModel; +import com.google.ai.client.generativeai.type.Content; +import com.google.ai.client.generativeai.type.RequestOptions; + class SystemInstructions { void systemInstructions() { // [START system_instructions] diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java index 43f0d05e..167e4e6a 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java @@ 
-14,6 +14,17 @@ package com.google.ai.client.generative.samples.java; +import com.google.ai.client.generativeai.GenerativeModel; +import com.google.ai.client.generativeai.java.GenerativeModelFutures; +import com.google.ai.client.generativeai.type.Content; +import com.google.ai.client.generativeai.type.GenerateContentResponse; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; + +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; + class TextGeneration { void TextGenTextOnlyPrompt() { // [START text_gen_text-only-prompt] @@ -29,7 +40,8 @@ void TextGenTextOnlyPrompt() { Content content = new Content.Builder().addText("Write a story about a magic backpack.").build(); - Executor executor; // = ... + // TODO COMMENT + Executor executor = Executors.newSingleThreadExecutor(); ListenableFuture response = model.generateContent(content); Futures.addCallback( From 09591636ad73683dbbe2db1d5a09718be2d234f9 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 14:59:57 -0400 Subject: [PATCH 18/43] Missing fixes for java code --- .../client/generative/samples/java/chat.java | 34 ++++++++---------- .../generative/samples/java/count_tokens.java | 35 ++++++++++--------- .../samples/java/model_configuration.java | 5 +++ .../samples/java/text_generation.java | 14 +++++--- .../generative/samples/text_generation.kt | 19 ++++------ 5 files changed, 55 insertions(+), 52 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java index 3954f2e5..04bf3bb9 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -15,7 +15,6 @@ package com.google.ai.client.generative.samples.java; import android.graphics.Bitmap; - import com.google.ai.client.generativeai.GenerativeModel; import com.google.ai.client.generativeai.java.ChatFutures; import com.google.ai.client.generativeai.java.GenerativeModelFutures; @@ -24,15 +23,13 @@ import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; - -import org.reactivestreams.Publisher; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - import java.util.Arrays; import java.util.List; import java.util.concurrent.Executor; import java.util.concurrent.Executors; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; class Chat { void chat() { @@ -68,8 +65,8 @@ void chat() { userMessageBuilder.addText("How many paws are in my house?"); Content userMessage = userMessageBuilder.build(); - // TODO COMMENT - Executor executor = Executors.newSingleThreadExecutor(); + // TODO COMMENT + Executor executor = Executors.newSingleThreadExecutor(); // Send the message ListenableFuture response = chat.sendMessage(userMessage); @@ -125,12 +122,9 @@ void chatStreaming() { userMessageBuilder.addText("How many paws are in my house?"); Content userMessage = userMessageBuilder.build(); - // TODO COMMENT - Executor executor = Executors.newSingleThreadExecutor(); - // Use streaming with text-only input Publisher streamingResponse = - model.generateContentStream(inputContent); + 
model.generateContentStream(userMessage); StringBuilder outputContent = new StringBuilder(); @@ -153,6 +147,10 @@ public void onSubscribe(Subscription s) { } // ... other methods omitted for brevity + // [START_EXCLUDE] + @Override + public void onError(Throwable t) {} + // [END_EXCLUDE] }); // [END chat_streaming] @@ -188,21 +186,15 @@ void chatStreamingWithImages() { // Create a new user message Bitmap image; // = ... - Content content = - new Content.Builder().addText("What's different between these pictures?").build(); - Content.Builder userMessageBuilder = new Content.Builder(); userMessageBuilder.setRole("user"); userMessageBuilder.addImage(image); userMessageBuilder.addText("This is a picture of them, what breed are they?"); Content userMessage = userMessageBuilder.build(); - // TODO COMMENT - Executor executor = Executors.newSingleThreadExecutor(); - // Use streaming with text-only input Publisher streamingResponse = - model.generateContentStream(inputContent); + model.generateContentStream(userMessage); StringBuilder outputContent = new StringBuilder(); @@ -225,6 +217,10 @@ public void onSubscribe(Subscription s) { } // ... other methods omitted for brevity + // [START_EXCLUDE] + @Override + public void onError(Throwable t) {} + // [END_EXCLUDE] }); // [END chat_with-images_streaming] } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index 25c071d0..44b1ce35 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -105,7 +105,8 @@ void tokensChat() { ListenableFuture countTokensResponse = model.countTokens(history.toArray(new Content[0])); - Futures.addCallback(countTokensResponse, + Futures.addCallback( + countTokensResponse, new FutureCallback() { @Override public void onSuccess(CountTokensResponse result) { @@ -150,22 +151,22 @@ void tokensMultimodalImageInline() { ListenableFuture countTokensResponse = model.countTokens(multiModalContent); - Futures.addCallback( - countTokensResponse, - new FutureCallback() { - @Override - public void onSuccess(CountTokensResponse result) { - int totalTokens = result.getTotalTokens(); - System.out.println("TotalTokens = " + totalTokens); - } - - @Override - public void onFailure(Throwable t) { - t.printStackTrace(); - } - }, - executor); - // [END tokens_text-only] + Futures.addCallback( + countTokensResponse, + new FutureCallback() { + @Override + public void onSuccess(CountTokensResponse result) { + int totalTokens = result.getTotalTokens(); + System.out.println("TotalTokens = " + totalTokens); + } + + @Override + public void onFailure(Throwable t) { + t.printStackTrace(); + } + }, + executor); + // [END tokens_text-only] } } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java index 65e9540c..c551f99d 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java @@ -14,6 +14,11 @@ package com.google.ai.client.generative.samples.java; +import com.google.ai.client.generativeai.GenerativeModel; +import com.google.ai.client.generativeai.java.GenerativeModelFutures; +import 
com.google.ai.client.generativeai.type.GenerationConfig; +import java.util.Arrays; + class ConfigureModel { void configureModel() { // [START configure_model] diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java index 167e4e6a..84e6b7eb 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java @@ -14,6 +14,7 @@ package com.google.ai.client.generative.samples.java; +import android.graphics.Bitmap; import com.google.ai.client.generativeai.GenerativeModel; import com.google.ai.client.generativeai.java.GenerativeModelFutures; import com.google.ai.client.generativeai.type.Content; @@ -21,9 +22,11 @@ import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; - import java.util.concurrent.Executor; import java.util.concurrent.Executors; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; class TextGeneration { void TextGenTextOnlyPrompt() { @@ -125,7 +128,8 @@ void TextGenMultimodalOneImagePrompt() { .addImage(image) .build(); - Executor executor; // = ... + // TODO COMMENT + Executor executor = Executors.newSingleThreadExecutor(); ListenableFuture response = model.generateContent(content); Futures.addCallback( @@ -167,7 +171,8 @@ void TextGenMultimodalOneImagePromptStreaming() { .addImage(image2) .build(); - Executor executor; // = ... + // TODO COMMENT + Executor executor = Executors.newSingleThreadExecutor(); Publisher streamingResponse = model.generateContentStream(content); @@ -220,7 +225,8 @@ void TextGenMultimodalMultiImagePrompt() { .addImage(image2) .build(); - Executor executor; // = ... + // TODO COMMENT + Executor executor = Executors.newSingleThreadExecutor(); ListenableFuture response = model.generateContent(content); Futures.addCallback( diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt index 545b34c8..fcb7f0a6 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt @@ -60,10 +60,10 @@ suspend fun textGenMultimodalOneImagePrompt () { apiKey = BuildConfig.apiKey ) - val image: Bitmap // = ... + val inputContent = content { - image(image) + text("What's in this picture?") } @@ -81,10 +81,10 @@ suspend fun textGenMultimodalOneImagePromptStreaming () { apiKey = BuildConfig.apiKey ) - val image1: Bitmap // = ... + val inputContent = content { - image(image1) + text("What's in this picture?") } @@ -103,12 +103,9 @@ suspend fun textGenMultimodalMultiImagePrompt () { apiKey = BuildConfig.apiKey ) - val image1: Bitmap // = ... - val image2: Bitmap // = ... val inputContent = content { - image(image1) - image(image2) + text("What's different between these pictures?") } @@ -127,12 +124,10 @@ suspend fun textGenMultimodalMultiImagePromptStreaming () { apiKey = BuildConfig.apiKey ) - val image1: Bitmap // = ... - val image2: Bitmap // = ... 
+ val inputContent = content { - image(image1) - image(image2) + text("What's different between these pictures?") } From 2b97243ab14754920351284891b28f00cea06110 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 15:17:23 -0400 Subject: [PATCH 19/43] Add sample images --- .../app/src/main/res/drawable/image.png | Bin 0 -> 1370 bytes .../app/src/main/res/drawable/image1.png | Bin 0 -> 1370 bytes .../app/src/main/res/drawable/image2.png | Bin 0 -> 1370 bytes .../ai/client/generative/samples/chat.kt | 8 ++++-- .../client/generative/samples/count_tokens.kt | 9 ++++-- .../client/generative/samples/java/chat.java | 8 ++++-- .../generative/samples/java/count_tokens.java | 10 +++++-- .../samples/java/text_generation.java | 26 ++++++++++-------- 8 files changed, 40 insertions(+), 21 deletions(-) create mode 100644 generativeai-android-sample/app/src/main/res/drawable/image.png create mode 100644 generativeai-android-sample/app/src/main/res/drawable/image1.png create mode 100644 generativeai-android-sample/app/src/main/res/drawable/image2.png diff --git a/generativeai-android-sample/app/src/main/res/drawable/image.png b/generativeai-android-sample/app/src/main/res/drawable/image.png new file mode 100644 index 0000000000000000000000000000000000000000..3632bceed665f83689bd5bbc9600e21e9b98a191 GIT binary patch literal 1370 zcmV-g1*Q6lP)Px)6iGxuRCr$Pon3C*Fc5_0lchdM0wfQb8US*lpHT&90GR0e`#M0Q z0u%tuiGDT}pa5WI^d|-=RKPs|OQOG{0>3`q&aU?hx(8rk^j~v=&-b%y8@X=Wn&@Xy z0k;5zJ^%l9{I;7u*Iu622;2e?cl@@`{ux|u6GE2&SV;c>G%g`<2f#+oPn$;EjS5@= zu%+|8CWUSQuuS@Em zc(luEte+eCu9l7=0oXI)%cTM}Cx8QBkD*?!(+3hmxOoC70BUwV>O27q0ELsiS)y<_ zG%!j~JpdIYd$S-2kDNyksy>9WY5))oQsjguc0Oa(1eD+p_%>d}g zduY8PJ)eP_JGTa65dbNoJ2eekupi=gIeVeAGyfJQnEu3jZzh?R9Cm32K&ViJKmcTV za^Y>Q(<20B0LZC$n9PY#w8ydPT4aIb?69`5$&~UU1z6G)0g-LBb`2p84>xi8%}_ME zT81}zoa$X^o?V?@9Dq~-sY3Y`!`-@ou?Qhlgld+Gs{o*@2Y$aGEh|j zcm|==RPR(IR0)755NdUN)e6w17I?+5I=LTGgQ^vv2g3CoKU1<+!S67SjllpAoQg17 zs9o~=!2tz8?GT_jKiCrvfCz!v0Z>-mg&qyFvgC|%DtTB0BH6oVR!9ZtHl{EHFv*@< z07AnGK>+W3*c_lwbBV{ry&NG6-vxAL4R=cnQ4>DsiJ+X^aF2Nc#G&4p?c6B99_t6* zobM(F=o7w0Qs~k7ZUNX}WZ)v>cb@|ctRm{T_=)7nT@GNqg6LKHlbu`x5W9j)&Isb( zJzmE9u4e+r9K2>|=so&fCxI601>U2d27o0~#SGC;1Hh7DAw%?20We1eSfZZ_fLs+| zay}gZIVvCk)(n~f{yVi!z|!~AOa#r@Ji-?JbX`Db@K`s%7X6d}+)D--qn{RlAr%k+ z012E`6^8&|lz&NO>_22ml;Gs%&0O_j>}zEEWI=hc1_VV~4p%XBJ2eKsu>Z0L-Mu%kAmC2V0%Q zjbJBs1!&)GAOH|h-Jp(A0MJ9CtXL2LPx)6iGxuRCr$Pon3C*Fc5_0lchdM0wfQb8US*lpHT&90GR0e`#M0Q z0u%tuiGDT}pa5WI^d|-=RKPs|OQOG{0>3`q&aU?hx(8rk^j~v=&-b%y8@X=Wn&@Xy z0k;5zJ^%l9{I;7u*Iu622;2e?cl@@`{ux|u6GE2&SV;c>G%g`<2f#+oPn$;EjS5@= zu%+|8CWUSQuuS@Em zc(luEte+eCu9l7=0oXI)%cTM}Cx8QBkD*?!(+3hmxOoC70BUwV>O27q0ELsiS)y<_ zG%!j~JpdIYd$S-2kDNyksy>9WY5))oQsjguc0Oa(1eD+p_%>d}g zduY8PJ)eP_JGTa65dbNoJ2eekupi=gIeVeAGyfJQnEu3jZzh?R9Cm32K&ViJKmcTV za^Y>Q(<20B0LZC$n9PY#w8ydPT4aIb?69`5$&~UU1z6G)0g-LBb`2p84>xi8%}_ME zT81}zoa$X^o?V?@9Dq~-sY3Y`!`-@ou?Qhlgld+Gs{o*@2Y$aGEh|j zcm|==RPR(IR0)755NdUN)e6w17I?+5I=LTGgQ^vv2g3CoKU1<+!S67SjllpAoQg17 zs9o~=!2tz8?GT_jKiCrvfCz!v0Z>-mg&qyFvgC|%DtTB0BH6oVR!9ZtHl{EHFv*@< z07AnGK>+W3*c_lwbBV{ry&NG6-vxAL4R=cnQ4>DsiJ+X^aF2Nc#G&4p?c6B99_t6* zobM(F=o7w0Qs~k7ZUNX}WZ)v>cb@|ctRm{T_=)7nT@GNqg6LKHlbu`x5W9j)&Isb( zJzmE9u4e+r9K2>|=so&fCxI601>U2d27o0~#SGC;1Hh7DAw%?20We1eSfZZ_fLs+| zay}gZIVvCk)(n~f{yVi!z|!~AOa#r@Ji-?JbX`Db@K`s%7X6d}+)D--qn{RlAr%k+ z012E`6^8&|lz&NO>_22ml;Gs%&0O_j>}zEEWI=hc1_VV~4p%XBJ2eKsu>Z0L-Mu%kAmC2V0%Q zjbJBs1!&)GAOH|h-Jp(A0MJ9CtXL2LPx)6iGxuRCr$Pon3C*Fc5_0lchdM0wfQb8US*lpHT&90GR0e`#M0Q z0u%tuiGDT}pa5WI^d|-=RKPs|OQOG{0>3`q&aU?hx(8rk^j~v=&-b%y8@X=Wn&@Xy 
z0k;5zJ^%l9{I;7u*Iu622;2e?cl@@`{ux|u6GE2&SV;c>G%g`<2f#+oPn$;EjS5@= zu%+|8CWUSQuuS@Em zc(luEte+eCu9l7=0oXI)%cTM}Cx8QBkD*?!(+3hmxOoC70BUwV>O27q0ELsiS)y<_ zG%!j~JpdIYd$S-2kDNyksy>9WY5))oQsjguc0Oa(1eD+p_%>d}g zduY8PJ)eP_JGTa65dbNoJ2eekupi=gIeVeAGyfJQnEu3jZzh?R9Cm32K&ViJKmcTV za^Y>Q(<20B0LZC$n9PY#w8ydPT4aIb?69`5$&~UU1z6G)0g-LBb`2p84>xi8%}_ME zT81}zoa$X^o?V?@9Dq~-sY3Y`!`-@ou?Qhlgld+Gs{o*@2Y$aGEh|j zcm|==RPR(IR0)755NdUN)e6w17I?+5I=LTGgQ^vv2g3CoKU1<+!S67SjllpAoQg17 zs9o~=!2tz8?GT_jKiCrvfCz!v0Z>-mg&qyFvgC|%DtTB0BH6oVR!9ZtHl{EHFv*@< z07AnGK>+W3*c_lwbBV{ry&NG6-vxAL4R=cnQ4>DsiJ+X^aF2Nc#G&4p?c6B99_t6* zobM(F=o7w0Qs~k7ZUNX}WZ)v>cb@|ctRm{T_=)7nT@GNqg6LKHlbu`x5W9j)&Isb( zJzmE9u4e+r9K2>|=so&fCxI601>U2d27o0~#SGC;1Hh7DAw%?20We1eSfZZ_fLs+| zay}gZIVvCk)(n~f{yVi!z|!~AOa#r@Ji-?JbX`Db@K`s%7X6d}+)D--qn{RlAr%k+ z012E`6^8&|lz&NO>_22ml;Gs%&0O_j>}zEEWI=hc1_VV~4p%XBJ2eKsu>Z0L-Mu%kAmC2V0%Q zjbJBs1!&)GAOH|h-Jp(A0MJ9CtXL2L Date: Thu, 13 Jun 2024 15:17:35 -0400 Subject: [PATCH 20/43] Update generativeai quickstart app to include sample code This enables us to compile them and be sure they work. --- generativeai-android-sample/app/build.gradle.kts | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/generativeai-android-sample/app/build.gradle.kts b/generativeai-android-sample/app/build.gradle.kts index 506d6497..a13c6ea1 100644 --- a/generativeai-android-sample/app/build.gradle.kts +++ b/generativeai-android-sample/app/build.gradle.kts @@ -37,6 +37,10 @@ android { } } + sourceSets.getByName("main") { + java.setSrcDirs(listOf("src/main/java", "src/main/kotlin", "../../samples/src/main/java")) + } + compileOptions { sourceCompatibility = JavaVersion.VERSION_1_8 targetCompatibility = JavaVersion.VERSION_1_8 @@ -59,6 +63,12 @@ dependencies { implementation("androidx.activity:activity-compose:1.8.1") implementation("androidx.navigation:navigation-compose:2.7.5") + // Required for one-shot operations (to use `ListenableFuture` from Guava Android) + implementation("com.google.guava:guava:31.0.1-android") + + // Required for streaming operations (to use `Publisher` from Reactive Streams) + implementation("org.reactivestreams:reactive-streams:1.0.4") + implementation(platform("androidx.compose:compose-bom:2023.10.01")) implementation("androidx.compose.ui:ui") implementation("androidx.compose.ui:ui-graphics") From a01c6c9a7b4cdf74273f222759f2d565b7ac1f3c Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 15:19:02 -0400 Subject: [PATCH 21/43] Fix formatting --- .../com/google/ai/client/generative/samples/java/chat.java | 7 ++----- .../ai/client/generative/samples/java/count_tokens.java | 1 - .../ai/client/generative/samples/java/text_generation.java | 1 - 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java index 3fea9756..f2aa6f14 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -17,7 +17,6 @@ import android.content.Context; import android.graphics.Bitmap; import android.graphics.BitmapFactory; - import com.google.ai.client.generativeai.GenerativeModel; import com.google.ai.client.generativeai.java.ChatFutures; import com.google.ai.client.generativeai.java.GenerativeModelFutures; @@ -127,8 +126,7 @@ void chatStreaming() { Content userMessage = userMessageBuilder.build(); // Use streaming with text-only input - 
Publisher streamingResponse = - model.generateContentStream(userMessage); + Publisher streamingResponse = model.generateContentStream(userMessage); StringBuilder outputContent = new StringBuilder(); @@ -197,8 +195,7 @@ void chatStreamingWithImages(Context context) { Content userMessage = userMessageBuilder.build(); // Use streaming with text-only input - Publisher streamingResponse = - model.generateContentStream(userMessage); + Publisher streamingResponse = model.generateContentStream(userMessage); StringBuilder outputContent = new StringBuilder(); diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index 1f1360d2..d94a3ff0 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -17,7 +17,6 @@ import android.content.Context; import android.graphics.Bitmap; import android.graphics.BitmapFactory; - import com.google.ai.client.generativeai.GenerativeModel; import com.google.ai.client.generativeai.java.ChatFutures; import com.google.ai.client.generativeai.java.GenerativeModelFutures; diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java index 4a58d14d..6d3a6359 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java @@ -17,7 +17,6 @@ import android.content.Context; import android.graphics.Bitmap; import android.graphics.BitmapFactory; - import com.google.ai.client.generativeai.GenerativeModel; import com.google.ai.client.generativeai.java.GenerativeModelFutures; import com.google.ai.client.generativeai.type.Content; From 7771776903034b0e60aa89cf7d80d49456740110 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 15:21:24 -0400 Subject: [PATCH 22/43] Fix kotlin formatting Using the ktfmt tool --- .../ai/client/generative/samples/chat.kt | 87 ++++++------ .../client/generative/samples/count_tokens.kt | 58 ++++---- .../generative/samples/model_configuration.kt | 12 +- .../generative/samples/safety_settings.kt | 26 ++-- .../generative/samples/system_instructions.kt | 11 +- .../generative/samples/text_generation.kt | 128 +++++++----------- 6 files changed, 149 insertions(+), 173 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt index 797cb0d4..0b7c9150 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt @@ -14,7 +14,6 @@ package com.google.ai.client.generative.samples -import android.app.Application import android.content.Context import android.graphics.Bitmap import android.graphics.BitmapFactory @@ -24,19 +23,21 @@ import com.google.ai.sample.R suspend fun chat() { // [START chat] - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) + val generativeModel = + GenerativeModel( + // The 
Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) - val chat = generativeModel.startChat( - history = listOf( - content(role = "user") { text("Hello, I have 2 dogs in my house.") }, - content(role = "model") { text("Great to meet you. What would you like to know?") } - ) - ) + val chat = + generativeModel.startChat( + history = + listOf( + content(role = "user") { text("Hello, I have 2 dogs in my house.") }, + content(role = "model") { + text("Great to meet you. What would you like to know?") + })) val response = chat.sendMessage("How many paws are in my house?") print(response.text) @@ -46,42 +47,44 @@ suspend fun chat() { suspend fun chatStreaming() { // [START chat_streaming] // Use streaming with multi-turn conversations (like chat) - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) - val chat = generativeModel.startChat( - history = listOf( - content(role = "user") { text("Hello, I have 2 dogs in my house.") }, - content(role = "model") { text("Great to meet you. What would you like to know?") } - ) - ) + val chat = + generativeModel.startChat( + history = + listOf( + content(role = "user") { text("Hello, I have 2 dogs in my house.") }, + content(role = "model") { + text("Great to meet you. What would you like to know?") + })) - chat.sendMessageStream("How many paws are in my house?").collect { chunk -> - print(chunk.text) - } + chat.sendMessageStream("How many paws are in my house?").collect { chunk -> print(chunk.text) } // [END chat_streaming] } suspend fun chatStreamingWithImages(context: Context) { // [START chat_with-images_streaming] // Use streaming with multi-turn conversations (like chat) - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) - val chat = generativeModel.startChat( - history = listOf( - content(role = "user") { text("Hello, I have 2 dogs in my house.") }, - content(role = "model") { text("Great to meet you. What would you like to know?") } - ) - ) + val chat = + generativeModel.startChat( + history = + listOf( + content(role = "user") { text("Hello, I have 2 dogs in my house.") }, + content(role = "model") { + text("Great to meet you. 
What would you like to know?") + })) val image: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image) @@ -90,8 +93,6 @@ suspend fun chatStreamingWithImages(context: Context) { text("This is a picture of them, what breed are they?") } - chat.sendMessageStream(inputContent).collect { chunk -> - print(chunk.text) - } + chat.sendMessageStream(inputContent).collect { chunk -> print(chunk.text) } // [END chat_with-images_streaming] } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt index 7f440123..aed45843 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt @@ -21,14 +21,14 @@ import com.google.ai.client.generativeai.GenerativeModel import com.google.ai.client.generativeai.type.content import com.google.ai.sample.R -suspend fun tokensTextOnly () { +suspend fun tokensTextOnly() { // [START tokens_text-only] - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) // For text-only input val (totalTokens) = generativeModel.countTokens("Write a story about a magic backpack.") @@ -36,37 +36,39 @@ suspend fun tokensTextOnly () { // [END tokens_text-only] } -suspend fun tokensChat () { +suspend fun tokensChat() { // [START tokens_chat] - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) - val chat = generativeModel.startChat( - history = listOf( - content(role = "user") { text("Hello, I have 2 dogs in my house.") }, - content(role = "model") { text("Great to meet you. What would you like to know?") } - ) - ) + val chat = + generativeModel.startChat( + history = + listOf( + content(role = "user") { text("Hello, I have 2 dogs in my house.") }, + content(role = "model") { + text("Great to meet you. 
What would you like to know?") + })) val history = chat.history - val messageContent = content { text("This is the message I intend to send")} + val messageContent = content { text("This is the message I intend to send") } val (totalTokens) = generativeModel.countTokens(*history.toTypedArray(), messageContent) print(totalTokens) // [END tokens_chat] } -suspend fun tokensMultimodalImageInline (context: Context) { +suspend fun tokensMultimodalImageInline(context: Context) { // [START tokens_multimodal-image_inline] - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) val image1: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image1) val image2: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image2) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt index 72e37412..713c6343 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt @@ -27,11 +27,11 @@ suspend fun configureModel() { stopSequences = listOf("red") } - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with most use cases - modelName = "gemini-1.5-flash", - apiKey = BuildConfig.apiKey, - generationConfig = config - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with most use cases + modelName = "gemini-1.5-flash", + apiKey = BuildConfig.apiKey, + generationConfig = config) // [END configure_model] } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt index 6ee5fae0..9b8d35f4 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt @@ -21,14 +21,12 @@ import com.google.ai.client.generativeai.type.SafetySetting suspend fun safetySettings() { // [START safety-settings] - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with most use cases - modelName = "gemini-1.5-flash", - apiKey = BuildConfig.apiKey, - safetySettings = listOf( - SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH) - ) - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with most use cases + modelName = "gemini-1.5-flash", + apiKey = BuildConfig.apiKey, + safetySettings = listOf(SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH))) // [END safety-settings] } @@ -38,11 +36,11 @@ suspend fun SafetySettingsMulti() { val hateSpeechSafety = SafetySetting(HarmCategory.HATE_SPEECH, BlockThreshold.MEDIUM_AND_ABOVE) - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with 
most use cases - modelName = "gemini-1.5-flash", - apiKey = BuildConfig.apiKey, - safetySettings = listOf(harassmentSafety, hateSpeechSafety) - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with most use cases + modelName = "gemini-1.5-flash", + apiKey = BuildConfig.apiKey, + safetySettings = listOf(harassmentSafety, hateSpeechSafety)) // [END safety-settings_multi] } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt b/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt index c4f8d6e8..b45b1950 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt @@ -19,10 +19,11 @@ import com.google.ai.client.generativeai.type.content suspend fun systemInstructions() { // [START system_instructions] - val generativeModel = GenerativeModel( - modelName = "gemini-1.5-flash", - apiKey = BuildConfig.apiKey, - systemInstruction = content { text("You are a cat. Your name is Neko.") }, - ) + val generativeModel = + GenerativeModel( + modelName = "gemini-1.5-flash", + apiKey = BuildConfig.apiKey, + systemInstruction = content { text("You are a cat. Your name is Neko.") }, + ) // [END system_instructions] } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt index fcb7f0a6..37251645 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt @@ -14,18 +14,17 @@ package com.google.ai.client.generative.samples -import android.graphics.Bitmap import com.google.ai.client.generativeai.GenerativeModel import com.google.ai.client.generativeai.type.content -suspend fun textGenTextOnlyPrompt () { +suspend fun textGenTextOnlyPrompt() { // [START text_gen_text-only-prompt] - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) val prompt = "Write a story about a magic backpack." 
val response = generativeModel.generateContent(prompt) @@ -33,81 +32,63 @@ suspend fun textGenTextOnlyPrompt () { // [END text_gen_text-only-prompt] } -suspend fun textGenTextOnlyPromptStreaming () { +suspend fun textGenTextOnlyPromptStreaming() { // [START text_gen_text-only-prompt_streaming] - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) val prompt = "Write a story about a magic backpack." // Use streaming with text-only input - generativeModel.generateContentStream(prompt).collect { chunk -> - print(chunk.text) - } + generativeModel.generateContentStream(prompt).collect { chunk -> print(chunk.text) } // [END text_gen_text-only-prompt_streaming] } -suspend fun textGenMultimodalOneImagePrompt () { +suspend fun textGenMultimodalOneImagePrompt() { // [START text_gen_multimodal-one-image-prompt] - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) - - - val inputContent = content { - - text("What's in this picture?") - } + val inputContent = content { text("What's in this picture?") } val response = generativeModel.generateContent(inputContent) print(response.text) // [END text_gen_multimodal-one-image-prompt] } -suspend fun textGenMultimodalOneImagePromptStreaming () { +suspend fun textGenMultimodalOneImagePromptStreaming() { // [START text_gen_multimodal-one-image-prompt_streaming] - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) - - - - val inputContent = content { + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) - text("What's in this picture?") - } + val inputContent = content { text("What's in this picture?") } - generativeModel.generateContentStream(inputContent).collect { chunk -> - print(chunk.text) - } + generativeModel.generateContentStream(inputContent).collect { chunk -> print(chunk.text) } // [END text_gen_multimodal-one-image-prompt_streaming] } -suspend fun textGenMultimodalMultiImagePrompt () { +suspend fun textGenMultimodalMultiImagePrompt() { // [START text_gen_multimodal-multi-image-prompt] - val generativeModel = GenerativeModel( - 
// The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) - - val inputContent = content { - - text("What's different between these pictures?") - } + val inputContent = content { text("What's different between these pictures?") } val response = generativeModel.generateContent(inputContent) print(response.text) @@ -115,35 +96,28 @@ suspend fun textGenMultimodalMultiImagePrompt () { // [END text_gen_multimodal-multi-image-prompt] } -suspend fun textGenMultimodalMultiImagePromptStreaming () { +suspend fun textGenMultimodalMultiImagePromptStreaming() { // [START text_gen_multimodal-multi-image-prompt_streaming] - val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts - modelName = "gemini-1.5-flash", - // Access your API key as a Build Configuration variable (see "Set up your API key" above) - apiKey = BuildConfig.apiKey - ) - - - - val inputContent = content { + val generativeModel = + GenerativeModel( + // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + modelName = "gemini-1.5-flash", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey) - text("What's different between these pictures?") - } + val inputContent = content { text("What's different between these pictures?") } - generativeModel.generateContentStream(inputContent).collect { chunk -> - print(chunk.text) - } + generativeModel.generateContentStream(inputContent).collect { chunk -> print(chunk.text) } // [END text_gen_multimodal-multi-image-prompt_streaming] } -suspend fun textGenMultimodalVideoPrompt () { +suspend fun textGenMultimodalVideoPrompt() { // [START text_gen_multimodal-video-prompt] // TODO // [END text_gen_multimodal-video-prompt] } -suspend fun textGenMultimodalVideoPromptStreaming () { +suspend fun textGenMultimodalVideoPromptStreaming() { // [START text_gen_multimodal-video-prompt_streaming] // TODO // [END text_gen_multimodal-video-prompt_streaming] From c29dd672ebb2d7c973886515e971b229e541d7c2 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 16:03:19 -0400 Subject: [PATCH 23/43] Add build config warning comment These files shouldn't be created by hand by developers --- .../com/google/ai/client/generative/samples/BuildConfig.java | 3 +++ .../google/ai/client/generative/samples/java/BuildConfig.java | 3 +++ 2 files changed, 6 insertions(+) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/BuildConfig.java b/samples/src/main/java/com/google/ai/client/generative/samples/BuildConfig.java index 4d32f537..e2acd216 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/BuildConfig.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/BuildConfig.java @@ -14,6 +14,9 @@ package com.google.ai.client.generative.samples; +// This file was manually created for testing purposes only. 
+// For instructions on how to generate this file automatically, see the section +// "Set up your API Key" in https://ai.google.dev/gemini-api/docs/quickstart?lang=android public class BuildConfig { public static String apiKey = "invalidApiKey"; } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java index c9f8a48b..d2a7fd3a 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/BuildConfig.java @@ -14,6 +14,9 @@ package com.google.ai.client.generative.samples.java; +// This file was manually created for testing purposes only. +// For instructions on how to generate this file automatically, see the section +// "Set up your API Key" in https://ai.google.dev/gemini-api/docs/quickstart?lang=android public class BuildConfig { public static String apiKey = "invalidApiKey"; } From 6cf22586999ee142a2300baca0aff6cdd927e4ea Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 16:47:30 -0400 Subject: [PATCH 24/43] Change copyright comment style for Kotlin --- .../ai/client/generative/samples/chat.kt | 28 ++++++++++--------- .../samples/controlled_generation.kt | 28 ++++++++++--------- .../client/generative/samples/count_tokens.kt | 28 ++++++++++--------- .../generative/samples/function_calling.kt | 28 ++++++++++--------- .../generative/samples/model_configuration.kt | 28 ++++++++++--------- .../generative/samples/safety_settings.kt | 28 ++++++++++--------- .../generative/samples/system_instructions.kt | 28 ++++++++++--------- .../generative/samples/text_generation.kt | 28 ++++++++++--------- 8 files changed, 120 insertions(+), 104 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt index 0b7c9150..a65482d0 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt @@ -1,16 +1,18 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package com.google.ai.client.generative.samples diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt index 5001a4d7..3685e67c 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt @@ -1,16 +1,18 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package com.google.ai.client.generative.samples diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt index aed45843..6efd4cb6 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt @@ -1,16 +1,18 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package com.google.ai.client.generative.samples diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt b/samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt index 5001a4d7..3685e67c 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt @@ -1,16 +1,18 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package com.google.ai.client.generative.samples diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt index 713c6343..8aeb010e 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt @@ -1,16 +1,18 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package com.google.ai.client.generative.samples diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt index 9b8d35f4..670488c9 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt @@ -1,16 +1,18 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package com.google.ai.client.generative.samples diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt b/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt index b45b1950..6f367a90 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt @@ -1,16 +1,18 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
 
 package com.google.ai.client.generative.samples
 
diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt
index 37251645..cdca3da1 100644
--- a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt
+++ b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt
@@ -1,16 +1,18 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 package com.google.ai.client.generative.samples
 
From 59b0f3db4e5359da998629af7b4bd1bcfdb4fa4d Mon Sep 17 00:00:00 2001
From: Rodrigo Lazo Paz
Date: Thu, 13 Jun 2024 16:50:43 -0400
Subject: [PATCH 25/43] Add actual content to README

---
 samples/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/samples/README.md b/samples/README.md
index 304360ca..9c576257 100644
--- a/samples/README.md
+++ b/samples/README.md
@@ -1 +1 @@
-Readme
+GenerativeAI sample snippets for Android in Kotlin and Java

From 10da9cd8c66883709dbe6e06b9df0b7118a4333a Mon Sep 17 00:00:00 2001
From: Rodrigo Lazo Paz
Date: Thu, 13 Jun 2024 18:04:39 -0400
Subject: [PATCH 26/43] Add DEVELOPING.md to the snippets directory

---
 samples/DEVELOPING.md | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 samples/DEVELOPING.md

diff --git a/samples/DEVELOPING.md b/samples/DEVELOPING.md
new file mode 100644
index 00000000..cbe0b303
--- /dev/null
+++ b/samples/DEVELOPING.md
@@ -0,0 +1,27 @@
+# Developing
+
+The snippets in this directory are organized to simplify their use as documentation.
+
+## Snippets requirements
+
+All snippets must compile.
+
+## Workflow
+
+1. In Android Studio, import the `generativeai-android-sample` project
+2. In the left-hand bar, using the "Android" perspective, you'll
+   notice that within the `app` module, there are two packages:
+   - `com.google.ai.client.generative.samples` which contains the snippets
+   - `com.google.ai.sample` which contains the actual quickstart app
+3. Make all necessary changes to the code in the
+   `com.google.ai.client.generative.samples` snippets
+4. To compile the snippets, compile the `app` module itself.
+
+**IMPORTANT:** Always add both the Kotlin and the Java versions of the
+snippets at the same time to maintain parity.
+ +### How does it work + +Under the hood, the configuration of the app module, in +`generativeai-android-sample/build.gradle.kts`, has been modified to +include in the main `sourceSet` the code for the samples. From 9aa076523b3f0d6a2fb7d4cd617d1bead44ceb6e Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 13 Jun 2024 18:10:04 -0400 Subject: [PATCH 27/43] Improve comments around executor usage --- .../google/ai/client/generative/samples/java/chat.java | 2 +- .../ai/client/generative/samples/java/count_tokens.java | 6 +++--- .../client/generative/samples/java/text_generation.java | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java index f2aa6f14..d6e43470 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -68,7 +68,7 @@ void chat() { userMessageBuilder.addText("How many paws are in my house?"); Content userMessage = userMessageBuilder.build(); - // TODO COMMENT + // For illustrative purposes only. You should use an executor that fits your needs. Executor executor = Executors.newSingleThreadExecutor(); // Send the message diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index d94a3ff0..1276b8bf 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -47,7 +47,7 @@ void tokensTextOnly() { Content inputContent = new Content.Builder().addText("Write a story about a magic backpack.").build(); - // TODO COMMENT + // For illustrative purposes only. You should use an executor that fits your needs. Executor executor = Executors.newSingleThreadExecutor(); // For text-only input @@ -103,7 +103,7 @@ void tokensChat() { Collections.addAll(history, messageContent); - // TODO COMMENT + // For illustrative purposes only. You should use an executor that fits your needs. Executor executor = Executors.newSingleThreadExecutor(); ListenableFuture countTokensResponse = @@ -137,7 +137,7 @@ void tokensMultimodalImageInline(Context context) { GenerativeModelFutures model = GenerativeModelFutures.from(gm); Content text = new Content.Builder().addText("Write a story about a magic backpack.").build(); - // TODO COMMENT + // For illustrative purposes only. You should use an executor that fits your needs. Executor executor = Executors.newSingleThreadExecutor(); // For text-and-image input diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java index 6d3a6359..f266d31d 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java @@ -46,7 +46,7 @@ void TextGenTextOnlyPrompt() { Content content = new Content.Builder().addText("Write a story about a magic backpack.").build(); - // TODO COMMENT + // For illustrative purposes only. You should use an executor that fits your needs. 
Executor executor = Executors.newSingleThreadExecutor(); ListenableFuture response = model.generateContent(content); @@ -131,7 +131,7 @@ void TextGenMultimodalOneImagePrompt(Context context) { .addImage(image) .build(); - // TODO COMMENT + // For illustrative purposes only. You should use an executor that fits your needs. Executor executor = Executors.newSingleThreadExecutor(); ListenableFuture response = model.generateContent(content); @@ -174,7 +174,7 @@ void TextGenMultimodalOneImagePromptStreaming(Context context) { .addImage(image2) .build(); - // TODO COMMENT + // For illustrative purposes only. You should use an executor that fits your needs. Executor executor = Executors.newSingleThreadExecutor(); Publisher streamingResponse = model.generateContentStream(content); @@ -228,7 +228,7 @@ void TextGenMultimodalMultiImagePrompt(Context context) { .addImage(image2) .build(); - // TODO COMMENT + // For illustrative purposes only. You should use an executor that fits your needs. Executor executor = Executors.newSingleThreadExecutor(); ListenableFuture response = model.generateContent(content); From 5c88f27bdd3643c31bf9d5b01fcd6aa2cbd9cc5f Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Wed, 19 Jun 2024 14:25:05 -0400 Subject: [PATCH 28/43] Use right tags in count token multimodal --- .../google/ai/client/generative/samples/java/count_tokens.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index 1276b8bf..52871875 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -127,6 +127,7 @@ public void onFailure(Throwable t) { } void tokensMultimodalImageInline(Context context) { + // [START tokens_multimodal-image_inline] // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts GenerativeModel gm = new GenerativeModel( @@ -169,7 +170,7 @@ public void onFailure(Throwable t) { } }, executor); - // [END tokens_text-only] + // [END tokens_multimodal-image_inline] } } From 0694a9983426c981e2e4d9a77a11d4aa565c5e37 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Date: Mon, 24 Jun 2024 11:48:41 -0400 Subject: [PATCH 29/43] Update samples/src/main/java/com/google/ai/client/generative/samples/chat.kt Co-authored-by: rachelsaunders <52258509+rachelsaunders@users.noreply.github.com> --- .../main/java/com/google/ai/client/generative/samples/chat.kt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt index a65482d0..020e0c17 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt @@ -27,7 +27,7 @@ suspend fun chat() { // [START chat] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) From 5a5d0c81fc28259891f6d7ecfcff51c02bcff6dc Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Date: Mon, 24 Jun 2024 11:55:17 -0400 
Subject: [PATCH 30/43] Apply suggestions from code review Co-authored-by: rachelsaunders <52258509+rachelsaunders@users.noreply.github.com> --- .../samples/java/system_instructions.java | 1 + .../generative/samples/text_generation.kt | 44 +++++++++---------- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java index 654144e3..2b47fdd5 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java @@ -23,6 +23,7 @@ void systemInstructions() { // [START system_instructions] GenerativeModel model = new GenerativeModel( + // Specify a Gemini model appropriate for your use case /* modelName */ "gemini-1.5-flash", /* apiKey */ BuildConfig.apiKey, /* generationConfig (optional) */ null, diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt index cdca3da1..f1971f9f 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt @@ -20,10 +20,10 @@ import com.google.ai.client.generativeai.GenerativeModel import com.google.ai.client.generativeai.type.content suspend fun textGenTextOnlyPrompt() { - // [START text_gen_text-only-prompt] + // [START text_gen_text_only_prompt] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -31,14 +31,14 @@ suspend fun textGenTextOnlyPrompt() { val prompt = "Write a story about a magic backpack." 
val response = generativeModel.generateContent(prompt) print(response.text) - // [END text_gen_text-only-prompt] + // [END text_gen_text_only_prompt] } suspend fun textGenTextOnlyPromptStreaming() { - // [START text_gen_text-only-prompt_streaming] + // [START text_gen_text_only_prompt_streaming] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -47,14 +47,14 @@ suspend fun textGenTextOnlyPromptStreaming() { // Use streaming with text-only input generativeModel.generateContentStream(prompt).collect { chunk -> print(chunk.text) } - // [END text_gen_text-only-prompt_streaming] + // [END text_gen_text_only_prompt_streaming] } suspend fun textGenMultimodalOneImagePrompt() { - // [START text_gen_multimodal-one-image-prompt] + // [START text_gen_multimodal_one_image_prompt] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -63,14 +63,14 @@ suspend fun textGenMultimodalOneImagePrompt() { val response = generativeModel.generateContent(inputContent) print(response.text) - // [END text_gen_multimodal-one-image-prompt] + // [END text_gen_multimodal_one_image_prompt] } suspend fun textGenMultimodalOneImagePromptStreaming() { - // [START text_gen_multimodal-one-image-prompt_streaming] + // [START text_gen_multimodal_one_image_prompt_streaming] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -78,14 +78,14 @@ suspend fun textGenMultimodalOneImagePromptStreaming() { val inputContent = content { text("What's in this picture?") } generativeModel.generateContentStream(inputContent).collect { chunk -> print(chunk.text) } - // [END text_gen_multimodal-one-image-prompt_streaming] + // [END text_gen_multimodal_one_image_prompt_streaming] } suspend fun textGenMultimodalMultiImagePrompt() { - // [START text_gen_multimodal-multi-image-prompt] + // [START text_gen_multimodal_multi_image_prompt] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -95,14 +95,14 @@ suspend fun textGenMultimodalMultiImagePrompt() { val response = generativeModel.generateContent(inputContent) print(response.text) - // [END text_gen_multimodal-multi-image-prompt] + // [END text_gen_multimodal_multi_image_prompt] } suspend fun textGenMultimodalMultiImagePromptStreaming() { - // [START text_gen_multimodal-multi-image-prompt_streaming] + // [START text_gen_multimodal_multi_image_prompt_streaming] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work 
with both text-only and multimodal prompts + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -110,17 +110,17 @@ suspend fun textGenMultimodalMultiImagePromptStreaming() { val inputContent = content { text("What's different between these pictures?") } generativeModel.generateContentStream(inputContent).collect { chunk -> print(chunk.text) } - // [END text_gen_multimodal-multi-image-prompt_streaming] + // [END text_gen_multimodal_multi_image_prompt_streaming] } suspend fun textGenMultimodalVideoPrompt() { - // [START text_gen_multimodal-video-prompt] + // [START text_gen_multimodal_video_prompt] // TODO - // [END text_gen_multimodal-video-prompt] + // [END text_gen_multimodal_video_prompt] } suspend fun textGenMultimodalVideoPromptStreaming() { - // [START text_gen_multimodal-video-prompt_streaming] + // [START text_gen_multimodal_video_prompt_streaming] // TODO - // [END text_gen_multimodal-video-prompt_streaming] + // [END text_gen_multimodal_video_prompt_streaming] } From 206178665e498451b7a6f4a7550f9d79ca1c0606 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Date: Mon, 24 Jun 2024 11:59:08 -0400 Subject: [PATCH 31/43] Apply suggestions from code review Co-authored-by: rachelsaunders <52258509+rachelsaunders@users.noreply.github.com> --- .../google/ai/client/generative/samples/chat.kt | 6 +++--- .../ai/client/generative/samples/count_tokens.kt | 14 +++++++------- .../ai/client/generative/samples/java/chat.java | 10 +++++----- .../generative/samples/java/count_tokens.java | 14 +++++++------- .../samples/java/model_configuration.java | 2 +- 5 files changed, 23 insertions(+), 23 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt index 020e0c17..f116c5a0 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt @@ -51,7 +51,7 @@ suspend fun chatStreaming() { // Use streaming with multi-turn conversations (like chat) val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -70,7 +70,7 @@ suspend fun chatStreaming() { } suspend fun chatStreamingWithImages(context: Context) { - // [START chat_with-images_streaming] + // [START chat_streaming_with_images] // Use streaming with multi-turn conversations (like chat) val generativeModel = GenerativeModel( @@ -96,5 +96,5 @@ suspend fun chatStreamingWithImages(context: Context) { } chat.sendMessageStream(inputContent).collect { chunk -> print(chunk.text) } - // [END chat_with-images_streaming] + // [END chat_streaming_with_images] } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt index 6efd4cb6..543a15d1 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt @@ -24,10 +24,10 @@ import com.google.ai.client.generativeai.type.content import com.google.ai.sample.R 
suspend fun tokensTextOnly() { - // [START tokens_text-only] + // [START tokens_text_only] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -35,14 +35,14 @@ suspend fun tokensTextOnly() { // For text-only input val (totalTokens) = generativeModel.countTokens("Write a story about a magic backpack.") print(totalTokens) - // [END tokens_text-only] + // [END tokens_text_only] } suspend fun tokensChat() { // [START tokens_chat] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -64,10 +64,10 @@ suspend fun tokensChat() { } suspend fun tokensMultimodalImageInline(context: Context) { - // [START tokens_multimodal-image_inline] + // [START tokens_multimodal_image_inline] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) @@ -83,5 +83,5 @@ suspend fun tokensMultimodalImageInline(context: Context) { val (totalTokens) = generativeModel.countTokens(multiModalContent) print(totalTokens) - // [START tokens_multimodal-image_inline] + // [START tokens_multimodal_image_inline] } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java index d6e43470..3af32ef2 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -37,7 +37,7 @@ class Chat { void chat() { // [START chat] - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -94,7 +94,7 @@ public void onFailure(Throwable t) { void chatStreaming() { // [START chat_streaming] - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -159,8 +159,8 @@ public void onError(Throwable t) {} } void chatStreamingWithImages(Context context) { - // [START chat_with-images_streaming] - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // [START chat_streaming_with_images] + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -223,6 +223,6 @@ public void onSubscribe(Subscription s) { public void onError(Throwable t) {} // [END_EXCLUDE] }); - // [END chat_with-images_streaming] + // [END chat_streaming_with_images] } } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java 
b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index 52871875..d0d89ced 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -34,8 +34,8 @@ class CountTokens { void tokensTextOnly() { - // [START tokens_text-only] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // [START tokens_text_only] + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -68,12 +68,12 @@ public void onFailure(Throwable t) { } }, executor); - // [END tokens_text-only] + // [END tokens_text_only] } void tokensChat() { // [START tokens_chat] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -127,8 +127,8 @@ public void onFailure(Throwable t) { } void tokensMultimodalImageInline(Context context) { - // [START tokens_multimodal-image_inline] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // [START tokens_multimodal_image_inline] + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -170,7 +170,7 @@ public void onFailure(Throwable t) { } }, executor); - // [END tokens_multimodal-image_inline] + // [END tokens_multimodal_image_inline] } } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java index c551f99d..7a197bce 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java @@ -31,7 +31,7 @@ void configureModel() { GenerationConfig generationConfig = configBuilder.build(); - // The Gemini 1.5 models are versatile and work with most use cases + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel("gemini-1.5-flash", BuildConfig.apiKey, generationConfig); From ae8b3ecc95a94b1859ecbc036c66ebf7ba600413 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Date: Mon, 24 Jun 2024 12:03:47 -0400 Subject: [PATCH 32/43] Apply suggestions from code review Co-authored-by: rachelsaunders <52258509+rachelsaunders@users.noreply.github.com> --- .../ai/client/generative/samples/chat.kt | 2 +- .../samples/java/safety_settings.java | 10 ++--- .../samples/java/system_instructions.java | 1 + .../samples/java/text_generation.java | 42 +++++++++---------- .../generative/samples/model_configuration.kt | 2 +- .../generative/samples/safety_settings.kt | 6 +-- .../generative/samples/system_instructions.kt | 1 + 7 files changed, 33 insertions(+), 31 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt index f116c5a0..75e5fa9f 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt @@ -74,7 +74,7 @@ suspend fun chatStreamingWithImages(context: Context) { // Use streaming with multi-turn conversations (like 
chat) val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat) + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java index d93894c4..e68c8200 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java @@ -24,11 +24,11 @@ class SafetySettings { void safetySettings() { - // [START safety-settings] + // [START safety_settings] SafetySetting harassmentSafety = new SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH); - // The Gemini 1.5 models are versatile and work with most use cases + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( "gemini-1.5-flash", @@ -37,7 +37,7 @@ void safetySettings() { Collections.singletonList(harassmentSafety)); GenerativeModelFutures model = GenerativeModelFutures.from(gm); - // [END safety-settings] + // [END safety_settings] } void SafetySettingsMulti() { @@ -48,7 +48,7 @@ void SafetySettingsMulti() { SafetySetting hateSpeechSafety = new SafetySetting(HarmCategory.HATE_SPEECH, BlockThreshold.MEDIUM_AND_ABOVE); - // The Gemini 1.5 models are versatile and work with most use cases + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( "gemini-1.5-flash", @@ -57,6 +57,6 @@ void SafetySettingsMulti() { Arrays.asList(harassmentSafety, hateSpeechSafety)); GenerativeModelFutures model = GenerativeModelFutures.from(gm); - // [END safety-settings_multi] + // [END safety_settings_multi] } } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java index 2b47fdd5..cc1306c7 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java @@ -23,6 +23,7 @@ void systemInstructions() { // [START system_instructions] GenerativeModel model = new GenerativeModel( + // Specify a Gemini model appropriate for your use case // Specify a Gemini model appropriate for your use case /* modelName */ "gemini-1.5-flash", /* apiKey */ BuildConfig.apiKey, diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java index f266d31d..9a7216cd 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java @@ -33,8 +33,8 @@ class TextGeneration { void TextGenTextOnlyPrompt() { - // [START text_gen_text-only-prompt] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // [START text_gen_text_only_prompt] + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -65,12 +65,12 @@ public void 
onFailure(Throwable t) { } }, executor); - // [END text_gen_text-only-prompt] + // [END text_gen_text_only_prompt] } void TextGenTextOnlyPromptStreaming() { - // [START text_gen_text-only-prompt_streaming] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // [START text_gen_text_only_prompt_streaming] + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -109,12 +109,12 @@ public void onSubscribe(Subscription s) { s.request(Long.MAX_VALUE); } }); - // [END text_gen_text-only-prompt_streaming] + // [END text_gen_text_only_prompt_streaming] } void TextGenMultimodalOneImagePrompt(Context context) { - // [START text_gen_multimodal-one-image-prompt] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // [START text_gen_multimodal_one_image_prompt] + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -150,12 +150,12 @@ public void onFailure(Throwable t) { } }, executor); - // [END text_gen_multimodal-one-image-prompt] + // [END text_gen_multimodal_one_image_prompt] } void TextGenMultimodalOneImagePromptStreaming(Context context) { - // [START text_gen_multimodal-one-image-prompt_streaming] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // [START text_gen_multimodal_one_image_prompt_streaming] + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -204,12 +204,12 @@ public void onSubscribe(Subscription s) { s.request(Long.MAX_VALUE); } }); - // [END text_gen_multimodal-one-image-prompt_streaming] + // [END text_gen_multimodal_one_image_prompt_streaming] } void TextGenMultimodalMultiImagePrompt(Context context) { // [START text_gen_multimodal-multi-image-prompt] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -247,12 +247,12 @@ public void onFailure(Throwable t) { } }, executor); - // [END text_gen_multimodal-multi-image-prompt] + // [END text_gen_multimodal_multi_image_prompt] } void TextGenMultimodalMultiImagePromptStreaming(Context context) { - // [START text_gen_multimodal-multi-image-prompt_streaming] - // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts + // [START text_gen_multimodal_multi_image_prompt_streaming] + // Specify a Gemini model appropriate for your use case GenerativeModel gm = new GenerativeModel( /* modelName */ "gemini-1.5-flash", @@ -300,18 +300,18 @@ public void onSubscribe(Subscription s) { s.request(Long.MAX_VALUE); } }); - // [END text_gen_multimodal-multi-image-prompt_streaming] + // [END text_gen_multimodal_multi_image_prompt_streaming] } void TextGenMultimodalVideoPrompt() { - // [START text_gen_multimodal-video-prompt] + // [START text_gen_multimodal_video_prompt] // TODO - // [END text_gen_multimodal-video-prompt] + // [END text_gen_multimodal_video_prompt] } void TextGenMultimodalVideoPromptStreaming() { - // [START text_gen_multimodal-video-prompt_streaming] + // [START text_gen_multimodal_video_prompt_streaming] // TODO - // [END text_gen_multimodal-video-prompt_streaming] + // [END text_gen_multimodal_video_prompt_streaming] } } diff --git 
a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt index 8aeb010e..f8193139 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt @@ -31,7 +31,7 @@ suspend fun configureModel() { val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with most use cases + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", apiKey = BuildConfig.apiKey, generationConfig = config) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt index 670488c9..1940971e 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt @@ -25,7 +25,7 @@ suspend fun safetySettings() { // [START safety-settings] val generativeModel = GenerativeModel( - // The Gemini 1.5 models are versatile and work with most use cases + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", apiKey = BuildConfig.apiKey, safetySettings = listOf(SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH))) @@ -33,7 +33,7 @@ suspend fun safetySettings() { } suspend fun SafetySettingsMulti() { - // [START safety-settings_multi] + // [START safety_settings_multi] val harassmentSafety = SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH) val hateSpeechSafety = SafetySetting(HarmCategory.HATE_SPEECH, BlockThreshold.MEDIUM_AND_ABOVE) @@ -44,5 +44,5 @@ suspend fun SafetySettingsMulti() { modelName = "gemini-1.5-flash", apiKey = BuildConfig.apiKey, safetySettings = listOf(harassmentSafety, hateSpeechSafety)) - // [END safety-settings_multi] + // [END safety_settings_multi] } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt b/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt index 6f367a90..1649be2d 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt @@ -23,6 +23,7 @@ suspend fun systemInstructions() { // [START system_instructions] val generativeModel = GenerativeModel( + // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", apiKey = BuildConfig.apiKey, systemInstruction = content { text("You are a cat. 
Your name is Neko.") }, From 6a3a0c7b08b2d023d1fa4c888bfacbb08d0682d6 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Date: Mon, 24 Jun 2024 12:06:18 -0400 Subject: [PATCH 33/43] Update samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java Co-authored-by: rachelsaunders <52258509+rachelsaunders@users.noreply.github.com> --- .../ai/client/generative/samples/java/safety_settings.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java index e68c8200..3657c4ed 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java @@ -41,7 +41,7 @@ void safetySettings() { } void SafetySettingsMulti() { - // [START safety-settings_multi] + // [START safety_settings_multi] SafetySetting harassmentSafety = new SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH); From 49125c2c50f9b5025dfcccd86d7001d704b8b487 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Mon, 24 Jun 2024 17:54:00 -0400 Subject: [PATCH 34/43] Remove exclude blocks --- .../google/ai/client/generative/samples/java/chat.java | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java index 3af32ef2..17d69289 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -148,11 +148,9 @@ public void onSubscribe(Subscription s) { s.request(Long.MAX_VALUE); } - // ... other methods omitted for brevity - // [START_EXCLUDE] @Override public void onError(Throwable t) {} - // [END_EXCLUDE] + }); // [END chat_streaming] @@ -217,11 +215,9 @@ public void onSubscribe(Subscription s) { s.request(Long.MAX_VALUE); } - // ... 
other methods omitted for brevity - // [START_EXCLUDE] @Override public void onError(Throwable t) {} - // [END_EXCLUDE] + }); // [END chat_streaming_with_images] } From 89e7232f46cd6084f15450ef3fdc2acf4982e89d Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Mon, 24 Jun 2024 18:04:47 -0400 Subject: [PATCH 35/43] Fix bad end block tag --- .../com/google/ai/client/generative/samples/count_tokens.kt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt index 543a15d1..4100aa2e 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt @@ -83,5 +83,5 @@ suspend fun tokensMultimodalImageInline(context: Context) { val (totalTokens) = generativeModel.countTokens(multiModalContent) print(totalTokens) - // [START tokens_multimodal_image_inline] + // [END tokens_multimodal_image_inline] } From 687bed905bfd55d16c3f8f30e588ace492c394dc Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Tue, 25 Jun 2024 20:53:44 -0400 Subject: [PATCH 36/43] Use underscore in safety_settings --- .../google/ai/client/generative/samples/safety_settings.kt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt index 1940971e..c6fd8e58 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt @@ -22,14 +22,14 @@ import com.google.ai.client.generativeai.type.HarmCategory import com.google.ai.client.generativeai.type.SafetySetting suspend fun safetySettings() { - // [START safety-settings] + // [START safety_settings] val generativeModel = GenerativeModel( // Specify a Gemini model appropriate for your use case modelName = "gemini-1.5-flash", apiKey = BuildConfig.apiKey, safetySettings = listOf(SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH))) - // [END safety-settings] + // [END safety_settings] } suspend fun SafetySettingsMulti() { From 979352d07167364ba8ac600230eee160a01be6c5 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Tue, 25 Jun 2024 20:55:40 -0400 Subject: [PATCH 37/43] Use singular, system instruction --- ...{system_instructions.java => system_instruction.java} | 9 ++++----- .../{system_instructions.kt => system_instruction.kt} | 6 +++--- 2 files changed, 7 insertions(+), 8 deletions(-) rename samples/src/main/java/com/google/ai/client/generative/samples/java/{system_instructions.java => system_instruction.java} (88%) rename samples/src/main/java/com/google/ai/client/generative/samples/{system_instructions.kt => system_instruction.kt} (91%) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instruction.java similarity index 88% rename from samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java rename to samples/src/main/java/com/google/ai/client/generative/samples/java/system_instruction.java index cc1306c7..62e85487 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instructions.java +++ 
b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instruction.java @@ -18,12 +18,11 @@ import com.google.ai.client.generativeai.type.Content; import com.google.ai.client.generativeai.type.RequestOptions; -class SystemInstructions { - void systemInstructions() { - // [START system_instructions] +class SystemInstruction { + void systemInstruction() { + // [START system_instruction] GenerativeModel model = new GenerativeModel( - // Specify a Gemini model appropriate for your use case // Specify a Gemini model appropriate for your use case /* modelName */ "gemini-1.5-flash", /* apiKey */ BuildConfig.apiKey, @@ -35,6 +34,6 @@ void systemInstructions() { /* systemInstruction (optional) */ new Content.Builder() .addText("You are a cat. Your name is Neko.") .build()); - // [END system_instructions] + // [END system_instruction] } } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt b/samples/src/main/java/com/google/ai/client/generative/samples/system_instruction.kt similarity index 91% rename from samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt rename to samples/src/main/java/com/google/ai/client/generative/samples/system_instruction.kt index 1649be2d..415c5dee 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/system_instructions.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/system_instruction.kt @@ -19,8 +19,8 @@ package com.google.ai.client.generative.samples import com.google.ai.client.generativeai.GenerativeModel import com.google.ai.client.generativeai.type.content -suspend fun systemInstructions() { - // [START system_instructions] +suspend fun systemInstruction() { + // [START system_instruction] val generativeModel = GenerativeModel( // Specify a Gemini model appropriate for your use case @@ -28,5 +28,5 @@ suspend fun systemInstructions() { apiKey = BuildConfig.apiKey, systemInstruction = content { text("You are a cat. 
Your name is Neko.") }, ) - // [END system_instructions] + // [END system_instruction] } From 75317e98c5a449c0384f29cb37e246db5239142f Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Wed, 26 Jun 2024 00:18:11 -0400 Subject: [PATCH 38/43] Add comment block for api key --- .../java/com/google/ai/client/generative/samples/chat.kt | 7 +++++++ .../ai/client/generative/samples/controlled_generation.kt | 7 +++++++ .../google/ai/client/generative/samples/count_tokens.kt | 7 +++++++ .../ai/client/generative/samples/function_calling.kt | 8 ++++++++ .../google/ai/client/generative/samples/java/chat.java | 7 +++++++ .../generative/samples/java/controlled_generation.java | 7 +++++++ .../ai/client/generative/samples/java/count_tokens.java | 7 +++++++ .../client/generative/samples/java/function_calling.java | 7 +++++++ .../generative/samples/java/model_configuration.java | 7 +++++++ .../client/generative/samples/java/safety_settings.java | 7 +++++++ .../generative/samples/java/system_instruction.java | 7 +++++++ .../client/generative/samples/java/text_generation.java | 7 +++++++ .../ai/client/generative/samples/model_configuration.kt | 8 ++++++++ .../ai/client/generative/samples/safety_settings.kt | 8 ++++++++ .../ai/client/generative/samples/system_instruction.kt | 7 +++++++ .../ai/client/generative/samples/text_generation.kt | 7 +++++++ 16 files changed, 115 insertions(+) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt index 75e5fa9f..4789b3e3 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/chat.kt @@ -23,6 +23,13 @@ import com.google.ai.client.generativeai.GenerativeModel import com.google.ai.client.generativeai.type.content import com.google.ai.sample.R +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + suspend fun chat() { // [START chat] val generativeModel = diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt index 3685e67c..88ce2b9a 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/controlled_generation.kt @@ -16,4 +16,11 @@ package com.google.ai.client.generative.samples +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). 
+ // TODO diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt index 4100aa2e..e3285cda 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/count_tokens.kt @@ -23,6 +23,13 @@ import com.google.ai.client.generativeai.GenerativeModel import com.google.ai.client.generativeai.type.content import com.google.ai.sample.R +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + suspend fun tokensTextOnly() { // [START tokens_text_only] val generativeModel = diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt b/samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt index 3685e67c..894336f9 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/function_calling.kt @@ -16,4 +16,12 @@ package com.google.ai.client.generative.samples + +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + // TODO diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java index 17d69289..e313ae49 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/chat.java @@ -34,6 +34,13 @@ import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + class Chat { void chat() { // [START chat] diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java index 733ec8ee..d42eee4b 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/controlled_generation.java @@ -14,6 +14,13 @@ package com.google.ai.client.generative.samples.java; +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). 
+ class ControlledGeneration { // TODO } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java index d0d89ced..d4323c85 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/count_tokens.java @@ -32,6 +32,13 @@ import java.util.concurrent.Executor; import java.util.concurrent.Executors; +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + class CountTokens { void tokensTextOnly() { // [START tokens_text_only] diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java index f3dfcdea..e233e385 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/function_calling.java @@ -14,6 +14,13 @@ package com.google.ai.client.generative.samples.java; +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + class FunctionCalling { // TODO } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java index 7a197bce..cdf77ceb 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java @@ -19,6 +19,13 @@ import com.google.ai.client.generativeai.type.GenerationConfig; import java.util.Arrays; +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + class ConfigureModel { void configureModel() { // [START configure_model] diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java index 3657c4ed..83f980e8 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/safety_settings.java @@ -22,6 +22,13 @@ import java.util.Arrays; import java.util.Collections; +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). 
+ class SafetySettings { void safetySettings() { // [START safety_settings] diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instruction.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instruction.java index 62e85487..d1ef8cb5 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instruction.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/system_instruction.java @@ -18,6 +18,13 @@ import com.google.ai.client.generativeai.type.Content; import com.google.ai.client.generativeai.type.RequestOptions; +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + class SystemInstruction { void systemInstruction() { // [START system_instruction] diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java index 9a7216cd..1ca36f28 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/text_generation.java @@ -31,6 +31,13 @@ import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + class TextGeneration { void TextGenTextOnlyPrompt() { // [START text_gen_text_only_prompt] diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt index f8193139..31385bae 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt @@ -19,6 +19,14 @@ package com.google.ai.client.generative.samples import com.google.ai.client.generativeai.GenerativeModel import com.google.ai.client.generativeai.type.generationConfig +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + + suspend fun configureModel() { // [START configure_model] val config = generationConfig { diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt index c6fd8e58..05bf1fb7 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt @@ -21,6 +21,14 @@ import com.google.ai.client.generativeai.type.BlockThreshold import com.google.ai.client.generativeai.type.HarmCategory import com.google.ai.client.generativeai.type.SafetySetting +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. 
To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + + suspend fun safetySettings() { // [START safety_settings] val generativeModel = diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/system_instruction.kt b/samples/src/main/java/com/google/ai/client/generative/samples/system_instruction.kt index 415c5dee..4875138f 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/system_instruction.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/system_instruction.kt @@ -19,6 +19,13 @@ package com.google.ai.client.generative.samples import com.google.ai.client.generativeai.GenerativeModel import com.google.ai.client.generativeai.type.content +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + suspend fun systemInstruction() { // [START system_instruction] val generativeModel = diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt index f1971f9f..8e08c099 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt @@ -19,6 +19,13 @@ package com.google.ai.client.generative.samples import com.google.ai.client.generativeai.GenerativeModel import com.google.ai.client.generativeai.type.content +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + suspend fun textGenTextOnlyPrompt() { // [START text_gen_text_only_prompt] val generativeModel = From 58e0bd8d4a41153055c571d3108c0fb26550931c Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 27 Jun 2024 11:52:36 -0400 Subject: [PATCH 39/43] Fix image prompts in `text_generation.kt` The prompts were missing the actual images. 
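For reference, the corrected prompts decode the bundled drawables and attach them through the content builder ahead of the text part. The snippet below is a minimal sketch of that pattern only (the helper name is hypothetical; the imports, R.drawable resources, and BuildConfig.apiKey are the ones added or referenced in the diff that follows):

    // Sketch only: assumes android.graphics.Bitmap/BitmapFactory, the content builder,
    // and com.google.ai.sample.R are imported as in this patch, and that BuildConfig.apiKey
    // holds the API key described in the quickstart.
    suspend fun oneImagePromptSketch(context: Context) {
        val generativeModel = GenerativeModel(
            // Specify a Gemini model appropriate for your use case
            modelName = "gemini-1.5-flash",
            apiKey = BuildConfig.apiKey)

        // Decode a bundled drawable and add it as an image part before the text part;
        // the image part is what the prompts were missing.
        val image: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image)
        val inputContent = content {
            image(image)
            text("What's in this picture?")
        }

        val response = generativeModel.generateContent(inputContent)
        print(response.text)
    }

The multi-image variants follow the same shape, adding image(image1) and image(image2) before the question.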
--- .../generative/samples/text_generation.kt | 36 ++++++++++++++----- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt index 8e08c099..a4587733 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt @@ -57,7 +57,7 @@ suspend fun textGenTextOnlyPromptStreaming() { // [END text_gen_text_only_prompt_streaming] } -suspend fun textGenMultimodalOneImagePrompt() { +suspend fun textGenMultimodalOneImagePrompt(context: Context) { // [START text_gen_multimodal_one_image_prompt] val generativeModel = GenerativeModel( @@ -66,14 +66,18 @@ suspend fun textGenMultimodalOneImagePrompt() { // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) - val inputContent = content { text("What's in this picture?") } + val image: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image) + val inputContent = content { + image(image) + text("What's in this picture?") + } val response = generativeModel.generateContent(inputContent) print(response.text) // [END text_gen_multimodal_one_image_prompt] } -suspend fun textGenMultimodalOneImagePromptStreaming() { +suspend fun textGenMultimodalOneImagePromptStreaming(context: Context) { // [START text_gen_multimodal_one_image_prompt_streaming] val generativeModel = GenerativeModel( @@ -82,13 +86,17 @@ suspend fun textGenMultimodalOneImagePromptStreaming() { // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) - val inputContent = content { text("What's in this picture?") } + val image: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image) + val inputContent = content { + image(image) + text("What's in this picture?") + } generativeModel.generateContentStream(inputContent).collect { chunk -> print(chunk.text) } // [END text_gen_multimodal_one_image_prompt_streaming] } -suspend fun textGenMultimodalMultiImagePrompt() { +suspend fun textGenMultimodalMultiImagePrompt(context: Context) { // [START text_gen_multimodal_multi_image_prompt] val generativeModel = GenerativeModel( @@ -97,7 +105,13 @@ suspend fun textGenMultimodalMultiImagePrompt() { // Access your API key as a Build Configuration variable (see "Set up your API key" above) apiKey = BuildConfig.apiKey) - val inputContent = content { text("What's different between these pictures?") } + val image1: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image1) + val image2: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image2) + val inputContent = content { + image(image1) + image(image2) + text("What's the difference between these pictures?") + } val response = generativeModel.generateContent(inputContent) print(response.text) @@ -105,7 +119,7 @@ suspend fun textGenMultimodalMultiImagePrompt() { // [END text_gen_multimodal_multi_image_prompt] } -suspend fun textGenMultimodalMultiImagePromptStreaming() { +suspend fun textGenMultimodalMultiImagePromptStreaming(context: Context) { // [START text_gen_multimodal_multi_image_prompt_streaming] val generativeModel = GenerativeModel( @@ -114,7 +128,13 @@ suspend fun textGenMultimodalMultiImagePromptStreaming() { // Access your API key as a Build Configuration variable (see "Set up 
your API key" above) apiKey = BuildConfig.apiKey) - val inputContent = content { text("What's different between these pictures?") } + val image1: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image1) + val image2: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image2) + val inputContent = content { + image(image1) + image(image2) + text("What's the difference between these pictures?") + } generativeModel.generateContentStream(inputContent).collect { chunk -> print(chunk.text) } // [END text_gen_multimodal_multi_image_prompt_streaming] From 2bfe9e376918f99a9a440561eac4921d23c7ffed Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 27 Jun 2024 11:53:06 -0400 Subject: [PATCH 40/43] Address compilation issues There were warnings about unused variables, and errors due to missing imports. --- .../ai/client/generative/samples/model_configuration.kt | 3 +++ .../google/ai/client/generative/samples/safety_settings.kt | 6 ++++++ .../google/ai/client/generative/samples/text_generation.kt | 4 ++++ 3 files changed, 13 insertions(+) diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt index 31385bae..7d9f7db7 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt @@ -44,4 +44,7 @@ suspend fun configureModel() { apiKey = BuildConfig.apiKey, generationConfig = config) // [END configure_model] + + // Added to silence the compiler warning about unused variable. + print(generativeModel) } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt index 05bf1fb7..dbf34493 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/safety_settings.kt @@ -38,6 +38,9 @@ suspend fun safetySettings() { apiKey = BuildConfig.apiKey, safetySettings = listOf(SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH))) // [END safety_settings] + + // Added to silence the compiler warning about unused variable. + print(generativeModel) } suspend fun SafetySettingsMulti() { @@ -53,4 +56,7 @@ suspend fun SafetySettingsMulti() { apiKey = BuildConfig.apiKey, safetySettings = listOf(harassmentSafety, hateSpeechSafety)) // [END safety_settings_multi] + + // Added to silence the compiler warning about unused variable. 
+ print(generativeModel) } diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt index a4587733..86abc5d7 100644 --- a/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt +++ b/samples/src/main/java/com/google/ai/client/generative/samples/text_generation.kt @@ -16,8 +16,12 @@ package com.google.ai.client.generative.samples +import android.content.Context +import android.graphics.Bitmap +import android.graphics.BitmapFactory import com.google.ai.client.generativeai.GenerativeModel import com.google.ai.client.generativeai.type.content +import com.google.ai.sample.R // Set up your API Key // ==================== // // To use the Gemini API, you'll need an API key. To learn more, see // the "Set up your API Key section" in the [Gemini API // quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). From a03f15164e28222030b5629cf75550481aee3596 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Tue, 9 Jul 2024 14:46:30 -0400 Subject: [PATCH 41/43] Hide samples source code inside a build variant This prevents the code from being seen under normal circumstances in Android Studio --- generativeai-android-sample/app/build.gradle.kts | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/generativeai-android-sample/app/build.gradle.kts b/generativeai-android-sample/app/build.gradle.kts index a13c6ea1..1377eb1f 100644 --- a/generativeai-android-sample/app/build.gradle.kts +++ b/generativeai-android-sample/app/build.gradle.kts @@ -37,7 +37,13 @@ android { } } - sourceSets.getByName("main") { + buildTypes { + create("samples") { + initWith(getByName("debug")) + } + } + + sourceSets.getByName("samples") { java.setSrcDirs(listOf("src/main/java", "src/main/kotlin", "../../samples/src/main/java")) } From 8c76dff5790b439082b92ff95634457bbf8b5efe Mon Sep 17 00:00:00 2001 From: David Motsonashvili Date: Wed, 10 Jul 2024 14:27:41 -0700 Subject: [PATCH 42/43] add code execution samples (#198) Co-authored-by: David Motsonashvili --- .../generative/samples/code_execution.kt | 63 +++++++++ .../samples/java/code_execution.java | 123 ++++++++++++++++++ 2 files changed, 186 insertions(+) create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/code_execution.kt create mode 100644 samples/src/main/java/com/google/ai/client/generative/samples/java/code_execution.java diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/code_execution.kt b/samples/src/main/java/com/google/ai/client/generative/samples/code_execution.kt new file mode 100644 index 00000000..7fcbe1e0 --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/code_execution.kt @@ -0,0 +1,63 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.ai.client.generative.samples + +import com.google.ai.client.generativeai.GenerativeModel +import com.google.ai.client.generativeai.type.Tool + +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key.
To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). + +suspend fun codeExecutionBasic() { + // [START code_execution_basic] + + val model = GenerativeModel( + // Specify a Gemini model appropriate for your use case + modelName = "gemini-1.5-pro", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey, + tools = listOf(Tool.CODE_EXECUTION) + ) + + val response = model.generateContent("What is the sum of the first 50 prime numbers?") + + println(response.text) + // [END code_execution_basic] +} + +suspend fun codeExecutionChat() { + // [START code_execution_chat] + + val model = GenerativeModel( + // Specify a Gemini model appropriate for your use case + modelName = "gemini-1.5-pro", + // Access your API key as a Build Configuration variable (see "Set up your API key" above) + apiKey = BuildConfig.apiKey, + tools = listOf(Tool.CODE_EXECUTION) + ) + + val chat = model.startChat() + + val response = chat.sendMessage("What is the sum of the first 50 prime numbers?") + + println(response.text) + // [END code_execution_chat] +} \ No newline at end of file diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/code_execution.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/code_execution.java new file mode 100644 index 00000000..9bc2a5cc --- /dev/null +++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/code_execution.java @@ -0,0 +1,123 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.ai.client.generative.samples.java; + + +import com.google.ai.client.generativeai.GenerativeModel; +import com.google.ai.client.generativeai.java.ChatFutures; +import com.google.ai.client.generativeai.java.GenerativeModelFutures; +import com.google.ai.client.generativeai.type.Content; +import com.google.ai.client.generativeai.type.GenerateContentResponse; +import com.google.ai.client.generativeai.type.RequestOptions; +import com.google.ai.client.generativeai.type.Tool; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; + +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; + +// Set up your API Key +// ==================== +// +// To use the Gemini API, you'll need an API key. To learn more, see +// the "Set up your API Key section" in the [Gemini API +// quickstart](https://ai.google.dev/gemini-api/docs/quickstart?lang=android#set-up-api-key). 
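+
+// Note: these Java snippets use the GenerativeModelFutures wrapper around the Kotlin
+// GenerativeModel, so each call returns a Guava ListenableFuture and the result is
+// delivered through Futures.addCallback on an executor supplied by the caller.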
+class CodeExecution {
+
+  void codeExecutionBasic() {
+    // [START code_execution_basic]
+    // Specify a Gemini model appropriate for your use case
+    GenerativeModel gm =
+        new GenerativeModel(
+            /* modelName */ "gemini-1.5-pro",
+            // Access your API key as a Build Configuration variable (see "Set up your API key"
+            // above)
+            /* apiKey */ BuildConfig.apiKey,
+            /* generationConfig */ null,
+            /* safetySettings */ null,
+            /* requestOptions */ new RequestOptions(),
+            /* tools */ Collections.singletonList(Tool.Companion.getCODE_EXECUTION()));
+    GenerativeModelFutures model = GenerativeModelFutures.from(gm);
+
+    Content inputContent =
+        new Content.Builder().addText("What is the sum of the first 50 prime numbers?").build();
+
+    // For illustrative purposes only. You should use an executor that fits your needs.
+    Executor executor = Executors.newSingleThreadExecutor();
+
+    ListenableFuture<GenerateContentResponse> response = model.generateContent(inputContent);
+    Futures.addCallback(
+        response,
+        new FutureCallback<GenerateContentResponse>() {
+          @Override
+          public void onSuccess(GenerateContentResponse result) {
+            String resultText = result.getText();
+            System.out.println(resultText);
+          }
+
+          @Override
+          public void onFailure(Throwable t) {
+            t.printStackTrace();
+          }
+        },
+        executor);
+    // [END code_execution_basic]
+  }
+
+  void codeExecutionChat() {
+    // [START code_execution_chat]
+    // Specify a Gemini model appropriate for your use case
+    GenerativeModel gm =
+        new GenerativeModel(
+            /* modelName */ "gemini-1.5-pro",
+            // Access your API key as a Build Configuration variable (see "Set up your API key"
+            // above)
+            /* apiKey */ BuildConfig.apiKey,
+            /* generationConfig */ null,
+            /* safetySettings */ null,
+            /* requestOptions */ new RequestOptions(),
+            /* tools */ Collections.singletonList(Tool.Companion.getCODE_EXECUTION()));
+    GenerativeModelFutures model = GenerativeModelFutures.from(gm);
+
+    Content inputContent =
+        new Content.Builder().addText("What is the sum of the first 50 prime numbers?").build();
+
+    ChatFutures chat = model.startChat();
+
+    // For illustrative purposes only. You should use an executor that fits your needs.
+    Executor executor = Executors.newSingleThreadExecutor();
+
+    ListenableFuture<GenerateContentResponse> response = chat.sendMessage(inputContent);
+    Futures.addCallback(
+        response,
+        new FutureCallback<GenerateContentResponse>() {
+          @Override
+          public void onSuccess(GenerateContentResponse result) {
+            String resultText = result.getText();
+            System.out.println(resultText);
+          }
+
+          @Override
+          public void onFailure(Throwable t) {
+            t.printStackTrace();
+          }
+        },
+        executor);
+    // [END code_execution_chat]
+  }
+}

From 11da79a96a0bbe8532bd7e4891c1982e32a93cc5 Mon Sep 17 00:00:00 2001
From: Rodrigo Lazo Paz
Date: Thu, 11 Jul 2024 09:49:39 -0400
Subject: [PATCH 43/43] Update model configuration name

---
 .../{model_configuration.kt => configure_model_parameters.kt} | 4 ++--
 ...del_configuration.java => configure_model_parameters.java} | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
 rename samples/src/main/java/com/google/ai/client/generative/samples/{model_configuration.kt => configure_model_parameters.kt} (95%)
 rename samples/src/main/java/com/google/ai/client/generative/samples/java/{model_configuration.java => configure_model_parameters.java} (95%)

diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt b/samples/src/main/java/com/google/ai/client/generative/samples/configure_model_parameters.kt
similarity index 95%
rename from samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt
rename to samples/src/main/java/com/google/ai/client/generative/samples/configure_model_parameters.kt
index 7d9f7db7..2c9382e8 100644
--- a/samples/src/main/java/com/google/ai/client/generative/samples/model_configuration.kt
+++ b/samples/src/main/java/com/google/ai/client/generative/samples/configure_model_parameters.kt
@@ -28,7 +28,7 @@
 import com.google.ai.client.generativeai.type.generationConfig
 
 suspend fun configureModel() {
-  // [START configure_model]
+  // [START configure_model_parameters]
   val config = generationConfig {
     temperature = 0.9f
     topK = 16
@@ -43,7 +43,7 @@ suspend fun configureModel() {
           modelName = "gemini-1.5-flash",
           apiKey = BuildConfig.apiKey,
           generationConfig = config)
-  // [END configure_model]
+  // [END configure_model_parameters]
 
   // Added to silence the compiler warning about unused variable.
   print(generativeModel)
diff --git a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java b/samples/src/main/java/com/google/ai/client/generative/samples/java/configure_model_parameters.java
similarity index 95%
rename from samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java
rename to samples/src/main/java/com/google/ai/client/generative/samples/java/configure_model_parameters.java
index cdf77ceb..8b5a916f 100644
--- a/samples/src/main/java/com/google/ai/client/generative/samples/java/model_configuration.java
+++ b/samples/src/main/java/com/google/ai/client/generative/samples/java/configure_model_parameters.java
@@ -28,7 +28,7 @@
 class ConfigureModel {
 
   void configureModel() {
-    // [START configure_model]
+    // [START configure_model_parameters]
     GenerationConfig.Builder configBuilder = new GenerationConfig.Builder();
     configBuilder.temperature = 0.9f;
     configBuilder.topK = 16;
@@ -43,6 +43,6 @@ void configureModel() {
         new GenerativeModel("gemini-1.5-flash", BuildConfig.apiKey, generationConfig);
     GenerativeModelFutures model = GenerativeModelFutures.from(gm);
 
-    // [END configure_model]
+    // [END configure_model_parameters]
   }
 }
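Usage note: the code-execution samples added in PATCH 42 are Kotlin suspend functions, so they must be called from a coroutine. A minimal sketch of driving codeExecutionBasic() follows; the driver itself is hypothetical and assumes it lives in the same samples package, that the module generates BuildConfig.apiKey as the samples expect, and that kotlinx-coroutines is on the classpath (runBlocking is used only for brevity here; an Android app would launch from an existing scope such as lifecycleScope instead).

package com.google.ai.client.generative.samples

import kotlinx.coroutines.runBlocking

// Hypothetical driver, kept in the same package as the samples so codeExecutionBasic()
// resolves without an extra import; BuildConfig.apiKey must be provided by the module.
fun main() = runBlocking {
  // Builds a "gemini-1.5-pro" model with the code-execution tool enabled and prints
  // the model's answer to "What is the sum of the first 50 prime numbers?".
  codeExecutionBasic()
}

Java callers get the same flow without coroutines through the ListenableFuture-based snippets in code_execution.java, at the cost of wiring up a callback and an executor.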