Commit

Apply suggestions from code review
Co-authored-by: rachelsaunders <[email protected]>
rlazo and rachelsaunders authored Jun 24, 2024
1 parent 5a5d0c8 commit 2061786
Showing 5 changed files with 23 additions and 23 deletions.
@@ -51,7 +51,7 @@ suspend fun chatStreaming() {
// Use streaming with multi-turn conversations (like chat)
val generativeModel =
GenerativeModel(
- // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat)
+ // Specify a Gemini model appropriate for your use case
modelName = "gemini-1.5-flash",
// Access your API key as a Build Configuration variable (see "Set up your API key" above)
apiKey = BuildConfig.apiKey)
@@ -70,7 +70,7 @@ suspend fun chatStreaming() {
}

suspend fun chatStreamingWithImages(context: Context) {
- // [START chat_with-images_streaming]
+ // [START chat_streaming_with_images]
// Use streaming with multi-turn conversations (like chat)
val generativeModel =
GenerativeModel(
@@ -96,5 +96,5 @@ suspend fun chatStreamingWithImages(context: Context) {
}

chat.sendMessageStream(inputContent).collect { chunk -> print(chunk.text) }
- // [END chat_with-images_streaming]
+ // [END chat_streaming_with_images]
}
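For reference, here is a minimal self-contained Kotlin sketch of the streaming chat flow that these hunks touch, written against the com.google.ai.client.generativeai client shown above. The function name and the chat history contents are illustrative placeholders, and BuildConfig.apiKey is assumed to be configured as the snippet comments describe.

import com.google.ai.client.generativeai.GenerativeModel
import com.google.ai.client.generativeai.type.content

suspend fun chatStreamingSketch() {
    // Specify a Gemini model appropriate for your use case
    val generativeModel =
        GenerativeModel(
            modelName = "gemini-1.5-flash",
            // Access your API key as a Build Configuration variable
            apiKey = BuildConfig.apiKey)

    // Seed the multi-turn conversation with an illustrative history
    val chat =
        generativeModel.startChat(
            history =
                listOf(
                    content(role = "user") { text("Hello, I have 2 dogs in my house.") },
                    content(role = "model") { text("Great to meet you. What would you like to know?") }))

    // Stream and print each response chunk as it arrives
    chat.sendMessageStream("How many paws are in my house?").collect { chunk -> print(chunk.text) }
}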
@@ -24,25 +24,25 @@ import com.google.ai.client.generativeai.type.content
import com.google.ai.sample.R

suspend fun tokensTextOnly() {
- // [START tokens_text-only]
+ // [START tokens_text_only]
val generativeModel =
GenerativeModel(
- // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat)
+ // Specify a Gemini model appropriate for your use case
modelName = "gemini-1.5-flash",
// Access your API key as a Build Configuration variable (see "Set up your API key" above)
apiKey = BuildConfig.apiKey)

// For text-only input
val (totalTokens) = generativeModel.countTokens("Write a story about a magic backpack.")
print(totalTokens)
- // [END tokens_text-only]
+ // [END tokens_text_only]
}

suspend fun tokensChat() {
// [START tokens_chat]
val generativeModel =
GenerativeModel(
- // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat)
+ // Specify a Gemini model appropriate for your use case
modelName = "gemini-1.5-flash",
// Access your API key as a Build Configuration variable (see "Set up your API key" above)
apiKey = BuildConfig.apiKey)
@@ -64,10 +64,10 @@ suspend fun tokensChat() {
}

suspend fun tokensMultimodalImageInline(context: Context) {
- // [START tokens_multimodal-image_inline]
+ // [START tokens_multimodal_image_inline]
val generativeModel =
GenerativeModel(
- // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat)
+ // Specify a Gemini model appropriate for your use case
modelName = "gemini-1.5-flash",
// Access your API key as a Build Configuration variable (see "Set up your API key" above)
apiKey = BuildConfig.apiKey)
@@ -83,5 +83,5 @@ suspend fun tokensMultimodalImageInline(context: Context) {

val (totalTokens) = generativeModel.countTokens(multiModalContent)
print(totalTokens)
- // [START tokens_multimodal-image_inline]
+ // [START tokens_multimodal_image_inline]
}
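Likewise, a minimal Kotlin sketch of the text-only token-counting snippet under its renamed tokens_text_only region, assuming the same client and BuildConfig.apiKey setup as the hunks above; the function name is a placeholder.

import com.google.ai.client.generativeai.GenerativeModel

suspend fun tokensTextOnlySketch() {
    // Specify a Gemini model appropriate for your use case
    val generativeModel =
        GenerativeModel(
            modelName = "gemini-1.5-flash",
            // Access your API key as a Build Configuration variable
            apiKey = BuildConfig.apiKey)

    // countTokens returns a response whose first component is totalTokens
    val (totalTokens) = generativeModel.countTokens("Write a story about a magic backpack.")
    print(totalTokens)
}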
@@ -37,7 +37,7 @@
class Chat {
void chat() {
// [START chat]
- // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat)
+ // Specify a Gemini model appropriate for your use case
GenerativeModel gm =
new GenerativeModel(
/* modelName */ "gemini-1.5-flash",
@@ -94,7 +94,7 @@ public void onFailure(Throwable t) {

void chatStreaming() {
// [START chat_streaming]
- // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat)
+ // Specify a Gemini model appropriate for your use case
GenerativeModel gm =
new GenerativeModel(
/* modelName */ "gemini-1.5-flash",
@@ -159,8 +159,8 @@ public void onError(Throwable t) {}
}

void chatStreamingWithImages(Context context) {
- // [START chat_with-images_streaming]
- // The Gemini 1.5 models are versatile and work with multi-turn conversations (like chat)
+ // [START chat_streaming_with_images]
+ // Specify a Gemini model appropriate for your use case
GenerativeModel gm =
new GenerativeModel(
/* modelName */ "gemini-1.5-flash",
@@ -223,6 +223,6 @@ public void onSubscribe(Subscription s) {
public void onError(Throwable t) {}
// [END_EXCLUDE]
});
- // [END chat_with-images_streaming]
+ // [END chat_streaming_with_images]
}
}
@@ -34,8 +34,8 @@

class CountTokens {
void tokensTextOnly() {
- // [START tokens_text-only]
- // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts
+ // [START tokens_text_only]
+ // Specify a Gemini model appropriate for your use case
GenerativeModel gm =
new GenerativeModel(
/* modelName */ "gemini-1.5-flash",
@@ -68,12 +68,12 @@ public void onFailure(Throwable t) {
}
},
executor);
- // [END tokens_text-only]
+ // [END tokens_text_only]
}

void tokensChat() {
// [START tokens_chat]
- // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts
+ // Specify a Gemini model appropriate for your use case
GenerativeModel gm =
new GenerativeModel(
/* modelName */ "gemini-1.5-flash",
@@ -127,8 +127,8 @@ public void onFailure(Throwable t) {
}

void tokensMultimodalImageInline(Context context) {
- // [START tokens_multimodal-image_inline]
- // The Gemini 1.5 models are versatile and work with both text-only and multimodal prompts
+ // [START tokens_multimodal_image_inline]
+ // Specify a Gemini model appropriate for your use case
GenerativeModel gm =
new GenerativeModel(
/* modelName */ "gemini-1.5-flash",
@@ -170,7 +170,7 @@ public void onFailure(Throwable t) {
}
},
executor);
- // [END tokens_multimodal-image_inline]
+ // [END tokens_multimodal_image_inline]

}
}
@@ -31,7 +31,7 @@ void configureModel() {

GenerationConfig generationConfig = configBuilder.build();

- // The Gemini 1.5 models are versatile and work with most use cases
+ // Specify a Gemini model appropriate for your use case
GenerativeModel gm =
new GenerativeModel("gemini-1.5-flash", BuildConfig.apiKey, generationConfig);

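The last hunk only shows the tail of the Java configureModel example, so here is a hedged Kotlin counterpart of the same idea. It assumes the generationConfig builder DSL and the generationConfig parameter on the GenerativeModel constructor from the same SDK; the specific parameter values and function name are illustrative, not part of this commit.

import com.google.ai.client.generativeai.GenerativeModel
import com.google.ai.client.generativeai.type.generationConfig

fun configureModelSketch() {
    // Illustrative generation parameters; tune them for your use case
    val config = generationConfig {
        temperature = 0.9f
        topK = 16
        topP = 0.1f
        maxOutputTokens = 200
    }

    // Specify a Gemini model appropriate for your use case
    val generativeModel =
        GenerativeModel(
            modelName = "gemini-1.5-flash",
            apiKey = BuildConfig.apiKey,
            generationConfig = config)
}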
