From 76ae8f89eed4789af0be9e7876d0089909abc419 Mon Sep 17 00:00:00 2001
From: Nate Bosch
Date: Mon, 7 Oct 2024 14:50:35 -0700
Subject: [PATCH] Add sample TOC and import hints (#214)

Add a table of contents to the samples directory.
Add comments with the required import to individual samples.
---
 samples/dart/README.md                      | 13 +++++++++++++
 samples/dart/bin/chat.dart                  |  6 ++++++
 samples/dart/bin/code_execution.dart        |  4 ++++
 samples/dart/bin/controlled_generation.dart |  4 ++++
 samples/dart/bin/count_tokens.dart          | 10 ++++++++++
 samples/dart/bin/function_calling.dart      |  2 ++
 samples/dart/bin/safety_settings.dart       |  4 ++++
 samples/dart/bin/system_instructions.dart   |  2 ++
 samples/dart/bin/text_generation.dart       | 12 ++++++++++++
 9 files changed, 57 insertions(+)

diff --git a/samples/dart/README.md b/samples/dart/README.md
index 25f64c8..1d266a4 100644
--- a/samples/dart/README.md
+++ b/samples/dart/README.md
@@ -10,3 +10,16 @@ To try these samples out, follow these steps:
   the Gemini generative models, or run the below commands with an environment
   containing this variable.
 - Run any sample from the `bin` directory (e.g., `dart bin/simple_text.dart`).
+
+## Contents
+
+| File | Description |
+|----------------------------------------------------------------| ----------- |
+| [chat.dart](bin/chat.dart) | Multi-turn chat conversations |
+| [code_execution.dart](bin/code_execution.dart) | Executing code |
+| [controlled_generation.dart](bin/controlled_generation.dart) | Generating content with output constraints (e.g. JSON mode) |
+| [count_tokens.dart](bin/count_tokens.dart) | Counting input and output tokens |
+| [function_calling.dart](bin/function_calling.dart) | Using function calling |
+| [safety_settings.dart](bin/safety_settings.dart) | Setting and using safety controls |
+| [system_instructions.dart](bin/system_instructions.dart) | Setting system instructions |
+| [text_generation.dart](bin/text_generation.dart) | Generating text |
diff --git a/samples/dart/bin/chat.dart b/samples/dart/bin/chat.dart
index 95acbe0..4fce399 100644
--- a/samples/dart/bin/chat.dart
+++ b/samples/dart/bin/chat.dart
@@ -27,6 +27,8 @@ final apiKey = () {
 
 Future chat() async {
   // [START chat]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -46,6 +48,8 @@ Future chat() async {
 
 Future chatStreaming() async {
   // [START chat_streaming]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -71,6 +75,8 @@ Future chatStreaming() async {
 
 Future chatStreamingWithImages() async {
   // [START chat_streaming_with_images]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
diff --git a/samples/dart/bin/code_execution.dart b/samples/dart/bin/code_execution.dart
index a887ed6..b5986de 100644
--- a/samples/dart/bin/code_execution.dart
+++ b/samples/dart/bin/code_execution.dart
@@ -33,6 +33,8 @@ final apiKey = () {
 
 Future codeExecutionBasic() async {
   // [START code_execution_basic]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     // Specify a Gemini model appropriate for your use case
     model: 'gemini-1.5-flash',
@@ -50,6 +52,8 @@ Future codeExecutionBasic() async {
 
 Future codeExecutionChat() async {
   // [START code_execution_chat]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     // Specify a Gemini model appropriate for your use case
     model: 'gemini-1.5-flash',
diff --git a/samples/dart/bin/controlled_generation.dart b/samples/dart/bin/controlled_generation.dart
index dc28929..2a806d1 100644
--- a/samples/dart/bin/controlled_generation.dart
+++ b/samples/dart/bin/controlled_generation.dart
@@ -27,6 +27,8 @@ final apiKey = () {
 
 Future jsonControlledGeneration() async {
   // [START json_controlled_generation]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final schema = Schema.array(
       description: 'List of recipes',
       items: Schema.object(properties: {
@@ -50,6 +52,8 @@ Future jsonControlledGeneration() async {
 
 Future jsonNoSchema() async {
   // [START json_no_schema]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-pro',
     apiKey: apiKey,
diff --git a/samples/dart/bin/count_tokens.dart b/samples/dart/bin/count_tokens.dart
index 9501d6d..1d7d6e3 100644
--- a/samples/dart/bin/count_tokens.dart
+++ b/samples/dart/bin/count_tokens.dart
@@ -27,6 +27,8 @@ final apiKey = () {
 
 Future tokensTextOnly() async {
   // [START tokens_text_only]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -39,6 +41,8 @@ Future tokensTextOnly() async {
 
 Future tokensChat() async {
   // [START tokens_chat]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -66,6 +70,8 @@ Future tokensChat() async {
 
 Future tokensMultimodalImageInline() async {
   // [START tokens_multimodal_image_inline]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -95,6 +101,8 @@ Future tokensMultimodalImageInline() async {
 
 Future tokensSystemInstructions() async {
   // [START tokens_system_instruction]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   var model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -117,6 +125,8 @@ Future tokensSystemInstructions() async {
 
 Future tokensTools() async {
   // [START tokens_tools]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   var model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
diff --git a/samples/dart/bin/function_calling.dart b/samples/dart/bin/function_calling.dart
index 2452146..30dd552 100644
--- a/samples/dart/bin/function_calling.dart
+++ b/samples/dart/bin/function_calling.dart
@@ -27,6 +27,8 @@ final apiKey = () {
 
 Future functionCalling() async {
   // [START function_calling]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   Map setLightValues(Map args) {
     return args;
   }
diff --git a/samples/dart/bin/safety_settings.dart b/samples/dart/bin/safety_settings.dart
index c773a9c..a53d864 100644
--- a/samples/dart/bin/safety_settings.dart
+++ b/samples/dart/bin/safety_settings.dart
@@ -27,6 +27,8 @@ final apiKey = () {
 
 Future safetySettings() async {
   // [START safety_settings]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -55,6 +57,8 @@ Future safetySettings() async {
 
 Future safetySettingsMulti() async {
   // [START safety_settings_multi]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
diff --git a/samples/dart/bin/system_instructions.dart b/samples/dart/bin/system_instructions.dart
index b22ea5d..7467d99 100644
--- a/samples/dart/bin/system_instructions.dart
+++ b/samples/dart/bin/system_instructions.dart
@@ -27,6 +27,8 @@ final apiKey = () {
 
 Future systemInstructions() async {
   // [START system_instructions]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
diff --git a/samples/dart/bin/text_generation.dart b/samples/dart/bin/text_generation.dart
index 2d3703b..763581a 100644
--- a/samples/dart/bin/text_generation.dart
+++ b/samples/dart/bin/text_generation.dart
@@ -27,6 +27,8 @@ final apiKey = () {
 
 Future textGenTextOnlyPrompt() async {
   // [START text_gen_text_only_prompt]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -40,6 +42,8 @@ Future textGenTextOnlyPrompt() async {
 
 Future textGenTextOnlyPromptStreaming() async {
   // [START text_gen_text_only_prompt_streaming]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -55,6 +59,8 @@ Future textGenTextOnlyPromptStreaming() async {
 
 Future textGenMultimodalOneImagePrompt() async {
   // [START text_gen_multimodal_one_image_prompt]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -76,6 +82,8 @@ Future textGenMultimodalOneImagePrompt() async {
 
 Future textGenMultimodalOneImagePromptStreaming() async {
   // [START text_gen_multimodal_one_image_prompt_streaming]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -99,6 +107,8 @@ Future textGenMultimodalOneImagePromptStreaming() async {
 
 Future textGenMultimodalMultiImagePrompt() async {
   // [START text_gen_multimodal_multi_image_prompt]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
@@ -125,6 +135,8 @@ Future textGenMultimodalMultiImagePrompt() async {
 
 Future textGenMultimodalMultiImagePromptStreaming() async {
   // [START text_gen_multimodal_multi_image_prompt_streaming]
+  // Make sure to include this import:
+  // import 'package:google_generative_ai/google_generative_ai.dart';
   final model = GenerativeModel(
     model: 'gemini-1.5-flash',
     apiKey: apiKey,
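The hinted import plus the constructor arguments visible in the hunks above are everything a reader needs to turn one of these snippets into a standalone program. Below is a minimal sketch of such a program. The `main` wrapper, the `GEMINI_API_KEY` variable name, and the prompt text are illustrative assumptions rather than part of this patch; `GenerativeModel`, `Content.text`, `generateContent`, and `response.text` come from the `google_generative_ai` package that the new comments point to.

```dart
// Hypothetical standalone sample assembled from the pieces referenced in this
// patch; it is not a file in the repository.
import 'dart:io';

// The import that the new comments remind readers to include:
import 'package:google_generative_ai/google_generative_ai.dart';

Future<void> main() async {
  // Assumption: the API key is read from an environment variable; the exact
  // variable name used by the samples may differ.
  final apiKey = Platform.environment['GEMINI_API_KEY'];
  if (apiKey == null) {
    stderr.writeln('Set GEMINI_API_KEY before running this sample.');
    exit(1);
  }

  // Mirrors the model construction shown in the hunks above.
  final model = GenerativeModel(
    model: 'gemini-1.5-flash',
    apiKey: apiKey,
  );

  // Illustrative prompt; generateContent takes a list of Content parts.
  final response = await model.generateContent([
    Content.text('Write a one-sentence summary of what this sample does.'),
  ]);
  print(response.text);
}
```

Assuming `google_generative_ai` is already listed as a dependency in the surrounding package's `pubspec.yaml`, a file like this runs with `dart run`, the same way the README describes running the existing samples from `bin`.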