Switch gemini-pro to gemini-1.5-flash
logankilpatrick committed Jan 12, 2025
1 parent 5ced6c1 commit 4898b01
Showing 10 changed files with 21 additions and 32 deletions.
@@ -562,7 +562,7 @@
"# the chain.\n",
"# 3. The `context` and `question` are then passed to the prompt where they\n",
"# are populated in the respective variables.\n",
"# 4. This prompt is then passed to the LLM (`gemini-pro`).\n",
"# 4. This prompt is then passed to the LLM (`gemini-1.5-flash`).\n",
"# 5. Output from the LLM is passed through an output parser\n",
"# to structure the model's response.\n",
"rag_chain = (\n",
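For reference, a minimal sketch of how a chain like the one described in these comments is typically assembled with LangChain and `ChatGoogleGenerativeAI`; the toy `retriever`, `format_docs` helper, and prompt text below are illustrative assumptions, not part of this commit.

```python
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_google_genai import ChatGoogleGenerativeAI

# Toy stand-in for a real vector-store retriever (illustrative only).
retriever = RunnableLambda(
    lambda question: [Document(page_content="Gemini 1.5 Flash is a fast, low-latency multimodal model.")]
)

def format_docs(docs):
    # Join retrieved documents into one context string for the prompt.
    return "\n\n".join(doc.page_content for doc in docs)

prompt = ChatPromptTemplate.from_template(
    "Answer the question using only this context:\n{context}\n\nQuestion: {question}"
)

llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")  # reads GOOGLE_API_KEY

rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

print(rag_chain.invoke("Which model does the chain use?"))
```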
@@ -556,7 +556,7 @@
"# 2. Use the `RunnablePassthrough` option to provide question during invoke.\n",
"# 3. The `context` and `question` are then passed to the prompt and\n",
"# input variables in the prompt are populated.\n",
"# 4. The prompt is then passed to the LLM (`gemini-pro`).\n",
"# 4. The prompt is then passed to the LLM (`gemini-1.5-flash`).\n",
"# 5. Output from the LLM is passed through an output parser\n",
"# to structure the model response.\n",
"rag_chain = (\n",
@@ -355,8 +355,8 @@
"source": [
"PROJECT_ID = \"\" # leave this empty\n",
"API_ENDPOINT = \"generativelanguage.googleapis.com\"\n",
"embedding_model = \"embedding-001\" # embedding model \n",
"generative_model = \"gemini-pro\" # language model \n",
"embedding_model = \"text-embedding-004\" # embedding model \n",
"generative_model = \"gemini-1.5-flash\" # language model \n",
"\n",
"# Products Collection\n",
"if not client.collections.exists(\"Products\"):\n",
@@ -911,8 +911,8 @@
"source": [
"PROJECT_ID = \"\" # leave this empty\n",
"API_ENDPOINT = \"generativelanguage.googleapis.com\"\n",
"embedding_model = \"embedding-001\" # embedding model \n",
"generative_model = \"gemini-pro\" # language mdodel \n",
"embedding_model = \"text-embedding-004\" # embedding model \n",
"generative_model = \"gemini-1.5-flash\" # language mdodel \n",
"\n",
"# Personalized Collection\n",
"\n",
@@ -968,7 +968,7 @@
"personas = client.collections.get(\"Personas\")\n",
"\n",
"for persona in ['Alice', 'Bob']:\n",
" generated_description = gemini_flash_model.generate_content([\"Create a fictional buyer persona named \" + persona + \", write a short description about them\"]) # use gemini-pro to generate persona description\n",
" generated_description = gemini_flash_model.generate_content([\"Create a fictional buyer persona named \" + persona + \", write a short description about them\"]) # use gemini-1.5-flash to generate persona description\n",
" uuid = personas.data.insert({\n",
" \"name\": persona,\n",
" \"description\": generated_description.text\n",
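As context for the model names swapped in above, a small sketch of calling both the embedding model and the generative model directly through the `google-generativeai` SDK; the sample text and API-key handling are assumptions, not taken from this notebook.

```python
import os
import google.generativeai as genai

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

# Embed a document with the embedding model referenced above.
embedding = genai.embed_content(
    model="models/text-embedding-004",
    content="A lightweight waterproof hiking jacket.",
    task_type="retrieval_document",
)
print(len(embedding["embedding"]))  # length of the embedding vector

# Generate a persona description with the generative model referenced above.
model = genai.GenerativeModel("gemini-1.5-flash")
response = model.generate_content(
    "Create a fictional buyer persona named Alice, write a short description about them"
)
print(response.text)
```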
2 changes: 1 addition & 1 deletion quickstarts/Authentication.ipynb
@@ -265,7 +265,7 @@
"Or, if you're calling the API through your terminal using `cURL`, you can copy and paste this code to read your key from the environment variable.\n",
"\n",
"```\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY\" \\\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY\" \\\n",
" -H 'Content-Type: application/json' \\\n",
" -X POST \\\n",
" -d '{\n",
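The request made by the `cURL` snippet above can also be reproduced in Python with `requests`, reading the key from the same environment variable; this is a sketch under those assumptions, not part of the notebook.

```python
import os
import requests

url = (
    "https://generativelanguage.googleapis.com/v1beta/models/"
    "gemini-1.5-flash:generateContent?key=" + os.environ["GOOGLE_API_KEY"]
)
payload = {"contents": [{"parts": [{"text": "Write a one-line greeting."}]}]}

response = requests.post(url, json=payload)  # sends Content-Type: application/json
response.raise_for_status()
print(response.json()["candidates"][0]["content"]["parts"][0]["text"])
```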
2 changes: 1 addition & 1 deletion quickstarts/Prompting.ipynb
@@ -431,7 +431,7 @@
"source": [
"Or, set the `generation_config` on an individual call to `generate_content`. Any values set there override values on the model constructor.\n",
"\n",
"Note: Although you can set the `candidate_count` in the generation_config, gemini-pro models will only return a single candidate at the this time."
"Note: Although you can set the `candidate_count` in the generation_config, gemini-1.5-flash models will only return a single candidate at the this time."
]
},
{
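A brief sketch of overriding `generation_config` on a single `generate_content` call with the Python SDK; the prompt and parameter values are illustrative assumptions, and the key is expected in `GOOGLE_API_KEY`.

```python
import os
import google.generativeai as genai

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

model = genai.GenerativeModel("gemini-1.5-flash")
response = model.generate_content(
    "Write a haiku about the sea.",
    generation_config=genai.types.GenerationConfig(
        candidate_count=1,      # only a single candidate is returned at this time
        temperature=0.7,
        max_output_tokens=100,
    ),
)
print(response.text)
```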
10 changes: 4 additions & 6 deletions quickstarts/rest/Function_calling_REST.ipynb
@@ -132,9 +132,7 @@
"id": "ocMX8ebNrj0A"
},
"source": [
"The following model supports function calling:\n",
"\n",
"* `gemini-pro`"
"The all Gemini models support function calling."
]
},
{
@@ -250,7 +248,7 @@
"source": [
"%%bash\n",
"\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY\" \\\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY\" \\\n",
" -H 'Content-Type: application/json' \\\n",
" -d '{\n",
" \"contents\": {\n",
@@ -439,7 +437,7 @@
"source": [
"%%bash\n",
"\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY\" \\\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY\" \\\n",
" -H 'Content-Type: application/json' \\\n",
" -d '{\n",
" \"contents\": [{\n",
@@ -634,7 +632,7 @@
"source": [
"%%bash\n",
"\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY\" \\\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY\" \\\n",
" -H 'Content-Type: application/json' \\\n",
" -d '{\n",
" \"contents\": [{\n",
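For comparison with the REST payloads above, a hedged sketch of function calling through the Python SDK with automatic function calling enabled; the `get_exchange_rate` helper and its fixed return value are hypothetical, not from this notebook.

```python
import os
import google.generativeai as genai

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

def get_exchange_rate(currency_from: str, currency_to: str) -> dict:
    """Hypothetical helper; a real implementation would query a rates API."""
    return {"currency_from": currency_from, "currency_to": currency_to, "rate": 1.08}

# Pass the Python function as a tool; the SDK builds the function declaration.
model = genai.GenerativeModel("gemini-1.5-flash", tools=[get_exchange_rate])
chat = model.start_chat(enable_automatic_function_calling=True)

response = chat.send_message("How much is 100 EUR in USD?")
print(response.text)
```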
2 changes: 1 addition & 1 deletion quickstarts/rest/Models_REST.ipynb
@@ -129,7 +129,7 @@
"source": [
"%%bash\n",
"\n",
"curl https://generativelanguage.googleapis.com/v1beta/models/gemini-pro?key=$GOOGLE_API_KEY"
"curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash?key=$GOOGLE_API_KEY"
]
},
{
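The model-metadata lookup shown in the curl call above has a direct Python-SDK counterpart; a small sketch, assuming the key is in `GOOGLE_API_KEY`.

```python
import os
import google.generativeai as genai

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

# Fetch metadata for the model used throughout these quickstarts.
info = genai.get_model("models/gemini-1.5-flash")
print(info.display_name)
print(info.input_token_limit, info.output_token_limit)
```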
17 changes: 4 additions & 13 deletions quickstarts/rest/Prompting_REST.ipynb

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions quickstarts/rest/Safety_REST.ipynb
@@ -190,7 +190,7 @@
"source": [
"%%bash\n",
"\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY\" \\\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY\" \\\n",
" -H 'Content-Type: application/json' \\\n",
" -X POST \\\n",
" -d @request.json 2> /dev/null | tee response.json"
@@ -263,7 +263,7 @@
"source": [
"%%bash\n",
"\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY\" \\\n",
"curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY\" \\\n",
" -H 'Content-Type: application/json' \\\n",
" -X POST \\\n",
" -d @request.json 2> /dev/null > response.json"
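A hedged sketch of an equivalent request through the Python SDK with explicit safety settings; the prompt and the category/threshold choices are illustrative and not taken from `request.json`.

```python
import os
import google.generativeai as genai
from google.generativeai.types import HarmCategory, HarmBlockThreshold

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

model = genai.GenerativeModel("gemini-1.5-flash")
response = model.generate_content(
    "Tell me a joke about computers.",
    safety_settings={
        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
    },
)
print(response.text)
print(response.candidates[0].safety_ratings)  # per-category safety ratings
```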
2 changes: 1 addition & 1 deletion quickstarts/rest/Streaming_REST.ipynb
@@ -129,7 +129,7 @@
}
],
"source": [
"!curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:streamGenerateContent?alt=sse&key=${GOOGLE_API_KEY}\" \\\n",
"!curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=${GOOGLE_API_KEY}\" \\\n",
" -H 'Content-Type: application/json' \\\n",
" --no-buffer \\\n",
" -d '{ \"contents\":[{\"parts\":[{\"text\": \"Write a cute story about cats.\"}]}]}'"
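The SSE streaming call above has a Python-SDK counterpart using `stream=True`; a small sketch, assuming the key is in `GOOGLE_API_KEY`.

```python
import os
import google.generativeai as genai

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

model = genai.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Write a cute story about cats.", stream=True)

# Chunks arrive incrementally, like the server-sent events in the curl call.
for chunk in response:
    print(chunk.text, end="")
```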
