diff --git a/examples/Apps_script_and_Workspace_codelab/utils.gs b/examples/Apps_script_and_Workspace_codelab/utils.gs
index fce96eb12..2e4f2415b 100644
--- a/examples/Apps_script_and_Workspace_codelab/utils.gs
+++ b/examples/Apps_script_and_Workspace_codelab/utils.gs
@@ -16,8 +16,7 @@
 
 const properties = PropertiesService.getScriptProperties().getProperties();
 const geminiApiKey = properties['GOOGLE_API_KEY'];
-const geminiEndpoint = `https://generativelanguage.googleapis.com/v1beta/models/gemini-1.0-pro-latest:generateContent?key=${geminiApiKey}`;
-const geminiProVisionEndpoint = `https://generativelanguage.googleapis.com/v1beta/models/gemini-1.0-pro-vision-latest:generateContent?key=${geminiApiKey}`;
+const geminiEndpoint = `https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash-latest:generateContent?key=${geminiApiKey}`;
 
 const NUM_SLIDES = 3;
 
@@ -156,7 +155,7 @@ function callGeminiProVision(prompt, image, temperature=0) {
     'payload': JSON.stringify(payload)
   };
 
-  const response = UrlFetchApp.fetch(geminiProVisionEndpoint, options);
+  const response = UrlFetchApp.fetch(geminiEndpoint, options);
   const data = JSON.parse(response);
   const content = data["candidates"][0]["content"]["parts"][0]["text"];
   return content;
diff --git a/examples/Talk_to_documents_with_embeddings.ipynb b/examples/Talk_to_documents_with_embeddings.ipynb
index df17976d1..6d78b9aed 100644
--- a/examples/Talk_to_documents_with_embeddings.ipynb
+++ b/examples/Talk_to_documents_with_embeddings.ipynb
@@ -570,15 +570,13 @@
             "models/gemini-1.0-pro\n",
             "models/gemini-1.0-pro-001\n",
             "models/gemini-1.0-pro-latest\n",
-            "models/gemini-1.0-pro-vision-latest\n",
             "models/gemini-1.5-flash\n",
             "models/gemini-1.5-flash-001\n",
             "models/gemini-1.5-flash-latest\n",
             "models/gemini-1.5-pro\n",
             "models/gemini-1.5-pro-001\n",
             "models/gemini-1.5-pro-latest\n",
-            "models/gemini-pro\n",
-            "models/gemini-pro-vision\n"
+            "models/gemini-pro\n"
           ]
         }
       ],
diff --git a/quickstarts/Gemini_Flash_Introduction.ipynb b/quickstarts/Gemini_Flash_Introduction.ipynb
index f48c83cf4..64940b30d 100644
--- a/quickstarts/Gemini_Flash_Introduction.ipynb
+++ b/quickstarts/Gemini_Flash_Introduction.ipynb
@@ -491,8 +491,7 @@
         "You must first pick which model version you want to experiment with selecting on the listbox below - The available models are:\n",
         "\n",
         "- `models/gemini-1.5-flash`\n",
-        "- `models/gemini-1.5-pro`\n",
-        "- `models/gemini-pro-vision`"
+        "- `models/gemini-1.5-pro`\n"
       ]
     },
     {
@@ -503,7 +502,7 @@
       },
       "outputs": [],
       "source": [
-        "version = 'models/gemini-1.5-flash' # @param [\"models/gemini-1.5-flash\", \"models/gemini-1.5-pro\", \"models/gemini-pro-vision\"]\n",
+        "version = 'models/gemini-1.5-flash' # @param [\"models/gemini-1.5-flash\", \"models/gemini-1.5-pro\"]\n",
         "model = genai.GenerativeModel(version)"
       ]
     },
diff --git a/quickstarts/Models.ipynb b/quickstarts/Models.ipynb
index 506fdeacf..9f8b8cb74 100644
--- a/quickstarts/Models.ipynb
+++ b/quickstarts/Models.ipynb
@@ -134,15 +134,13 @@
             "models/gemini-1.0-pro\n",
             "models/gemini-1.0-pro-001\n",
             "models/gemini-1.0-pro-latest\n",
-            "models/gemini-1.0-pro-vision-latest\n",
             "models/gemini-1.5-flash\n",
             "models/gemini-1.5-flash-001\n",
             "models/gemini-1.5-flash-latest\n",
             "models/gemini-1.5-pro\n",
             "models/gemini-1.5-pro-001\n",
             "models/gemini-1.5-pro-latest\n",
-            "models/gemini-pro\n",
-            "models/gemini-pro-vision\n"
+            "models/gemini-pro\n"
           ]
         }
       ],
diff --git a/quickstarts/rest/Prompting_REST.ipynb b/quickstarts/rest/Prompting_REST.ipynb
index c8c53e82d..2ca6500a0 100644
--- a/quickstarts/rest/Prompting_REST.ipynb
+++ b/quickstarts/rest/Prompting_REST.ipynb
@@ -110,7 +110,7 @@
             "      \"content\": {\n",
             "        \"parts\": [\n",
             "          {\n",
-            "            \"text\": \"```python\\n# Example list to be sorted\\nlist1 = [5, 3, 1, 2, 4]\\n\\n# Sort the list in ascending order\\nlist1.sort()\\n\\n# Print the sorted list\\nprint(list1)\\n```\"\n",
+            "            \"text\": \"```python\\n# Example list to be sorted\\nmy_list = [5, 3, 1, 2, 4]\\n\\n# Sort the list in ascending order using the sort() method\\nmy_list.sort()\\n\\n# Print the sorted list\\nprint(my_list)\\n```\\n\\nOutput:\\n\\n```\\n[1, 2, 3, 4, 5]\\n```\"\n",
             "          }\n",
             "        ],\n",
             "        \"role\": \"model\"\n",
@@ -137,25 +137,10 @@
             "      ]\n",
             "    }\n",
             "  ],\n",
-            "  \"promptFeedback\": {\n",
-            "    \"safetyRatings\": [\n",
-            "      {\n",
-            "        \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n",
-            "        \"probability\": \"NEGLIGIBLE\"\n",
-            "      },\n",
-            "      {\n",
-            "        \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n",
-            "        \"probability\": \"NEGLIGIBLE\"\n",
-            "      },\n",
-            "      {\n",
-            "        \"category\": \"HARM_CATEGORY_HARASSMENT\",\n",
-            "        \"probability\": \"NEGLIGIBLE\"\n",
-            "      },\n",
-            "      {\n",
-            "        \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n",
-            "        \"probability\": \"NEGLIGIBLE\"\n",
-            "      }\n",
-            "    ]\n",
+            "  \"usageMetadata\": {\n",
+            "    \"promptTokenCount\": 9,\n",
+            "    \"candidatesTokenCount\": 87,\n",
+            "    \"totalTokenCount\": 96\n",
             "  }\n",
             "}\n"
           ]
@@ -182,9 +167,7 @@
       "source": [
         "### Use images in your prompt\n",
         "\n",
-        "Here we download an image from a URL and pass that image in our prompt.\n",
-        "\n",
-        "First, we download the image and load it with PIL:"
+        "Here you will download an image from a URL and pass that image in the prompt."
       ]
     },
     {
@@ -200,9 +183,7 @@
           "text": [
             "  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n",
             "                                 Dload  Upload   Total   Spent    Left  Speed\n",
-            "\r",
-            "  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r",
-            "100  349k  100  349k    0     0  1430k      0 --:--:-- --:--:-- --:--:-- 1436k\n"
+            "\r  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r100  349k  100  349k    0     0  1548k      0 --:--:-- --:--:-- --:--:-- 1551k\n"
           ]
         }
       ],
@@ -244,9 +225,9 @@
         "id": "ZZenFznXQvJF"
       },
       "source": [
-        "Now we will base64 encode the image, and include it in our prompt.\n",
+        "Now you will base64 encode the image, and include it in the prompt.\n",
         "\n",
-        "There are slight output differences of different base64 encoding tools, so we have written two examples for you.\n",
+        "There are slight output differences of different base64 encoding tools, so you will need to pick the right one.\n",
         "\n",
         "The following will work in Google Colab."
       ]
@@ -287,7 +268,7 @@
         "id": "qFG3q7tJY2NW"
       },
       "source": [
-        "Then we can include the image in our prompt by just passing in the `request.json` created to `generateContent`. Note that you will need to use the `gemini-pro-vision` model if your prompt contains images."
+        "Then you can include the image in the prompt by just passing in the `request.json` created to `generateContent`."
       ]
     },
     {
@@ -307,7 +288,7 @@
             "      \"content\": {\n",
             "        \"parts\": [\n",
             "          {\n",
-            "            \"text\": \" {\\n  \\\"description\\\": \\\"The Jetpack Backpack is a lightweight backpack that looks like a normal backpack but has a number of features that make it perfect for travel. It has a built-in USB-C charging port, so you can charge your devices on the go. It also has a 15-minute battery life, so you can use it for short trips without having to worry about running out of power. The backpack also has retractable boosters that can be used to give you a boost of speed when you need it. The boosters are powered by steam, so they are green and clean.\\\",\\n  \\\"features\\\": [\\n    \\\"Fits 18\\\\\\\" laptop\\\",\\n    \\\"Padded strap support\\\",\\n    \\\"Lightweight\\\",\\n    \\\"Retractable boosters\\\",\\n    \\\"USB-C charging\\\",\\n    \\\"15-minute battery life\\\",\\n    \\\"Steam-powered, green/clean\\\"\\n  ]\\n}\"\n",
+            "            \"text\": \"```json\\n{\\n \\\"description\\\": \\\"The Jetpack Backpack is a backpack that allows the user to fly. It looks and functions like a normal backpack, but has retractable boosters that are powered by steam. The backpack has a 15-minute battery life and is charged via USB-C.  The backpack is also lightweight and has padded strap support.\\\",\\n \\\"features\\\": [\\n  \\\"retractable boosters\\\",\\n  \\\"steam-powered\\\",\\n  \\\"green/clean\\\",\\n  \\\"15-min battery life\\\",\\n  \\\"USB-C charging\\\",\\n  \\\"padded strap support\\\",\\n  \\\"lightweight\\\",\\n  \\\"looks like a normal backpack\\\",\\n  \\\"fits 18\\\\\\\" laptop\\\"\\n ]\\n}\\n```\"\n",
             "          }\n",
             "        ],\n",
             "        \"role\": \"model\"\n",
@@ -334,25 +315,10 @@
             "      ]\n",
             "    }\n",
             "  ],\n",
-            "  \"promptFeedback\": {\n",
-            "    \"safetyRatings\": [\n",
-            "      {\n",
-            "        \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n",
-            "        \"probability\": \"NEGLIGIBLE\"\n",
-            "      },\n",
-            "      {\n",
-            "        \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n",
-            "        \"probability\": \"NEGLIGIBLE\"\n",
-            "      },\n",
-            "      {\n",
-            "        \"category\": \"HARM_CATEGORY_HARASSMENT\",\n",
-            "        \"probability\": \"NEGLIGIBLE\"\n",
-            "      },\n",
-            "      {\n",
-            "        \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n",
-            "        \"probability\": \"NEGLIGIBLE\"\n",
-            "      }\n",
-            "    ]\n",
+            "  \"usageMetadata\": {\n",
+            "    \"promptTokenCount\": 331,\n",
+            "    \"candidatesTokenCount\": 154,\n",
+            "    \"totalTokenCount\": 485\n",
             "  }\n",
             "}\n"
           ]
@@ -363,23 +329,14 @@
           "text": [
             "  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n",
             "                                 Dload  Upload   Total   Spent    Left  Speed\n",
-            "\r",
-            "  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r",
-            "100  466k    0     0  100  466k      0  2187k --:--:-- --:--:-- --:--:-- 2188k\r",
-            "100  466k    0     0  100  466k      0   367k  0:00:01  0:00:01 --:--:--  367k\r",
-            "100  466k    0     0  100  466k      0   205k  0:00:02  0:00:02 --:--:--  205k\r",
-            "100  466k    0     0  100  466k      0   142k  0:00:03  0:00:03 --:--:--  142k\r",
-            "100  466k    0     0  100  466k      0   109k  0:00:04  0:00:04 --:--:--  109k\r",
-            "100  466k    0     0  100  466k      0  90514  0:00:05  0:00:05 --:--:--     0\r",
-            "100  468k    0  1952  100  466k    311  76249  0:00:06  0:00:06 --:--:--   391\r",
-            "100  468k    0  1952  100  466k    311  76248  0:00:06  0:00:06 --:--:--   489\n"
+            "\r  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r100  466k    0     0  100  466k      0   298k  0:00:01  0:00:01 --:--:--  298k\r100  467k    0  1410  100  466k    727   240k  0:00:01  0:00:01 --:--:--  240k\n"
           ]
         }
       ],
       "source": [
         "%%bash\n",
         "\n",
-        "curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro-vision:generateContent?key=${GOOGLE_API_KEY}\" \\\n",
+        "curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=${GOOGLE_API_KEY}\" \\\n",
         "        -H 'Content-Type: application/json' \\\n",
         "        -d @request.json"
       ]
@@ -400,7 +357,7 @@
       },
       "source": [
         "```\n",
-        "curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro-vision:generateContent?key=${GOOGLE_API_KEY}\" \\\n",
+        "curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=${GOOGLE_API_KEY}\" \\\n",
         "        -H 'Content-Type: application/json' \\\n",
         "        -d '{\n",
         "  \"contents\":[\n",
@@ -426,7 +383,7 @@
         "id": "gpMssqFdNRDS"
       },
       "source": [
-        "Here we are `base64` encoding the image, and saving the curl request with the image data in a JSON file. Run this cell to see which version of `base64` you have. Based on the output, you may need to run this request on either a Mac or on Colab."
+        "Here the image is being `base64` encoded, and the curl request with the image data is saved in a JSON file. Run this cell to see which version of `base64` you have. Based on the output, you may need to run this request on either a Mac or on Colab."
       ]
     },
     {