diff --git a/examples/langchain/Gemini_LangChain_QA_Chroma_WebLoad.ipynb b/examples/langchain/Gemini_LangChain_QA_Chroma_WebLoad.ipynb
new file mode 100644
index 000000000..ff90970aa
--- /dev/null
+++ b/examples/langchain/Gemini_LangChain_QA_Chroma_WebLoad.ipynb
@@ -0,0 +1,636 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Tce3stUlHN0L"
+ },
+ "source": [
+ "##### Copyright 2024 Google LLC."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "cellView": "form",
+ "id": "tuOe1ymfHZPu"
+ },
+ "outputs": [],
+ "source": [
+ "# @title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "0c5ea3f4a75c"
+ },
+ "source": [
+ "# Gemini API: Question Answering using LangChain and Chroma"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "awKO767lQIWh"
+ },
+ "source": [
+ "
\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "479790a71f3c"
+ },
+ "source": [
+ "## Overview\n",
+ "\n",
+ "[Gemini](https://ai.google.dev/models/gemini) is a family of generative AI models that lets developers generate content and solve problems. These models are designed and trained to handle both text and images as input.\n",
+ "\n",
+ "[LangChain](https://www.langchain.com/) is a data framework designed to make integration of Large Language Models (LLM) like Gemini easier for applications.\n",
+ "\n",
+ "[Chroma](https://docs.trychroma.com/) is an open-source embedding database focused on simplicity and developer productivity. Chroma allows users to store embeddings and their metadata, embed documents and queries, and search the embeddings quickly.\n",
+ "\n",
+ "In this notebook, you'll learn how to create an application that answers questions using data from a website with the help of Gemini, LangChain, and Chroma."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "_qRjVe1tZhsx"
+ },
+ "source": [
+ "## Setup\n",
+ "\n",
+ "First, you must install the packages and set the necessary environment variables.\n",
+ "\n",
+ "### Installation\n",
+ "\n",
+ "Install LangChain's Python library, `langchain` and LangChain's integration package for Gemini, `langchain-google-genai`. Next, install Chroma's Python client SDK, `chromadb`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "id": "olK4Ejjzuj76"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m241.2/241.2 kB\u001b[0m \u001b[31m2.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m55.4/55.4 kB\u001b[0m \u001b[31m1.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m53.0/53.0 kB\u001b[0m \u001b[31m3.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m802.4/802.4 kB\u001b[0m \u001b[31m6.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m12.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.2/49.2 kB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m302.9/302.9 kB\u001b[0m \u001b[31m15.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m20.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m26.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m25.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.9/1.9 MB\u001b[0m \u001b[31m32.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.9/1.9 MB\u001b[0m \u001b[31m40.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.9/1.9 MB\u001b[0m \u001b[31m46.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.9/1.9 MB\u001b[0m \u001b[31m48.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.9/1.9 MB\u001b[0m \u001b[31m33.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m43.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m47.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m42.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m47.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m32.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m63.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m65.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m42.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m61.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m75.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m146.9/146.9 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m598.7/598.7 kB\u001b[0m \u001b[31m20.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m559.5/559.5 kB\u001b[0m \u001b[31m8.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m42.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m92.0/92.0 kB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.4/62.4 kB\u001b[0m \u001b[31m7.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.3/41.3 kB\u001b[0m \u001b[31m3.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.8/6.8 MB\u001b[0m \u001b[31m87.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m59.9/59.9 kB\u001b[0m \u001b[31m7.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m107.0/107.0 kB\u001b[0m \u001b[31m12.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.3/67.3 kB\u001b[0m \u001b[31m8.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
+ " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
+ " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m283.7/283.7 kB\u001b[0m \u001b[31m25.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m74.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.6/67.6 kB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m145.0/145.0 kB\u001b[0m \u001b[31m16.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.6/75.6 kB\u001b[0m \u001b[31m8.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m71.9/71.9 kB\u001b[0m \u001b[31m8.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m53.6/53.6 kB\u001b[0m \u001b[31m6.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.9/77.9 kB\u001b[0m \u001b[31m9.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m7.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.0/46.0 kB\u001b[0m \u001b[31m6.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m52.5/52.5 kB\u001b[0m \u001b[31m6.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m130.5/130.5 kB\u001b[0m \u001b[31m15.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m341.4/341.4 kB\u001b[0m \u001b[31m32.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m87.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.2/1.2 MB\u001b[0m \u001b[31m64.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m130.2/130.2 kB\u001b[0m \u001b[31m15.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m307.7/307.7 kB\u001b[0m \u001b[31m30.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m86.8/86.8 kB\u001b[0m \u001b[31m11.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h Building wheel for pypika (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"
+ ]
+ }
+ ],
+ "source": [
+ "!pip install --quiet langchain-core==0.1.23\n",
+ "!pip install --quiet langchain==0.1.1\n",
+ "!pip install --quiet langchain-google-genai==0.0.6\n",
+ "!pip install --quiet -U langchain-community==0.0.20\n",
+ "!pip install --quiet chromadb"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "id": "TcvGPVdXu05F"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain import PromptTemplate\n",
+ "from langchain import hub\n",
+ "from langchain.docstore.document import Document\n",
+ "from langchain.document_loaders import WebBaseLoader\n",
+ "from langchain.schema import StrOutputParser\n",
+ "from langchain.schema.prompt_template import format_document\n",
+ "from langchain.schema.runnable import RunnablePassthrough\n",
+ "from langchain.vectorstores import Chroma"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "FQOGMejVu-6D"
+ },
+ "source": [
+ "## Configure your API key\n",
+ "\n",
+ "To run the following cell, your API key must be stored in a Colab Secret named `GOOGLE_API_KEY`. If you don't already have an API key, or you're not sure how to create a Colab Secret, see [Authentication](https://github.com/google-gemini/cookbook/blob/main/quickstarts/Authentication.ipynb) for an example.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "id": "ysayz8skEfBW"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from google.colab import userdata\n",
+ "GOOGLE_API_KEY=userdata.get('GOOGLE_API_KEY')\n",
+ "\n",
+ "os.environ[\"GOOGLE_API_KEY\"] = GOOGLE_API_KEY"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "aEKMUyVmckWI"
+ },
+ "source": [
+ "## Basic steps\n",
+ "LLMs are trained offline on a large corpus of public data. Hence they cannot answer questions based on custom or private data accurately without additional context.\n",
+ "\n",
+ "If you want to make use of LLMs to answer questions based on private data, you have to provide the relevant documents as context alongside your prompt. This approach is called Retrieval Augmented Generation (RAG).\n",
+ "\n",
+ "You will use this approach to create a question-answering assistant using the Gemini text model integrated through LangChain. The assistant is expected to answer questions about the Gemini model. To make this possible you will add more context to the assistant using data from a website.\n",
+ "\n",
+ "In this tutorial, you'll implement the two main components in an RAG-based architecture:\n",
+ "\n",
+ "1. Retriever\n",
+ "\n",
+ " Based on the user's query, the retriever retrieves relevant snippets that add context from the document. In this tutorial, the document is the website data.\n",
+ " The relevant snippets are passed as context to the next stage - \"Generator\".\n",
+ "\n",
+ "2. Generator\n",
+ "\n",
+ " The relevant snippets from the website data are passed to the LLM along with the user's query to generate accurate answers.\n",
+ "\n",
+ "You'll learn more about these stages in the upcoming sections while implementing the application."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "4461Jihk_rWq"
+ },
+ "source": [
+ "## Retriever\n",
+ "\n",
+ "In this stage, you will perform the following steps:\n",
+ "\n",
+ "1. Read and parse the website data using LangChain.\n",
+ "\n",
+ "2. Create embeddings of the website data.\n",
+ "\n",
+ " Embeddings are numerical representations (vectors) of text. Hence, text with similar meaning will have similar embedding vectors. You'll make use of Gemini's embedding model to create the embedding vectors of the website data.\n",
+ "\n",
+ "3. Store the embeddings in Chroma's vector store.\n",
+ " \n",
+ " Chroma is a vector database. The Chroma vector store helps in the efficient retrieval of similar vectors. Thus, for adding context to the prompt for the LLM, relevant embeddings of the text matching the user's question can be retrieved easily using Chroma.\n",
+ "\n",
+ "4. Create a Retriever from the Chroma vector store.\n",
+ "\n",
+ " The retriever will be used to pass relevant website embeddings to the LLM along with user queries."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "WomGvIAVjZeI"
+ },
+ "source": [
+ "### Read and parse the website data\n",
+ "\n",
+ "LangChain provides a wide variety of document loaders. To read the website data as a document, you will use the `WebBaseLoader` from LangChain.\n",
+ "\n",
+ "To know more about how to read and parse input data from different sources using the document loaders of LangChain, read LangChain's [document loaders guide](https://python.langchain.com/docs/integrations/document_loaders)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "id": "DeNX9QFM0V-C"
+ },
+ "outputs": [],
+ "source": [
+ "loader = WebBaseLoader(\"https://blog.google/technology/ai/google-gemini-ai/\")\n",
+ "docs = loader.load()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "olIlIOYrJTlF"
+ },
+ "source": [
+ "If you only want to select a specific portion of the website data to add context to the prompt, you can use regex, text slicing, or text splitting.\n",
+ "\n",
+ "In this example, you'll use Python's `split()` function to extract the required portion of the text. The extracted text should be converted back to LangChain's `Document` format."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "id": "EDL9YLRb9Bw2"
+ },
+ "outputs": [],
+ "source": [
+ "# Extract the text from the website data document\n",
+ "text_content = docs[0].page_content\n",
+ "\n",
+ "# The text content between the substrings \"code, audio, image and video.\" to\n",
+ "# \"Cloud TPU v5p\" is relevant for this tutorial. You can use Python's `split()`\n",
+ "# to select the required content.\n",
+ "text_content_1 = text_content.split(\"code, audio, image and video.\",1)[1]\n",
+ "final_text = text_content_1.split(\"Cloud TPU v5p\",1)[0]\n",
+ "\n",
+ "# Convert the text to LangChain's `Document` format\n",
+ "docs = [Document(page_content=final_text, metadata={\"source\": \"local\"})]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "yDsdAg4Fjo5o"
+ },
+ "source": [
+ "### Initialize Gemini's embedding model\n",
+ "\n",
+ "To create the embeddings from the website data, you'll use Gemini's embedding model, **embedding-001** which supports creating text embeddings.\n",
+ "\n",
+ "To use this embedding model, you have to import `GoogleGenerativeAIEmbeddings` from LangChain. To know more about the embedding model, read Google AI's [language documentation](https://ai.google.dev/models/gemini)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "id": "8NXNTrjp0jdh"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_google_genai import GoogleGenerativeAIEmbeddings\n",
+ "\n",
+ "gemini_embeddings = GoogleGenerativeAIEmbeddings(model=\"models/embedding-001\")"
+ ]
+ },
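+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you want to see what these embedding vectors look like, you can optionally embed a short piece of sample text. The cell below is a minimal sketch; the sample string is arbitrary, and `embedding-001` returns 768-dimensional vectors."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional: embed a sample string to inspect the resulting vector.\n",
+    "# The text here is arbitrary; `embedding-001` produces 768-dimensional vectors.\n",
+    "sample_vector = gemini_embeddings.embed_query(\"Gemini is a multimodal model.\")\n",
+    "print(len(sample_vector))  # 768\n",
+    "print(sample_vector[:5])   # First few values of the embedding"
+   ]
+  },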
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "m9Vzw30wpebs"
+ },
+ "source": [
+ "### Store the data using Chroma\n",
+ "\n",
+ "To create a Chroma vector database from the website data, you will use the `from_documents` function of `Chroma`. Under the hood, this function creates embeddings from the documents created by the document loader of LangChain using any specified embedding model and stores them in a Chroma vector database. \n",
+ "\n",
+ "You have to specify the `docs` you created from the website data using LangChain's `WebBasedLoader` and the `gemini_embeddings` as the embedding model when invoking the `from_documents` function to create the vector database from the website data. You can also specify a directory in the `persist_directory` argument to store the vector store on the disk. If you don't specify a directory, the data will be ephemeral in-memory.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "id": "n1VwhUQMvpcN"
+ },
+ "outputs": [],
+ "source": [
+ "# Save to disk\n",
+ "vectorstore = Chroma.from_documents(\n",
+ " documents=docs, # Data\n",
+ " embedding=gemini_embeddings, # Embedding model\n",
+ " persist_directory=\"./chroma_db\" # Directory to save data\n",
+ " )"
+ ]
+ },
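+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a point of comparison, the sketch below shows the in-memory variant mentioned above: calling `from_documents` without `persist_directory` keeps the vector store only for the current session. The variable name used here is arbitrary."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional: create an in-memory vector store by omitting `persist_directory`.\n",
+    "# This store is not saved to disk and is lost when the session ends.\n",
+    "vectorstore_in_memory = Chroma.from_documents(\n",
+    "    documents=docs,\n",
+    "    embedding=gemini_embeddings\n",
+    ")"
+   ]
+  },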
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "WFKyb3JXOeaQ"
+ },
+ "source": [
+ "### Create a retriever using Chroma\n",
+ "\n",
+ "You'll now create a retriever that can retrieve website data embeddings from the newly created Chroma vector store. This retriever can be later used to pass embeddings that provide more context to the LLM for answering user's queries.\n",
+ "\n",
+ "\n",
+ "To load the vector store that you previously stored in the disk, you can specify the name of the directory that contains the vector store in `persist_directory` and the embedding model in the `embedding_function` arguments of Chroma's initializer.\n",
+ "\n",
+ "You can then invoke the `as_retriever` function of `Chroma` on the vector store to create a retriever."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "id": "s3t4kmzIOZQq"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "1\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Load from disk\n",
+ "vectorstore_disk = Chroma(\n",
+ " persist_directory=\"./chroma_db\", # Directory of db\n",
+ " embedding_function=gemini_embeddings # Embedding model\n",
+ " )\n",
+ "# Get the Retriever interface for the store to use later.\n",
+ "# When an unstructured query is given to a retriever it will return documents.\n",
+ "# Read more about retrievers in the following link.\n",
+ "# https://python.langchain.com/docs/modules/data_connection/retrievers/\n",
+ "#\n",
+ "# Since only 1 document is stored in the Chroma vector store, search_kwargs `k`\n",
+ "# is set to 1 to decrease the `k` value of chroma's similarity search from 4 to\n",
+ "# 1. If you don't pass this value, you will get a warning.\n",
+ "retriever = vectorstore_disk.as_retriever(search_kwargs={\"k\": 1})\n",
+ "\n",
+ "# Check if the retriever is working by trying to fetch the relevant docs related\n",
+ "# to the word 'MMLU' (Massive Multitask Language Understanding). If the length is greater than zero, it means that\n",
+ "# the retriever is functioning well.\n",
+ "print(len(retriever.get_relevant_documents(\"MMLU\")))"
+ ]
+ },
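+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Optionally, you can peek at the snippet the retriever returns for the same query to see the context that will later be passed to the LLM. The query string and the 300-character preview below are arbitrary choices."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Fetch the relevant documents and preview the first one.\n",
+    "retrieved_docs = retriever.get_relevant_documents(\"MMLU\")\n",
+    "if retrieved_docs:\n",
+    "    print(retrieved_docs[0].page_content[:300])"
+   ]
+  },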
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "LZwcZyRxSO0q"
+ },
+ "source": [
+ "## Generator\n",
+ "\n",
+ "The Generator prompts the LLM for an answer when the user asks a question. The retriever you created in the previous stage from the Chroma vector store will be used to pass relevant embeddings from the website data to the LLM to provide more context to the user's query.\n",
+ "\n",
+ "You'll perform the following steps in this stage:\n",
+ "\n",
+ "1. Chain together the following:\n",
+ " * A prompt for extracting the relevant embeddings using the retriever.\n",
+ " * A prompt for answering any question using LangChain.\n",
+ " * An LLM model from Gemini for prompting.\n",
+ " \n",
+ "2. Run the created chain with a question as input to prompt the model for an answer.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "FtUi5FBIJMDy"
+ },
+ "source": [
+ "### Initialize Gemini\n",
+ "\n",
+ "You must import `ChatGoogleGenerativeAI` from LangChain to initialize your model.\n",
+ " In this example, you will use **gemini-1.5-flash-latest**, as it supports text summarization. To know more about the text model, read Google AI's [language documentation](https://ai.google.dev/models/gemini).\n",
+ "\n",
+ "You can configure the model parameters such as ***temperature*** or ***top_p***, by passing the appropriate values when initializing the `ChatGoogleGenerativeAI` LLM. To learn more about the parameters and their uses, read Google AI's [concepts guide](https://ai.google.dev/docs/concepts#model_parameters)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "id": "CaA1vRCh7s36"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_google_genai import ChatGoogleGenerativeAI\n",
+ "\n",
+ "# To configure model parameters use the `generation_config` parameter.\n",
+ "# eg. generation_config = {\"temperature\": 0.7, \"topP\": 0.8, \"topK\": 40}\n",
+ "# If you only want to set a custom temperature for the model use the\n",
+ "# \"temperature\" parameter directly.\n",
+ "\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-1.5-flash-latest\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "jC4QDhiPpDJa"
+ },
+ "source": [
+ "### Create prompt templates\n",
+ "\n",
+ "You'll use LangChain's [PromptTemplate](https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/) to generate prompts to the LLM for answering questions.\n",
+ "\n",
+ "In the `llm_prompt`, the variable `question` will be replaced later by the input question, and the variable `context` will be replaced by the relevant text from the website retrieved from the Chroma vector store."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "id": "90Czqh074dEC"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "input_variables=['context', 'question'] template=\"You are an assistant for question-answering tasks.\\nUse the following context to answer the question.\\nIf you don't know the answer, just say that you don't know.\\nUse five sentences maximum and keep the answer concise.\\n\\nQuestion: {question} \\nContext: {context} \\nAnswer:\"\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Prompt template to query Gemini\n",
+ "llm_prompt_template = \"\"\"You are an assistant for question-answering tasks.\n",
+ "Use the following context to answer the question.\n",
+ "If you don't know the answer, just say that you don't know.\n",
+ "Use five sentences maximum and keep the answer concise.\\n\n",
+ "Question: {question} \\nContext: {context} \\nAnswer:\"\"\"\n",
+ "\n",
+ "llm_prompt = PromptTemplate.from_template(llm_prompt_template)\n",
+ "\n",
+ "print(llm_prompt)"
+ ]
+ },
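+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To see how the template fills in its variables, you can optionally format it with placeholder values. The question and context strings below are made-up examples; in the chain, they will be supplied by the user's query and the retriever."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional: preview the rendered prompt with placeholder values.\n",
+    "print(llm_prompt.format(\n",
+    "    question=\"What is Gemini?\",\n",
+    "    context=\"Gemini is Google's multimodal AI model.\"\n",
+    "))"
+   ]
+  },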
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "KXDh2jsdp4sr"
+ },
+ "source": [
+ "### Create a stuff documents chain\n",
+ "\n",
+ "LangChain provides [Chains](https://python.langchain.com/docs/modules/chains/) for chaining together LLMs with each other or other components for complex applications. You will create a **stuff documents chain** for this application. A stuff documents chain lets you combine all the relevant documents, insert them into the prompt, and pass that prompt to the LLM.\n",
+ "\n",
+ "You can create a stuff documents chain using the [LangChain Expression Language (LCEL)](https://python.langchain.com/docs/expression_language).\n",
+ "\n",
+ "To learn more about different types of document chains, read LangChain's [chains guide](https://python.langchain.com/docs/modules/chains/document/).\n",
+ "\n",
+ "The stuff documents chain for this application retrieves the relevant website data and passes it as the context to an LLM prompt along with the input question."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "id": "gj5sWzpwp7vc"
+ },
+ "outputs": [],
+ "source": [
+ "# Combine data from documents to readable string format.\n",
+ "def format_docs(docs):\n",
+ " return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
+ "\n",
+ "# Create stuff documents chain using LCEL.\n",
+ "#\n",
+ "# This is called a chain because you are chaining together different elements\n",
+ "# with the LLM. In the following example, to create the stuff chain, you will\n",
+ "# combine the relevant context from the website data matching the question, the\n",
+ "# LLM model, and the output parser together like a chain using LCEL.\n",
+ "#\n",
+ "# The chain implements the following pipeline:\n",
+ "# 1. Extract the website data relevant to the question from the Chroma\n",
+ "# vector store and save it to the variable `context`.\n",
+ "# 2. `RunnablePassthrough` option to provide `question` when invoking\n",
+ "# the chain.\n",
+ "# 3. The `context` and `question` are then passed to the prompt where they\n",
+ "# are populated in the respective variables.\n",
+ "# 4. This prompt is then passed to the LLM (`gemini-pro`).\n",
+ "# 5. Output from the LLM is passed through an output parser\n",
+ "# to structure the model's response.\n",
+ "rag_chain = (\n",
+ " {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
+ " | llm_prompt\n",
+ " | llm\n",
+ " | StrOutputParser()\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "cPPqsGCLIrs1"
+ },
+ "source": [
+ "### Prompt the model\n",
+ "\n",
+ "You can now query the LLM by passing any question to the `invoke()` function of the stuff documents chain you created previously."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "id": "4vIaopCsIq0B"
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.google.colaboratory.intrinsic+json": {
+ "type": "string"
+ },
+ "text/plain": [
+ "\"Gemini is Google's largest and most capable AI model, designed to be highly flexible and efficient across various devices, from data centers to mobile devices. It's optimized in three sizes: Ultra, Pro, and Nano, catering to different complexity levels and task requirements. Gemini surpasses state-of-the-art performance on multiple benchmarks, including text, code, and multimodal tasks, showcasing its advanced reasoning abilities. This model is trained to understand and process information across various modalities, including text, images, audio, and more, making it ideal for complex tasks like coding and scientific research. \\n\""
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "rag_chain.invoke(\"What is Gemini?\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "lV7T9rqDdjZK"
+ },
+ "source": [
+ "# Conclusion\n",
+ "\n",
+ "That's it. You have successfully created an LLM application that answers questions using data from a website with the help of Gemini, LangChain, and Chroma."
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "name": "Gemini_LangChain_QA_Chroma_WebLoad.ipynb",
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/examples/langchain/Gemini_LangChain_QA_Pinecone_WebLoad.ipynb b/examples/langchain/Gemini_LangChain_QA_Pinecone_WebLoad.ipynb
new file mode 100644
index 000000000..8758b488d
--- /dev/null
+++ b/examples/langchain/Gemini_LangChain_QA_Pinecone_WebLoad.ipynb
@@ -0,0 +1,619 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "YdsMOBaBfyT0"
+ },
+ "source": [
+ "##### Copyright 2024 Google LLC."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "cellView": "form",
+ "id": "rIIf_RgOf3sr"
+ },
+ "outputs": [],
+ "source": [
+ "# @title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "TySweisNf_Am"
+ },
+ "source": [
+ "# Gemini API: Question Answering using LangChain and Pinecone"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "awKO767lQIWh"
+ },
+ "source": [
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "bA5Hys5PU_nt"
+ },
+ "source": [
+ "## Overview\n",
+ "\n",
+ "[Gemini](https://ai.google.dev/models/gemini) is a family of generative AI models that lets developers generate content and solve problems. These models are designed and trained to handle both text and images as input.\n",
+ "\n",
+ "[LangChain](https://www.langchain.com/) is a data framework designed to make integration of Large Language Models (LLM) like Gemini easier for applications.\n",
+ "\n",
+ "[Pinecone](https://www.pinecone.io/) is a cloud-first vector database that allows users to search across billions of embeddings with ultra-low query latency.\n",
+ "\n",
+ "In this notebook, you'll learn how to create an application that answers questions using data from a website with the help of Gemini, LangChain, and Pinecone."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "_qRjVe1tZhsx"
+ },
+ "source": [
+ "## Setup\n",
+ "\n",
+ "First, you must install the packages and set the necessary environment variables.\n",
+ "\n",
+ "### Installation\n",
+ "\n",
+ "Install LangChain's Python library, `langchain` and LangChain's integration package for Gemini, `langchain-google-genai`. Next, install LangChain's integration package for the new version of Pinecone, `langchain-pinecone` and the `pinecone-client`, which is Pinecone's Python SDK. Finally, install `langchain-community` to access the `WebBaseLoader` module later."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "olK4Ejjzuj76"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install --quiet langchain==0.1.1\n",
+ "!pip install --quiet langchain-google-genai==0.0.6\n",
+ "!pip install --quiet langchain-pinecone\n",
+ "!pip install --quiet pinecone-client\n",
+ "!pip install --quiet langchain-community==0.0.20"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "FQOGMejVu-6D"
+ },
+ "source": [
+ "## Configure your API key\n",
+ "\n",
+ "To run the following cell, your API key must be stored in a Colab Secret named `GOOGLE_API_KEY`. If you don't already have an API key, or you're not sure how to create a Colab Secret, see [Authentication](https://github.com/google-gemini/cookbook/blob/main/quickstarts/Authentication.ipynb) for an example.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "id": "ysayz8skEfBW"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from google.colab import userdata\n",
+ "GOOGLE_API_KEY=userdata.get('GOOGLE_API_KEY')\n",
+ "\n",
+ "os.environ[\"GOOGLE_API_KEY\"] = GOOGLE_API_KEY"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "MPQLjFvRooqn"
+ },
+ "source": [
+ "### Setup Pinecone\n",
+ "\n",
+ "To use Pinecone in your application, you must have an API key. To create an API key you have to set up a Pinecone account. Visit [Pinecone's app page](https://app.pinecone.io/), and Sign up/Log in to your account. Then navigate to the \"API Keys\" section and copy your API key.\n",
+ "\n",
+ "For more detailed instructions on getting the API key, you can read Pinecone's [Quickstart documentation](https://docs.pinecone.io/docs/quickstart#2-get-your-api-key).\n",
+ "\n",
+ "Set the environment variable `PINECONE_API_KEY` to configure Pinecone to use your API key.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "id": "A7jTZLEApgtm"
+ },
+ "outputs": [],
+ "source": [
+ "PINECONE_API_KEY=userdata.get('PINECONE_API_KEY')\n",
+ "\n",
+ "os.environ['PINECONE_API_KEY'] = PINECONE_API_KEY"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "YGOKV3XflBCe"
+ },
+ "source": [
+ "## Basic steps\n",
+ "LLMs are trained offline on a large corpus of public data. Hence they cannot answer questions based on custom or private data accurately without additional context.\n",
+ "\n",
+ "If you want to make use of LLMs to answer questions based on private data, you have to provide the relevant documents as context alongside your prompt. This approach is called Retrieval Augmented Generation (RAG).\n",
+ "\n",
+ "You will use this approach to create a question-answering assistant using the Gemini text model integrated through LangChain. The assistant is expected to answer questions about Gemini model. To make this possible you will add more context to the assistant using data from a website.\n",
+ "\n",
+ "In this tutorial, you'll implement the two main components in an RAG-based architecture:\n",
+ "\n",
+ "1. Retriever\n",
+ "\n",
+ " Based on the user's query, the retriever retrieves relevant snippets that add context from the document. In this tutorial, the document is the website data.\n",
+ " The relevant snippets are passed as context to the next stage - \"Generator\".\n",
+ "\n",
+ "2. Generator\n",
+ "\n",
+ " The relevant snippets from the website data are passed to the LLM along with the user's query to generate accurate answers.\n",
+ "\n",
+ "You'll learn more about these stages in the upcoming sections while implementing the application."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "kPhs4mDkjdgY"
+ },
+ "source": [
+ "## Import the required libraries"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "id": "TcvGPVdXu05F"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain import hub\n",
+ "from langchain import PromptTemplate\n",
+ "from langchain.docstore.document import Document\n",
+ "from langchain.document_loaders import WebBaseLoader\n",
+ "from langchain.schema import StrOutputParser\n",
+ "from langchain.schema.prompt_template import format_document\n",
+ "from langchain.schema.runnable import RunnablePassthrough\n",
+ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+ "from langchain_pinecone import Pinecone\n",
+ "\n",
+ "from pinecone import Pinecone as pc\n",
+ "from pinecone import PodSpec"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "qZ3tM0T2lbVm"
+ },
+ "source": [
+ "## Retriever\n",
+ "\n",
+ "In this stage, you will perform the following steps:\n",
+ "\n",
+ "1. Read and parse the website data using LangChain.\n",
+ "\n",
+ "2. Create embeddings of the website data.\n",
+ "\n",
+ " Embeddings are numerical representations (vectors) of text. Hence, text with similar meaning will have similar embedding vectors. You'll make use of Gemini's embedding model to create the embedding vectors of the website data.\n",
+ "\n",
+ "3. Store the embeddings in Pinecone's vector store.\n",
+ " \n",
+ " Pinecone is a vector database. The Pinecone vector store helps in the efficient retrieval of similar vectors. Thus, for adding context to the prompt for the LLM, relevant embeddings of the text matching the user's question can be retrieved easily using Pinecone.\n",
+ "\n",
+ "4. Create a Retriever from the Pinecone vector store.\n",
+ "\n",
+ " The retriever will be used to pass relevant website embeddings to the LLM along with user queries."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "W2N-NCPElqN3"
+ },
+ "source": [
+ "### Read and parse the website data\n",
+ "\n",
+ "LangChain provides a wide variety of document loaders. To read the website data as a document, you will use the `WebBaseLoader` from LangChain.\n",
+ "\n",
+ "To know more about how to read and parse input data from different sources using the document loaders of LangChain, read LangChain's [document loaders guide](https://python.langchain.com/docs/integrations/document_loaders)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "id": "DeNX9QFM0V-C"
+ },
+ "outputs": [],
+ "source": [
+ "loader = WebBaseLoader(\"https://blog.google/technology/ai/google-gemini-ai/\")\n",
+ "docs = loader.load()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "y2N6RoTDlwsM"
+ },
+ "source": [
+ "If you only want to select a specific portion of the website data to add context to the prompt, you can use regex, text slicing, or text splitting.\n",
+ "\n",
+ "In this example, you'll use Python's `split()` function to extract the required portion of the text. The extracted text should be converted back to LangChain's `Document` format."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "id": "qOwDregSBVVG"
+ },
+ "outputs": [],
+ "source": [
+ "# Extract the text from the website data document\n",
+ "text_content = docs[0].page_content\n",
+ "# The text content between the substrings \"code, audio, image and video.\" to\n",
+ "# \"Cloud TPU v5p\" is relevant for this tutorial. You can use Python's `split()`\n",
+ "# to select the required content.\n",
+ "text_content_1 = text_content.split(\"code, audio, image and video.\",1)[1]\n",
+ "final_text = text_content_1.split(\"Cloud TPU v5p\",1)[0]\n",
+ "\n",
+ "# Convert the text to LangChain's `Document` format\n",
+ "docs = [Document(page_content=final_text, metadata={\"source\": \"local\"})]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "sgGVAFqWl20v"
+ },
+ "source": [
+ "### Initialize Gemini's embedding model\n",
+ "\n",
+ "To create the embeddings from the website data, you'll use Gemini's embedding model, **embedding-001** which supports creating text embeddings.\n",
+ "\n",
+ "To use this embedding model, you have to import `GoogleGenerativeAIEmbeddings` from LangChain. To know more about the embedding model, read Google AI's [language documentation](https://ai.google.dev/models/gemini)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "id": "8NXNTrjp0jdh"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_google_genai import GoogleGenerativeAIEmbeddings\n",
+ "\n",
+ "gemini_embeddings = GoogleGenerativeAIEmbeddings(model=\"models/embedding-001\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Zr5xeWUXmnUe"
+ },
+ "source": [
+ "### Store the data using Pinecone\n",
+ "\n",
+ "\n",
+ "To create a Pinecone vector database, first, you have to initialize your Pinecone client connection using the API key you set previously.\n",
+ "\n",
+ "In Pinecone, vector embeddings have to be stored in indexes. An index represents the vector data's top-level organizational unit. The vectors in any index must have the same dimensionality and distance metric for calculating similarity. You can read more about indexes in [Pinecone's Indexes documentation](https://docs.pinecone.io/docs/indexes).\n",
+ "\n",
+ "First, you'll create an index using Pinecone's `create_index` function. Pinecone allows you to create two types of indexes, Serverless indexes and Pod-based indexes. Pinecone's free starter plan lets you create only one project and one pod-based starter index with sufficient resources to support 100,000 vectors. For this tutorial, you have to create a pod-based starter index. To know more about different indexes and how they can be created, read Pinecone's [create indexes guide](https://docs.pinecone.io/docs/new-api#creating-indexes).\n",
+ "\n",
+ "\n",
+ "Next, you'll insert the documents you extracted earlier from the website data into the newly created index using LangChain's `Pinecone.from_documents`. Under the hood, this function creates embeddings from the documents created by the document loader of LangChain using any specified embedding model and inserts them into the specified index in a Pinecone vector database. \n",
+ "\n",
+ "You have to specify the `docs` you created from the website data using LangChain's `WebBasedLoader` and the `gemini_embeddings` as the embedding model when invoking the `from_documents` function to create the vector database from the website data."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "id": "n1VwhUQMvpcN"
+ },
+ "outputs": [],
+ "source": [
+ "# Initialize Pinecone client\n",
+ "\n",
+ "pine_client= pc(\n",
+ " api_key = os.getenv(\"PINECONE_API_KEY\"), # API key from app.pinecone.io\n",
+ " )\n",
+ "index_name = \"langchain-demo\"\n",
+ "\n",
+ "# First, check if the index already exists. If it doesn't, create a new one.\n",
+ "if index_name not in pine_client.list_indexes().names():\n",
+ " # Create a new index.\n",
+ " # https://docs.pinecone.io/docs/new-api#creating-a-starter-index\n",
+ " print(\"Creating index\")\n",
+ " pine_client.create_index(name=index_name,\n",
+ " # `cosine` distance metric compares different documents\n",
+ " # for similarity.\n",
+ " # Read more about different distance metrics from\n",
+ " # https://docs.pinecone.io/docs/indexes#distance-metrics.\n",
+ " metric=\"cosine\",\n",
+ " # The Gemini embedding model `embedding-001` uses\n",
+ " # 768 dimensions.\n",
+ " dimension=768,\n",
+ " # Specify the pod details.\n",
+ " spec=PodSpec(\n",
+ " # Starter indexes are hosted in the `gcp-starter`\n",
+ " # environment.\n",
+ " environment=\"gcp-starter\",\n",
+ " pod_type=\"starter\",\n",
+ " pods=1)\n",
+ " )\n",
+ " print(pine_client.describe_index(index_name))\n",
+ "\n",
+ "vectorstore = Pinecone.from_documents(docs,\n",
+ " gemini_embeddings, index_name=index_name)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "BuSjapvHnc6T"
+ },
+ "source": [
+ "### Create a retriever using Pinecone\n",
+ "\n",
+ "You'll now create a retriever that can retrieve website data embeddings from the newly created Pinecone vector store. This retriever can be later used to pass embeddings that provide more context to the LLM for answering user's queries.\n",
+ "\n",
+ "Invoke the `as_retriever` function of the vector store you initialized in the last step, to create a retriever."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "id": "qndTwf0tnQDv"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "3\n"
+ ]
+ }
+ ],
+ "source": [
+ "retriever = vectorstore.as_retriever()\n",
+ "# Check if the retriever is working by trying to fetch the relevant docs related\n",
+ "# to the word 'MMLU'(Massive Multitask Language Understanding). If the length is\n",
+ "# greater than zero, it means that the retriever is functioning well.\n",
+ "print(len(retriever.invoke(\"MMLU\")))"
+ ]
+ },
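+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you want to control how many snippets the retriever returns, you can optionally pass `search_kwargs` to `as_retriever`. The sketch below uses `k=2` as an arbitrary example; by default the retriever returns up to 4 results."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional: limit the retriever to the top 2 most similar results.\n",
+    "retriever_top_2 = vectorstore.as_retriever(search_kwargs={\"k\": 2})\n",
+    "print(len(retriever_top_2.invoke(\"MMLU\")))"
+   ]
+  },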
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "7Qw00lvPnjfR"
+ },
+ "source": [
+ "## Generator\n",
+ "\n",
+ "The Generator prompts the LLM for an answer when the user asks a question. The retriever you created in the previous stage from the Pinecone vector store will be used to pass relevant embeddings from the website data to the LLM to provide more context to the user's query.\n",
+ "\n",
+ "You'll perform the following steps in this stage:\n",
+ "\n",
+ "1. Chain together the following:\n",
+ " * A prompt for extracting the relevant embeddings using the retriever.\n",
+ " * A prompt for answering any question using LangChain.\n",
+ " * An LLM model from Gemini for prompting.\n",
+ " \n",
+ "2. Run the created chain with a question as input to prompt the model for an answer.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "c2MK2wLwnkLg"
+ },
+ "source": [
+ "### Initialize Gemini\n",
+ "\n",
+ "You must import `ChatGoogleGenerativeAI` from LangChain to initialize your model.\n",
+ " In this example, you will use **gemini-1.5-flash-latest**, as it supports text summarization. To know more about the text model, read Google AI's [language documentation](https://ai.google.dev/models/gemini).\n",
+ "\n",
+ "You can configure the model parameters such as ***temperature*** or ***top_p***, by passing the appropriate values when initializing the `ChatGoogleGenerativeAI` LLM. To learn more about the parameters and their uses, read Google AI's [concepts guide](https://ai.google.dev/docs/concepts#model_parameters)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "id": "CaA1vRCh7s36"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_google_genai import ChatGoogleGenerativeAI\n",
+ "\n",
+ "# To configure model parameters use the `generation_config` parameter.\n",
+ "# eg. generation_config = {\"temperature\": 0.7, \"topP\": 0.8, \"topK\": 40}\n",
+ "# If you only want to set a custom temperature for the model use the\n",
+ "# \"temperature\" parameter directly.\n",
+ "\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-1.5-flash-latest\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "2BeLN6RXnuS2"
+ },
+ "source": [
+ "### Create prompt templates\n",
+ "\n",
+ "You'll use LangChain's [PromptTemplate](https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/) to generate prompts to the LLM for answering questions.\n",
+ "\n",
+ "In the `llm_prompt`, the variable `question` will be replaced later by the input question, and the variable `context` will be replaced by the relevant text from the website retrieved from the Pinecone vector store."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "id": "90Czqh074dEC"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "input_variables=['context', 'question'] template=\"You are an assistant for question-answering tasks.\\nUse the following context to answer the question.\\nIf you don't know the answer, just say that you don't know.\\nUse five sentences maximum and keep the answer concise.\\n\\nQuestion: {question}\\nContext: {context}\\nAnswer:\"\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Prompt template to query Gemini\n",
+ "llm_prompt_template = \"\"\"You are an assistant for question-answering tasks.\n",
+ "Use the following context to answer the question.\n",
+ "If you don't know the answer, just say that you don't know.\n",
+ "Use five sentences maximum and keep the answer concise.\n",
+ "\n",
+ "Question: {question}\n",
+ "Context: {context}\n",
+ "Answer:\"\"\"\n",
+ "\n",
+ "llm_prompt = PromptTemplate.from_template(llm_prompt_template)\n",
+ "\n",
+ "print(llm_prompt)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "TkWpzMmpnx7b"
+ },
+ "source": [
+ "### Create a stuff documents chain\n",
+ "\n",
+ "LangChain provides [Chains](https://python.langchain.com/docs/modules/chains/) for chaining together LLMs with each other or other components for complex applications. You will create a **stuff documents chain** for this application. A stuff documents chain lets you combine all the relevant documents, insert them into the prompt, and pass that prompt to the LLM.\n",
+ "\n",
+ "You can create a stuff documents chain using the [LangChain Expression Language (LCEL)](https://python.langchain.com/docs/expression_language).\n",
+ "\n",
+ "To learn more about different types of document chains, read LangChain's [chains guide](https://python.langchain.com/docs/modules/chains/document/).\n",
+ "\n",
+ "The stuff documents chain for this application retrieves the relevant website data and passes it as the context to an LLM prompt along with the input question."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "id": "gj5sWzpwp7vc"
+ },
+ "outputs": [],
+ "source": [
+ "# Combine data from documents to readable string format.\n",
+ "def format_docs(docs):\n",
+ " return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
+ "\n",
+ "# Create stuff documents chain using LCEL.\n",
+ "# This is called a chain because you are chaining\n",
+ "# together different elements with the LLM.\n",
+ "# In the following example, to create a stuff chain,\n",
+ "# you will combine content, prompt, LLM model, and\n",
+ "# output parser together like a chain using LCEL.\n",
+ "#\n",
+ "# The chain implements the following pipeline:\n",
+ "# 1. Extract data from documents and save to the variable `context`.\n",
+ "# 2. Use the `RunnablePassthrough` option to provide question during invoke.\n",
+ "# 3. The `context` and `question` are then passed to the prompt and\n",
+ "# input variables in the prompt are populated.\n",
+ "# 4. The prompt is then passed to the LLM (`gemini-pro`).\n",
+ "# 5. Output from the LLM is passed through an output parser\n",
+ "# to structure the model response.\n",
+ "rag_chain = (\n",
+ " {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
+ " | llm_prompt\n",
+ " | llm\n",
+ " | StrOutputParser()\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "gmHx_F7DoMgM"
+ },
+ "source": [
+ "### Prompt the model\n",
+ "\n",
+ "You can now query the LLM by passing any question to the `invoke()` function of the stuff documents chain you created previously."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {
+ "id": "95W-sbTjoGGj"
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.google.colaboratory.intrinsic+json": {
+ "type": "string"
+ },
+ "text/plain": [
+ "\"Gemini is Google's latest and most capable AI model, designed to be flexible and efficient across various platforms. It excels in various tasks like image, audio, and video understanding, mathematical reasoning, and coding. Gemini comes in three sizes: Ultra, Pro, and Nano, catering to different complexities and computational needs. Its multimodal capabilities and advanced reasoning abilities are considered state-of-the-art in many domains. \\n\""
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "rag_chain.invoke(\"What is Gemini?\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "name": "Gemini_LangChain_QA_Pinecone_WebLoad.ipynb",
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/examples/langchain/Gemini_LangChain_Summarization_WebLoad.ipynb b/examples/langchain/Gemini_LangChain_Summarization_WebLoad.ipynb
new file mode 100644
index 000000000..f0b7b6b78
--- /dev/null
+++ b/examples/langchain/Gemini_LangChain_Summarization_WebLoad.ipynb
@@ -0,0 +1,409 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Tce3stUlHN0L"
+ },
+ "source": [
+ "##### Copyright 2024 Google LLC."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "cellView": "form",
+ "id": "tuOe1ymfHZPu"
+ },
+ "outputs": [],
+ "source": [
+ "# @title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "f22a409c18ef"
+ },
+ "source": [
+ "# Gemini API: Summarize large documents using LangChain"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "awKO767lQIWh"
+ },
+ "source": [
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "f892e8b2c8ef"
+ },
+ "source": [
+ "## Overview\n",
+ "\n",
+ "The [Gemini](https://ai.google.dev/models/gemini) models are a family of generative AI models that allow developers generate content and solve problems. These models are designed and trained to handle both text and images as input.\n",
+ "\n",
+ "[LangChain](https://www.langchain.com/) is a framework designed to make integration of Large Language Models (LLM) like Gemini easier for applications.\n",
+ "\n",
+ "In this notebook, you'll learn how to create an application to summarize large documents using the Gemini API and LangChain.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "iHj4T7hsx1EB"
+ },
+ "source": [
+ "## Setup\n",
+ "\n",
+ "First, you must install the packages and set the necessary environment variables.\n",
+ "\n",
+ "### Installation\n",
+ "\n",
+ "Install LangChain's Python library, `langchain` and LangChain's integration package for the Gemini API, `langchain-google-genai`. Installing `langchain-community` allows you to use the `WebBaseLoader` tool shown later in this example."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "id": "-49oubeWCHfO"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m241.2/241.2 kB\u001b[0m \u001b[31m2.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m55.4/55.4 kB\u001b[0m \u001b[31m2.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m53.0/53.0 kB\u001b[0m \u001b[31m3.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m802.4/802.4 kB\u001b[0m \u001b[31m8.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m42.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.2/49.2 kB\u001b[0m \u001b[31m3.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m302.9/302.9 kB\u001b[0m \u001b[31m19.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m48.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m53.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m67.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.9/1.9 MB\u001b[0m \u001b[31m39.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.9/1.9 MB\u001b[0m \u001b[31m51.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.9/1.9 MB\u001b[0m \u001b[31m37.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.9/1.9 MB\u001b[0m \u001b[31m40.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.9/1.9 MB\u001b[0m \u001b[31m45.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m56.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m56.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m60.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m78.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m80.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m76.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m80.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m69.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m76.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m83.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m146.9/146.9 kB\u001b[0m \u001b[31m2.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m598.7/598.7 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h"
+ ]
+ }
+ ],
+ "source": [
+ "!pip install --quiet langchain-core==0.1.23\n",
+ "!pip install --quiet langchain==0.1.1\n",
+ "!pip install --quiet langchain-google-genai==0.0.6\n",
+ "!pip install --quiet -U langchain-community==0.0.20"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "id": "rAv0UicpKARZ"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain import PromptTemplate\n",
+ "from langchain.document_loaders import WebBaseLoader\n",
+ "from langchain.schema import StrOutputParser\n",
+ "from langchain.schema.prompt_template import format_document"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "FQOGMejVu-6D"
+ },
+ "source": [
+ "## Configure your API key\n",
+ "\n",
+ "To run the following cell, your API key must be stored in a Colab Secret named `GOOGLE_API_KEY`. If you don't already have an API key, or you're not sure how to create a Colab Secret, see [Authentication](https://github.com/google-gemini/cookbook/blob/main/quickstarts/Authentication.ipynb) for an example.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "id": "ysayz8skEfBW"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from google.colab import userdata\n",
+ "GOOGLE_API_KEY=userdata.get('GOOGLE_API_KEY')\n",
+ "\n",
+ "os.environ[\"GOOGLE_API_KEY\"] = GOOGLE_API_KEY"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "i7wgsoiz418u"
+ },
+ "source": [
+ "## Summarize text\n",
+ "\n",
+ "In this tutorial, you are going to summarize the text from a website using the Gemini model integrated through LangChain.\n",
+ "\n",
+ "You'll perform the following steps to achieve the same:\n",
+ "1. Read and parse the website data using LangChain.\n",
+ "2. Chain together the following:\n",
+ " * A prompt for extracting the required input data from the parsed website data.\n",
+ " * A prompt for summarizing the text using LangChain.\n",
+ " * An LLM model (such as the Gemini model) for prompting.\n",
+ "\n",
+ "3. Run the created chain to prompt the model for the summary of the website data."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "4tKpRvmMRX23"
+ },
+ "source": [
+ "### Read and parse the website data\n",
+ "\n",
+ "LangChain provides a wide variety of document loaders. To read the website data as a document, you will use the `WebBaseLoader` from LangChain.\n",
+ "\n",
+ "To know more about how to read and parse input data from different sources using the document loaders of LangChain, read LangChain's [document loaders guide](https://python.langchain.com/docs/integrations/document_loaders)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "id": "TTgmyxXzKCSq"
+ },
+ "outputs": [],
+ "source": [
+ "loader = WebBaseLoader(\"https://blog.google/technology/ai/google-gemini-ai/#sundar-note\")\n",
+ "docs = loader.load()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "4xlf_F_4B6lB"
+ },
+ "source": [
+ "### Initialize the Gemini model\n",
+ "\n",
+ "You must import the `ChatGoogleGenerativeAI` LLM from LangChain to initialize your model.\n",
+ "\n",
+ "In this example you will use Gemini 1.5 Flash, (`gemini-1.5-flash-latest`), as it supports text summarization. To know more about this model and the other models availabe, read Google AI's [language documentation](https://ai.google.dev/models/gemini).\n",
+ "\n",
+ "You can configure the model parameters such as `temperature` or `top_p`, by passing the appropriate values when creating the `ChatGoogleGenerativeAI` LLM. To learn more about the parameters and their uses, read Google AI's [concepts guide](https://ai.google.dev/docs/concepts#model_parameters)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "id": "WWA9F0ZqB-8k"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_google_genai import ChatGoogleGenerativeAI\n",
+ "\n",
+ "# To configure model parameters use the `generation_config` parameter.\n",
+ "# eg. generation_config = {\"temperature\": 0.7, \"topP\": 0.8, \"topK\": 40}\n",
+ "# If you only want to set a custom temperature for the model use the\n",
+ "# \"temperature\" parameter directly.\n",
+ "\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-1.5-flash-latest\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "6TECDzaUSTvS"
+ },
+ "source": [
+ "### Create prompt templates\n",
+ "\n",
+ "You'll use LangChain's [`PromptTemplate`](https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/) to generate prompts for summarizing the text.\n",
+ "\n",
+ "To summarize the text from the website, you will need the following prompts.\n",
+ "1. Prompt to extract the data from the output of `WebBaseLoader`, named `doc_prompt`\n",
+ "2. Prompt for the Gemini model to summarize the extracted text, named `llm_prompt`.\n",
+ "\n",
+ "In the `llm_prompt`, the variable `text` will be replaced later by the text from the website."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "id": "rixvvvaNKLe_"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "input_variables=['text'] template='Write a concise summary of the following:\\n\"{text}\"\\nCONCISE SUMMARY:'\n"
+ ]
+ }
+ ],
+ "source": [
+ "# To extract data from WebBaseLoader\n",
+ "doc_prompt = PromptTemplate.from_template(\"{page_content}\")\n",
+ "\n",
+ "# To query Gemini\n",
+ "llm_prompt_template = \"\"\"Write a concise summary of the following:\n",
+ "\"{text}\"\n",
+ "CONCISE SUMMARY:\"\"\"\n",
+ "llm_prompt = PromptTemplate.from_template(llm_prompt_template)\n",
+ "\n",
+ "print(llm_prompt)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "-wPBMFyISh13"
+ },
+ "source": [
+ "### Create a Stuff documents chain\n",
+ "\n",
+ "LangChain provides [Chains](https://python.langchain.com/docs/modules/chains/) for chaining together LLMs with each other or other components for complex applications. You will create a **Stuff documents chain** for this application. A **Stuff documents chain** lets you combine all the documents, insert them into the prompt and pass that prompt to the LLM.\n",
+ "\n",
+ "You can create a Stuff documents chain using the [LangChain Expression Language (LCEL)](https://python.langchain.com/docs/expression_language).\n",
+ "\n",
+ "To learn more about different types of document chains, read LangChain's [chains guide](https://python.langchain.com/docs/modules/chains/document/)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "id": "EMZomQdyKMr5"
+ },
+ "outputs": [],
+ "source": [
+ "# Create Stuff documents chain using LCEL.\n",
+ "# This is called a chain because you are chaining\n",
+ "# together different elements with the LLM.\n",
+ "# In the following example, to create stuff chain,\n",
+ "# you will combine content, prompt, LLM model and\n",
+ "# output parser together like a chain using LCEL.\n",
+ "#\n",
+ "# The chain implements the following pipeline:\n",
+ "# 1. Extract data from documents and save to variable `text`.\n",
+ "# 2. This `text` is then passed to the prompt and input variable\n",
+ "# in prompt is populated.\n",
+ "# 3. The prompt is then passed to the LLM (Gemini).\n",
+ "# 4. Output from the LLM is passed through an output parser\n",
+ "# to structure the model response.\n",
+ "\n",
+ "stuff_chain = (\n",
+ " # Extract data from the documents and add to the key `text`.\n",
+ " {\n",
+ " \"text\": lambda docs: \"\\n\\n\".join(\n",
+ " format_document(doc, doc_prompt) for doc in docs\n",
+ " )\n",
+ " }\n",
+ " | llm_prompt # Prompt for Gemini\n",
+ " | llm # Gemini API function\n",
+ " | StrOutputParser() # output parser\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "5L0Tvk_5eQzC"
+ },
+ "source": [
+ "### Prompt the model\n",
+ "\n",
+ "To generate the summary of the the website data, pass the documents extracted using the `WebBaseLoader` (`docs`) to `invoke()`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "id": "k9_GxkA5ePRR"
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.google.colaboratory.intrinsic+json": {
+ "type": "string"
+ },
+ "text/plain": [
+ "\"Google has introduced Gemini, its most capable AI model yet. Gemini is multimodal, meaning it can understand and interact with various forms of information, including text, code, audio, images, and video. It comes in three sizes: Ultra (for complex tasks), Pro (for a wide range of tasks), and Nano (for on-device tasks). Gemini surpasses existing models in performance benchmarks across various domains, including natural language understanding, reasoning, and coding. \\n\\nGoogle emphasizes Gemini's safety and responsibility features, including comprehensive bias and toxicity evaluation, adversarial testing, and collaboration with external experts. \\n\\nGemini is being integrated into various Google products, such as Bard, Pixel, Search, and Ads, and will be available to developers through APIs. \\n\\nThe release of Gemini marks a significant milestone in AI development, opening up new possibilities for innovation and enhancing human capabilities in various areas. \\n\""
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "stuff_chain.invoke(docs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "nfrBsxUFgZzc"
+ },
+ "source": [
+ "# Conclusion\n",
+ "\n",
+ "That's it. You have successfully created an LLM application to summarize text using LangChain and the Gemini API."
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "name": "Gemini_LangChain_Summarization_WebLoad.ipynb",
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/examples/prompting/Basic_Code_Generation.ipynb b/examples/prompting/Basic_Code_Generation.ipynb
index 3f1021977..0762008fc 100644
--- a/examples/prompting/Basic_Code_Generation.ipynb
+++ b/examples/prompting/Basic_Code_Generation.ipynb
@@ -66,7 +66,7 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"metadata": {
"id": "Ne-3gnXqR0hI"
},
@@ -77,7 +77,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
"metadata": {
"id": "EconMHePQHGw"
},
@@ -101,7 +101,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
"metadata": {
"id": "v-JZzORUpVR2"
},
@@ -133,7 +133,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
"metadata": {
"id": "kVF8ZQ38Vs1P"
},
@@ -148,7 +148,7 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
"metadata": {
"id": "CHTdAVE0pIFf"
},
@@ -191,7 +191,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
"metadata": {
"id": "1T1QSzjVVvE_"
},
@@ -210,7 +210,7 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": null,
"metadata": {
"id": "8KVpzExDqRj2"
},
@@ -244,7 +244,7 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
"metadata": {
"id": "lOU_abTPSmZu"
},
@@ -286,7 +286,7 @@
],
"metadata": {
"colab": {
- "name": "Basic_code_generation.ipynb",
+ "name": "Basic_Code_Generation.ipynb",
"toc_visible": true
},
"kernelspec": {