[WIP] Notebook testing changes #1606

Status: Open. Wants to merge 17 commits into base: main.

Changes from 9 commits
Makefile: 20 changes (14 additions, 6 deletions)
@@ -6,7 +6,7 @@
 
 SHELL := /bin/bash
 REPO_ROOT := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
-PYTEST := poetry run pytest --rootdir=. -s
+PYTEST := poetry run pytest -n auto --rootdir=. -s
 POETRY_DIRS := $(shell find . \
 	-not -path "./dist/*" \
 	-maxdepth 4 \
@@ -29,6 +29,7 @@ env-%:
 env-tests:
 	poetry run pip install \
 		pytest \
+		pytest-xdist[psutil] \
 		nbconvert \
 		nbformat \
 		pytest-subtests \
@@ -63,8 +64,12 @@ env-tests-db: env-tests
 env-tests-notebook: env-tests env-tests-optional
 	poetry run pip install \
 		faiss-cpu \
+		langchainhub \
+		llama-hub \
+		rank_bm25 \
 		ipytree \
-		llama-index-readers-web
+		llama-index-readers-web \
+		llama-index-vector-stores-milvus
 
 # Lock the poetry dependencies for all the subprojects.
 lock: $(POETRY_DIRS)
@@ -127,7 +132,7 @@ codespell:
 
 # Generates a coverage report.
 coverage:
-	ALLOW_OPTIONALS=true poetry run pytest --rootdir=. tests/* --cov src --cov-report html
+	ALLOW_OPTIONALS=true poetry run pytest -n auto --rootdir=. tests/* --cov src --cov-report html
 
 # Run the static unit tests only, those in the static subfolder. They are run
 # for every tested python version while those outside of static are run only for
@@ -201,18 +206,21 @@ test-%-allow-optional: env
 test-%-optional: env-tests-optional
 	TEST_OPTIONAL=true make test-$*
 
+test-notebook: env-tests-notebook
+	make test-notebook-optional
+
 # Run the unit tests, those in the tests/unit. They are run in the CI pipeline
 # frequently.
 test-unit:
-	poetry run pytest --rootdir=. tests/unit/*
+	poetry run pytest -n auto --rootdir=. tests/unit/*
 # Tests in the e2e folder make use of possibly costly endpoints. They
 # are part of only the less frequently run release tests.
 test-e2e:
-	poetry run pytest --rootdir=. tests/e2e/*
+	poetry run pytest -n auto --rootdir=. tests/e2e/*
 
 # Runs the notebook test
 test-notebook:
-	poetry run pytest --rootdir=. tests/docs_notebooks/*
+	poetry run pytest -n auto --rootdir=. tests/docs_notebooks/*
 
 install-wheels:
 	pip install dist/*/*.whl
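
Note on the Makefile changes: the new `-n auto` flag comes from pytest-xdist, which `env-tests` now installs (the `[psutil]` extra lets xdist count physical CPU cores), and it fans each pytest session out across one worker process per core. A minimal sketch of how a test can observe the distribution, using the `worker_id` fixture that pytest-xdist ships; the file name is hypothetical:

```python
# test_xdist_smoke.py -- hypothetical smoke test, not part of this PR.
# Under `pytest -n auto`, pytest-xdist's `worker_id` fixture reports the
# worker running the test ("gw0", "gw1", ...); with xdist installed but
# parallelism off, it reports "master".
def test_runs_on_a_worker(worker_id):
    assert worker_id == "master" or worker_id.startswith("gw")
```

Since xdist gives no ordering guarantees across workers, notebook tests that write shared files or bind ports need per-worker isolation.
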
docs/blog/posts/release_blog_1dot.md: 2 changes (1 addition, 1 deletion)
@@ -104,7 +104,7 @@ To see the core re-architecture changes in action, we've included some usage examples
 retriever = vectorstore.as_retriever()
 
 prompt = hub.pull("rlm/rag-prompt")
-llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
+llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0)
 
 rag_chain = (
     {"context": retriever | format_docs, "question": RunnablePassthrough()}
docs/component_guides/instrumentation/langchain.md: 2 changes (1 addition, 1 deletion)
@@ -48,7 +48,7 @@ First, this requires loading data into a vector store.
 retriever = vectorstore.as_retriever()
 
 prompt = hub.pull("rlm/rag-prompt")
-llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
+llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0)
 
 
 def format_docs(docs):
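
The hunk above is a fragment of the guide's standard LCEL RAG chain. For context, a minimal self-contained sketch of the surrounding chain, assuming `OPENAI_API_KEY` is set and `langchainhub` is installed for `hub.pull`; the one-document in-memory vector store stands in for the guide's real one:

```python
from langchain import hub
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Stand-in corpus so the sketch runs end to end.
vectorstore = InMemoryVectorStore.from_texts(
    ["Task decomposition splits a complex goal into smaller steps."],
    embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever()

prompt = hub.pull("rlm/rag-prompt")
llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0)


def format_docs(docs):
    # Join retrieved documents into a single context string for the prompt.
    return "\n\n".join(doc.page_content for doc in docs)


rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

print(rag_chain.invoke("What is task decomposition?"))
```
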
docs/component_guides/instrumentation/nemo.md: 2 changes (1 addition, 1 deletion)
@@ -38,7 +38,7 @@ Below is a quick example of usage. First, we'll create a standard Nemo app.
 models:
   - type: main
     engine: openai
-    model: gpt-3.5-turbo-instruct
+    model: gpt-4o-mini
 
 %%writefile config.co
 # Adapted from NeMo-Guardrails/tests/test_configs/with_kb_openai_embeddings/config.co
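
The YAML hunk above only selects the engine and model; the guardrails app is then built from the config directory. A sketch of that step, assuming the `%%writefile` cells place `config.yml` and `config.co` under `./config` (adjust the path to wherever the guide actually writes them):

```python
from nemoguardrails import LLMRails, RailsConfig

# Load the YAML + Colang files written above and stand up the rails app.
config = RailsConfig.from_path("./config")
rails = LLMRails(config)

response = rails.generate(messages=[{"role": "user", "content": "Hi there!"}])
print(response["content"])
```
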
examples/experimental/MultiQueryRetrievalLangchain.ipynb: 2 changes (1 addition, 1 deletion)
@@ -150,7 +150,7 @@
 "# select context to be used in feedback. the location of context is app specific.\n",
 "\n",
 "prompt = hub.pull(\"rlm/rag-prompt\")\n",
-"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+"llm = ChatOpenAI(model_name=\"gpt-4o-mini\", temperature=0)\n",
 "\n",
 "\n",
 "def format_docs(docs):\n",
examples/experimental/end2end_apps/trubot/App_TruBot.py: 2 changes (1 addition, 1 deletion)
@@ -24,7 +24,7 @@
 key_utils.check_keys("PINECONE_API_KEY", "PINECONE_ENV", "OPENAI_API_KEY")
 
 # Set up GPT-3 model
-model_name = "gpt-3.5-turbo"
+model_name = "gpt-4o-mini"
 
 app_name = "TruBot"
 # app_name = "TruBot_langprompt"
examples/experimental/end2end_apps/trubot/trubot.py: 2 changes (1 addition, 1 deletion)
@@ -118,7 +118,7 @@ def get_or_make_app(
     pp.pprint(f"Starting a new conversation with {app_version}.")
 
     # Embedding needed for Pinecone vector db.
-    embedding = OpenAIEmbeddings(model="text-embedding-ada-002")  # 1536 dims
+    embedding = OpenAIEmbeddings(model="text-embedding-3-small")  # 1536 dims
     docsearch = Pinecone.from_existing_index(
         index_name="llmdemo", embedding=embedding
     )
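
The model swap above is safe against the existing `llmdemo` Pinecone index only because `text-embedding-3-small` defaults to the same 1536 dimensions as `text-embedding-ada-002`, as the retained comment notes. A quick hedged check (assumes `OPENAI_API_KEY` is set; the import path assumes the `langchain-openai` package, which may differ from the script's own import):

```python
from langchain_openai import OpenAIEmbeddings

# Probe the embedding width before pointing the new model at an old index;
# a dimension mismatch would break queries against the existing index.
vec = OpenAIEmbeddings(model="text-embedding-3-small").embed_query("probe")
assert len(vec) == 1536, f"unexpected embedding width: {len(vec)}"
```
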
@@ -148,7 +148,7 @@
 "# db_host = \"pinecone\"\n",
 "db_host = \"pinecone\"\n",
 "\n",
-"model_name = \"gpt-3.5-turbo\"\n",
+"model_name = \"gpt-4o-mini\"\n",
 "app_name = \"TruBot\"\n",
 "\n",
 "# Embedding for vector db.\n",
@@ -107,7 +107,7 @@
 "# db_host = \"pinecone\"\n",
 "db_host = \"hnsw\"\n",
 "\n",
-"model_name = \"gpt-3.5-turbo\"\n",
+"model_name = \"gpt-4o-mini\"\n",
 "app_name = \"TruBot\"\n",
 "\n",
 "# Embedding for vector db.\n",
examples/experimental/generate_test_set.ipynb: 2 changes (1 addition, 1 deletion)
@@ -107,7 +107,7 @@
 "retriever = vectorstore.as_retriever()\n",
 "\n",
 "prompt = hub.pull(\"rlm/rag-prompt\")\n",
-"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+"llm = ChatOpenAI(model_name=\"gpt-4o-mini\", temperature=0)\n",
 "\n",
 "\n",
 "def format_docs(docs):\n",
examples/experimental/llamaindex_async_stream.ipynb: 2 changes (1 addition, 1 deletion)
@@ -31,7 +31,7 @@
 "\n",
 "message = \"What did the author do growing up?\"\n",
 "\n",
-"Settings.llm = OpenAI(model=\"gpt-3.5-turbo\", temperature=0.0)\n",
+"Settings.llm = OpenAI(model=\"gpt-4o-mini\", temperature=0.0)\n",
 "Settings.num_output = 64\n",
 "\n",
 "documents = SimpleDirectoryReader(\"data\").load_data()\n",
examples/experimental/random_evaluation.ipynb: 4 changes (2 additions, 2 deletions)
@@ -92,7 +92,7 @@
 "\n",
 "embedding_function = OpenAIEmbeddingFunction(\n",
 "    api_key=os.environ.get(\"OPENAI_API_KEY\"),\n",
-"    model_name=\"text-embedding-ada-002\",\n",
+"    model_name=\"text-embedding-3-small\",\n",
 ")\n",
 "\n",
 "chroma_client = chromadb.Client()\n",
@@ -161,7 +161,7 @@
 "    \"\"\"\n",
 "    completion = (\n",
 "        oai_client.chat.completions.create(\n",
-"            model=\"gpt-3.5-turbo\",\n",
+"            model=\"gpt-4o-mini\",\n",
 "            temperature=0,\n",
 "            messages=[\n",
 "                {\n",
@@ -124,7 +124,7 @@
 "    description=\"useful for when you need to answer questions about current events\",\n",
 ")\n",
 "\n",
-"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
+"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
 "\n",
 "tools = [search_tool]\n",
 "\n",
@@ -152,7 +152,7 @@
 "    return (\n",
 "        float(\n",
 "            self.endpoint.client.chat.completions.create(\n",
-"                model=\"gpt-3.5-turbo\",\n",
+"                model=\"gpt-4o-mini\",\n",
 "                messages=[\n",
 "                    {\n",
@@ -26,7 +26,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# !pip install trulens trulens-apps-langchain langchain==0.0.283"
+"# !pip install trulens trulens-apps-langchain langchain langchain-openai"
 ]
 },
 {
@@ -35,11 +35,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain import LLMMathChain\n",
+"from langchain.chains.llm_math.base import LLMMathChain\n",
 "from langchain.agents import AgentType\n",
 "from langchain.agents import Tool\n",
 "from langchain.agents import initialize_agent\n",
-"from langchain.chat_models import ChatOpenAI\n",
+"from langchain_openai.chat_models import ChatOpenAI\n",
 "from trulens.core import TruSession\n",
 "from trulens.apps.langchain import TruChain\n",
 "\n",
@@ -80,7 +80,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")\n",
+"llm = ChatOpenAI(temperature=0)\n",
 "\n",
 "llm_math_chain = LLMMathChain.from_llm(llm, verbose=True)\n",
 "\n",
@@ -139,7 +139,7 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "Python 3.11.4 ('agents')",
+"display_name": "pkg_311",
 "language": "python",
 "name": "python3"
 },
@@ -153,11 +153,6 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3"
-},
-"vscode": {
-"interpreter": {
-"hash": "7d153714b979d5e6d08dd8ec90712dd93bff2c9b6c1f0c118169738af3430cd4"
-}
-}
+}
 },
 "nbformat": 4,
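
The hunks above modernize this notebook's imports and drop the pinned `gpt-3.5-turbo-0613`. For context, a hedged sketch of how an `LLMMathChain` is typically handed to an agent; the tool name, description, and question are illustrative, not necessarily the notebook's exact cells:

```python
from langchain.agents import AgentType, Tool, initialize_agent
from langchain.chains.llm_math.base import LLMMathChain
from langchain_openai.chat_models import ChatOpenAI

llm = ChatOpenAI(temperature=0)  # unpinned: uses the library's default model
llm_math_chain = LLMMathChain.from_llm(llm, verbose=True)

# Expose the chain as a named tool so the agent can route math questions to it.
tools = [
    Tool(
        name="Calculator",
        func=llm_math_chain.run,
        description="useful for answering math questions",
    )
]

agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 to the power of 10?")
```
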
@@ -130,7 +130,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo-16k\", temperature=0.0)\n",
+"llm = ChatOpenAI(model_name=\"gpt-4o-mini\", temperature=0.0)\n",
 "\n",
 "conversational_memory = ConversationSummaryBufferMemory(\n",
 "    k=4,\n",
@@ -245,7 +245,7 @@
 "    return (\n",
 "        float(\n",
 "            self.endpoint.client.chat.completions.create(\n",
-"                model=\"gpt-3.5-turbo\",\n",
+"                model=\"gpt-4o-mini\",\n",
 "                messages=[\n",
 "                    {\n",
@@ -267,7 +267,7 @@
 "    return (\n",
 "        float(\n",
 "            self.endpoint.client.chat.completions.create(\n",
-"                model=\"gpt-3.5-turbo\",\n",
+"                model=\"gpt-4o-mini\",\n",
 "                messages=[\n",
 "                    {\n",
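
Both hunks above sit inside hand-rolled feedback methods that ask the provider's own OpenAI client for a 0-10 rating and rescale it to 0-1. A hedged sketch of that shape; the class name, method name, and prompt are illustrative, and only the `self.endpoint.client.chat.completions.create(...)` pattern is taken from the diff:

```python
from trulens.providers.openai import OpenAI as OpenAIProvider


class StandaloneProvider(OpenAIProvider):
    def custom_metric(self, text: str) -> float:
        """Rate `text` from 0 to 10 with the LLM, rescaled to 0-1."""
        rating = (
            self.endpoint.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {
                        "role": "system",
                        "content": "Rate the following text from 0 to 10. "
                        "Reply with the number only.",
                    },
                    {"role": "user", "content": text},
                ],
            )
            .choices[0]
            .message.content
        )
        return float(rating) / 10.0
```
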
@@ -145,7 +145,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Create a standalone GPT3.5 for comparison"
+"### Create a standalone GPT for comparison"
 ]
 },
 {
@@ -154,9 +154,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"client = openai.OpenAI()\n",
-"\n",
-"chat_completion = client.chat.completions.create"
+"client = openai.OpenAI()"
 ]
 },
 {
@@ -166,15 +164,15 @@
 "outputs": [],
 "source": [
 "from trulens.apps.custom import TruCustomApp\n",
-"from trulens.core import instrument\n",
+"from trulens.core.instruments import instrument\n",
 "\n",
 "\n",
 "class LLMStandaloneApp:\n",
 "    @instrument\n",
 "    def __call__(self, prompt):\n",
 "        return (\n",
-"            chat_completion(\n",
-"                model=\"gpt-3.5-turbo\",\n",
+"            client.chat.completions.create(\n",
+"                model=\"gpt-4o-mini\",\n",
 "                messages=[\n",
 "                    {\"role\": \"system\", \"content\": gordon_ramsay_prompt},\n",
 "                    {\"role\": \"user\", \"content\": prompt},\n",
@@ -486,7 +484,7 @@
 "provenance": []
 },
 "kernelspec": {
-"display_name": "Python 3 (ipykernel)",
+"display_name": "pkg_311",
 "language": "python",
 "name": "python3"
 },
@@ -500,11 +498,6 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3"
-},
-"vscode": {
-"interpreter": {
-"hash": "7d153714b979d5e6d08dd8ec90712dd93bff2c9b6c1f0c118169738af3430cd4"
-}
-}
+}
 },
 "nbformat": 4,
@@ -95,7 +95,7 @@
 "outputs": [],
 "source": [
 "# Merge into a single large document rather than one document per-page\n",
-"from llama_index import Document\n",
+"from llama_index.core import Document\n",
 "\n",
 "document = Document(text=\"\\n\\n\".join([doc.text for doc in documents]))"
 ]
@@ -107,8 +107,8 @@
 "outputs": [],
 "source": [
 "from llama_index.core import ServiceContext\n",
-"from llama_index.llms import OpenAI\n",
-"from llama_index.node_parser import SentenceWindowNodeParser\n",
+"from llama_index.llms.openai import OpenAI\n",
+"from llama_index.core.node_parser import SentenceWindowNodeParser\n",
 "\n",
 "# create the sentence window node parser w/ default settings\n",
 "node_parser = SentenceWindowNodeParser.from_defaults(\n",
@@ -117,7 +117,7 @@
 "    original_text_metadata_key=\"original_text\",\n",
 ")\n",
 "\n",
-"llm = OpenAI(model=\"gpt-3.5-turbo\", temperature=0.1)\n",
+"llm = OpenAI(model=\"gpt-4o-mini\", temperature=0.1)\n",
 "sentence_context = ServiceContext.from_defaults(\n",
 "    llm=llm,\n",
 "    embed_model=\"local:BAAI/bge-small-en-v1.5\",\n",
@@ -340,7 +340,7 @@
 "provenance": []
 },
 "kernelspec": {
-"display_name": "milvus",
+"display_name": "pkg_311",
 "language": "python",
 "name": "python3"
 },
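
For context on where `sentence_context` ends up: in this notebook (still on the deprecated `ServiceContext` API) it is used to build the sentence-window index over the merged document. A minimal sketch under that assumption; the query string is illustrative:

```python
from llama_index.core import VectorStoreIndex

# `document` and `sentence_context` come from the cells shown above.
sentence_index = VectorStoreIndex.from_documents(
    [document], service_context=sentence_context
)
query_engine = sentence_index.as_query_engine(similarity_top_k=2)
print(query_engine.query("What did the author work on?"))
```
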