From c059902f11482dd4e852f995908b3812718011e4 Mon Sep 17 00:00:00 2001 From: Eugene Yurtsev Date: Sat, 14 Sep 2024 14:31:20 -0400 Subject: [PATCH] Update example notebooks: switch to langchain_core imports and compact cell sources --- examples/configurable_chain/client.ipynb | 62 +++------------- examples/llm/client.ipynb | 94 ++++-------------------- examples/local_llm/client.ipynb | 57 +++----------- examples/passthrough_dict/client.ipynb | 29 ++------ 4 files changed, 39 insertions(+), 203 deletions(-) diff --git a/examples/configurable_chain/client.ipynb b/examples/configurable_chain/client.ipynb index 5a7b9412..606ee9db 100644 --- a/examples/configurable_chain/client.ipynb +++ b/examples/configurable_chain/client.ipynb @@ -23,14 +23,7 @@ "tags": [] }, "outputs": [], - "source": [ - "import requests\n", - "\n", - "inputs = {\"input\": {\"topic\": \"sports\"}}\n", - "response = requests.post(\"http://localhost:8000/configurable_temp/invoke\", json=inputs)\n", - "\n", - "response.json()" - ] + "source": ["import requests\n\ninputs = {\"input\": {\"topic\": \"sports\"}}\nresponse = requests.post(\"http://localhost:8000/configurable_temp/invoke\", json=inputs)\n\nresponse.json()"] }, { "cell_type": "markdown", @@ -46,11 +39,7 @@ "tags": [] }, "outputs": [], - "source": [ - "from langserve import RemoteRunnable\n", - "\n", - "remote_runnable = RemoteRunnable(\"http://localhost:8000/configurable_temp\")" - ] + "source": ["from langserve import RemoteRunnable\n\nremote_runnable = RemoteRunnable(\"http://localhost:8000/configurable_temp\")"] }, { "cell_type": "markdown", @@ -66,9 +55,7 @@ "tags": [] }, "outputs": [], - "source": [ - "response = await remote_runnable.ainvoke({\"topic\": \"sports\"})" - ] + "source": ["response = await remote_runnable.ainvoke({\"topic\": \"sports\"})"] }, { "cell_type": "markdown", @@ -84,11 +71,7 @@ "tags": [] }, "outputs": [], - "source": [ - "from langchain.schema.runnable.config import RunnableConfig\n", - "\n", - "remote_runnable.batch([{\"topic\": \"sports\"}, {\"topic\": \"cars\"}])" - ] + "source": ["from langchain_core.runnables import RunnableConfig\n\nremote_runnable.batch([{\"topic\": \"sports\"}, {\"topic\": \"cars\"}])"] }, { "cell_type": "markdown", @@ -104,10 +87,7 @@ "tags": [] }, "outputs": [], - "source": [ - "async for chunk in remote_runnable.astream({\"topic\": \"bears, but a bit verbose\"}):\n", - " print(chunk, end=\"\", flush=True)" - ] + "source": ["async for chunk in remote_runnable.astream({\"topic\": \"bears, but a bit verbose\"}):\n print(chunk, end=\"\", flush=True)"] }, { "cell_type": "markdown", @@ -157,14 +137,7 @@ "tags": [] }, "outputs": [], - "source": [ - "await remote_runnable.ainvoke(\n", - " {\"topic\": \"sports\"},\n", - " config={\n", - " \"configurable\": {\"prompt\": \"how to say {topic} in french\", \"llm\": \"low_temp\"}\n", - " },\n", - ")" - ] + "source": ["await remote_runnable.ainvoke(\n {\"topic\": \"sports\"},\n config={\n \"configurable\": {\"prompt\": \"how to say {topic} in french\", \"llm\": \"low_temp\"}\n },\n)"] }, { "cell_type": "markdown", @@ -221,13 +194,7 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [ - "# The model will fail with an auth error\n", - "unauthenticated_response = requests.post(\n", - " \"http://localhost:8000/auth_from_header/invoke\", json={\"input\": \"hello\"}\n", - ")\n", - "unauthenticated_response.json()" - ] + "source": ["# The model will fail with an auth error\nunauthenticated_response = requests.post(\n \"http://localhost:8000/auth_from_header/invoke\", json={\"input\": \"hello\"}\n)\nunauthenticated_response.json()"] }, {
"cell_type": "markdown", @@ -244,25 +211,14 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [ - "# The model will succeed as long as the above shell script is run previously\n", - "import os\n", - "\n", - "test_key = os.environ[\"TEST_API_KEY\"]\n", - "authenticated_response = requests.post(\n", - " \"http://localhost:8000/auth_from_header/invoke\",\n", - " json={\"input\": \"hello\"},\n", - " headers={\"x-api-key\": test_key},\n", - ")\n", - "authenticated_response.json()" - ] + "source": ["# The model will succeed as long as the above shell script is run previously\nimport os\n\ntest_key = os.environ[\"TEST_API_KEY\"]\nauthenticated_response = requests.post(\n \"http://localhost:8000/auth_from_header/invoke\",\n json={\"input\": \"hello\"},\n headers={\"x-api-key\": test_key},\n)\nauthenticated_response.json()"] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [""] } ], "metadata": { diff --git a/examples/llm/client.ipynb b/examples/llm/client.ipynb index f5bda8cc..7fb8c291 100644 --- a/examples/llm/client.ipynb +++ b/examples/llm/client.ipynb @@ -16,9 +16,7 @@ "tags": [] }, "outputs": [], - "source": [ - "from langchain.prompts.chat import ChatPromptTemplate" - ] + "source": ["from langchain_core.prompts import ChatPromptTemplate"] }, { "cell_type": "code", @@ -27,12 +25,7 @@ "tags": [] }, "outputs": [], - "source": [ - "from langserve import RemoteRunnable\n", - "\n", - "openai_llm = RemoteRunnable(\"http://localhost:8000/openai/\")\n", - "anthropic = RemoteRunnable(\"http://localhost:8000/anthropic/\")" - ] + "source": ["from langserve import RemoteRunnable\n\nopenai_llm = RemoteRunnable(\"http://localhost:8000/openai/\")\nanthropic = RemoteRunnable(\"http://localhost:8000/anthropic/\")"] }, { "cell_type": "markdown", @@ -48,18 +41,7 @@ "tags": [] }, "outputs": [], - "source": [ - "prompt = ChatPromptTemplate.from_messages(\n", - " [\n", - " (\n", - " \"system\",\n", - " \"You are a highly educated person who loves to use big words. \"\n", - " + \"You are also concise. Never answer in more than three sentences.\",\n", - " ),\n", - " (\"human\", \"Tell me about your favorite novel\"),\n", - " ]\n", - ").format_messages()" - ] + "source": ["prompt = ChatPromptTemplate.from_messages(\n [\n (\n \"system\",\n \"You are a highly educated person who loves to use big words. \"\n + \"You are also concise. 
Never answer in more than three sentences.\",\n ),\n (\"human\", \"Tell me about your favorite novel\"),\n ]\n).format_messages()"] }, { "cell_type": "markdown", @@ -86,9 +68,7 @@ "output_type": "execute_result" } ], - "source": [ - "anthropic.invoke(prompt)" - ] + "source": ["anthropic.invoke(prompt)"] }, { "cell_type": "code", @@ -97,9 +77,7 @@ "tags": [] }, "outputs": [], - "source": [ - "openai_llm.invoke(prompt)" - ] + "source": ["openai_llm.invoke(prompt)"] }, { "cell_type": "markdown", @@ -126,9 +104,7 @@ "output_type": "execute_result" } ], - "source": [ - "await openai_llm.ainvoke(prompt)" - ] + "source": ["await openai_llm.ainvoke(prompt)"] }, { "cell_type": "code", @@ -149,9 +125,7 @@ "output_type": "execute_result" } ], - "source": [ - "anthropic.batch([prompt, prompt])" - ] + "source": ["anthropic.batch([prompt, prompt])"] }, { "cell_type": "code", @@ -172,9 +146,7 @@ "output_type": "execute_result" } ], - "source": [ - "await anthropic.abatch([prompt, prompt])" - ] + "source": ["await anthropic.abatch([prompt, prompt])"] }, { "cell_type": "markdown", @@ -198,10 +170,7 @@ ] } ], - "source": [ - "for chunk in anthropic.stream(prompt):\n", - " print(chunk.content, end=\"\", flush=True)" - ] + "source": ["for chunk in anthropic.stream(prompt):\n print(chunk.content, end=\"\", flush=True)"] }, { "cell_type": "code", @@ -218,19 +187,14 @@ ] } ], - "source": [ - "async for chunk in anthropic.astream(prompt):\n", - " print(chunk.content, end=\"\", flush=True)" - ] + "source": ["async for chunk in anthropic.astream(prompt):\n print(chunk.content, end=\"\", flush=True)"] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], - "source": [ - "from langchain.schema.runnable import RunnablePassthrough" - ] + "source": ["from langchain_core.runnables import RunnablePassthrough"] }, { "cell_type": "code", @@ -239,37 +203,7 @@ "tags": [] }, "outputs": [], - "source": [ - "comedian_chain = (\n", - " ChatPromptTemplate.from_messages(\n", - " [\n", - " (\n", - " \"system\",\n", - " \"You are a comedian that sometimes tells funny jokes and other times you just state facts that are not funny. Please either tell a joke or state fact now but only output one.\",\n", - " ),\n", - " ]\n", - " )\n", - " | openai_llm\n", - ")\n", - "\n", - "joke_classifier_chain = (\n", - " ChatPromptTemplate.from_messages(\n", - " [\n", - " (\n", - " \"system\",\n", - " \"Please determine if the joke is funny. Say `funny` if it's funny and `not funny` if not funny. Then repeat the first five words of the joke for reference...\",\n", - " ),\n", - " (\"human\", \"{joke}\"),\n", - " ]\n", - " )\n", - " | anthropic\n", - ")\n", - "\n", - "\n", - "chain = {\"joke\": comedian_chain} | RunnablePassthrough.assign(\n", - " classification=joke_classifier_chain\n", - ")" - ] + "source": ["comedian_chain = (\n ChatPromptTemplate.from_messages(\n [\n (\n \"system\",\n \"You are a comedian that sometimes tells funny jokes and other times you just state facts that are not funny. Please either tell a joke or state a fact now but only output one.\",\n ),\n ]\n )\n | openai_llm\n)\n\njoke_classifier_chain = (\n ChatPromptTemplate.from_messages(\n [\n (\n \"system\",\n \"Please determine if the joke is funny. Say `funny` if it's funny and `not funny` if not funny. 
Then repeat the first five words of the joke for reference...\",\n ),\n (\"human\", \"{joke}\"),\n ]\n )\n | anthropic\n)\n\n\nchain = {\"joke\": comedian_chain} | RunnablePassthrough.assign(\n classification=joke_classifier_chain\n)"] }, { "cell_type": "code", @@ -290,9 +224,7 @@ "output_type": "execute_result" } ], - "source": [ - "chain.invoke({})" - ] + "source": ["chain.invoke({})"] } ], "metadata": { diff --git a/examples/local_llm/client.ipynb b/examples/local_llm/client.ipynb index c9768708..2624865a 100644 --- a/examples/local_llm/client.ipynb +++ b/examples/local_llm/client.ipynb @@ -18,9 +18,7 @@ "tags": [] }, "outputs": [], - "source": [ - "from langchain.prompts.chat import ChatPromptTemplate" - ] + "source": ["from langchain_core.prompts import ChatPromptTemplate"] }, { "cell_type": "code", @@ -29,11 +27,7 @@ "tags": [] }, "outputs": [], - "source": [ - "from langserve import RemoteRunnable\n", - "\n", - "model = RemoteRunnable(\"http://localhost:8000/ollama/\")" - ] + "source": ["from langserve import RemoteRunnable\n\nmodel = RemoteRunnable(\"http://localhost:8000/ollama/\")"] }, { "cell_type": "markdown", @@ -49,9 +43,7 @@ "tags": [] }, "outputs": [], - "source": [ - "prompt = \"Tell me a 3 sentence story about a cat.\"" - ] + "source": ["prompt = \"Tell me a 3 sentence story about a cat.\""] }, { "cell_type": "code", @@ -71,9 +63,7 @@ "output_type": "execute_result" } ], - "source": [ - "model.invoke(prompt)" - ] + "source": ["model.invoke(prompt)"] }, { "cell_type": "code", @@ -93,9 +83,7 @@ "output_type": "execute_result" } ], - "source": [ - "await model.ainvoke(prompt)" - ] + "source": ["await model.ainvoke(prompt)"] }, { "cell_type": "markdown", @@ -131,10 +119,7 @@ "output_type": "execute_result" } ], - "source": [ - "%%time\n", - "model.batch([prompt, prompt])" - ] + "source": ["%%time\nmodel.batch([prompt, prompt])"] }, { "cell_type": "code", @@ -152,11 +137,7 @@ ] } ], - "source": [ - "%%time\n", - "for _ in range(2):\n", - " model.invoke(prompt)" - ] + "source": ["%%time\nfor _ in range(2):\n model.invoke(prompt)"] }, { "cell_type": "code", @@ -177,9 +158,7 @@ "output_type": "execute_result" } ], - "source": [ - "await model.abatch([prompt, prompt])" - ] + "source": ["await model.abatch([prompt, prompt])"] }, { "cell_type": "markdown", @@ -206,10 +185,7 @@ ] } ], - "source": [ - "for chunk in model.stream(prompt):\n", - " print(chunk.content, end=\"|\", flush=True)" - ] + "source": ["for chunk in model.stream(prompt):\n print(chunk.content, end=\"|\", flush=True)"] }, { "cell_type": "code", @@ -227,10 +203,7 @@ ] } ], - "source": [ - "async for chunk in model.astream(prompt):\n", - " print(chunk.content, end=\"|\", flush=True)" - ] + "source": ["async for chunk in model.astream(prompt):\n print(chunk.content, end=\"|\", flush=True)"] }, { "cell_type": "markdown", @@ -266,15 +239,7 @@ ] } ], - "source": [ - "i = 0\n", - "async for event in model.astream_events(prompt, version='v1'):\n", - " print(event)\n", - " if i > 10:\n", - " print('...')\n", - " break\n", - " i += 1" - ] + "source": ["i = 0\nasync for event in model.astream_events(prompt, version='v1'):\n print(event)\n if i > 10:\n print('...')\n break\n i += 1"] } ], "metadata": { diff --git a/examples/passthrough_dict/client.ipynb b/examples/passthrough_dict/client.ipynb index 0c6a300e..b16d3b2b 100644 --- a/examples/passthrough_dict/client.ipynb +++ b/examples/passthrough_dict/client.ipynb @@ -16,9 +16,7 @@ "tags": [] }, "outputs": [], - "source": [ - "from langchain.prompts.chat import 
ChatPromptTemplate" - ] + "source": ["from langchain_core.prompts import ChatPromptTemplate"] }, { "cell_type": "code", @@ -27,11 +25,7 @@ "tags": [] }, "outputs": [], - "source": [ - "from langserve import RemoteRunnable\n", - "\n", - "chain = RemoteRunnable(\"http://localhost:8000/v1/\")" - ] + "source": ["from langserve import RemoteRunnable\n\nchain = RemoteRunnable(\"http://localhost:8000/v1/\")"] }, { "cell_type": "markdown", @@ -59,9 +53,7 @@ "output_type": "execute_result" } ], - "source": [ - "chain.invoke({'thing': 'apple', 'language': 'italian', 'info': {\"user_id\": 42, \"user_info\": {\"address\": 42}}})" - ] + "source": ["chain.invoke({'thing': 'apple', 'language': 'italian', 'info': {\"user_id\": 42, \"user_info\": {\"address\": 42}}})"] }, { "cell_type": "code", @@ -82,10 +74,7 @@ ] } ], - "source": [ - "for chunk in chain.stream({'thing': 'apple', 'language': 'italian', 'info': {\"user_id\": 42, \"user_info\": {\"address\": 42}}}):\n", - " print(chunk)" - ] + "source": ["for chunk in chain.stream({'thing': 'apple', 'language': 'italian', 'info': {\"user_id\": 42, \"user_info\": {\"address\": 42}}}):\n print(chunk)"] }, { "cell_type": "code", @@ -94,11 +83,7 @@ "tags": [] }, "outputs": [], - "source": [ - "from langserve import RemoteRunnable\n", - "\n", - "chain = RemoteRunnable(\"http://localhost:8000/v2/\")" - ] + "source": ["from langserve import RemoteRunnable\n\nchain = RemoteRunnable(\"http://localhost:8000/v2/\")"] }, { "cell_type": "code", @@ -119,9 +104,7 @@ "output_type": "execute_result" } ], - "source": [ - "chain.invoke({'thing': 'apple', 'language': 'italian', 'info': {\"user_id\": 42, \"user_info\": {\"address\": 42}}})" - ] + "source": ["chain.invoke({'thing': 'apple', 'language': 'italian', 'info': {\"user_id\": 42, \"user_info\": {\"address\": 42}}})"] } ], "metadata": {