From dcbefb03a6509f9cfe858183d1b2bddd8159efb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A1udio=20Lemos?= Date: Tue, 27 Aug 2024 12:21:41 +0100 Subject: [PATCH] [chore] delete notebooks --- examples/06_azure_function_calling.ipynb | 974 ----------------------- examples/09_azure_llama_tests.ipynb | 0 llmstudio/llm/langchain.py | 2 +- 3 files changed, 1 insertion(+), 975 deletions(-) delete mode 100644 examples/06_azure_function_calling.ipynb delete mode 100644 examples/09_azure_llama_tests.ipynb diff --git a/examples/06_azure_function_calling.ipynb b/examples/06_azure_function_calling.ipynb deleted file mode 100644 index 6d36a15b..00000000 --- a/examples/06_azure_function_calling.ipynb +++ /dev/null @@ -1,974 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# OpenAI for reference" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running LLMstudio Engine on http://localhost:50001 Running LLMstudio Tracking on http://localhost:50002 \n", - "\n", - "ChatCompletionChunk(id='chatcmpl-9yhJwIOGC8UzklZgPah42ISpSRCLg', choices=[Choice(delta=ChoiceDelta(content=None, function_call=ChoiceDeltaFunctionCall(arguments='', name='get_weather'), role='assistant', tool_calls=None, refusal=None), finish_reason=None, index=0, logprobs=None)], created=1724253232, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhJwIOGC8UzklZgPah42ISpSRCLg', choices=[Choice(delta=ChoiceDelta(content=None, function_call=ChoiceDeltaFunctionCall(arguments='{\"', name=None), role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724253232, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhJwIOGC8UzklZgPah42ISpSRCLg', choices=[Choice(delta=ChoiceDelta(content=None, function_call=ChoiceDeltaFunctionCall(arguments='location', name=None), role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724253232, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhJwIOGC8UzklZgPah42ISpSRCLg', choices=[Choice(delta=ChoiceDelta(content=None, function_call=ChoiceDeltaFunctionCall(arguments='\":\"', name=None), role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724253232, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhJwIOGC8UzklZgPah42ISpSRCLg', choices=[Choice(delta=ChoiceDelta(content=None, function_call=ChoiceDeltaFunctionCall(arguments='San', name=None), role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724253232, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhJwIOGC8UzklZgPah42ISpSRCLg', choices=[Choice(delta=ChoiceDelta(content=None, function_call=ChoiceDeltaFunctionCall(arguments=' Francisco', name=None), role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724253232, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhJwIOGC8UzklZgPah42ISpSRCLg', choices=[Choice(delta=ChoiceDelta(content=None, 
function_call=ChoiceDeltaFunctionCall(arguments='\"}', name=None), role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724253232, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhJwIOGC8UzklZgPah42ISpSRCLg', choices=[Choice(delta=ChoiceDelta(content=None, function_call=None, role=None, tool_calls=None), finish_reason='function_call', index=0, logprobs=None)], created=1724253232, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "function_calls: [{'arguments': '{\"', 'name': None}, {'arguments': 'location', 'name': None}, {'arguments': '\":\"', 'name': None}, {'arguments': 'San', 'name': None}, {'arguments': ' Francisco', 'name': None}, {'arguments': '\"}', 'name': None}]\n", - "messages: [{'role': 'user', 'content': '<|begin_of_text|>\\n <|start_header_id|>system<|end_header_id|>\\n You are a helpful AI assistant.\\n \\nYou have access to the following functions:\\nUse the function \\'get_weather\\' to \\'Gets the weather in a certain location.\\':\\nParameters format:\\n{\\n \"type\": \"object\",\\n \"properties\": {\\n \"location\": {\\n \"type\": \"string\",\\n \"description\": \"Location to get the weather\"\\n }\\n }\\n}\\n\\n\\nIf you choose to call a function, ONLY reply in the following format with no prefix or suffix:\\n§function_name§{{\"param_name\": \"param_value\"}}\\n\\nReminder:\\n- Function calls MUST follow the specified format.\\n- Only call one function at a time.\\n- NEVER call more than one function at a time.\\n- Required parameters MUST be specified.\\n- Put the entire function call reply on one line.\\n- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls.\\n- If you have already called a function and got the response for the user\\'s question, please reply with the response.\\n\\n<|eot_id|>\\n <|start_header_id|>user<|end_header_id|>\\n Get the weather in San Francisco.\\n <|eot_id|>\\n '}]\n", - "function name: get_weather\n", - "name_chunk: {'id': '99731b21-f57a-4609-bbfe-fab30ab5dbee', 'choices': [{'delta': {'content': None, 'function_call': {'arguments': '', 'name': 'get_weather'}, 'role': 'assistant', 'tool_calls': None, 'refusal': None}, 'finish_reason': None, 'index': 0, 'logprobs': None}], 'created': 1724253263, 'model': 'Meta-Llama-3.1-405B-Instruct', 'object': 'chat.completion.chunk', 'system_fingerprint': None, 'usage': None}\n", - "{'id': '0e87efb3-44fb-446b-b6c5-9028bb2c8676', 'choices': [{'delta': {'content': None, 'function_call': {'arguments': '{\"', 'name': None}, 'role': None, 'tool_calls': None}, 'finish_reason': None, 'index': 0, 'logprobs': None}], 'created': 1724253263, 'model': 'Meta-Llama-3.1-405B-Instruct', 'object': 'chat.completion.chunk', 'system_fingerprint': None, 'usage': None}\n", - "{'id': '2a461223-fe52-4fa1-a394-0406ea30ecfd', 'choices': [{'delta': {'content': None, 'function_call': {'arguments': 'location', 'name': None}, 'role': None, 'tool_calls': None}, 'finish_reason': None, 'index': 0, 'logprobs': None}], 'created': 1724253263, 'model': 'Meta-Llama-3.1-405B-Instruct', 'object': 'chat.completion.chunk', 'system_fingerprint': None, 'usage': None}\n", - "{'id': '95e9e373-7f45-4758-bd8b-20fed75704bd', 'choices': [{'delta': {'content': None, 'function_call': {'arguments': '\":', 'name': None}, 'role': None, 'tool_calls': None}, 'finish_reason': None, 'index': 0, 
'logprobs': None}], 'created': 1724253263, 'model': 'Meta-Llama-3.1-405B-Instruct', 'object': 'chat.completion.chunk', 'system_fingerprint': None, 'usage': None}\n", - "{'id': 'fa6e23e8-d88e-4f5a-8df6-131861c8d21c', 'choices': [{'delta': {'content': None, 'function_call': {'arguments': ' \"', 'name': None}, 'role': None, 'tool_calls': None}, 'finish_reason': None, 'index': 0, 'logprobs': None}], 'created': 1724253263, 'model': 'Meta-Llama-3.1-405B-Instruct', 'object': 'chat.completion.chunk', 'system_fingerprint': None, 'usage': None}\n", - "{'id': 'ea79d72c-99bc-4721-acba-b7c505e6a1c4', 'choices': [{'delta': {'content': None, 'function_call': {'arguments': 'San', 'name': None}, 'role': None, 'tool_calls': None}, 'finish_reason': None, 'index': 0, 'logprobs': None}], 'created': 1724253263, 'model': 'Meta-Llama-3.1-405B-Instruct', 'object': 'chat.completion.chunk', 'system_fingerprint': None, 'usage': None}\n", - "{'id': '6cfce79d-30e0-48f8-b580-fee6d9d6a3a0', 'choices': [{'delta': {'content': None, 'function_call': {'arguments': ' Francisco', 'name': None}, 'role': None, 'tool_calls': None}, 'finish_reason': None, 'index': 0, 'logprobs': None}], 'created': 1724253263, 'model': 'Meta-Llama-3.1-405B-Instruct', 'object': 'chat.completion.chunk', 'system_fingerprint': None, 'usage': None}\n", - "{'id': 'dd932437-7a87-4023-885f-77f8b01cdafd', 'choices': [{'delta': {'content': None, 'function_call': {'arguments': '\"}', 'name': None}, 'role': None, 'tool_calls': None}, 'finish_reason': None, 'index': 0, 'logprobs': None}], 'created': 1724253263, 'model': 'Meta-Llama-3.1-405B-Instruct', 'object': 'chat.completion.chunk', 'system_fingerprint': None, 'usage': None}\n", - "{'id': '3cb441d9-7e22-485f-b2c1-9b38b0971cf4', 'choices': [{'delta': {'content': None, 'function_call': None, 'role': None, 'tool_calls': None}, 'finish_reason': 'function_call', 'index': 0, 'logprobs': None}], 'created': 1724253263, 'model': 'Meta-Llama-3.1-405B-Instruct', 'object': 'chat.completion.chunk', 'system_fingerprint': None, 'usage': None}\n", - "function_calls: [{'arguments': '', 'name': 'get_weather'}, {'arguments': '{\"', 'name': None}, {'arguments': 'location', 'name': None}, {'arguments': '\":', 'name': None}, {'arguments': ' \"', 'name': None}, {'arguments': 'San', 'name': None}, {'arguments': ' Francisco', 'name': None}, {'arguments': '\"}', 'name': None}]\n", - "messages: [{'role': 'user', 'content': '<|begin_of_text|>\\n <|start_header_id|>system<|end_header_id|>\\n You are a helpful AI assistant.\\n \\n You have access to the following tools:\\n Use the function \\'get_current_weather\\' to \\'Get the current weather in a given location\\':\\nParameters format:\\n{\\n \"type\": \"object\",\\n \"properties\": {\\n \"location\": {\\n \"type\": \"string\",\\n \"description\": \"The city and state, e.g. San Francisco, CA\"\\n }\\n },\\n \"required\": [\\n \"location\"\\n ]\\n}\\n\\nUse the function \\'get_humidity_level\\' to \\'Get the current humidity level in a given location\\':\\nParameters format:\\n{\\n \"type\": \"object\",\\n \"properties\": {\\n \"location\": {\\n \"type\": \"string\",\\n \"description\": \"The city and state, e.g. 
San Francisco, CA\"\\n }\\n },\\n \"required\": [\\n \"location\"\\n ]\\n}\\n\\n\\n If you choose to call a function, ONLY reply in the following format with no prefix or suffix:\\n §function_name§{{\"param_name\": \"param_value\"}}\\n\\n Reminder:\\n - Function calls MUST follow the specified format.\\n - Only call one function at a time.\\n - NEVER call more than one function at a time.\\n - Required parameters MUST be specified.\\n - Put the entire function call reply on one line.\\n - If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls.\\n - If you have already called a tool and got the response for the users question please reply with the response.\\n \\n<|eot_id|>\\n <|start_header_id|>user<|end_header_id|>\\n Get the weather in San Francisco.\\n <|eot_id|>\\n '}]\n", - "function name: get_current_weather\n", - "ChatCompletionChunk(id='chatcmpl-9yhb15S04Zx8oaQ3PDewdV3W7AsiS', choices=[Choice(delta=ChoiceDelta(content='', function_call=None, role='assistant', tool_calls=None, refusal=None), finish_reason=None, index=0, logprobs=None)], created=1724254291, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhb15S04Zx8oaQ3PDewdV3W7AsiS', choices=[Choice(delta=ChoiceDelta(content='Hello', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724254291, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhb15S04Zx8oaQ3PDewdV3W7AsiS', choices=[Choice(delta=ChoiceDelta(content='!', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724254291, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhb15S04Zx8oaQ3PDewdV3W7AsiS', choices=[Choice(delta=ChoiceDelta(content=' How', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724254291, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhb15S04Zx8oaQ3PDewdV3W7AsiS', choices=[Choice(delta=ChoiceDelta(content=' can', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724254291, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhb15S04Zx8oaQ3PDewdV3W7AsiS', choices=[Choice(delta=ChoiceDelta(content=' I', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724254291, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhb15S04Zx8oaQ3PDewdV3W7AsiS', choices=[Choice(delta=ChoiceDelta(content=' assist', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724254291, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhb15S04Zx8oaQ3PDewdV3W7AsiS', choices=[Choice(delta=ChoiceDelta(content=' you', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724254291, model='gpt-3.5-turbo-0125', 
object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhb15S04Zx8oaQ3PDewdV3W7AsiS', choices=[Choice(delta=ChoiceDelta(content=' today', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724254291, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhb15S04Zx8oaQ3PDewdV3W7AsiS', choices=[Choice(delta=ChoiceDelta(content='?', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1724254291, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n", - "ChatCompletionChunk(id='chatcmpl-9yhb15S04Zx8oaQ3PDewdV3W7AsiS', choices=[Choice(delta=ChoiceDelta(content=None, function_call=None, role=None, tool_calls=None), finish_reason='stop', index=0, logprobs=None)], created=1724254291, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)\n" - ] - } - ], - "source": [ - "from llmstudio import LLM" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "functions = [\n", - " {\n", - " \"name\": \"get_weather\",\n", - " \"description\": \"Gets the weather in a certain location.\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"location\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Location to get the weather\"\n", - " },\n", - " }\n", - " }\n", - " }\n", - " ]" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "llm = LLM('openai/gpt-3.5-turbo', functions = functions)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "ChatCompletion(id='eaf94781-b466-40ca-913c-c2711f18824c', choices=[Choice(finish_reason='function_call', index=0, logprobs=None, message=ChatCompletionMessage(content=None, role='assistant', function_call=FunctionCall(arguments='{\"location\":\"San Francisco\"}', name='get_weather'), tool_calls=None))], created=1724253232, model='gpt-3.5-turbo', object='chat.completion', system_fingerprint=None, usage=None, session_id=None, chat_input='Get the weather in San francisco', chat_output='{\"location\":\"San Francisco\"}', context=[{'role': 'user', 'content': 'Get the weather in San francisco'}], provider='openai', deployment='gpt-3.5-turbo-0125', timestamp=1724253232.4190779, parameters={'temperature': 1, 'max_tokens': 2048, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}, metrics={'input_tokens': 7, 'output_tokens': 6, 'total_tokens': 13, 'cost_usd': 1.25e-05, 'latency_s': 0.8489010334014893, 'time_to_first_token_s': 0.7715389728546143, 'inter_token_latency_s': 0.010512420109340124, 'tokens_per_second': 9.423948947198873})" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "llm.chat('Get the weather in San francisco', functions = functions)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Azure OpenAI function calling" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "from llmstudio import LLM" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "functions = [\n", - " {\n", - " \"name\": 
\"get_weather\",\n", - " \"description\": \"Gets the weather in a certain location.\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"location\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Location to get the weather\"\n", - " },\n", - " }\n", - " }\n", - " }\n", - " ]" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "llm = LLM('azure/gpt-4',\n", - " api_endpoint = 'https://mdclonetestshu5680824075.openai.azure.com/',\n", - " api_key = 'f730981f93804a7b92be6528fa3621ed',\n", - " api_version = '2024-05-01-preview')" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "ename": "Exception", - "evalue": "{'statusCode': 401, 'message': 'Auth token validation failed. Please provide a valid key or a valid AAD token'}", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mException\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[4], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mllm\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mchat\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mGet the weather in San francisco\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfunctions\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mfunctions\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/Documents/GitHub/LLMstudio/llmstudio/llm/__init__.py:70\u001b[0m, in \u001b[0;36mLLM.chat\u001b[0;34m(self, input, is_stream, retries, **kwargs)\u001b[0m\n\u001b[1;32m 68\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m:\n\u001b[1;32m 69\u001b[0m error_data \u001b[38;5;241m=\u001b[39m response\u001b[38;5;241m.\u001b[39mtext\n\u001b[0;32m---> 70\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m(error_data)\n\u001b[1;32m 72\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m is_stream:\n\u001b[1;32m 73\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mgenerate_chat(response)\n", - "\u001b[0;31mException\u001b[0m: {'statusCode': 401, 'message': 'Auth token validation failed. 
Please provide a valid key or a valid AAD token'}" - ] - } - ], - "source": [ - "llm.chat('Get the weather in San francisco', functions = functions)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Llama Function calling" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running LLMstudio Engine on http://localhost:50001 Running LLMstudio Tracking on http://localhost:50002 \n", - "\n", - "chat_input: Get the weather in San Francisco.\n" - ] - } - ], - "source": [ - "from llmstudio import LLM\n", - "import os\n", - "from dotenv import load_dotenv\n", - "\n", - "load_dotenv()\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "tools = [\n", - " {\n", - " \"type\": \"function\",\n", - " \"function\": {\n", - " \"name\": \"get_current_weather\",\n", - " \"description\": \"Get the current weather in a given location\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"location\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"The city and state, e.g. San Francisco, CA\",\n", - " },\n", - " },\n", - " \"required\": [\"location\"],\n", - " },\n", - " },\n", - " },\n", - " {\n", - " \"type\": \"function\",\n", - " \"function\": {\n", - " \"name\": \"get_humidity_level\",\n", - " \"description\": \"Get the current humidity level in a given location\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"location\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n", - " },\n", - " },\n", - " \"required\": [\"location\"],\n", - " },\n", - " },\n", - " },\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "functions = [\n", - " {\n", - " \"name\": \"get_weather\",\n", - " \"description\": \"Gets the weather in a certain location.\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"location\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Location to get the weather\"\n", - " },\n", - " }\n", - " }\n", - " }\n", - " ]" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "llm = LLM('azure/Meta-Llama-3.1-405B-Instruct',\n", - " base_url = os.getenv('AZURE_BASE_URL'),\n", - " api_key = os.getenv('AZURE_API_KEY'))" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "ChatCompletion(id='709760c1-caf8-40f1-973f-6b0fbb1a21df', choices=[Choice(finish_reason='function_call', index=0, logprobs=None, message=ChatCompletionMessage(content=None, role='assistant', function_call=FunctionCall(arguments='{\"location\": \"San Francisco\"}', name='get_weather'), tool_calls=None))], created=1724256798, model='Meta-Llama-3.1-405B-Instruct', object='chat.completion', system_fingerprint=None, usage=None, session_id=None, chat_input='Get the weather in San Francisco.', chat_output='{\"location\": \"San Francisco\"}', context=[{'role': 'user', 'content': 'Get the weather in San Francisco.'}], provider='azure', deployment='Meta-Llama-3.1-405B-Instruct', timestamp=1724256798.471922, parameters={'temperature': 1, 'max_tokens': 2048, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}, metrics={'input_tokens': 7, 'output_tokens': 7, 'total_tokens': 14, 'cost_usd': 0.14931, 'latency_s': 1.957301139831543, 'time_to_first_token_s': 1.9479811191558838, 'inter_token_latency_s': 0.0002950032552083333, 'tokens_per_second': 5.109075857821582})" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "llm.chat('Get the weather in San Francisco.', functions = functions)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "ChatCompletion(id='c92b7a79-c876-4bf6-a738-1d9ddd431b66', choices=[Choice(finish_reason='tool_calls', index=0, logprobs=None, message=ChatCompletionMessage(content=None, role='assistant', function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='b01666b7-22fb-417b-93ec-de2a1e4ad2ee', function=Function(arguments='{\"location\": \"San Francisco, CA\"}', name='get_current_weather'), type='function')]))], created=1724253279, model='Meta-Llama-3.1-405B-Instruct', object='chat.completion', system_fingerprint=None, usage=None, session_id=None, chat_input='Get the weather in San Francisco.', chat_output='{\"location\": \"San Francisco, CA\"}', context=[{'role': 'user', 'content': 'Get the weather in San Francisco.'}], provider='azure', deployment='Meta-Llama-3.1-405B-Instruct', timestamp=1724253279.6445482, parameters={'temperature': 1, 'max_tokens': 2048, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}, metrics={'input_tokens': 7, 'output_tokens': 9, 'total_tokens': 16, 'cost_usd': 0.18131000000000003, 'latency_s': 2.3323428630828857, 'time_to_first_token_s': 2.127868175506592, 'inter_token_latency_s': 0.00015252286737615412, 
'tokens_per_second': 5.145041147225853})" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "llm.chat('Get the weather in San Francisco.', tools = tools)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Raw Azure OpenAI" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "functions = [\n", - " {\n", - " \"name\": \"get_weather\",\n", - " \"description\": \"Gets the weather in a certain location.\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"location\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Location to get the weather\"\n", - " },\n", - " }\n", - " }\n", - " }\n", - " ]" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"id\": \"chatcmpl-9yhI48RopckokT3VG6ZasGsq7aqKg\",\n", - " \"choices\": [\n", - " {\n", - " \"finish_reason\": \"function_call\",\n", - " \"index\": 0,\n", - " \"logprobs\": null,\n", - " \"message\": {\n", - " \"content\": null,\n", - " \"role\": \"assistant\",\n", - " \"function_call\": {\n", - " \"arguments\": \"{\\\"location\\\":\\\"San Francisco\\\"}\",\n", - " \"name\": \"get_weather\"\n", - " }\n", - " },\n", - " \"content_filter_results\": {}\n", - " }\n", - " ],\n", - " \"created\": 1724253116,\n", - " \"model\": \"gpt-4\",\n", - " \"object\": \"chat.completion\",\n", - " \"system_fingerprint\": \"fp_811936bd4f\",\n", - " \"usage\": {\n", - " \"completion_tokens\": 15,\n", - " \"prompt_tokens\": 62,\n", - " \"total_tokens\": 77\n", - " },\n", - " \"prompt_filter_results\": [\n", - " {\n", - " \"prompt_index\": 0,\n", - " \"content_filter_results\": {\n", - " \"hate\": {\n", - " \"filtered\": false,\n", - " \"severity\": \"safe\"\n", - " },\n", - " \"jailbreak\": {\n", - " \"filtered\": false,\n", - " \"detected\": false\n", - " },\n", - " \"self_harm\": {\n", - " \"filtered\": false,\n", - " \"severity\": \"safe\"\n", - " },\n", - " \"sexual\": {\n", - " \"filtered\": false,\n", - " \"severity\": \"safe\"\n", - " },\n", - " \"violence\": {\n", - " \"filtered\": false,\n", - " \"severity\": \"safe\"\n", - " }\n", - " }\n", - " }\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "import os\n", - "from openai import AzureOpenAI\n", - "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", - "\n", - "endpoint = os.getenv(\"ENDPOINT_URL\", \"https://mdclonetestshu5680824075.openai.azure.com/\")\n", - "deployment = os.getenv(\"DEPLOYMENT_NAME\", \"gpt-4\")\n", - "\n", - "token_provider = get_bearer_token_provider(\n", - " DefaultAzureCredential(),\n", - " \"https://cognitiveservices.azure.com/.default\")\n", - " \n", - "client = AzureOpenAI(\n", - " azure_endpoint=endpoint,\n", - " # azure_ad_token_provider=token_provider,\n", - " api_key = 'f730981f93804a7b92be6528fa3621ed',\n", - " api_version=\"2024-05-01-preview\",\n", - ")\n", - " \n", - "completion = client.chat.completions.create(\n", - " model=deployment,\n", - " messages= [\n", - " {\n", - " \"role\": \"user\",\n", - " \"content\": \"What is the weather in San Francisco?\"\n", - " }],\n", - " max_tokens=800,\n", - " temperature=0.7,\n", - " top_p=0.95,\n", - " frequency_penalty=0,\n", - " presence_penalty=0,\n", - " stop=None,\n", - " stream=False,\n", - " functions=functions\n", - ")\n", - "\n", - "# for chunk in completion:\n", - "# print(chunk)\n", 
- "print(completion.to_json())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# LLMstudio function output" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running LLMstudio Engine on http://localhost:50001 \n", - "Running LLMstudio Tracking on http://localhost:50002 \n" - ] - } - ], - "source": [ - "from llmstudio.llm.langchain import ChatLLMstudio" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "functions = [\n", - " {\n", - " \"name\": \"get_weather\",\n", - " \"description\": \"Gets the weather in a certain location.\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"location\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Location to get the weather\"\n", - " },\n", - " }\n", - " }\n", - " }\n", - " ]" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "chat_input: [{'role': 'user', 'content': 'What is the weather in San Francisco'}]\n", - "chat_input: [{'role': 'user', 'content': 'What is the weather in San Francisco'}]\n", - "chat_input: [{'role': 'user', 'content': 'Hello'}]\n", - "chat_input: [{'role': 'system', 'content': '\\nYou are working with a pandas dataframe in Python. The name of the dataframe is `df`.\\nThis is the result of `print(df.head())`:\\n| | hashed_patient_id | gender | age_at_admission | admitting_department | hospital_length_of_stay | colonoscopy_indication_path | preparation_administered_dose | preparation_medication | cannot_be_scored_value_textual | failed_colonoscopy_all_row_data | dynamic_tables_path | diverticulosis_diagnosis | boston_total_value_numeric | prior_failed_colonoscopy_all_row_data | prior_failed_colonoscopy_age | prior_failed_colonoscopy_event_date_days_from_reference | bmi_numeric_result | constipation_diagnosis | obesity_diagnosis | diabetes_diagnosis | hypothyroidism_diagnosis | ibd_diagnosis | parkinson_diagnosis | dementia_diagnosis | calcium_numeric_result_average | calcium_numeric_result_max | tx_glp1_medication | tx_laxatives_medication | tx_opiates_medication | days_to_colonoscopy | all_failed_5_and_below | just_boston_5_and_below | all_failed_4_and_below | time_from_admission_to_procedure | time_from_preperation_to_procedure | time_from_consult_to_procedure 
|\\n|---:|--------------------:|:---------|-------------------:|-----------------------:|--------------------------:|:------------------------------|--------------------------------:|-------------------------:|:---------------------------------|:----------------------------------|:----------------------|:---------------------------|-----------------------------:|:----------------------------------------|-------------------------------:|----------------------------------------------------------:|---------------------:|:-------------------------|:--------------------|:---------------------|:---------------------------|:----------------|:----------------------|:---------------------|---------------------------------:|-----------------------------:|:---------------------|:--------------------------|:------------------------|----------------------:|:-------------------------|:--------------------------|:-------------------------|-----------------------------------:|-------------------------------------:|---------------------------------:|\\n| 0 | 1095 | נקבה | 61.6 | 1 | 31.9 | nan | nan | nan | True | False | False | False | 0 | False | nan | nan | nan | False | False | False | False | False | False | False | 8.1 | 10.6 | False | False | False | 30.7 | FAILURE | FAILURE | FAILURE | 30.7 | nan | nan |\\n| 1 | 356 | זכר | 49.2 | 1 | 99.3 | RECTAL_BLEEDING_14111_ | nan | nan | False | False | False | False | 3 | False | nan | nan | nan | False | False | False | False | False | False | False | 8 | 8.9 | False | False | False | 75.9 | FAILURE | FAILURE | FAILURE | 75.9 | nan | nan |\\n| 2 | 61 | נקבה | 74.3 | 1 | 15.8 | RECTAL_BLEEDING_14111_ | nan | nan | True | False | False | False | nan | False | nan | nan | nan | False | False | False | False | False | False | False | 7.9 | 8.4 | False | False | False | 15.3 | nan | nan | nan | 15.3 | nan | nan |\\n| 3 | 120 | נקבה | 48.6 | 1 | 67 | nan | nan | nan | False | True | False | False | nan | False | nan | nan | nan | False | False | False | False | False | False | False | 7.8 | 9.1 | False | False | False | 63 | FAILURE | nan | FAILURE | 63 | nan | nan |\\n| 4 | 1117 | זכר | 67.3 | 1 | 125 | nan | nan | nan | True | False | False | False | nan | False | nan | nan | 29.4 | False | False | False | False | False | False | False | 8.3 | 10.8 | False | False | False | 22.9 | nan | nan | nan | 22.9 | nan | nan |'}, {'role': 'user', 'content': 'how many rows are there?'}]\n" - ] - }, - { - "data": { - "text/plain": [ - "AIMessage(content='', additional_kwargs={'function_call': {'arguments': '{\"location\":\"San Francisco\"}', 'name': 'get_weather'}}, response_metadata={'token_usage': None, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'function_call', 'logprobs': None}, id='run-6c2b4f28-40b7-4063-8b80-9acdd00e3c13-0')" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "llm = ChatLLMstudio('openai/gpt-3.5-turbo')\n", - "llm.invoke('What is the weather in San Francisco',functions = functions)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AIMessage(content='', additional_kwargs={'function_call': {'arguments': '{\"location\": \"San Francisco\"}', 'name': 'get_weather'}}, response_metadata={'token_usage': None, 'model_name': 'Meta-Llama-3.1-405B-Instruct', 'system_fingerprint': None, 'finish_reason': 'function_call', 'logprobs': None}, id='run-4c689530-9e75-4967-88a7-52c75349505b-0')" - ] 
- }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "llm = ChatLLMstudio('azure/Meta-Llama-3.1-405B-Instruct')\n", - "llm.invoke('What is the weather in San Francisco',functions = functions)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Pandas agent tests" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running LLMstudio Engine on http://localhost:50001 Running LLMstudio Tracking on http://localhost:50002 \n", - "\n" - ] - } - ], - "source": [ - "from llmstudio.llm.langchain import ChatLLMstudio\n", - "from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent\n", - "import pandas as pd\n", - "from langchain.agents.agent_types import AgentType\n", - "from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent\n", - "from langchain_openai import ChatOpenAI" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
[pandas text/html rendering of df.head() lost to HTML tag stripping during extraction; the equivalent text/plain table follows below]
" - ], - "text/plain": [ - " hashed_patient_id gender age_at_admission admitting_department \\\n", - "0 1095 נקבה 61.6 1 \n", - "1 356 זכר 49.2 1 \n", - "2 61 נקבה 74.3 1 \n", - "3 120 נקבה 48.6 1 \n", - "4 1117 זכר 67.3 1 \n", - "\n", - " hospital_length_of_stay colonoscopy_indication_path \\\n", - "0 31.9 NaN \n", - "1 99.3 RECTAL_BLEEDING_14111_ \n", - "2 15.8 RECTAL_BLEEDING_14111_ \n", - "3 67.0 NaN \n", - "4 125.0 NaN \n", - "\n", - " preparation_administered_dose preparation_medication \\\n", - "0 NaN NaN \n", - "1 NaN NaN \n", - "2 NaN NaN \n", - "3 NaN NaN \n", - "4 NaN NaN \n", - "\n", - " cannot_be_scored_value_textual failed_colonoscopy_all_row_data ... \\\n", - "0 True False ... \n", - "1 False False ... \n", - "2 True False ... \n", - "3 False True ... \n", - "4 True False ... \n", - "\n", - " tx_glp1_medication tx_laxatives_medication tx_opiates_medication \\\n", - "0 False False False \n", - "1 False False False \n", - "2 False False False \n", - "3 False False False \n", - "4 False False False \n", - "\n", - " days_to_colonoscopy all_failed_5_and_below just_boston_5_and_below \\\n", - "0 30.7 FAILURE FAILURE \n", - "1 75.9 FAILURE FAILURE \n", - "2 15.3 NaN NaN \n", - "3 63.0 FAILURE NaN \n", - "4 22.9 NaN NaN \n", - "\n", - " all_failed_4_and_below time_from_admission_to_procedure \\\n", - "0 FAILURE 30.7 \n", - "1 FAILURE 75.9 \n", - "2 NaN 15.3 \n", - "3 FAILURE 63.0 \n", - "4 NaN 22.9 \n", - "\n", - " time_from_preperation_to_procedure time_from_consult_to_procedure \n", - "0 NaN NaN \n", - "1 NaN NaN \n", - "2 NaN NaN \n", - "3 NaN NaN \n", - "4 NaN NaN \n", - "\n", - "[5 rows x 36 columns]" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "df = pd.read_csv('Colonoscopy.csv')\n", - "df.head()" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "chat_input: [{'role': 'user', 'content': 'Hello'}]\n", - "chat_input: [{'role': 'system', 'content': '\\nYou are working with a pandas dataframe in Python. 
The name of the dataframe is `df`.\\nThis is the result of `print(df.head())`:\\n| | hashed_patient_id | gender | age_at_admission | admitting_department | hospital_length_of_stay | colonoscopy_indication_path | preparation_administered_dose | preparation_medication | cannot_be_scored_value_textual | failed_colonoscopy_all_row_data | dynamic_tables_path | diverticulosis_diagnosis | boston_total_value_numeric | prior_failed_colonoscopy_all_row_data | prior_failed_colonoscopy_age | prior_failed_colonoscopy_event_date_days_from_reference | bmi_numeric_result | constipation_diagnosis | obesity_diagnosis | diabetes_diagnosis | hypothyroidism_diagnosis | ibd_diagnosis | parkinson_diagnosis | dementia_diagnosis | calcium_numeric_result_average | calcium_numeric_result_max | tx_glp1_medication | tx_laxatives_medication | tx_opiates_medication | days_to_colonoscopy | all_failed_5_and_below | just_boston_5_and_below | all_failed_4_and_below | time_from_admission_to_procedure | time_from_preperation_to_procedure | time_from_consult_to_procedure |\\n|---:|--------------------:|:---------|-------------------:|-----------------------:|--------------------------:|:------------------------------|--------------------------------:|-------------------------:|:---------------------------------|:----------------------------------|:----------------------|:---------------------------|-----------------------------:|:----------------------------------------|-------------------------------:|----------------------------------------------------------:|---------------------:|:-------------------------|:--------------------|:---------------------|:---------------------------|:----------------|:----------------------|:---------------------|---------------------------------:|-----------------------------:|:---------------------|:--------------------------|:------------------------|----------------------:|:-------------------------|:--------------------------|:-------------------------|-----------------------------------:|-------------------------------------:|---------------------------------:|\\n| 0 | 1095 | נקבה | 61.6 | 1 | 31.9 | nan | nan | nan | True | False | False | False | 0 | False | nan | nan | nan | False | False | False | False | False | False | False | 8.1 | 10.6 | False | False | False | 30.7 | FAILURE | FAILURE | FAILURE | 30.7 | nan | nan |\\n| 1 | 356 | זכר | 49.2 | 1 | 99.3 | RECTAL_BLEEDING_14111_ | nan | nan | False | False | False | False | 3 | False | nan | nan | nan | False | False | False | False | False | False | False | 8 | 8.9 | False | False | False | 75.9 | FAILURE | FAILURE | FAILURE | 75.9 | nan | nan |\\n| 2 | 61 | נקבה | 74.3 | 1 | 15.8 | RECTAL_BLEEDING_14111_ | nan | nan | True | False | False | False | nan | False | nan | nan | nan | False | False | False | False | False | False | False | 7.9 | 8.4 | False | False | False | 15.3 | nan | nan | nan | 15.3 | nan | nan |\\n| 3 | 120 | נקבה | 48.6 | 1 | 67 | nan | nan | nan | False | True | False | False | nan | False | nan | nan | nan | False | False | False | False | False | False | False | 7.8 | 9.1 | False | False | False | 63 | FAILURE | nan | FAILURE | 63 | nan | nan |\\n| 4 | 1117 | זכר | 67.3 | 1 | 125 | nan | nan | nan | True | False | False | False | nan | False | nan | nan | 29.4 | False | False | False | False | False | False | False | 8.3 | 10.8 | False | False | False | 22.9 | nan | nan | nan | 22.9 | nan | nan |'}, {'role': 'user', 'content': 'how many rows are there?'}]\n", - "chat_input: [{'role': 'system', 'content': 
'\\nYou are working with a pandas dataframe in Python. The name of the dataframe is `df`.\\nThis is the result of `print(df.head())`:\\n| | hashed_patient_id | gender | age_at_admission | admitting_department | hospital_length_of_stay | colonoscopy_indication_path | preparation_administered_dose | preparation_medication | cannot_be_scored_value_textual | failed_colonoscopy_all_row_data | dynamic_tables_path | diverticulosis_diagnosis | boston_total_value_numeric | prior_failed_colonoscopy_all_row_data | prior_failed_colonoscopy_age | prior_failed_colonoscopy_event_date_days_from_reference | bmi_numeric_result | constipation_diagnosis | obesity_diagnosis | diabetes_diagnosis | hypothyroidism_diagnosis | ibd_diagnosis | parkinson_diagnosis | dementia_diagnosis | calcium_numeric_result_average | calcium_numeric_result_max | tx_glp1_medication | tx_laxatives_medication | tx_opiates_medication | days_to_colonoscopy | all_failed_5_and_below | just_boston_5_and_below | all_failed_4_and_below | time_from_admission_to_procedure | time_from_preperation_to_procedure | time_from_consult_to_procedure |\\n|---:|--------------------:|:---------|-------------------:|-----------------------:|--------------------------:|:------------------------------|--------------------------------:|-------------------------:|:---------------------------------|:----------------------------------|:----------------------|:---------------------------|-----------------------------:|:----------------------------------------|-------------------------------:|----------------------------------------------------------:|---------------------:|:-------------------------|:--------------------|:---------------------|:---------------------------|:----------------|:----------------------|:---------------------|---------------------------------:|-----------------------------:|:---------------------|:--------------------------|:------------------------|----------------------:|:-------------------------|:--------------------------|:-------------------------|-----------------------------------:|-------------------------------------:|---------------------------------:|\\n| 0 | 1095 | נקבה | 61.6 | 1 | 31.9 | nan | nan | nan | True | False | False | False | 0 | False | nan | nan | nan | False | False | False | False | False | False | False | 8.1 | 10.6 | False | False | False | 30.7 | FAILURE | FAILURE | FAILURE | 30.7 | nan | nan |\\n| 1 | 356 | זכר | 49.2 | 1 | 99.3 | RECTAL_BLEEDING_14111_ | nan | nan | False | False | False | False | 3 | False | nan | nan | nan | False | False | False | False | False | False | False | 8 | 8.9 | False | False | False | 75.9 | FAILURE | FAILURE | FAILURE | 75.9 | nan | nan |\\n| 2 | 61 | נקבה | 74.3 | 1 | 15.8 | RECTAL_BLEEDING_14111_ | nan | nan | True | False | False | False | nan | False | nan | nan | nan | False | False | False | False | False | False | False | 7.9 | 8.4 | False | False | False | 15.3 | nan | nan | nan | 15.3 | nan | nan |\\n| 3 | 120 | נקבה | 48.6 | 1 | 67 | nan | nan | nan | False | True | False | False | nan | False | nan | nan | nan | False | False | False | False | False | False | False | 7.8 | 9.1 | False | False | False | 63 | FAILURE | nan | FAILURE | 63 | nan | nan |\\n| 4 | 1117 | זכר | 67.3 | 1 | 125 | nan | nan | nan | True | False | False | False | nan | False | nan | nan | 29.4 | False | False | False | False | False | False | False | 8.3 | 10.8 | False | False | False | 22.9 | nan | nan | nan | 22.9 | nan | nan |'}, {'role': 'user', 'content': 'how many rows are there?'}, 
{'role': 'assistant', 'content': None, 'function_call': {'arguments': '{\"query\":\"df.shape[0]\"}', 'name': 'python_repl_ast'}}, {'role': 'function', 'content': '1192', 'name': 'python_repl_ast'}]\n" - ] - }, - { - "data": { - "text/plain": [ - "AIMessage(content='Hello! How can I assist you today?', response_metadata={'token_usage': None, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-62b44986-0ef8-4517-8388-c63f183409a6-0')" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "llm = ChatLLMstudio('openai/gpt-3.5-turbo')\n", - "# llm = ChatLLMstudio('azure/Meta-Llama-3.1-405B-Instruct')\n", - "llm.invoke('Hello')" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "agent = create_pandas_dataframe_agent(\n", - " # ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\"),\n", - " llm,\n", - " # ChatLLMstudio('azure/Meta-Llama-3.1-405B-Instruct'),\n", - " df,\n", - " verbose=True,\n", - " agent_type=AgentType.OPENAI_FUNCTIONS,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", - "\u001b[32;1m\u001b[1;3m\n", - "Invoking: `python_repl_ast` with `{'query': 'df.shape[0]'}`\n", - "\n", - "\n", - "\u001b[0m\u001b[36;1m\u001b[1;3m1192\u001b[0m\u001b[32;1m\u001b[1;3mThere are 1192 rows in the dataframe.\u001b[0m\n", - "\n", - "\u001b[1m> Finished chain.\u001b[0m\n" - ] - }, - { - "data": { - "text/plain": [ - "{'input': 'how many rows are there?',\n", - " 'output': 'There are 1192 rows in the dataframe.'}" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "agent.invoke(\"how many rows are there?\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "chat_input: [{'role': 'system', 'content': '\\nYou are working with a pandas dataframe in Python. 
The name of the dataframe is `df`.\\nThis is the result of `print(df.head())`:\\n| | hashed_patient_id | gender | age_at_admission | admitting_department | hospital_length_of_stay | colonoscopy_indication_path | preparation_administered_dose | preparation_medication | cannot_be_scored_value_textual | failed_colonoscopy_all_row_data | dynamic_tables_path | diverticulosis_diagnosis | boston_total_value_numeric | prior_failed_colonoscopy_all_row_data | prior_failed_colonoscopy_age | prior_failed_colonoscopy_event_date_days_from_reference | bmi_numeric_result | constipation_diagnosis | obesity_diagnosis | diabetes_diagnosis | hypothyroidism_diagnosis | ibd_diagnosis | parkinson_diagnosis | dementia_diagnosis | calcium_numeric_result_average | calcium_numeric_result_max | tx_glp1_medication | tx_laxatives_medication | tx_opiates_medication | days_to_colonoscopy | all_failed_5_and_below | just_boston_5_and_below | all_failed_4_and_below | time_from_admission_to_procedure | time_from_preperation_to_procedure | time_from_consult_to_procedure |\\n|---:|--------------------:|:---------|-------------------:|-----------------------:|--------------------------:|:------------------------------|--------------------------------:|-------------------------:|:---------------------------------|:----------------------------------|:----------------------|:---------------------------|-----------------------------:|:----------------------------------------|-------------------------------:|----------------------------------------------------------:|---------------------:|:-------------------------|:--------------------|:---------------------|:---------------------------|:----------------|:----------------------|:---------------------|---------------------------------:|-----------------------------:|:---------------------|:--------------------------|:------------------------|----------------------:|:-------------------------|:--------------------------|:-------------------------|-----------------------------------:|-------------------------------------:|---------------------------------:|\\n| 0 | 1095 | נקבה | 61.6 | 1 | 31.9 | nan | nan | nan | True | False | False | False | 0 | False | nan | nan | nan | False | False | False | False | False | False | False | 8.1 | 10.6 | False | False | False | 30.7 | FAILURE | FAILURE | FAILURE | 30.7 | nan | nan |\\n| 1 | 356 | זכר | 49.2 | 1 | 99.3 | RECTAL_BLEEDING_14111_ | nan | nan | False | False | False | False | 3 | False | nan | nan | nan | False | False | False | False | False | False | False | 8 | 8.9 | False | False | False | 75.9 | FAILURE | FAILURE | FAILURE | 75.9 | nan | nan |\\n| 2 | 61 | נקבה | 74.3 | 1 | 15.8 | RECTAL_BLEEDING_14111_ | nan | nan | True | False | False | False | nan | False | nan | nan | nan | False | False | False | False | False | False | False | 7.9 | 8.4 | False | False | False | 15.3 | nan | nan | nan | 15.3 | nan | nan |\\n| 3 | 120 | נקבה | 48.6 | 1 | 67 | nan | nan | nan | False | True | False | False | nan | False | nan | nan | nan | False | False | False | False | False | False | False | 7.8 | 9.1 | False | False | False | 63 | FAILURE | nan | FAILURE | 63 | nan | nan |\\n| 4 | 1117 | זכר | 67.3 | 1 | 125 | nan | nan | nan | True | False | False | False | nan | False | nan | nan | 29.4 | False | False | False | False | False | False | False | 8.3 | 10.8 | False | False | False | 22.9 | nan | nan | nan | 22.9 | nan | nan |'}, {'role': 'user', 'content': 'how many rows are there?'}]\n", - "chat_input: [{'role': 'system', 'content': 
'\\nYou are working with a pandas dataframe in Python. The name of the dataframe is `df`.\\nThis is the result of `print(df.head())`:\\n| | hashed_patient_id | gender | age_at_admission | admitting_department | hospital_length_of_stay | colonoscopy_indication_path | preparation_administered_dose | preparation_medication | cannot_be_scored_value_textual | failed_colonoscopy_all_row_data | dynamic_tables_path | diverticulosis_diagnosis | boston_total_value_numeric | prior_failed_colonoscopy_all_row_data | prior_failed_colonoscopy_age | prior_failed_colonoscopy_event_date_days_from_reference | bmi_numeric_result | constipation_diagnosis | obesity_diagnosis | diabetes_diagnosis | hypothyroidism_diagnosis | ibd_diagnosis | parkinson_diagnosis | dementia_diagnosis | calcium_numeric_result_average | calcium_numeric_result_max | tx_glp1_medication | tx_laxatives_medication | tx_opiates_medication | days_to_colonoscopy | all_failed_5_and_below | just_boston_5_and_below | all_failed_4_and_below | time_from_admission_to_procedure | time_from_preperation_to_procedure | time_from_consult_to_procedure |\\n|---:|--------------------:|:---------|-------------------:|-----------------------:|--------------------------:|:------------------------------|--------------------------------:|-------------------------:|:---------------------------------|:----------------------------------|:----------------------|:---------------------------|-----------------------------:|:----------------------------------------|-------------------------------:|----------------------------------------------------------:|---------------------:|:-------------------------|:--------------------|:---------------------|:---------------------------|:----------------|:----------------------|:---------------------|---------------------------------:|-----------------------------:|:---------------------|:--------------------------|:------------------------|----------------------:|:-------------------------|:--------------------------|:-------------------------|-----------------------------------:|-------------------------------------:|---------------------------------:|\\n| 0 | 1095 | נקבה | 61.6 | 1 | 31.9 | nan | nan | nan | True | False | False | False | 0 | False | nan | nan | nan | False | False | False | False | False | False | False | 8.1 | 10.6 | False | False | False | 30.7 | FAILURE | FAILURE | FAILURE | 30.7 | nan | nan |\\n| 1 | 356 | זכר | 49.2 | 1 | 99.3 | RECTAL_BLEEDING_14111_ | nan | nan | False | False | False | False | 3 | False | nan | nan | nan | False | False | False | False | False | False | False | 8 | 8.9 | False | False | False | 75.9 | FAILURE | FAILURE | FAILURE | 75.9 | nan | nan |\\n| 2 | 61 | נקבה | 74.3 | 1 | 15.8 | RECTAL_BLEEDING_14111_ | nan | nan | True | False | False | False | nan | False | nan | nan | nan | False | False | False | False | False | False | False | 7.9 | 8.4 | False | False | False | 15.3 | nan | nan | nan | 15.3 | nan | nan |\\n| 3 | 120 | נקבה | 48.6 | 1 | 67 | nan | nan | nan | False | True | False | False | nan | False | nan | nan | nan | False | False | False | False | False | False | False | 7.8 | 9.1 | False | False | False | 63 | FAILURE | nan | FAILURE | 63 | nan | nan |\\n| 4 | 1117 | זכר | 67.3 | 1 | 125 | nan | nan | nan | True | False | False | False | nan | False | nan | nan | 29.4 | False | False | False | False | False | False | False | 8.3 | 10.8 | False | False | False | 22.9 | nan | nan | nan | 22.9 | nan | nan |'}, {'role': 'user', 'content': 'how many rows are there?'}, 
{'role': 'assistant', 'content': None, 'function_call': {'arguments': '{\"query\":\"len(df)\"}', 'name': 'python_repl_ast'}}, {'role': 'function', 'content': '1192', 'name': 'python_repl_ast'}]" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "llmstudiodev", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/examples/09_azure_llama_tests.ipynb b/examples/09_azure_llama_tests.ipynb deleted file mode 100644 index e69de29b..00000000 diff --git a/llmstudio/llm/langchain.py b/llmstudio/llm/langchain.py index a65cd922..e25610f3 100644 --- a/llmstudio/llm/langchain.py +++ b/llmstudio/llm/langchain.py @@ -22,7 +22,7 @@ def __init__(self, model_id: str, **kwargs): @property def _llm_type(self): return "LLMstudio" - + # @property # def model_id(self) -> str: # return self.model_id