Run python autoformatter on entire Python libraries #770

Merged 1 commit on Jan 5, 2024
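This PR runs an automated formatter across the Python code, so the diffs below mostly collapse wrapped calls, signatures, and imports onto single lines and reorder imports. As a minimal sketch of how that kind of output can be produced (an assumption for illustration only: black with a raised line length; the PR itself does not state which tool or settings were used):

import black

# Example input: a call wrapped across several lines, as on the "before" side of the diffs below.
SRC = '''st.markdown(
    "Ask a math, physics, or general question."
)
'''

# A large line_length lets the formatter join short wrapped calls onto one line,
# matching the style of the "after" side of these diffs.
print(black.format_str(SRC, mode=black.Mode(line_length=150)))
# st.markdown("Ask a math, physics, or general question.")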
14 changes: 5 additions & 9 deletions cookbooks/Basic-Prompt-Routing/assistant_app.py
@@ -1,14 +1,14 @@
import asyncio
import os

# Create ~/.env file with this line: export OPENAI_API_KEY=<your key here>
# You can get your key from https://platform.openai.com/api-keys
import dotenv
import openai
import streamlit as st

from aiconfig import AIConfigRuntime

# Create ~/.env file with this line: export OPENAI_API_KEY=<your key here>
# You can get your key from https://platform.openai.com/api-keys
import dotenv
dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

@@ -32,12 +32,8 @@ async def assistant_response(prompt):

# Streamlit Setup
st.title("AI Teaching Assistant")
st.markdown(
"Ask a math, physics, or general question. Based on your question, an AI math prof, physics prof, or general assistant will respond."
)
st.markdown(
"**This is a simple demo of prompt routing - based on your question, an LLM decides which AI teacher responds.**"
)
st.markdown("Ask a math, physics, or general question. Based on your question, an AI math prof, physics prof, or general assistant will respond.")
st.markdown("**This is a simple demo of prompt routing - based on your question, an LLM decides which AI teacher responds.**")

# Chat setup
if "messages" not in st.session_state:
20 changes: 5 additions & 15 deletions cookbooks/Cli-Mate/cli-mate.py
@@ -53,19 +53,13 @@ async def query(aiconfig_path: str, question: str) -> list[ExecuteResult]:
return result


async def get_mod_result(
aiconfig_path: str, source_code: str, question: str
) -> list[ExecuteResult]:
question_about_code = (
f"QUERY ABOUT SOURCE CODE:\n{question}\nSOURCE CODE:\n```{source_code}\n```"
)
async def get_mod_result(aiconfig_path: str, source_code: str, question: str) -> list[ExecuteResult]:
question_about_code = f"QUERY ABOUT SOURCE CODE:\n{question}\nSOURCE CODE:\n```{source_code}\n```"

return await query(aiconfig_path, question_about_code)


async def mod_code(
aiconfig_path: str, source_code_file: str, question: str, update_file: bool = False
):
async def mod_code(aiconfig_path: str, source_code_file: str, question: str, update_file: bool = False):
# read source code from file
with open(source_code_file, "r", encoding="utf8") as file:
source_code = file.read()
@@ -99,9 +93,7 @@ def signal_handler(_: int, __: FrameType | None):
i = 0
while True:
try:
user_input = await event_loop.run_in_executor(
None, session.prompt, "Query: [ctrl-D to exit] "
)
user_input = await event_loop.run_in_executor(None, session.prompt, "Query: [ctrl-D to exit] ")
except KeyboardInterrupt:
continue
except EOFError:
@@ -152,9 +144,7 @@ async def main():
subparsers = parser.add_subparsers(dest="command")

loop_parser = subparsers.add_parser("loop")
loop_parser.add_argument(
"-scf", "--source-code-file", help="Specify a source code file."
)
loop_parser.add_argument("-scf", "--source-code-file", help="Specify a source code file.")

args = parser.parse_args()

26 changes: 5 additions & 21 deletions cookbooks/HuggingFace/hf.py
@@ -3,10 +3,7 @@

# HuggingFace API imports
from huggingface_hub import InferenceClient
from huggingface_hub.inference._text_generation import (
TextGenerationResponse,
TextGenerationStreamResponse,
)
from huggingface_hub.inference._text_generation import TextGenerationResponse, TextGenerationStreamResponse

# ModelParser Utils
# Type hint imports
@@ -166,14 +163,7 @@ def id(self) -> str:
"""
return "HuggingFaceTextParser"

def serialize(
self,
prompt_name: str,
data: Any,
ai_config: "AIConfigRuntime",
parameters: Optional[Dict] = None,
**kwargs
) -> List[Prompt]:
def serialize(self, prompt_name: str, data: Any, ai_config: "AIConfigRuntime", parameters: Optional[Dict] = None, **kwargs) -> List[Prompt]:
"""
Defines how a prompt and model inference settings get serialized in the .aiconfig.

@@ -196,9 +186,7 @@ def serialize(
prompt = Prompt(
name=prompt_name,
input=prompt_input,
metadata=PromptMetadata(
model=model_metadata, parameters=parameters, **kwargs
),
metadata=PromptMetadata(model=model_metadata, parameters=parameters, **kwargs),
)
return [prompt]

@@ -230,9 +218,7 @@ async def deserialize(

return completion_data

async def run_inference(
self, prompt: Prompt, aiconfig, options, parameters
) -> List[Output]:
async def run_inference(self, prompt: Prompt, aiconfig, options, parameters) -> List[Output]:
"""
Invoked to run a prompt in the .aiconfig. This method should perform
the actual model inference based on the provided prompt and inference settings.
@@ -247,9 +233,7 @@ async def run_inference(
completion_data = await self.deserialize(prompt, aiconfig, options, parameters)

# if stream enabled in runtime options and config, then stream. Otherwise don't stream.
stream = (options.stream if options else False) and (
not "stream" in completion_data or completion_data.get("stream") != False
)
stream = (options.stream if options else False) and (not "stream" in completion_data or completion_data.get("stream") != False)

response = self.client.text_generation(**completion_data)
response_is_detailed = completion_data.get("details", False)
26 changes: 5 additions & 21 deletions cookbooks/HuggingFace/python/hf.py
@@ -3,10 +3,7 @@

# HuggingFace API imports
from huggingface_hub import InferenceClient
from huggingface_hub.inference._text_generation import (
TextGenerationResponse,
TextGenerationStreamResponse,
)
from huggingface_hub.inference._text_generation import TextGenerationResponse, TextGenerationStreamResponse

# ModelParser Utils
# Type hint imports
@@ -166,14 +163,7 @@ def id(self) -> str:
"""
return "HuggingFaceTextParser"

def serialize(
self,
prompt_name: str,
data: Any,
ai_config: "AIConfigRuntime",
parameters: Optional[Dict] = None,
**kwargs
) -> List[Prompt]:
def serialize(self, prompt_name: str, data: Any, ai_config: "AIConfigRuntime", parameters: Optional[Dict] = None, **kwargs) -> List[Prompt]:
"""
Defines how a prompt and model inference settings get serialized in the .aiconfig.

@@ -196,9 +186,7 @@ def serialize(
prompt = Prompt(
name=prompt_name,
input=prompt_input,
metadata=PromptMetadata(
model=model_metadata, parameters=parameters, **kwargs
),
metadata=PromptMetadata(model=model_metadata, parameters=parameters, **kwargs),
)
return [prompt]

@@ -230,9 +218,7 @@ async def deserialize(

return completion_data

async def run_inference(
self, prompt: Prompt, aiconfig, options, parameters
) -> List[Output]:
async def run_inference(self, prompt: Prompt, aiconfig, options, parameters) -> List[Output]:
"""
Invoked to run a prompt in the .aiconfig. This method should perform
the actual model inference based on the provided prompt and inference settings.
@@ -247,9 +233,7 @@ async def run_inference(
completion_data = await self.deserialize(prompt, aiconfig, options, parameters)

# if stream enabled in runtime options and config, then stream. Otherwise don't stream.
stream = (options.stream if options else False) and (
not "stream" in completion_data or completion_data.get("stream") != False
)
stream = (options.stream if options else False) and (not "stream" in completion_data or completion_data.get("stream") != False)

response = self.client.text_generation(**completion_data)
response_is_detailed = completion_data.get("details", False)
10 changes: 6 additions & 4 deletions cookbooks/Wizard-GPT/wizard-gpt.py
@@ -1,12 +1,14 @@
import asyncio
import os

from aiconfig import AIConfigRuntime, InferenceOptions, Prompt
import dotenv

# Create ~/.env file with this line: export OPENAI_API_KEY=<your key here>
# You can get your key from https://platform.openai.com/api-keys
# You can get your key from https://platform.openai.com/api-keys
import openai
import dotenv
import os

from aiconfig import AIConfigRuntime, InferenceOptions, Prompt

dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

35 changes: 8 additions & 27 deletions python/src/aiconfig/ChatCompletion.py
@@ -29,10 +29,7 @@ def validate_and_add_prompts_to_config(prompts: List[Prompt], aiconfig) -> None:
in_config = False
for config_prompt in aiconfig.prompts:
# check for duplicates (same input and settings.)
if (
config_prompt.input == new_prompt.input
and new_prompt.metadata == config_prompt.metadata
):
if config_prompt.input == new_prompt.input and new_prompt.metadata == config_prompt.metadata:
in_config = True
# update outputs if different
if config_prompt.outputs != new_prompt.outputs:
@@ -58,9 +55,7 @@ def extract_outputs_from_response(response) -> List[Output]:

response = response.model_dump(exclude_none=True)

response_without_choices = {
key: copy.deepcopy(value) for key, value in response.items() if key != "choices"
}
response_without_choices = {key: copy.deepcopy(value) for key, value in response.items() if key != "choices"}
for i, choice in enumerate(response.get("choices")):
response_without_choices.update({"finish_reason": choice.get("finish_reason")})
output = ExecuteResult(
@@ -77,7 +72,7 @@ def extract_outputs_from_response(response) -> List[Output]:


def async_run_serialize_helper(
aiconfig: AIConfigRuntime,
aiconfig: AIConfigRuntime,
request_kwargs: Dict,
) -> List[Prompt]:
"""
@@ -88,9 +83,7 @@ def async_run_serialize_helper(
serialized_prompts = None

async def run_and_await_serialize():
result = await aiconfig.serialize(
request_kwargs.get("model"), request_kwargs, "prompt"
)
result = await aiconfig.serialize(request_kwargs.get("model"), request_kwargs, "prompt")
return result

# serialize prompts from ChatCompletion kwargs
@@ -125,17 +118,9 @@ def _get_aiconfig_runtime(output_aiconfig_path: str) -> AIConfigRuntime:
except IOError:
return AIConfigRuntime.create(**(aiconfig_settings or {}))

output_aiconfig = (
output_aiconfig_ref
if isinstance(output_aiconfig_ref, AIConfigRuntime)
else _get_aiconfig_runtime(output_aiconfig_ref)
)
output_aiconfig = output_aiconfig_ref if isinstance(output_aiconfig_ref, AIConfigRuntime) else _get_aiconfig_runtime(output_aiconfig_ref)

output_config_file_path = (
output_aiconfig_ref
if isinstance(output_aiconfig_ref, str)
else output_aiconfig_ref.file_path
)
output_config_file_path = output_aiconfig_ref if isinstance(output_aiconfig_ref, str) else output_aiconfig_ref.file_path

# TODO: openai makes it hard to statically annotate.
def _create_chat_completion_with_config_saving(*args, **kwargs) -> Any: # type: ignore
@@ -147,9 +132,7 @@ def _create_chat_completion_with_config_saving(*args, **kwargs) -> Any: # type:
outputs = []

# Check if response is a stream
stream = kwargs.get("stream", False) is True and isinstance(
response, openai.Stream
)
stream = kwargs.get("stream", False) is True and isinstance(response, openai.Stream)

# Convert Response to output for last prompt
if not stream:
@@ -189,9 +172,7 @@ def generate_streamed_response() -> Generator[Any, None, None]:
)
stream_outputs[index] = output
yield chunk
stream_outputs = [
stream_outputs[i] for i in sorted(list(stream_outputs.keys()))
]
stream_outputs = [stream_outputs[i] for i in sorted(list(stream_outputs.keys()))]

# Add outputs to last prompt
serialized_prompts[-1].outputs = stream_outputs
8 changes: 3 additions & 5 deletions python/src/aiconfig/Config.py
@@ -1,22 +1,20 @@
import json
import os
import yaml
from typing import Any, Dict, List, Literal, Optional, Tuple

import requests
import yaml
from aiconfig.callback import CallbackEvent, CallbackManager
from aiconfig.default_parsers.anyscale_endpoint import DefaultAnyscaleEndpointParser
from aiconfig.default_parsers.openai import DefaultOpenAIParser
from aiconfig.default_parsers.palm import PaLMChatParser, PaLMTextParser
from aiconfig.model_parser import InferenceOptions, ModelParser

from aiconfig.schema import JSONObject

from .default_parsers.dalle import DalleImageGenerationParser
from .default_parsers.hf import HuggingFaceTextGenerationParser
from .registry import (
ModelParserRegistry,
update_model_parser_registry_with_config_runtime,
)
from .registry import ModelParserRegistry, update_model_parser_registry_with_config_runtime
from .schema import AIConfig, Prompt
from .util.config_utils import is_yaml_ext

20 changes: 3 additions & 17 deletions python/src/aiconfig/callback.py
@@ -2,17 +2,7 @@
import asyncio
import logging
import time
from typing import (
Any,
Awaitable,
Callable,
Coroutine,
Final,
List,
Sequence,
TypeAlias,
Union,
)
from typing import Any, Awaitable, Callable, Coroutine, Final, List, Sequence, TypeAlias, Union

from pydantic import BaseModel, ConfigDict

@@ -43,9 +33,7 @@ def __init__(self, name: str, file: str, data: Any, ts_ns: int = time.time_ns())
Result: TypeAlias = Union[Ok[Any], Err[Any]]


async def execute_coroutine_with_timeout(
coroutine: Coroutine[Any, Any, Any], timeout: int
) -> Result:
async def execute_coroutine_with_timeout(coroutine: Coroutine[Any, Any, Any], timeout: int) -> Result:
"""
Execute a coroutine with a timeout, return an Ok result or an Err on Exception

@@ -123,9 +111,7 @@ def setup_logger():
name = "my-logger"
log_file = "aiconfig.log"

formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler = logging.FileHandler(log_file)
handler.setFormatter(formatter)

2 changes: 1 addition & 1 deletion python/src/aiconfig/editor/server/server.py
@@ -1,6 +1,6 @@
import logging
from typing import Any, Dict, Type, Union
import webbrowser
from typing import Any, Dict, Type, Union

import lastmile_utils.lib.core.api as core_utils
import result