fix(proxy_cli.py): only run prisma db push if prisma in environment
krrishdholakia committed Jan 13, 2024
1 parent 8a7a745 commit 3d7c169
Showing 3 changed files with 67 additions and 50 deletions.
106 changes: 62 additions & 44 deletions litellm/proxy/proxy_cli.py
@@ -343,6 +343,7 @@ def _make_openai_completion():
        )
        try:
            import uvicorn
+
            if os.name == "nt":
                pass
            else:
@@ -384,64 +385,79 @@ def _make_openai_completion():
            os.environ["DATABASE_URL"] = database_url

        if os.getenv("DATABASE_URL", None) is not None:
-            # run prisma db push, before starting server
-            # Save the current working directory
-            original_dir = os.getcwd()
-            # set the working directory to where this script is
-            abspath = os.path.abspath(__file__)
-            dname = os.path.dirname(abspath)
-            os.chdir(dname)
            try:
-                subprocess.run(
-                    ["prisma", "db", "push", "--accept-data-loss"]
-                )  # this looks like a weird edge case when prisma just wont start on render. we need to have the --accept-data-loss
-            finally:
-                os.chdir(original_dir)
+                subprocess.run(["prisma"], capture_output=True)
+                is_prisma_runnable = True
+            except FileNotFoundError:
+                is_prisma_runnable = False
+
+            if is_prisma_runnable:
+                # run prisma db push, before starting server
+                # Save the current working directory
+                original_dir = os.getcwd()
+                # set the working directory to where this script is
+                abspath = os.path.abspath(__file__)
+                dname = os.path.dirname(abspath)
+                os.chdir(dname)
+                try:
+                    subprocess.run(
+                        ["prisma", "db", "push", "--accept-data-loss"]
+                    )  # this looks like a weird edge case when prisma just wont start on render. we need to have the --accept-data-loss
+                finally:
+                    os.chdir(original_dir)
+            else:
+                print(
+                    f"Unable to connect to DB. DATABASE_URL found in environment, but prisma package not found."
+                )
        if port == 8000 and is_port_in_use(port):
            port = random.randint(1024, 49152)
-        _endpoint_str = f"curl --location 'http://0.0.0.0:{port}/chat/completions' \\"
-        curl_command = (
-            _endpoint_str
-            + """
-        --header 'Content-Type: application/json' \\
-        --data ' {
-        "model": "gpt-3.5-turbo",
-        "messages": [
-            {
-            "role": "user",
-            "content": "what llm are you"
-            }
-        ]
-        }'
-        \n
-        """
-        )
-        print()  # noqa
-        print(  # noqa
-            f'\033[1;34mLiteLLM: Test your local proxy with: "litellm --test" This runs an openai.ChatCompletion request to your proxy [In a new terminal tab]\033[0m\n'
-        )
-        print(  # noqa
-            f"\033[1;34mLiteLLM: Curl Command Test for your local proxy\n {curl_command} \033[0m\n"
-        )
-        print(
-            "\033[1;34mDocs: https://docs.litellm.ai/docs/simple_proxy\033[0m\n"
-        )  # noqa
-        print(  # noqa
-            f"\033[1;34mSee all Router/Swagger docs on http://0.0.0.0:{port} \033[0m\n"
-        )  # noqa
-
        from litellm.proxy.proxy_server import app

        if os.name == "nt":
            uvicorn.run(app, host=host, port=port)  # run uvicorn
        else:
            import gunicorn.app.base

            # Gunicorn Application Class
            class StandaloneApplication(gunicorn.app.base.BaseApplication):
                def __init__(self, app, options=None):
                    self.options = options or {}  # gunicorn options
                    self.application = app  # FastAPI app
                    super().__init__()

+                    _endpoint_str = (
+                        f"curl --location 'http://0.0.0.0:{port}/chat/completions' \\"
+                    )
+                    curl_command = (
+                        _endpoint_str
+                        + """
+                    --header 'Content-Type: application/json' \\
+                    --data ' {
+                    "model": "gpt-3.5-turbo",
+                    "messages": [
+                        {
+                        "role": "user",
+                        "content": "what llm are you"
+                        }
+                    ]
+                    }'
+                    \n
+                    """
+                    )
+                    print()  # noqa
+                    print(  # noqa
+                        f'\033[1;34mLiteLLM: Test your local proxy with: "litellm --test" This runs an openai.ChatCompletion request to your proxy [In a new terminal tab]\033[0m\n'
+                    )
+                    print(  # noqa
+                        f"\033[1;34mLiteLLM: Curl Command Test for your local proxy\n {curl_command} \033[0m\n"
+                    )
+                    print(
+                        "\033[1;34mDocs: https://docs.litellm.ai/docs/simple_proxy\033[0m\n"
+                    )  # noqa
+                    print(  # noqa
+                        f"\033[1;34mSee all Router/Swagger docs on http://0.0.0.0:{port} \033[0m\n"
+                    )  # noqa
+
                def load_config(self):
                    # note: This Loads the gunicorn config - has nothing to do with LiteLLM Proxy config
                    config = {
@@ -462,7 +478,9 @@ def load(self):
"worker_class": "uvicorn.workers.UvicornWorker",
"preload": True, # Add the preload flag
}
StandaloneApplication(app=app, options=gunicorn_options).run() # Run gunicorn
StandaloneApplication(
app=app, options=gunicorn_options
).run() # Run gunicorn


if __name__ == "__main__":
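The substance of the fix is the availability probe at the top of the new block: spawning the bare prisma binary raises FileNotFoundError when the CLI is absent, and the except arm records that as is_prisma_runnable = False. A minimal standalone sketch of the same pattern (the tool_is_runnable helper name is mine, not from the commit):

import os
import subprocess


def tool_is_runnable(executable: str) -> bool:
    # subprocess.run raises FileNotFoundError when the executable cannot be
    # spawned at all; capture_output=True keeps the probe from echoing the
    # tool's usage text to the terminal.
    try:
        subprocess.run([executable], capture_output=True)
        return True
    except FileNotFoundError:
        return False


if os.getenv("DATABASE_URL") is not None:
    if tool_is_runnable("prisma"):
        # mirror of the guarded call in proxy_cli.py
        subprocess.run(["prisma", "db", "push", "--accept-data-loss"])
    else:
        print("DATABASE_URL is set, but the prisma CLI is not installed.")

The probe only proves the binary can be started; a non-zero exit status still counts as runnable, which matches the commit's intent of guarding against a missing executable rather than a failing one.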
5 changes: 0 additions & 5 deletions litellm/proxy/proxy_server.py
@@ -1210,11 +1210,6 @@ async def startup_event():
    # check if master key set in environment - load from there
    master_key = litellm.get_secret("LITELLM_MASTER_KEY", None)

-    ### CONNECT TO DB ###
-    # check if DATABASE_URL in environment - load from there
-    if prisma_client is None:
-        prisma_setup(database_url=os.getenv("DATABASE_URL"))
-
    ### LOAD CONFIG ###
    worker_config = litellm.get_secret("WORKER_CONFIG")
    verbose_proxy_logger.debug(f"worker_config: {worker_config}")
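This hunk is the server-side half of the guard: the FastAPI startup hook no longer connects to the database unconditionally, so a missing prisma package can no longer break server boot. For contrast, a rough reconstruction of the deleted behavior (prisma_client and prisma_setup are the module-level names from proxy_server.py; the stub body is mine):

import os

prisma_client = None  # module-level singleton, as in proxy_server.py


def prisma_setup(database_url):
    # stand-in for the real proxy_server.prisma_setup
    print(f"connecting to {database_url}")


async def startup_event():
    # Pre-commit behavior, now deleted: always attempt DB setup at startup
    # whenever the hook runs, even if the prisma package is not installed.
    if prisma_client is None:
        prisma_setup(database_url=os.getenv("DATABASE_URL"))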
6 changes: 5 additions & 1 deletion litellm/proxy/utils.py
@@ -259,7 +259,7 @@ def __init__(self, database_url: str, proxy_logging_obj: ProxyLogging):
        self.proxy_logging_obj = proxy_logging_obj
        try:
            from prisma import Prisma  # type: ignore
-        except:
+        except Exception as e:
            os.environ["DATABASE_URL"] = database_url
            # Save the current working directory
            original_dir = os.getcwd()
@@ -273,6 +273,10 @@ def __init__(self, database_url: str, proxy_logging_obj: ProxyLogging):
                subprocess.run(
                    ["prisma", "db", "push", "--accept-data-loss"]
                )  # this looks like a weird edge case when prisma just wont start on render. we need to have the --accept-data-loss
+            except:
+                raise Exception(
+                    f"Unable to run prisma commands. Run `pip install prisma`"
+                )
            finally:
                os.chdir(original_dir)
            # Now you can import the Prisma Client
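The narrowed except Exception as e and the new inner except turn an opaque subprocess failure into an actionable hint. A condensed, self-contained sketch of that directory-scoped push (the function name is mine; like the real code, it assumes the prisma schema sits next to this file):

import os
import subprocess


def prisma_db_push() -> None:
    original_dir = os.getcwd()
    # The prisma CLI resolves schema.prisma relative to the working
    # directory, so temporarily switch to this file's directory.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    try:
        subprocess.run(["prisma", "db", "push", "--accept-data-loss"], check=True)
    except (FileNotFoundError, subprocess.CalledProcessError):
        raise Exception("Unable to run prisma commands. Run `pip install prisma`")
    finally:
        # Restore the caller's working directory even when the push fails.
        os.chdir(original_dir)

Unlike the committed code's bare except, the sketch names the two realistic failures: a missing binary (FileNotFoundError) and a non-zero exit (CalledProcessError, surfaced by check=True, which the original call does not pass).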
