Skip to content

Commit

Permalink
[fix] Added event tracking for server startup (#129)
Browse files · Browse the repository at this point in the history
* Added event tracking for server startup

* pre-commit changes

* Added missing status_code for openai APIConnectionError

* Reverted start_server back to class declaration instead of initialization

* Added exception to error message
  • Loading branch information
gabrielrfg authored Sep 9, 2024
1 parent f2bce13 commit e110a46
Show file tree
Hide file tree
Showing 5 changed files with 26 additions and 12 deletions.
10 changes: 7 additions & 3 deletions llmstudio/engine/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import json
import os
from pathlib import Path
from threading import Event
from typing import Any, Dict, List, Optional, Union

import uvicorn
Expand Down Expand Up @@ -78,7 +79,9 @@ def _merge_configs(config1, config2):
raise RuntimeError(f"Error in configuration data: {e}")


def create_engine_app(config: EngineConfig = _load_engine_config()) -> FastAPI:
def create_engine_app(
started_event: Event, config: EngineConfig = _load_engine_config()
) -> FastAPI:
app = FastAPI(
title=ENGINE_TITLE,
description=ENGINE_DESCRIPTION,
Expand Down Expand Up @@ -162,14 +165,15 @@ async def export(request: Request):

@app.on_event("startup")
async def startup_event():
started_event.set()
print(f"Running LLMstudio Engine on http://{ENGINE_HOST}:{ENGINE_PORT} ")

return app


def run_engine_app():
def run_engine_app(started_event: Event):
try:
engine = create_engine_app()
engine = create_engine_app(started_event)
uvicorn.run(
engine,
host=ENGINE_HOST,
Expand Down
8 changes: 6 additions & 2 deletions llmstudio/engine/providers/azure.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,13 +121,17 @@ async def generate_client(
**function_args,
**request.parameters.model_dump(),
}

# Perform the asynchronous call
return await asyncio.to_thread(
client.chat.completions.create, **combined_args
)

except openai._exceptions.APIError as e:
except openai._exceptions.APIConnectionError as e:
raise HTTPException(
status_code=404, detail=f"There was an error reaching the endpoint: {e}"
)

except openai._exceptions.APIStatusError as e:
raise HTTPException(status_code=e.status_code, detail=e.response.json())

def prepare_messages(self, request: AzureRequest):
Expand Down
6 changes: 4 additions & 2 deletions llmstudio/server.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import threading
from threading import Event

import requests

Expand Down Expand Up @@ -29,8 +30,10 @@ def is_server_running(host, port, path="/health"):

def start_server_component(host, port, run_func, server_name, startup_timeout=None):
    """Start one LLMstudio server component in a background daemon thread.

    If nothing is already listening on ``host:port``, spawn ``run_func`` in a
    daemon thread, hand it a ``threading.Event``, and block until the server
    signals readiness by setting that event.

    Args:
        host: Hostname the component binds to.
        port: Port the component binds to.
        run_func: Callable taking a single ``Event`` argument; expected to set
            the event once the server has started.
        server_name: Human-readable name used in log messages.
        startup_timeout: Optional seconds to wait for the startup signal.
            ``None`` (the default, matching previous behavior) waits forever.

    Returns:
        The started ``threading.Thread``, or ``None`` if a server was already
        running on ``host:port``.
    """
    if not is_server_running(host, port):
        started_event = Event()
        thread = threading.Thread(target=run_func, daemon=True, args=(started_event,))
        thread.start()
        # Wait for run_func to set the event. Without a timeout this hangs
        # forever if the server thread crashes before signaling startup, so
        # callers may pass startup_timeout to bound the wait.
        if not started_event.wait(timeout=startup_timeout):
            print(f"{server_name} server did not signal startup within {startup_timeout}s")
        return thread
    else:
        print(f"{server_name} server already running on {host}:{port}")
        return None
Expand All @@ -53,7 +56,6 @@ def setup_servers(engine, tracking, ui):
TRACKING_HOST, TRACKING_PORT, run_tracking_app, "Tracking"
)

ui_thread = None
if ui:
ui_thread = start_server_component(UI_HOST, UI_PORT, run_ui_app, "UI")

Expand Down
9 changes: 6 additions & 3 deletions llmstudio/tracking/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from threading import Event

import uvicorn
from fastapi import APIRouter, FastAPI
from fastapi.middleware.cors import CORSMiddleware
Expand All @@ -15,7 +17,7 @@


## Tracking
def create_tracking_app() -> FastAPI:
def create_tracking_app(started_event: Event) -> FastAPI:
app = FastAPI(
title=TRACKING_TITLE,
description=TRACKING_DESCRIPTION,
Expand Down Expand Up @@ -43,14 +45,15 @@ def health_check():

@app.on_event("startup")
async def startup_event():
started_event.set()
print(f"Running LLMstudio Tracking on http://{TRACKING_HOST}:{TRACKING_PORT} ")

return app


def run_tracking_app():
def run_tracking_app(started_event: Event):
try:
tracking = create_tracking_app()
tracking = create_tracking_app(started_event)
uvicorn.run(
tracking,
host=TRACKING_HOST,
Expand Down
5 changes: 3 additions & 2 deletions llmstudio/ui/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import subprocess
from pathlib import Path
import threading
import webbrowser
from threading import Event

from llmstudio.config import UI_PORT

Expand All @@ -20,6 +20,7 @@ def run_bun_in_thread():
print(f"Error running LLMstudio UI: {e}")


def run_ui_app(started_event: Event):
    """Launch the LLMstudio UI in a background thread.

    Args:
        started_event: Event set immediately after the thread is started —
            NOT when the UI is actually serving — so callers must not treat
            it as a readiness signal. It exists only so this function matches
            the ``run_func(started_event)`` signature the server launcher
            expects for the other components.
    """
    # NOTE(review): unlike the other components this thread is not created
    # with daemon=True — confirm whether it should be.
    thread = threading.Thread(target=run_bun_in_thread)
    thread.start()
    started_event.set()  # just here for compatibility

0 comments on commit e110a46

Please sign in to comment.