
Commit

Merge pull request #216 from tisnik/better-error-message
Better error message
tisnik authored Dec 16, 2024
2 parents e963185 + 7d9d197 commit 5cf2cc1
Showing 7 changed files with 17 additions and 13 deletions.
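Most of these hunks make the same change: eager f-string log messages become logging's lazy %-style argument passing, so interpolation happens only when a handler actually emits the record. A minimal standalone sketch of the pattern (hypothetical path value, not code from this repository):

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # Hypothetical value, for illustration only.
    feedback_file_path = "/tmp/feedback/1234.json"

    # Eager: the f-string is formatted even though DEBUG is disabled here.
    logger.debug(f"feedback stored in '{feedback_file_path}'")

    # Lazy: the format string and argument are stored on the LogRecord and
    # interpolated only if the record is actually emitted.
    logger.debug("feedback stored in '%s'", feedback_file_path)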
6 changes: 3 additions & 3 deletions ols/app/endpoints/feedback.py
@@ -73,7 +73,7 @@ def store_feedback(user_id: str, feedback: dict) -> None:
     with open(feedback_file_path, "w", encoding="utf-8") as feedback_file:
         json.dump(data_to_store, feedback_file)
 
-    logger.debug(f"feedback stored in '{feedback_file_path}'")
+    logger.debug("feedback stored in '%s'", feedback_file_path)
 
 
 @router.get("/status")
@@ -126,13 +126,13 @@ def store_user_feedback(
     Returns:
         Response indicating the status of the feedback storage request.
     """
-    logger.debug(f"feedback received {feedback_request}")
+    logger.debug("feedback received %s", str(feedback_request))
 
     user_id = retrieve_user_id(auth)
     try:
         store_feedback(user_id, feedback_request.model_dump(exclude={"model_config"}))
     except Exception as e:
-        logger.error(f"Error storing user feedback: {e}")
+        logger.error("Error storing user feedback: %s", e)
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
             detail={
6 changes: 4 additions & 2 deletions ols/app/main.py
Expand Up @@ -120,8 +120,10 @@ async def stream_response_body(
) -> AsyncGenerator[bytes, None]:
async for chunk in response_body:
logger.debug(
f"Response to {host}:{port} "
f"Body chunk: {chunk.decode('utf-8', errors='ignore')}"
"Response to %s:%d Body chunk: %s}",
host,
port,
chunk.decode("utf-8", errors="ignore"),
)
yield chunk
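One caveat for this hunk: %-style arguments defer interpolation, but the arguments themselves are still evaluated eagerly, so chunk.decode(...) runs for every chunk regardless of log level. A common idiom to skip that work entirely, sketched here as an assumption rather than part of this commit, is a level guard:

    import logging

    logger = logging.getLogger(__name__)
    chunk = b"example bytes"  # hypothetical chunk, for illustration

    # The decode only runs when DEBUG is actually enabled for this logger.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Body chunk: %s", chunk.decode("utf-8", errors="ignore"))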

6 changes: 3 additions & 3 deletions ols/src/cache/postgres_cache.py
@@ -93,7 +93,7 @@ def __init__(self, config: PostgresConfig) -> None:
             self.initialize_cache()
         except Exception as e:
             self.conn.close()
-            logger.exception(f"Error initializing Postgres cache:\n{e}")
+            logger.exception("Error initializing Postgres cache:\n%s", e)
             raise
         self.capacity = config.max_entries
 
@@ -122,7 +122,7 @@ def get(self, user_id: str, conversation_id: str) -> list[CacheEntry]:
                     return []
                 return [CacheEntry.from_dict(cache_entry) for cache_entry in value]
         except psycopg2.DatabaseError as e:
-            logger.error(f"PostgresCache.get {e}")
+            logger.error("PostgresCache.get %s", e)
             raise CacheError("PostgresCache.get", e) from e
 
     def insert_or_append(
@@ -161,7 +161,7 @@ def insert_or_append(
                     PostgresCache._cleanup(cursor, self.capacity)
             # commit is implicit at this point
         except psycopg2.DatabaseError as e:
-            logger.error(f"PostgresCache.insert_or_append {e}")
+            logger.error("PostgresCache.insert_or_append: %s", e)
             raise CacheError("PostgresCache.insert_or_append", e) from e
 
     @staticmethod
2 changes: 1 addition & 1 deletion ols/src/llms/providers/azure_openai.py
@@ -132,7 +132,7 @@ def retrieve_access_token(
         )
         return credential.get_token("https://cognitiveservices.azure.com/.default")
     except Exception as e:
-        logger.error(f"Error retrieving access token: {e}")
+        logger.error("Error retrieving access token: %s", e)
         return None


2 changes: 1 addition & 1 deletion ols/src/llms/providers/registry.py
Expand Up @@ -22,7 +22,7 @@ def register(cls, provider_type: str, llm_provider: Callable) -> None:
f"LLMProvider subclass required, got '{type(llm_provider)}'"
)
cls.llm_providers[provider_type] = llm_provider
logger.debug(f"LLM provider '{provider_type}' registered")
logger.debug("LLM provider '%s' registered", provider_type)


def register_llm_provider_as(provider_type: str) -> Callable:
4 changes: 2 additions & 2 deletions ols/src/query_helpers/docs_summarizer.py
Expand Up @@ -69,7 +69,7 @@ def summarize(
f"model: {self.model}, "
f"verbose: {verbose}"
)
logger.debug(f"{conversation_id} call settings: {settings_string}")
logger.debug("%s call settings: %s", conversation_id, settings_string)

token_handler = TokenHandler()
bare_llm = self.llm_loader(self.provider, self.model, self.generic_llm_params)
Expand Down Expand Up @@ -140,7 +140,7 @@ def summarize(

if len(rag_context) == 0:
logger.debug("Using llm to answer the query without reference content")
logger.debug(f"{conversation_id} Summary response: {response}")
logger.debug("%s Summary response: %s", conversation_id, response)

return SummarizerResponse(response, rag_chunks, truncated)

4 changes: 3 additions & 1 deletion ols/user_data_collection/data_collector.py
Expand Up @@ -260,7 +260,9 @@ def upload_data_to_ingress(tarball: io.BytesIO) -> requests.Response:
)

if response.status_code != requests.codes.accepted:
logger.error(f"posting payload failed, full response: {response}")
logger.error(
f"posting payload failed, response: {response.status_code}: {response.text}"
)
raise requests.exceptions.HTTPError(
f"data upload failed with response code: {response.status_code}"
)
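Unlike the other hunks, this one keeps the f-string and improves the message content instead, which is the "better error message" of the PR title: logging the requests.Response object only records its repr (e.g. "<Response [413]>", a status code chosen here purely for illustration), while response.status_code and response.text show what the Ingress endpoint actually returned.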
