diff --git a/helm/holmes/templates/holmes.yaml b/helm/holmes/templates/holmes.yaml
index 431bbb04..4642854a 100644
--- a/helm/holmes/templates/holmes.yaml
+++ b/helm/holmes/templates/holmes.yaml
@@ -38,13 +38,26 @@ spec:
           - name: CERTIFICATE
             value: {{ .Values.certificate }}
           {{- end }}
-          {{ if .Values.openaiKey -}}
+          - name: HOLMES_LLM
+            value: {{ .Values.llm }}
           - name: OPENAI_API_KEY
             valueFrom:
               secretKeyRef:
                 name: {{ .Values.secretName }}
                 key: openaiKey
-          {{- end }}
+                optional: true
+          - name: AZURE_OPENAI_API_KEY
+            valueFrom:
+              secretKeyRef:
+                name: {{ .Values.secretName }}
+                key: azureOpenaiKey
+                optional: true
+          - name: AZURE_ENDPOINT
+            valueFrom:
+              secretKeyRef:
+                name: {{ .Values.secretName }}
+                key: azureEndpoint
+                optional: true
           {{- if .Values.additionalEnvVars -}}
           {{ toYaml .Values.additionalEnvVars | nindent 10 }}
           {{- end }}
@@ -89,4 +102,6 @@ metadata:
 type: Opaque
 data:
   openaiKey: {{ .Values.openaiKey | b64enc | quote }}
+  azureEndpoint: {{ .Values.azureEndpoint | b64enc | quote }}
+  azureOpenaiKey: {{ .Values.azureOpenaiKey | b64enc | quote }}
 {{- end }}
\ No newline at end of file
diff --git a/helm/holmes/values.yaml b/helm/holmes/values.yaml
index c77dfbde..2f1cad27 100644
--- a/helm/holmes/values.yaml
+++ b/helm/holmes/values.yaml
@@ -1,6 +1,9 @@
 # Optional openai key and endpoint to create openai queries.
+llm: "openai"
 openaiKey: ""
+azureEndpoint: ""
+azureOpenaiKey: ""
 certificate: "" # base64 encoded
 logLevel: INFO
diff --git a/holmes/plugins/destinations/slack/plugin.py b/holmes/plugins/destinations/slack/plugin.py
index a786581e..c7e8cea5 100644
--- a/holmes/plugins/destinations/slack/plugin.py
+++ b/holmes/plugins/destinations/slack/plugin.py
@@ -84,7 +84,7 @@ def __send_tool_usage(self, parent_thread, result: LLMResult) -> None:
 
         text = "*AI used info from alert and the following tools:*"
         for tool in result.tool_calls:
-            file_response = self.client.files_upload(
+            file_response = self.client.files_upload_v2(
                 content=tool.result, title=f"{tool.description}"
             )
             permalink = file_response["file"]["permalink"]
@@ -107,7 +107,7 @@ def __send_prompt_for_debugging(self, parent_thread, result: LLMResult) -> None:
             return
 
         text = "*🐞 DEBUG: messages with OpenAI*"
-        file_response = self.client.files_upload(
+        file_response = self.client.files_upload_v2(
             content=result.prompt, title=f"ai-prompt"
         )
         permalink = file_response["file"]["permalink"]
@@ -131,7 +131,7 @@ def __send_issue_metadata(self, parent_thread, issue: Issue) -> None:
 
         filename = f"{issue.name}"
         issue_json = issue.model_dump_json()
-        file_response = self.client.files_upload(content=issue_json, title=filename)
+        file_response = self.client.files_upload_v2(content=issue_json, title=filename)
         permalink = file_response["file"]["permalink"]
         text = issue.presentation_all_metadata
         text += f"\n<{permalink}|(unknown)>\n"
diff --git a/server.py b/server.py
index a107bb85..fc1c8822 100644
--- a/server.py
+++ b/server.py
@@ -12,7 +12,8 @@
 import uvicorn
 import colorlog
 
-from typing import List, Union
+from holmes.core.tool_calling_llm import ToolCallResult
+from typing import List, Union, Dict, Any, Optional
 from fastapi import FastAPI
 from pydantic import BaseModel
 
@@ -25,18 +26,13 @@
 from holmes.plugins.prompts import load_prompt
 
 
-class InvestigateContext(BaseModel):
-    type: str
-    value: Union[str, dict]
-
-
 class InvestigateRequest(BaseModel):
     source: str  # "prometheus" etc
     title: str
     description: str
     subject: dict
-    context: List[InvestigateContext]
-    source_instance_id: str
+    context: Dict[str, Any]
+    source_instance_id: str = "ApiRequest"
     include_tool_calls: bool = False
     include_tool_call_results: bool = False
     prompt_template: str = "builtin://generic_investigation.jinja2"
@@ -68,49 +64,44 @@ def init_logging():
 
 config = Config.load_from_env()
 
+class InvestigationResult(BaseModel):
+    analysis: Optional[str] = None
+    tool_calls: List[ToolCallResult] = []
+
+
 @app.post("/api/investigate")
-def investigate_issues(request: InvestigateRequest):
-    context = fetch_context_data(request.context)
-    raw_data = request.model_dump()
+def investigate_issues(investigate_request: InvestigateRequest):
+    context = fetch_context_data(investigate_request.context)
+    raw_data = investigate_request.model_dump()
     if context:
         raw_data["extra_context"] = context
     ai = config.create_issue_investigator(console, allowed_toolsets=ALLOWED_TOOLSETS)
     issue = Issue(
         id=context['id'] if context else "",
-        name=request.title,
-        source_type=request.source,
-        source_instance_id=request.source_instance_id,
+        name=investigate_request.title,
+        source_type=investigate_request.source,
+        source_instance_id=investigate_request.source_instance_id,
         raw=raw_data,
     )
     investigation = ai.investigate(
         issue,
-        # TODO prompt should probably be configurable?
-        prompt=load_prompt(request.prompt),
+        prompt=load_prompt(investigate_request.prompt_template),
         console=console,
     )
-    ret = {
-        "analysis": investigation.result
-    }
-    if request.include_tool_calls:
-        ret["tool_calls"] = [
-            {
-                "tool_name": tool.tool_name,
-                "tool_call": tool.description,
-            } | (
-                {"call_result": tool.result} if request.include_tool_call_results else {}
-            )
-            for tool in investigation.tool_calls
-        ]
-    return ret
-
-
-def fetch_context_data(context: List[InvestigateContext]) -> dict:
-    for context_item in context:
-        if context_item.type == "robusta_issue_id":
+    return InvestigationResult(
+        analysis=investigation.result,
+        tool_calls=investigation.tool_calls,
+    )
+
+
+def fetch_context_data(context: Dict[str, Any]) -> dict:
+    for context_item in context.keys():
+        if context_item == "robusta_issue_id":
             # Note we only accept a single robusta_issue_id. I don't think it
             # makes sense to have several of them in the context structure.
-            return dal.get_issue_data(context_item.value)
+            return dal.get_issue_data(context[context_item])
+
 
 if __name__ == "__main__":
     uvicorn.run(app, host=HOLMES_HOST, port=HOLMES_PORT)
\ No newline at end of file
diff --git a/test-api.sh b/test-api.sh
index 49d4cc58..73b0bbe2 100755
--- a/test-api.sh
+++ b/test-api.sh
@@ -15,12 +15,10 @@ curl -XPOST 127.0.0.1:8000/api/investigate -H "Content-Type: application/json" -d "{
     },
     \"annotations\": {}
   },
-  \"context\": [
+  \"context\": {
-      \"type\": \"robusta_issue_id\",
-      \"value\": \"5b3e2fb1-cb83-45ea-82ec-318c94718e44\"
-    }
-  ],
+    \"robusta_issue_id\": \"5b3e2fb1-cb83-45ea-82ec-318c94718e44\"
+  },
   \"include_tool_calls\": true,
   \"include_tool_call_results\": true
 }"