Merge branch 'master' into add_promql_capability
nherment authored Feb 3, 2025
2 parents e943f46 + bc4c0ac commit 9c7451a
Showing 139 changed files with 1,948 additions and 1,153 deletions.
2 changes: 1 addition & 1 deletion .gitattributes
@@ -1 +1 @@
holmes/.git_archival.json export-subst
holmes/.git_archival.json export-subst
10 changes: 5 additions & 5 deletions .github/workflows/build-binaries-and-brew.yaml
@@ -22,7 +22,7 @@ jobs:
uses: actions/setup-python@v2
with:
python-version: '3.11'

- name: Install dependencies
if: matrix.os != 'windows-latest'
run: |
@@ -43,7 +43,7 @@ jobs:
if: matrix.os == 'ubuntu-20.04'
run: |
sudo apt-get install -y binutils
- name: Update package version (Linux)
if: matrix.os == 'ubuntu-20.04'
run: sed -i 's/__version__ = .*/__version__ = "${{ github.ref_name }}"/g' holmes/__init__.py
@@ -67,7 +67,7 @@ jobs:
# regarding the tiktoken part of the command, see https://github.com/openai/tiktoken/issues/80
# regarding the litellm part of the command, see https://github.com/pyinstaller/pyinstaller/issues/8620#issuecomment-2186540504
run: |
pyinstaller holmes.py --add-data 'holmes/plugins/runbooks/*:holmes/plugins/runbooks' --add-data 'holmes/plugins/prompts/*:holmes/plugins/prompts' --add-data 'holmes/plugins/toolsets/*:holmes/plugins/toolsets' --hidden-import=tiktoken_ext.openai_public --hidden-import=tiktoken_ext --hiddenimport litellm.llms.tokenizers --hiddenimport litellm.litellm_core_utils.tokenizers --collect-data litellm
pyinstaller holmes.py --add-data 'holmes/plugins/runbooks/*:holmes/plugins/runbooks' --add-data 'holmes/plugins/prompts/*:holmes/plugins/prompts' --add-data 'holmes/plugins/toolsets/*:holmes/plugins/toolsets' --hidden-import=tiktoken_ext.openai_public --hidden-import=tiktoken_ext --hiddenimport litellm.llms.tokenizers --hiddenimport litellm.litellm_core_utils.tokenizers --collect-data litellm
ls dist
- name: Zip the application (Unix)
@@ -91,7 +91,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./holmes-${{ matrix.os }}-${{ github.ref_name }}.zip
asset_name: holmes-${{ matrix.os }}-${{ github.ref_name }}.zip
asset_content_type: application/octet-stream
@@ -105,7 +105,7 @@ jobs:
check-latest:
needs: build
runs-on: ubuntu-20.04
outputs:
outputs:
IS_LATEST: ${{ steps.check-latest.outputs.release == github.ref_name }}
steps:
- id: check-latest
2 changes: 1 addition & 1 deletion .github/workflows/build-docker-images.yaml
@@ -77,7 +77,7 @@ jobs:
# Note: this ignores the "Set as latest release" checkbox in the GitHub UI
# it isn't possible to check whether that was set or not
# so if you do not want to override the "latest" tag, you should mark the release as a prerelease or a draft
# for prereleases and drafts we don't tag latest
# for prereleases and drafts we don't tag latest
- name: Tag and push Docker image as latest if applicable
if: ${{ github.event.release.prerelease == false && github.event.release.draft == false }}
run: |
14 changes: 13 additions & 1 deletion .pre-commit-config.yaml
@@ -6,4 +6,16 @@ repos:
- id: poetry-lock
pass_filenames: false
args:
- --no-update
- --no-update
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.7.2
hooks:
- id: ruff
entry: ruff check --fix
- id: ruff-format
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: detect-private-key
- id: end-of-file-fixer
- id: trailing-whitespace
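The new hooks can be exercised locally before committing; a minimal sketch, assuming pre-commit is installed in the project's Poetry environment:

```bash
# Install the git hooks once per clone, then run every hook against all files
poetry run pre-commit install
poetry run pre-commit run -a
```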
4 changes: 2 additions & 2 deletions CONTRIBUTING.md
@@ -15,15 +15,15 @@ Please make sure to read and observe our [Code of Conduct](https://github.com/ro

## Reporting bugs

We encourage those interested to contribute code and also appreciate when issues are reported.
We encourage those interested to contribute code and also appreciate when issues are reported.

- Create a new issue and label it as `bug`
- Clearly state how to reproduce the bug:
- Which LLM you've used
- Which steps are required to reproduce
- As LLM answers may differ between runs, does it always reproduce, or only occasionally?


## Contributing Code

- Fork the repository and clone it locally.
25 changes: 18 additions & 7 deletions Dockerfile
@@ -26,22 +26,34 @@ ENV PATH="$VIRTUAL_ENV/bin:$PATH"
RUN curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key -o Release.key

# Set the architecture-specific kube lineage URLs
ARG ARM_URL=https://github.com/Avi-Robusta/kube-lineage/releases/download/v2.2.1/kube-lineage-macos-latest-v2.2.1
ARG AMD_URL=https://github.com/Avi-Robusta/kube-lineage/releases/download/v2.2.1/kube-lineage-ubuntu-latest-v2.2.1
ARG KUBE_LINEAGE_ARM_URL=https://github.com/Avi-Robusta/kube-lineage/releases/download/v2.2.1/kube-lineage-macos-latest-v2.2.1
ARG KUBE_LINEAGE_AMD_URL=https://github.com/Avi-Robusta/kube-lineage/releases/download/v2.2.1/kube-lineage-ubuntu-latest-v2.2.1
# Define a build argument to identify the platform
ARG TARGETPLATFORM
# Conditional download based on the platform
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
curl -L -o kube-lineage $ARM_URL; \
curl -L -o kube-lineage $KUBE_LINEAGE_ARM_URL; \
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
curl -L -o kube-lineage $AMD_URL; \
curl -L -o kube-lineage $KUBE_LINEAGE_AMD_URL; \
else \
echo "Unsupported platform: $TARGETPLATFORM"; exit 1; \
fi
RUN chmod 777 kube-lineage
RUN ./kube-lineage --version

RUN curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64
# Set the architecture-specific argocd URLs
ARG ARGOCD_ARM_URL=https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-arm64
ARG ARGOCD_AMD_URL=https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64
# Conditional download based on the platform
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
curl -L -o argocd $ARGOCD_ARM_URL; \
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
curl -L -o argocd $ARGOCD_AMD_URL; \
else \
echo "Unsupported platform: $TARGETPLATFORM"; exit 1; \
fi
RUN chmod 777 argocd
RUN ./argocd --help

# Install Helm
RUN curl https://baltocdn.com/helm/signing.asc | gpg --dearmor -o /usr/share/keyrings/helm.gpg \
@@ -100,8 +112,7 @@ COPY --from=builder /app/kube-lineage /usr/local/bin
RUN kube-lineage --version

# Set up ArgoCD
COPY --from=builder /app/argocd-linux-amd64 /usr/local/bin/argocd
RUN chmod 555 /usr/local/bin/argocd
COPY --from=builder /app/argocd /usr/local/bin/argocd
RUN argocd --help

# Set up Helm
2 changes: 1 addition & 1 deletion Dockerfile.dev
@@ -59,7 +59,7 @@ ARG PRIVATE_PACKAGE_REGISTRY="none"
RUN if [ "${PRIVATE_PACKAGE_REGISTRY}" != "none" ]; then \
pip config set global.index-url "${PRIVATE_PACKAGE_REGISTRY}"; \
fi \
&& pip install poetry
&& pip install poetry
ARG POETRY_REQUESTS_TIMEOUT
RUN poetry config virtualenvs.create false
COPY pyproject.toml poetry.lock /app/
4 changes: 4 additions & 0 deletions Makefile
@@ -0,0 +1,4 @@


check:
poetry run pre-commit run -a
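With this target in place, the entire hook suite runs with a single command; this assumes `poetry` is on the PATH and the hooks above are configured:

```bash
make check   # equivalent to: poetry run pre-commit run -a
```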
34 changes: 16 additions & 18 deletions README.md
@@ -631,31 +631,31 @@ Using Grafana Loki

HolmesGPT can consult logs from [Loki](https://grafana.com/oss/loki/) by proxying through a [Grafana](https://grafana.com/oss/grafana/) instance.

There are 2 parts to configuring access to Grafana Loki: Access/Authentication and search terms.
To configure the Loki toolset:

For access and authentication, add the following environment variables:

* `GRAFANA_URL` - e.g. https://my-org.grafana.net
* `GRAFANA_API_KEY` - e.g. glsa_bsm6ZS_sdfs25f
```yaml
toolsets:
grafana/loki:
enabled: true
config:
api_key: "{{ env.GRAFANA_API_KEY }}"
url: "http://loki-url"
```
For search terms, you can optionally tweak the search terms used by the toolset.
This is done by appending the following to your Holmes configuration file:
This is done by appending the following to your Holmes grafana/loki configuration:
```yaml
grafana:
url: https://my-org.grafana.net #
api_key: glsa_bsm6ZS_sdfs25f
loki:
pod_name_search_key: "pod"
namespace_search_key: "namespace"
node_name_search_key: "node"
pod_name_search_key: "pod"
namespace_search_key: "namespace"
node_name_search_key: "node"
```
> You only need to tweak the configuration file if your Loki log settings for pod, namespace and node differ from the above defaults.
The Loki toolset is configured using the same Grafana settings as the Grafana Tempo toolset.
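Taken together, a full toolset entry might look like the sketch below. Note this is illustrative only: whether the search keys nest under `config` alongside `url` and `api_key` is an assumption, not something this README states.

```yaml
toolsets:
  grafana/loki:
    enabled: true
    config:
      api_key: "{{ env.GRAFANA_API_KEY }}"
      url: "https://my-org.grafana.net"
      pod_name_search_key: "pod"          # assumed placement of the optional search keys
      namespace_search_key: "namespace"
      node_name_search_key: "node"
```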
</details>
<details>
<summary>
Using Grafana Tempo
</summary>
@@ -664,8 +664,6 @@ HolmesGPT can fetch trace information from Grafana Tempo to debug performance re
Tempo is configured using the same Grafana settings as the Grafana Loki toolset.
grafana:
url: https://my-org.grafana.net #
</details>
@@ -875,9 +873,9 @@ Configure Slack to send notifications to specific channels. Provide your Slack t
<summary>OpenSearch Integration</summary>

The OpenSearch toolset (`opensearch`) allows Holmes to consult an OpenSearch cluster for its health, settings and shard information.
The toolset supports multiple opensearch or elasticsearch clusters that are configured by editing Holmes' configuration file (or in cluster to the configuration secret):
The toolset supports multiple OpenSearch or Elasticsearch clusters that are configured by editing Holmes' configuration file:

```
```
opensearch_clusters:
- hosts:
- https://my_elasticsearch.us-central1.gcp.cloud.es.io:443
56 changes: 29 additions & 27 deletions examples/custom_llm.py
@@ -1,17 +1,14 @@

from typing import Any, Dict, List, Optional, Type, Union
from holmes.config import Config
from holmes.core.llm import LLM
from litellm.types.utils import ModelResponse
from holmes.core.tool_calling_llm import ToolCallingLLM
from holmes.core.tools import Tool, ToolExecutor
from holmes.plugins.toolsets import load_builtin_toolsets
from rich.console import Console
from pydantic import BaseModel
from holmes.plugins.prompts import load_and_render_prompt
import sys
class MyCustomLLM(LLM):


class MyCustomLLM(LLM):
def get_context_window_size(self) -> int:
return 128000

@@ -21,36 +18,41 @@ def get_maximum_output_token(self) -> int:
def count_tokens_for_message(self, messages: list[dict]) -> int:
return 1

def completion(self, messages: List[Dict[str, Any]], tools: Optional[List[Tool]] = [], tool_choice: Optional[Union[str, dict]] = None, response_format: Optional[Union[dict, Type[BaseModel]]] = None, temperature:Optional[float] = None, drop_params: Optional[bool] = None) -> ModelResponse:
return ModelResponse(choices=[{
"finish_reason": "stop",
"index": 0,
"message": {
"role": "assistant",
"content": "There are no issues with your cluster"
}
}],
usage={
"prompt_tokens": 0, # Integer
"completion_tokens": 0,
"total_tokens": 0
}
)
def completion(
self,
messages: List[Dict[str, Any]],
tools: Optional[List[Tool]] = [],
tool_choice: Optional[Union[str, dict]] = None,
response_format: Optional[Union[dict, Type[BaseModel]]] = None,
temperature: Optional[float] = None,
drop_params: Optional[bool] = None,
) -> ModelResponse:
return ModelResponse(
choices=[
{
"finish_reason": "stop",
"index": 0,
"message": {
"role": "assistant",
"content": "There are no issues with your cluster",
},
}
],
usage={
"prompt_tokens": 0, # Integer
"completion_tokens": 0,
"total_tokens": 0,
},
)


def ask_holmes():
console = Console()

prompt = "what issues do I have in my cluster"

system_prompt = load_and_render_prompt("builtin://generic_ask.jinja2")

tool_executor = ToolExecutor(load_builtin_toolsets())
ai = ToolCallingLLM(
tool_executor,
max_steps=10,
llm=MyCustomLLM()
)
ai = ToolCallingLLM(tool_executor, max_steps=10, llm=MyCustomLLM())

response = ai.prompt_call(system_prompt, prompt)

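The rest of the example is collapsed in this diff; a minimal, hypothetical way to run it as a script (assuming no setup beyond the imports above):

```python
if __name__ == "__main__":
    ask_holmes()  # prints nothing itself; see the collapsed remainder for response handling
```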
2 changes: 1 addition & 1 deletion examples/custom_runbooks.yaml
@@ -4,4 +4,4 @@ runbooks:
instructions: >
Analyze pod logs for errors and also read the mongodb logs
Correlate between the two logs and try to find the root cause of the issue.
Based on the logs, report the session ids of impacted transactions
Based on the logs, report the session ids of impacted transactions
4 changes: 2 additions & 2 deletions examples/custom_toolset.yaml
@@ -11,7 +11,7 @@ toolsets:
docs_url: "https://kubernetes.io/docs/home/"
# Icon URL. Used for display in the UI
icon_url: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRPKA-U9m5BxYQDF1O7atMfj9EMMXEoGu4t0Q&s"
# Tags for categorizing toolsets, 'core' will be used for all Holmes features (both cli's commands and chats in UI).
# Tags for categorizing toolsets, 'core' will be used for all Holmes features (both cli's commands and chats in UI).
# The 'cluster' tag is used for UI functionality, while 'cli' is for command-line specific tools
tags:
- core
@@ -24,7 +24,7 @@ toolsets:
- name: "switch_cluster"
# The LLM looks at this description when deciding what tools are relevant for each task
description: "Used to switch between multiple kubernetes contexts (clusters)"

# A templated bash command using Jinja2 templates
# The LLM can only control parameters that you expose as template variables like {{ this_variable }}
command: "kubectl config use-context {{ cluster_name }}"
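For illustration: the LLM can only fill the exposed `{{ cluster_name }}` template variable, so a call with a hypothetical cluster_name of `staging` renders to the following shell command:

```bash
kubectl config use-context staging
```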
2 changes: 1 addition & 1 deletion helm/holmes/Chart.yaml
@@ -7,4 +7,4 @@ type: application
# we use 0.0.1 as a placeholder for the version because Helm won't allow `0.0.0` and we want to be able to run
# `helm install` on development checkouts without updating this file. the version doesn't matter in that case anyway
version: 0.0.1
appVersion: 0.0.0
appVersion: 0.0.0
2 changes: 1 addition & 1 deletion helm/holmes/templates/holmesgpt-service-account.yaml
@@ -229,4 +229,4 @@ subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}-holmes-service-account
namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}
1 change: 0 additions & 1 deletion holmes/.git_archival.json
@@ -5,4 +5,3 @@
"refs": "$Format:%D$",
"describe": "$Format:%(describe:tags=true,match=v[0-9]*)$"
}
