diff --git a/docker/Dockerfile b/docker/Dockerfile
index 48fb433..7e55de4 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -17,7 +17,7 @@ RUN apt-get install -y gcc python3-dev
 # Official: https://pypi.org/simple
 ARG PYTHON_INDEX_URL=https://pypi.mirrors.ustc.edu.cn/simple/
 COPY libs /libs
-RUN python -m pip install ragas langchain sentencepiece -i ${PYTHON_INDEX_URL}
+RUN python -m pip install ragas langchain sentencepiece protobuf -i ${PYTHON_INDEX_URL}
 
 WORKDIR /libs/core
 RUN pip install -e . -i ${PYTHON_INDEX_URL}
diff --git a/libs/core/kubeagi_core/evaluation/ragas_eval.py b/libs/core/kubeagi_core/evaluation/ragas_eval.py
index 15c701a..adecd66 100644
--- a/libs/core/kubeagi_core/evaluation/ragas_eval.py
+++ b/libs/core/kubeagi_core/evaluation/ragas_eval.py
@@ -22,7 +22,7 @@
 
 from ragas import evaluate
 from ragas.embeddings import BaseRagasEmbeddings
-from ragas.llms import BaseRagasLLM
+from ragas.llms import BaseRagasLLM, LangchainLLMWrapper
 from ragas.metrics import (
     AnswerCorrectness,
     AnswerRelevancy,
@@ -87,10 +87,12 @@ def __init__(
         )
 
         # Initialize judge llm
-        self.llm = ChatOpenAI(
-            model_name=self.llm_model,
-            openai_api_key=self.api_key,
-            openai_api_base=self.api_base,
+        self.llm = LangchainLLMWrapper(
+            langchain_llm=ChatOpenAI(
+                model=self.llm_model,
+                api_key=self.api_key,
+                base_url=self.api_base,
+            )
         )
 
         # Initialize judge embedding
diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml
index 9995154..349b3e5 100644
--- a/libs/core/pyproject.toml
+++ b/libs/core/pyproject.toml
@@ -18,8 +18,8 @@ classifiers = [
 dependencies = [
     "docx2txt==0.8",
     "kubernetes==25.3.0",
-    "langchain==0.1.0",
-    "ragas==0.0.22",
+    "langchain>=0.1.0",
+    "ragas>=0.1.0",
     "spacy==3.5.4",
     "zhipuai==1.0.7",
 ]
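
For context (not part of the patch): ragas >= 0.1.0 expects its judge model as a BaseRagasLLM, so LangChain chat models are adapted via LangchainLLMWrapper (and embeddings via LangchainEmbeddingsWrapper), which is what the ragas_eval.py change above does. The sketch below shows that general wiring under stated assumptions; the langchain_openai imports, model name, endpoint, placeholder credentials, and sample rows are illustrative and are not code from this repository.

# Minimal sketch of the ragas >= 0.1.0 judge-model wiring (assumed names/values).
from datasets import Dataset
from langchain_openai import ChatOpenAI, OpenAIEmbeddings  # assumed provider classes
from ragas import evaluate
from ragas.embeddings import LangchainEmbeddingsWrapper
from ragas.llms import LangchainLLMWrapper
from ragas.metrics import answer_relevancy, faithfulness

# Wrap the LangChain chat model so ragas can drive it as the judge LLM.
judge_llm = LangchainLLMWrapper(
    langchain_llm=ChatOpenAI(
        model="gpt-3.5-turbo",                  # placeholder model name
        api_key="sk-...",                       # placeholder credentials
        base_url="https://api.openai.com/v1",   # placeholder endpoint
    )
)

# Wrap the embedding model the same way for embedding-based metrics.
judge_embeddings = LangchainEmbeddingsWrapper(
    OpenAIEmbeddings(api_key="sk-...", base_url="https://api.openai.com/v1")
)

# Toy dataset in the column layout ragas expects.
dataset = Dataset.from_dict(
    {
        "question": ["What does the evaluation helper do?"],
        "answer": ["It scores RAG answers with ragas metrics."],
        "contexts": [["The helper runs ragas metrics against model outputs."]],
    }
)

# Run the metrics with the wrapped judge LLM and embeddings.
result = evaluate(
    dataset,
    metrics=[faithfulness, answer_relevancy],
    llm=judge_llm,
    embeddings=judge_embeddings,
)
print(result)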