From d0ea6b909d6ace8946b3533bb6d17bbe728fe13d Mon Sep 17 00:00:00 2001
From: Tami Takamiya
Date: Mon, 27 Jan 2025 16:48:30 -0500
Subject: [PATCH] Re-enable system prompt override

---
 ols/src/query_helpers/docs_summarizer.py         | 15 ++-------------
 tests/unit/query_helpers/test_docs_summarizer.py |  2 +-
 2 files changed, 3 insertions(+), 14 deletions(-)

diff --git a/ols/src/query_helpers/docs_summarizer.py b/ols/src/query_helpers/docs_summarizer.py
index 8049db93..eca405f8 100644
--- a/ols/src/query_helpers/docs_summarizer.py
+++ b/ols/src/query_helpers/docs_summarizer.py
@@ -12,7 +12,7 @@
 from ols.app.metrics import TokenMetricUpdater
 from ols.app.models.models import RagChunk, SummarizerResponse
 from ols.constants import RAG_CONTENT_LIMIT, GenericLLMParameters
-from ols.customize import prompts, reranker
+from ols.customize import reranker
 from ols.src.prompts.prompt_generator import (
     GeneratePrompt,
     restructure_history,
@@ -31,7 +31,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         """Initialize the QuestionValidator."""
         super().__init__(*args, **kwargs)
         self._prepare_llm()
-        self._get_system_prompt()
         self.verbose = config.ols_config.logging_config.app_log_level == logging.DEBUG
 
     def _prepare_llm(self) -> None:
@@ -45,16 +44,6 @@
             self.provider, self.model, self.generic_llm_params, self.streaming
         )
 
-    def _get_system_prompt(self) -> None:
-        """Retrieve the system prompt."""
-        # use system prompt from config if available otherwise use
-        # default system prompt fine-tuned for the service
-        if config.ols_config.system_prompt is not None:
-            self.system_prompt = config.ols_config.system_prompt
-        else:
-            self.system_prompt = prompts.QUERY_SYSTEM_INSTRUCTION
-        logger.debug("System prompt: %s", self.system_prompt)
-
     def _prepare_prompt(
         self,
         query: str,
@@ -123,7 +112,7 @@
         )
 
         final_prompt, llm_input_values = GeneratePrompt(
-            query, rag_context, history, self.system_prompt
+            query, rag_context, history, self._system_prompt
         ).generate_prompt(self.model)
 
         # Tokens-check: We trigger the computation of the token count
diff --git a/tests/unit/query_helpers/test_docs_summarizer.py b/tests/unit/query_helpers/test_docs_summarizer.py
index c82d6347..0949f197 100644
--- a/tests/unit/query_helpers/test_docs_summarizer.py
+++ b/tests/unit/query_helpers/test_docs_summarizer.py
@@ -58,7 +58,7 @@ def test_if_system_prompt_was_updated():
     summarizer = DocsSummarizer(llm_loader=mock_llm_loader(None))
     # expected prompt was loaded during configuration phase
    expected_prompt = config.ols_config.system_prompt
-    assert summarizer.system_prompt == expected_prompt
+    assert summarizer._system_prompt == expected_prompt
 
 
 def test_docs_summarizer_streaming_parameter():
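
Note: the patch replaces the per-class _get_system_prompt() helper with a
self._system_prompt attribute, which it assumes is populated by shared code
(most likely the QueryHelper base class that DocsSummarizer inherits from).
Below is a minimal sketch of what that shared resolution could look like; the
override-or-fallback logic is taken verbatim from the removed method, while
the QueryHelper class shape and the "from ols import config" import path are
assumptions, not part of this patch.

    # Hypothetical sketch -- not part of this patch. It mirrors the logic of
    # the removed _get_system_prompt(), relocated to the shared base class so
    # every query helper resolves the system prompt override the same way.
    import logging

    from ols import config  # assumed import path for the loaded service config
    from ols.customize import prompts

    logger = logging.getLogger(__name__)


    class QueryHelper:
        """Base class for query helpers (constructor signature assumed)."""

        def __init__(self, *args, **kwargs) -> None:
            # Prefer the system prompt supplied in the service configuration;
            # otherwise fall back to the default prompt fine-tuned for the
            # service, exactly as the removed helper did.
            if config.ols_config.system_prompt is not None:
                self._system_prompt = config.ols_config.system_prompt
            else:
                self._system_prompt = prompts.QUERY_SYSTEM_INSTRUCTION
            logger.debug("System prompt: %s", self._system_prompt)

The updated unit test (summarizer._system_prompt == config.ols_config.system_prompt)
is consistent with this reading: when a system prompt is present in the config,
it must win over the built-in default.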