diff --git a/freeact/model/gemini/model/chat.py b/freeact/model/gemini/model/chat.py
index a6a65a0..56af8a0 100644
--- a/freeact/model/gemini/model/chat.py
+++ b/freeact/model/gemini/model/chat.py
@@ -7,12 +7,13 @@
from google.genai.types import GenerateContentConfig, ThinkingConfig
from freeact.model.base import CodeActModel, CodeActModelResponse, CodeActModelTurn, StreamRetry
-from freeact.model.gemini.prompt import EXECUTION_ERROR_TEMPLATE, EXECUTION_OUTPUT_TEMPLATE, SYSTEM_TEMPLATE
+from freeact.model.gemini.prompt import default, thinking
GeminiModelName = Literal[
"gemini-2.0-flash",
"gemini-2.0-flash-001",
- "gemini-2.0-flash-lite-preview-02-05" "gemini-2.0-flash-exp",
+ "gemini-2.0-flash-lite-preview-02-05",
+ "gemini-2.0-flash-exp",
"gemini-2.0-flash-thinking-exp",
"gemini-2.0-flash-thinking-exp-01-21",
]
@@ -89,7 +90,26 @@ def __init__(
max_tokens: int = 4096,
**kwargs,
):
- self._model_name = model_name
+ is_thinking_model = "thinking" in model_name.lower()
+
+ if is_thinking_model:
+ # ------------------------------------------------------
+ # EXPERIMENTAL
+ # ------------------------------------------------------
+ self.system_template = thinking.SYSTEM_TEMPLATE.format(
+ python_modules=skill_sources or "",
+ python_packages=thinking.EXAMPLE_PYTHON_PACKAGES,
+ rest_apis=thinking.EXAMPLE_REST_APIS,
+ )
+ self.execution_error_template = thinking.EXECUTION_ERROR_TEMPLATE
+ self.execution_output_template = thinking.EXECUTION_OUTPUT_TEMPLATE
+ self.thinking_config = ThinkingConfig(include_thoughts=True)
+ else:
+ self.system_template = default.SYSTEM_TEMPLATE.format(python_modules=skill_sources or "")
+ self.execution_error_template = default.EXECUTION_ERROR_TEMPLATE
+ self.execution_output_template = default.EXECUTION_OUTPUT_TEMPLATE
+ self.thinking_config = None
+
self._client = genai.Client(**kwargs, http_options={"api_version": "v1alpha"})
self._chat = self._client.aio.chats.create(
model=model_name,
@@ -97,20 +117,16 @@ def __init__(
temperature=temperature,
max_output_tokens=max_tokens,
response_modalities=["TEXT"],
- system_instruction=SYSTEM_TEMPLATE.format(python_modules=skill_sources or ""),
- thinking_config=ThinkingConfig(include_thoughts=True) if self.thinking else None,
+ system_instruction=self.system_template,
+ thinking_config=self.thinking_config,
),
)
- @property
- def thinking(self) -> bool:
- return "thinking" in self._model_name.lower()
-
def request(self, user_query: str, **kwargs) -> GeminiTurn:
return GeminiTurn(self._chat, user_query)
def feedback(
self, feedback: str, is_error: bool, tool_use_id: str | None, tool_use_name: str | None, **kwargs
) -> GeminiTurn:
- feedback_template = EXECUTION_ERROR_TEMPLATE if is_error else EXECUTION_OUTPUT_TEMPLATE
+ feedback_template = self.execution_error_template if is_error else self.execution_output_template
return GeminiTurn(self._chat, feedback_template.format(execution_feedback=feedback))
diff --git a/freeact/model/gemini/model/live.py b/freeact/model/gemini/model/live.py
index ee26394..9d779b2 100644
--- a/freeact/model/gemini/model/live.py
+++ b/freeact/model/gemini/model/live.py
@@ -6,7 +6,7 @@
from freeact.model.base import CodeActModel, CodeActModelTurn, StreamRetry
from freeact.model.gemini.model.chat import GeminiModelName, GeminiResponse
-from freeact.model.gemini.prompt import EXECUTION_ERROR_TEMPLATE, EXECUTION_OUTPUT_TEMPLATE, SYSTEM_TEMPLATE
+from freeact.model.gemini.prompt.default import EXECUTION_ERROR_TEMPLATE, EXECUTION_OUTPUT_TEMPLATE, SYSTEM_TEMPLATE
class GeminiLiveTurn(CodeActModelTurn):
diff --git a/freeact/model/gemini/prompt/__init__.py b/freeact/model/gemini/prompt/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/freeact/model/gemini/prompt.py b/freeact/model/gemini/prompt/default.py
similarity index 100%
rename from freeact/model/gemini/prompt.py
rename to freeact/model/gemini/prompt/default.py
diff --git a/freeact/model/gemini/prompt/thinking.py b/freeact/model/gemini/prompt/thinking.py
new file mode 100644
index 0000000..7f3bcd6
--- /dev/null
+++ b/freeact/model/gemini/prompt/thinking.py
@@ -0,0 +1,101 @@
+SYSTEM_TEMPLATE = """You are a ReAct agent that acts by generating Python code.
+
+## Environment
+Your code execution environment is an IPython notebook.
+Generated code at each step is executed in a separate IPython notebook cell.
+
+## Workflow
+You will be given a user query. For that query follow this workflow:
+
+1. Generate an initial plan of steps for how to answer the query
+
+2. Execute the plan step-by-step. At each step do the following (2.1. - 2.4.):
+
+ 2.1. Generate your thoughts what to do in the current step
+
+ 2.2. Generate Python code for the current step and then stop generating
+
+ 2.3. Wait for a message from the user with code execution results
+
+ 2.4. Process the code execution results and go back to 2.1. until you have a final answer
+
+3. Finally, provide a final answer to the user query
+
+## Tools
+You can use tools in the Python code you generate. You have access to the following types of tools:
+- [Skill modules](#skill-modules). These are provided as Python source code enclosed in ```python ...``` delimiters. At the top of each skill module is a line containing the module name which is needed for importing the definitions in this module.
+- [Python packages](#python-packages). These are packages that are available on pypi.org. You can install them with `!pip install ...`.
+- [REST APIs](#rest-apis). These are APIs that you should access with the Python `requests` package. The `requests` package is already installed.
+
+### Skill modules
+{python_modules}
+
+### Python packages
+{python_packages}
+
+### REST APIs
+{rest_apis}
+
+## Important rules
+- Always rely on code execution results to make decisions. Never guess an answer to a user query.
+- When you use a tool from [skill modules](#skill-modules), make sure to import the module first.
+- When you use a tool that returns unstructured data (e.g. text, ...) always print the data
+- When you use a tool that returns structured data (e.g. JSON, ...) avoid printing structured data
+ - make sure to assign structured data to a variable and reuse it in later steps if needed
+
+## Output format
+
+For the initial plan, use the following format:
+
+ Plan:
+ ...
+
+For each step, use the following format:
+
+ Thoughts:
+ ...
+
+ Action:
+ ```python
+ ...
+ ```
+
+For the final answer, use the following format:
+
+ Final answer:
+ ...
+"""
+
+EXAMPLE_PYTHON_PACKAGES = """\
+- PyGithub (for interacting with GitHub)
+- yfinance (for retrieving financial information)
+"""
+
+EXAMPLE_REST_APIS = """\
+- Nominatim geocoding API (for geocoding locations like city names, ...)
+- open-meteo weather API (for retrieving weather information)
+"""
+
+EXECUTION_OUTPUT_TEMPLATE = """Here are the execution results of the code you generated:
+
+
+{execution_feedback}
+
+
+Continue with the next step required to answer the user query.
+"""
+
+
+EXECUTION_ERROR_TEMPLATE = """The code you generated produced an error during execution:
+
+
+{execution_feedback}
+
+
+Fix the error.
+"""
+
+if __name__ == "__main__":
+ print(
+ SYSTEM_TEMPLATE.format(python_modules="", python_packages=EXAMPLE_PYTHON_PACKAGES, rest_apis=EXAMPLE_REST_APIS)
+ )