Skip to content

Commit

Permalink
Improved Gemini 2 Thinking prompt (#39)
Browse files Browse the repository at this point in the history
  • Loading branch information
krasserm authored Feb 10, 2025
1 parent 841de05 commit 999804a
Show file tree
Hide file tree
Showing 5 changed files with 128 additions and 11 deletions.
36 changes: 26 additions & 10 deletions freeact/model/gemini/model/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,13 @@
from google.genai.types import GenerateContentConfig, ThinkingConfig

from freeact.model.base import CodeActModel, CodeActModelResponse, CodeActModelTurn, StreamRetry
from freeact.model.gemini.prompt import EXECUTION_ERROR_TEMPLATE, EXECUTION_OUTPUT_TEMPLATE, SYSTEM_TEMPLATE
from freeact.model.gemini.prompt import default, thinking

# Model names accepted by GeminiChat / the genai client.
# NOTE: an earlier revision was missing the comma after the lite-preview entry,
# which silently concatenated two adjacent string literals into one bogus
# member ("...-02-05gemini-2.0-flash-exp"); each name must be its own literal.
GeminiModelName = Literal[
    "gemini-2.0-flash",
    "gemini-2.0-flash-001",
    "gemini-2.0-flash-lite-preview-02-05",
    "gemini-2.0-flash-exp",
    "gemini-2.0-flash-thinking-exp",
    "gemini-2.0-flash-thinking-exp-01-21",
]
Expand Down Expand Up @@ -89,28 +90,43 @@ def __init__(
max_tokens: int = 4096,
**kwargs,
):
self._model_name = model_name
is_thinking_model = "thinking" in model_name.lower()

if is_thinking_model:
# ------------------------------------------------------
# EXPERIMENTAL
# ------------------------------------------------------
self.system_template = thinking.SYSTEM_TEMPLATE.format(
python_modules=skill_sources or "",
python_packages=thinking.EXAMPLE_PYTHON_PACKAGES,
rest_apis=thinking.EXAMPLE_REST_APIS,
)
self.execution_error_template = thinking.EXECUTION_ERROR_TEMPLATE
self.execution_output_template = thinking.EXECUTION_OUTPUT_TEMPLATE
self.thinking_config = ThinkingConfig(include_thoughts=True)
else:
self.system_template = default.SYSTEM_TEMPLATE.format(python_modules=skill_sources or "")
self.execution_error_template = default.EXECUTION_ERROR_TEMPLATE
self.execution_output_template = default.EXECUTION_OUTPUT_TEMPLATE
self.thinking_config = None

self._client = genai.Client(**kwargs, http_options={"api_version": "v1alpha"})
self._chat = self._client.aio.chats.create(
model=model_name,
config=GenerateContentConfig(
temperature=temperature,
max_output_tokens=max_tokens,
response_modalities=["TEXT"],
system_instruction=SYSTEM_TEMPLATE.format(python_modules=skill_sources or ""),
thinking_config=ThinkingConfig(include_thoughts=True) if self.thinking else None,
system_instruction=self.system_template,
thinking_config=self.thinking_config,
),
)

@property
def thinking(self) -> bool:
    # True when the configured model is a "thinking" variant
    # (e.g. "gemini-2.0-flash-thinking-exp"), detected by substring match.
    # NOTE(review): relies on self._model_name being set in __init__ — confirm
    # it is still assigned there after the thinking/default prompt split.
    return "thinking" in self._model_name.lower()

def request(self, user_query: str, **kwargs) -> GeminiTurn:
    """Start a new turn for *user_query* on the underlying chat session.

    Extra keyword arguments are accepted for interface compatibility with
    other CodeActModel implementations but are not used here.
    """
    return GeminiTurn(self._chat, user_query)

def feedback(
    self, feedback: str, is_error: bool, tool_use_id: str | None, tool_use_name: str | None, **kwargs
) -> GeminiTurn:
    """Return a turn that sends code-execution feedback back to the model.

    Selects the error or output template configured for this model variant
    (thinking vs. default prompts, chosen in ``__init__``) and formats the
    raw *feedback* text into it. ``tool_use_id``/``tool_use_name`` are part
    of the shared interface and unused by the Gemini backend.
    """
    # Use the instance-level templates; the old module-level
    # EXECUTION_ERROR_TEMPLATE / EXECUTION_OUTPUT_TEMPLATE constants are no
    # longer imported, so referencing them would raise NameError.
    feedback_template = self.execution_error_template if is_error else self.execution_output_template
    return GeminiTurn(self._chat, feedback_template.format(execution_feedback=feedback))
2 changes: 1 addition & 1 deletion freeact/model/gemini/model/live.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

from freeact.model.base import CodeActModel, CodeActModelTurn, StreamRetry
from freeact.model.gemini.model.chat import GeminiModelName, GeminiResponse
from freeact.model.gemini.prompt import EXECUTION_ERROR_TEMPLATE, EXECUTION_OUTPUT_TEMPLATE, SYSTEM_TEMPLATE
from freeact.model.gemini.prompt.default import EXECUTION_ERROR_TEMPLATE, EXECUTION_OUTPUT_TEMPLATE, SYSTEM_TEMPLATE


class GeminiLiveTurn(CodeActModelTurn):
Expand Down
Empty file.
File renamed without changes.
101 changes: 101 additions & 0 deletions freeact/model/gemini/prompt/thinking.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
# System prompt for Gemini "thinking" models: frames the model as a ReAct-style
# agent that acts by emitting Python code, executed cell-by-cell in IPython.
# Placeholders filled at construction time: {python_modules} (skill module
# sources), {python_packages}, and {rest_apis} (see the EXAMPLE_* constants
# below for the latter two).
SYSTEM_TEMPLATE = """You are a ReAct agent that acts by generating Python code.
## Environment
Your code execution environment is an IPython notebook.
Generated code at each step is executed in a separate IPython notebook cell.
## Workflow
You will be given a user query. For that query follow this workflow:
1. Generate an initial plan of steps how to answer the query
2. Execute the plan step-by-step. At each step do the following (2.1. - 2.4.):
2.1. Generate your thoughts what to do in the current step
2.2. Generate Python code for the current step and then stop generating
2.3. Wait for a message from the user with code execution results
2.4. Process the code execution results and go back to 2.1. until you have a final answer
3. Finally, provide a final answer to the user query
## Tools
You can use tools in the Python code you generate. You have access to the following types of tools:
- [Skill modules](#skill-modules). These are provided as Python source code enclosed in ```python ...``` delimiters. At the top of each skill module is a line containing the module name which is needed for importing the definitions in this module.
- [Python packages](#python-packages). These are packages that are available on pypi.org. You can install them with `!pip install ...`.
- [REST APIs](#rest-apis). These are APIs that you should access with the Python `requests` package. The `requests` package is already installed.
### Skill modules
{python_modules}
### Python packages
{python_packages}
### REST APIs
{rest_apis}
## Important rules
- Always rely on code execution results to make decisions. Never guess an answer to a user query.
- When you use a tool from [skill modules](#skill-modules), make sure to import the module first.
- When you use a tool that returns unstructured data (e.g. text, ...) always print the data
- When you use a tool that returns structured data (e.g. JSON, ...) avoid printing structured data
- make sure to assign structured data to a variable and reuse it in later steps if needed
## Output format
For the initial plan, use the following format:
Plan:
...
For each step, use the following format:
Thoughts:
...
Action:
```python
...
```
For the final answer, use the following format:
Final answer:
...
"""

# Example content for the {python_packages} placeholder in SYSTEM_TEMPLATE
# (PyPI packages the agent may `!pip install` and use).
EXAMPLE_PYTHON_PACKAGES = """\
- PyGithub (for interacting with GitHub)
- yfinance (for retrieving financial information)
"""

# Example content for the {rest_apis} placeholder in SYSTEM_TEMPLATE
# (APIs the agent should call with `requests`).
# Fix: "nomatim" misspelled the OpenStreetMap geocoding service — its actual
# name is "Nominatim"; the prompt should name the real service.
EXAMPLE_REST_APIS = """\
- Nominatim geocoding API (for geocoding locations like city names, ...)
- open-meteo weather API (for retrieving weather information)
"""

# User-turn message wrapping successful code-execution output; the raw output
# is substituted for {execution_feedback} (see GeminiChat.feedback).
EXECUTION_OUTPUT_TEMPLATE = """Here are the execution results of the code you generated:
<execution-results>
{execution_feedback}
</execution-results>
Continue with the next step required to answer the user query.
"""


# User-turn message wrapping a code-execution error; the error text is
# substituted for {execution_feedback} and the model is asked to fix it.
EXECUTION_ERROR_TEMPLATE = """The code you generated produced an error during execution:
<execution-error>
{execution_feedback}
</execution-error>
Fix the error.
"""

if __name__ == "__main__":
    # Render the thinking-model system prompt with the example tool sections
    # filled in, for quick manual inspection of the final prompt text.
    rendered_prompt = SYSTEM_TEMPLATE.format(
        python_modules="",
        python_packages=EXAMPLE_PYTHON_PACKAGES,
        rest_apis=EXAMPLE_REST_APIS,
    )
    print(rendered_prompt)

0 comments on commit 999804a

Please sign in to comment.