Stateless models #1976
Open · wants to merge 54 commits into main from feat/stateless-models

Commits (54)
77e2f4d  Add metrics to Message (ashpreetbedi, Feb 1, 2025)
ff22455  Update models (ashpreetbedi, Feb 1, 2025)
8c825b3  pre-process the tools for the model (ashpreetbedi, Feb 1, 2025)
7668e94  Update groq (ashpreetbedi, Feb 2, 2025)
bb2767e  fix (ashpreetbedi, Feb 2, 2025)
8e7d600  Use SessionMetrics (ashpreetbedi, Feb 2, 2025)
337b1a0  Update add_tools_to_model (dirkbrnd, Feb 4, 2025)
7135282  Update assistant role (ashpreetbedi, Feb 4, 2025)
110077d  Merge (dirkbrnd, Feb 5, 2025)
4e16e76  Make OpenAI and Groq and OpenAILike into the new structure (dirkbrnd, Feb 5, 2025)
1a9b1c0  Update Claude (dirkbrnd, Feb 5, 2025)
349f94a  Fix claude (dirkbrnd, Feb 5, 2025)
7e5e25f  Update (dirkbrnd, Feb 5, 2025)
421d224  Merge branch 'main' of https://github.com/agno-agi/agno into feat/sta… (dirkbrnd, Feb 6, 2025)
2012f68  Add bedrock (dirkbrnd, Feb 6, 2025)
f9c1c3d  Merge branch 'main' of https://github.com/agno-agi/agno into feat/sta… (dirkbrnd, Feb 6, 2025)
7a68f69  Update (dirkbrnd, Feb 6, 2025)
04ad756  Merge branch 'main' of https://github.com/agno-agi/agno into feat/sta… (dirkbrnd, Feb 6, 2025)
fc9cf43  Update Cohere to v2 (dirkbrnd, Feb 6, 2025)
3d86669  update (dirkbrnd, Feb 7, 2025)
e13f472  Update how response stream works and add tests (dirkbrnd, Feb 7, 2025)
c2f0ac9  Update (dirkbrnd, Feb 7, 2025)
15d4630  Update (dirkbrnd, Feb 7, 2025)
70350b7  Update (dirkbrnd, Feb 7, 2025)
896ae72  Update (dirkbrnd, Feb 7, 2025)
acb3d2a  Update response_stream (ashpreetbedi, Feb 7, 2025)
e6af03d  Add Mistral (dirkbrnd, Feb 7, 2025)
d6b58e6  Update TogetherAI (dirkbrnd, Feb 7, 2025)
3184ef4  update (dirkbrnd, Feb 7, 2025)
ddd3fe8  Rearange the base class (dirkbrnd, Feb 7, 2025)
2090182  Fix for asyncstream (dirkbrnd, Feb 7, 2025)
27017fc  Fix tests for OpenaI (dirkbrnd, Feb 7, 2025)
8b2b8d8  Update tests (dirkbrnd, Feb 7, 2025)
9084437  update (dirkbrnd, Feb 7, 2025)
11d998b  Change to anthropic claude client for bedrock (dirkbrnd, Feb 8, 2025)
e36c006  Update style (dirkbrnd, Feb 8, 2025)
429b5bf  Add Azure AI Foundry models (dirkbrnd, Feb 10, 2025)
0de7397  Style update (dirkbrnd, Feb 10, 2025)
ea86645  Feat/stateless model ollama (#2045) (ysolanky, Feb 10, 2025)
60b5988  Fix for gemini (dirkbrnd, Feb 10, 2025)
483f056  update (dirkbrnd, Feb 10, 2025)
3f85533  Update (dirkbrnd, Feb 10, 2025)
5a2ef93  Update (dirkbrnd, Feb 10, 2025)
bba0126  Add error handling (dirkbrnd, Feb 10, 2025)
034b222  Fix gemini structured output (dirkbrnd, Feb 10, 2025)
958b457  async cookbooks (manthanguptaa, Feb 10, 2025)
cc758bd  update (manthanguptaa, Feb 10, 2025)
9d10ee8  Add vertex AI example (dirkbrnd, Feb 10, 2025)
38f8e12  Merge branch 'feat/stateless-models' of https://github.com/agno-agi/a… (dirkbrnd, Feb 10, 2025)
48afdf3  ollama tools async cookbooks (manthanguptaa, Feb 10, 2025)
c55a837  update (manthanguptaa, Feb 10, 2025)
063bd54  fix logging (ysolanky, Feb 10, 2025)
7105099  tool call result logging (ysolanky, Feb 10, 2025)
56547f0  Ollama and Gemini update (ysolanky, Feb 10, 2025)
Files changed
6 changes: 5 additions & 1 deletion agno.code-workspace
@@ -2,7 +2,11 @@
 "folders": [
 {
 "path": "."
-}
+},
+{
+"path": "../agno-docs"
+},
+
 ],
 "settings": {
 "python.analysis.extraPaths": [
3 changes: 3 additions & 0 deletions cookbook/agent_concepts/other/agent_metrics.py
@@ -33,3 +33,6 @@
 # Print the metrics
 print("---" * 5, "Aggregated Metrics", "---" * 5)
 pprint(agent.run_response.metrics)
+# Print the session metrics
+print("---" * 5, "Session Metrics", "---" * 5)
+pprint(agent.session_metrics)
2 changes: 1 addition & 1 deletion cookbook/examples/apps/agentic_rag/utils.py
@@ -2,7 +2,7 @@
 
 import streamlit as st
 from agentic_rag import get_agentic_rag_agent
-from agno.agent.agent import Agent
+from agno.agent import Agent
 from agno.utils.log import logger
 
 
@@ -4,7 +4,7 @@
 
 import asyncio
 
-from agno.agent.agent import Agent
+from agno.agent import Agent
 from agno.models.anthropic import Claude
 
 agent = Agent(
@@ -4,7 +4,7 @@
 
 import asyncio
 
-from agno.agent.agent import Agent
+from agno.agent import Agent
 from agno.models.anthropic import Claude
 
 agent = Agent(
@@ -4,7 +4,7 @@
 
 import asyncio
 
-from agno.agent.agent import Agent
+from agno.agent import Agent
 from agno.models.anthropic import Claude
 from agno.tools.duckduckgo import DuckDuckGoTools
 
2 changes: 1 addition & 1 deletion cookbook/models/anthropic/tool_use.py
@@ -10,4 +10,4 @@
 show_tool_calls=True,
 markdown=True,
 )
-agent.print_response("Whats happening in France?", stream=True)
+agent.print_response("Whats happening in France?")
13 changes: 13 additions & 0 deletions cookbook/models/anthropic/tool_use_stream.py
@@ -0,0 +1,13 @@
"""Run `pip install duckduckgo-search` to install dependencies."""

from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.tools.duckduckgo import DuckDuckGoTools

agent = Agent(
model=Claude(id="claude-3-5-sonnet-20240620"),
tools=[DuckDuckGoTools()],
show_tool_calls=True,
markdown=True,
)
agent.print_response("Whats happening in France?", stream=True)
15 changes: 15 additions & 0 deletions cookbook/models/aws_bedrock/claude/async_basic.py
@@ -0,0 +1,15 @@
import asyncio

from agno.agent import Agent, RunResponse # noqa
from agno.models.aws.claude import Claude

agent = Agent(
model=Claude(id="anthropic.claude-3-5-sonnet-20240620-v1:0"), markdown=True
)

# Get the response in a variable
# run: RunResponse = agent.run("Share a 2 sentence horror story")
# print(run.content)

# Print the response in the terminal
asyncio.run(agent.aprint_response("Share a 2 sentence horror story"))
17 changes: 17 additions & 0 deletions cookbook/models/aws_bedrock/claude/async_basic_stream.py
@@ -0,0 +1,17 @@
import asyncio
from typing import Iterator # noqa

from agno.agent import Agent, RunResponse # noqa
from agno.models.aws.claude import Claude

agent = Agent(
model=Claude(id="anthropic.claude-3-5-sonnet-20240620-v1:0"), markdown=True
)

# Get the response in a variable
# run_response: Iterator[RunResponse] = agent.run("Share a 2 sentence horror story", stream=True)
# for chunk in run_response:
# print(chunk.content)

# Print the response in the terminal
asyncio.run(agent.aprint_response("Share a 2 sentence horror story", stream=True))
18 changes: 18 additions & 0 deletions cookbook/models/aws_bedrock/claude/async_tool_use.py
@@ -0,0 +1,18 @@
"""
Async example using Claude with tool calls.
"""

import asyncio

from agno.agent import Agent
from agno.models.aws.claude import Claude
from agno.tools.duckduckgo import DuckDuckGoTools

agent = Agent(
model=Claude(id="anthropic.claude-3-5-sonnet-20240620-v1:0"),
tools=[DuckDuckGoTools()],
show_tool_calls=True,
markdown=True,
)

asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
6 changes: 6 additions & 0 deletions cookbook/models/azure/ai_foundry/.gitignore
@@ -0,0 +1,6 @@
*.jpg
*.png
*.mp3
*.wav
*.mp4
*.mp3
70 changes: 70 additions & 0 deletions cookbook/models/azure/ai_foundry/README.md
@@ -0,0 +1,70 @@
# Azure AI Interface Cookbook

> Note: Fork and clone this repository if needed
>
> Note: This cookbook is for the Azure AI Interface model. It uses the `AzureAIFoundry` class with the `Phi-4` model. Please change the model ID to the one you want to use.

### 1. Create and activate a virtual environment

```shell
python3 -m venv ~/.venvs/aienv
source ~/.venvs/aienv/bin/activate
```

### 2. Export environment variables

Navigate to the Azure AI Foundry on the [Azure Portal](https://portal.azure.com/) and create a service. Then, using the Azure AI Foundry portal, create a deployment and set your environment variables.

```shell
export AZURE_API_KEY=***
export AZURE_ENDPOINT="https://<your-host-name>.services.ai.azure.com/models"
export AZURE_API_VERSION="2024-05-01-preview"
```

You can get the endpoint from the Azure AI Foundry portal. Click on the deployed model and copy the "Target URI"

### 3. Install libraries

```shell
pip install -U openai duckduckgo-search duckdb yfinance agno
```

### 4. Run basic Agent

- Streaming on

```shell
python cookbook/models/azure/openai/basic_stream.py
```

- Streaming off

```shell
python cookbook/models/azure/openai/basic.py
```

### 5. Run Agent with Tools

- DuckDuckGo Search

```shell
python cookbook/models/azure/openai/tool_use.py
```

### 6. Run Agent that returns structured output

```shell
python cookbook/models/azure/openai/structured_output.py
```

### 7. Run Agent that uses storage

```shell
python cookbook/models/azure/openai/storage.py
```

### 8. Run Agent that uses knowledge

```shell
python cookbook/models/azure/openai/knowledge.py
```
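
Step 2 of this README exports three variables but leaves verification to the reader. A minimal sanity check before running any of the cookbooks, assuming a POSIX shell and the exact variable names exported above, is:

```shell
# Abort with an error message if a required variable is unset or empty.
: "${AZURE_API_KEY:?AZURE_API_KEY is not set}"
: "${AZURE_ENDPOINT:?AZURE_ENDPOINT is not set}"
: "${AZURE_API_VERSION:?AZURE_API_VERSION is not set}"
```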
12 changes: 12 additions & 0 deletions cookbook/models/azure/ai_foundry/async_basic.py
@@ -0,0 +1,12 @@
import asyncio

from agno.agent import Agent
from agno.models.azure.ai_foundry import AzureAIFoundry

agent = Agent(
model=AzureAIFoundry(id="Phi-4"),
description="You help people with their health and fitness goals.",
instructions=["Recipes should be under 5 ingredients"],
)
# -*- Print a response to the cli
asyncio.run(agent.aprint_response("Share a breakfast recipe.", markdown=True))
14 changes: 14 additions & 0 deletions cookbook/models/azure/ai_foundry/async_basic_stream.py
@@ -0,0 +1,14 @@
import asyncio

from agno.agent import Agent
from agno.models.azure.ai_foundry import AzureAIFoundry

assistant = Agent(
model=AzureAIFoundry(id="Phi-4"),
description="You help people with their health and fitness goals.",
instructions=["Recipes should be under 5 ingredients"],
)
# -*- Print a response to the cli
asyncio.run(
assistant.aprint_response("Share a breakfast recipe.", markdown=True, stream=True)
)
@@ -1,11 +1,11 @@
 from agno.agent import Agent, RunResponse # noqa
-from agno.models.ollama import OllamaHermes
+from agno.models.azure.ai_foundry import AzureAIFoundry
 
-agent = Agent(model=OllamaHermes(id="hermes3"), markdown=True)
+agent = Agent(model=AzureAIFoundry(id="Phi-4"), markdown=True)
 
 # Get the response in a variable
 # run: RunResponse = agent.run("Share a 2 sentence horror story")
 # print(run.content)
 
-# Print the response in the terminal
+# Print the response on the terminal
 agent.print_response("Share a 2 sentence horror story")
@@ -1,13 +1,14 @@
 from typing import Iterator # noqa
 
 from agno.agent import Agent, RunResponse # noqa
-from agno.models.ollama import OllamaHermes
+from agno.models.azure.ai_foundry import AzureAIFoundry
 
-agent = Agent(model=OllamaHermes(id="hermes3"), markdown=True)
+agent = Agent(model=AzureAIFoundry(id="Phi-4"), markdown=True)
 
 # Get the response in a variable
 # run_response: Iterator[RunResponse] = agent.run("Share a 2 sentence horror story", stream=True)
 # for chunk in run_response:
 # print(chunk.content)
 
-# Print the response in the terminal
+# Print the response on the terminal
 agent.print_response("Share a 2 sentence horror story", stream=True)
11 changes: 11 additions & 0 deletions cookbook/models/azure/ai_foundry/demo_cohere.py
@@ -0,0 +1,11 @@
from agno.agent import Agent, RunResponse # noqa
from agno.models.azure.ai_foundry import AzureAIFoundry

agent = Agent(model=AzureAIFoundry(id="Cohere-command-r-08-2024"), markdown=True)

# Get the response in a variable
# run: RunResponse = agent.run("Share a 2 sentence horror story")
# print(run.content)

# Print the response on the terminal
agent.print_response("Share a 2 sentence horror story")
11 changes: 11 additions & 0 deletions cookbook/models/azure/ai_foundry/demo_mistral.py
@@ -0,0 +1,11 @@
from agno.agent import Agent, RunResponse # noqa
from agno.models.azure.ai_foundry import AzureAIFoundry

agent = Agent(model=AzureAIFoundry(id="Mistral-Large-2411"), markdown=True)

# Get the response in a variable
# run: RunResponse = agent.run("Share a 2 sentence horror story")
# print(run.content)

# Print the response on the terminal
agent.print_response("Share a 2 sentence horror story")
19 changes: 19 additions & 0 deletions cookbook/models/azure/ai_foundry/image_agent.py
@@ -0,0 +1,19 @@
from agno.agent import Agent
from agno.media import Image
from agno.models.azure.ai_foundry import AzureAIFoundry

agent = Agent(
model=AzureAIFoundry(id="Llama-3.2-11B-Vision-Instruct"),
markdown=True,
)

agent.print_response(
"Tell me about this image.",
images=[
Image(
url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg",
detail="High",
)
],
stream=True,
)
24 changes: 24 additions & 0 deletions cookbook/models/azure/ai_foundry/image_agent_bytes.py
@@ -0,0 +1,24 @@
from pathlib import Path

from agno.agent import Agent
from agno.media import Image
from agno.models.azure.ai_foundry import AzureAIFoundry

agent = Agent(
model=AzureAIFoundry(id="Llama-3.2-11B-Vision-Instruct"),
markdown=True,
)

image_path = Path(__file__).parent.joinpath("sample.jpg")

# Read the image file content as bytes
with open(image_path, "rb") as img_file:
image_bytes = img_file.read()

agent.print_response(
"Tell me about this image.",
images=[
Image(content=image_bytes),
],
stream=True,
)
27 changes: 27 additions & 0 deletions cookbook/models/azure/ai_foundry/knowledge.py
@@ -0,0 +1,27 @@
"""Run `pip install duckduckgo-search sqlalchemy pgvector pypdf openai` to install dependencies."""

from agno.agent import Agent
from agno.embedder.azure_openai import AzureOpenAIEmbedder
from agno.knowledge.pdf_url import PDFUrlKnowledgeBase
from agno.models.azure.ai_foundry import AzureAIFoundry
from agno.vectordb.pgvector import PgVector

db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"

knowledge_base = PDFUrlKnowledgeBase(
urls=["https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
vector_db=PgVector(
table_name="recipes",
db_url=db_url,
embedder=AzureOpenAIEmbedder(),
),
)
knowledge_base.load(recreate=False) # Comment out after first run

agent = Agent(
model=AzureAIFoundry(id="Cohere-command-r-08-2024"),
knowledge=knowledge_base,
show_tool_calls=True,
debug_mode=True,
)
agent.print_response("How to make Thai curry?", markdown=True)
@@ -1,18 +1,17 @@
-"""Run `pip install duckduckgo-search sqlalchemy google.generativeai` to install dependencies."""
+"""Run `pip install duckduckgo-search sqlalchemy anthropic` to install dependencies."""
 
 from agno.agent import Agent
-from agno.models.vertexai import Gemini
+from agno.models.azure.ai_foundry import AzureAIFoundry
 from agno.storage.agent.postgres import PostgresAgentStorage
 from agno.tools.duckduckgo import DuckDuckGoTools
 
 db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
 
 agent = Agent(
-model=Gemini(id="gemini-2.0-flash-exp"),
+model=AzureAIFoundry(id="Phi-4"),
 storage=PostgresAgentStorage(table_name="agent_sessions", db_url=db_url),
 tools=[DuckDuckGoTools()],
 add_history_to_messages=True,
 debug_mode=True,
 )
 agent.print_response("How many people live in Canada?")
 agent.print_response("What is their national anthem called?")