Skip to content

Commit

Permalink
Merge branch 'main' into examples/app/podcast-generator
Browse files Browse the repository at this point in the history
  • Loading branch information
anuragts authored Feb 14, 2025
2 parents 8a66a18 + f66cebd commit 0f96309
Show file tree
Hide file tree
Showing 5 changed files with 90 additions and 43 deletions.
14 changes: 14 additions & 0 deletions cookbook/models/ollama_tools/async_tool_use_stream.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
"""Run `pip install duckduckgo-search` to install dependencies."""

from agno.agent import Agent
from agno.models.ollama import OllamaTools
from agno.tools.duckduckgo import DuckDuckGoTools
import asyncio

agent = Agent(
model=OllamaTools(id="llama3.1:8b"),
tools=[DuckDuckGoTools()],
show_tool_calls=True,
markdown=True,
)
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
66 changes: 33 additions & 33 deletions libs/agno/agno/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -881,7 +881,6 @@ def run(
import time

time.sleep(delay)

if last_exception is not None:
raise Exception(
f"Failed after {num_attempts} attempts. Last error using {last_exception.model_name}({last_exception.model_id}): {str(last_exception)}"
Expand Down Expand Up @@ -1942,40 +1941,41 @@ def get_system_message(self) -> Optional[Message]:
f"<transfer_instructions>\n{self.get_transfer_instructions().strip()}\n</transfer_instructions>\n\n"
)
# 3.3.11 Then add memories to the system prompt
if self.memory.create_user_memories:
if self.memory.memories and len(self.memory.memories) > 0:
system_message_content += (
"You have access to memories from previous interactions with the user that you can use:\n\n"
)
system_message_content += "<memories_from_previous_interactions>"
for _memory in self.memory.memories:
system_message_content += f"\n- {_memory.memory}"
system_message_content += "\n</memories_from_previous_interactions>\n\n"
system_message_content += (
"Note: this information is from previous interactions and may be updated in this conversation. "
"You should always prefer information from this conversation over the past memories.\n\n"
)
else:
system_message_content += (
"You have the capability to retain memories from previous interactions with the user, "
"but have not had any interactions with the user yet.\n"
"If the user asks about previous memories, you can let them know that you dont have any memory about the user because you haven't had any interactions yet.\n\n"
)
system_message_content += (
"You can add new memories using the `update_memory` tool.\n"
"If you use the `update_memory` tool, remember to pass on the response to the user.\n\n"
)
# 3.3.12 Then add a summary of the interaction to the system prompt
if self.memory.create_session_summary:
if self.memory.summary is not None:
system_message_content += "Here is a brief summary of your previous interactions if it helps:\n\n"
system_message_content += "<summary_of_previous_interactions>\n"
system_message_content += str(self.memory.summary)
system_message_content += "\n</summary_of_previous_interactions>\n\n"
if self.memory:
if self.memory.create_user_memories:
if self.memory.memories and len(self.memory.memories) > 0:
system_message_content += (
"You have access to memories from previous interactions with the user that you can use:\n\n"
)
system_message_content += "<memories_from_previous_interactions>"
for _memory in self.memory.memories:
system_message_content += f"\n- {_memory.memory}"
system_message_content += "\n</memories_from_previous_interactions>\n\n"
system_message_content += (
"Note: this information is from previous interactions and may be updated in this conversation. "
"You should always prefer information from this conversation over the past memories.\n\n"
)
else:
system_message_content += (
"You have the capability to retain memories from previous interactions with the user, "
"but have not had any interactions with the user yet.\n"
"If the user asks about previous memories, you can let them know that you dont have any memory about the user because you haven't had any interactions yet.\n\n"
)
system_message_content += (
"Note: this information is from previous interactions and may be outdated. "
"You should ALWAYS prefer information from this conversation over the past summary.\n\n"
"You can add new memories using the `update_memory` tool.\n"
"If you use the `update_memory` tool, remember to pass on the response to the user.\n\n"
)
# 3.3.12 Then add a summary of the interaction to the system prompt
if self.memory.create_session_summary:
if self.memory.summary is not None:
system_message_content += "Here is a brief summary of your previous interactions if it helps:\n\n"
system_message_content += "<summary_of_previous_interactions>\n"
system_message_content += str(self.memory.summary)
system_message_content += "\n</summary_of_previous_interactions>\n\n"
system_message_content += (
"Note: this information is from previous interactions and may be outdated. "
"You should ALWAYS prefer information from this conversation over the past summary.\n\n"
)

# Add the JSON output prompt if response_model is provided and structured_outputs is False
if self.response_model is not None and not self.structured_outputs:
Expand Down
4 changes: 2 additions & 2 deletions libs/agno/agno/models/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -768,7 +768,7 @@ def run_function_calls(
)
yield ModelResponse(
content=f"{fc.get_call_str()} completed in {function_call_timer.elapsed:.4f}s.",
tool_calls=[function_call_result.to_fc_result()],
tool_calls=[function_call_result.to_function_call_dict()],
event=ModelResponseEvent.tool_call_completed.value,
)

Expand Down Expand Up @@ -871,7 +871,7 @@ async def arun_function_calls(self, function_calls: List[FunctionCall], function
)
yield ModelResponse(
content=f"{fc.get_call_str()} completed in {function_call_timer.elapsed:.4f}s.",
tool_calls=[function_call_result.to_fc_result()],
tool_calls=[function_call_result.to_function_call_dict()],
event=ModelResponseEvent.tool_call_completed.value,
)

Expand Down
20 changes: 18 additions & 2 deletions libs/agno/agno/models/message.py
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,23 @@ def serialize_for_model(self) -> Dict[str, Any]:

def to_dict(self) -> Dict[str, Any]:
"""Returns the message as a dictionary."""
message_dict = self.model_dump(exclude_none=True)
message_dict = {
"content": self.content,
"reasoning_content": self.reasoning_content,
"from_history": self.from_history,
"stop_after_tool_call": self.stop_after_tool_call,
"role": self.role,
"name": self.name,
"tool_call_id": self.tool_call_id,
"tool_name": self.tool_name,
"tool_args": self.tool_args,
"tool_call_error": self.tool_call_error,
"tool_calls": self.tool_calls,
}
# Filter out None and empty collections
message_dict = {
k: v for k, v in message_dict.items() if v is not None and not (isinstance(v, (list, dict)) and len(v) == 0)
}

# Convert media objects to dictionaries
if self.images:
Expand All @@ -228,7 +244,7 @@ def to_dict(self) -> Dict[str, Any]:
message_dict["created_at"] = self.created_at
return message_dict

def to_fc_result(self) -> Dict[str, Any]:
def to_function_call_dict(self) -> Dict[str, Any]:
return {
"content": self.content,
"tool_call_id": self.tool_call_id,
Expand Down
29 changes: 23 additions & 6 deletions libs/agno/tests/integration/model/openai/test_sync.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,12 @@
from agno.agent import Agent, AgentMemory, RunResponse # noqa
from agno.media import Image
from agno.models.openai import OpenAIChat
from agno.storage.agent.postgres import PostgresAgentStorage
from agno.tools.duckduckgo import DuckDuckGoTools


def test_basic():
agent = Agent(model=OpenAIChat(id="gpt-4o"), markdown=True)
agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), markdown=True)

# Print the response in the terminal
response: RunResponse = agent.run("Share a 2 sentence horror story")
Expand All @@ -30,7 +31,7 @@ def test_basic():


def test_basic_stream():
agent = Agent(model=OpenAIChat(id="gpt-4o"), markdown=True)
agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), markdown=True)

response_stream = agent.run("Share a 2 sentence horror story", stream=True)

Expand All @@ -45,7 +46,7 @@ def test_basic_stream():

def test_tool_use():
agent = Agent(
model=OpenAIChat(id="gpt-4o"),
model=OpenAIChat(id="gpt-4o-mini"),
tools=[DuckDuckGoTools()],
show_tool_calls=True,
markdown=True,
Expand All @@ -61,7 +62,7 @@ def test_tool_use():

def test_with_memory():
agent = Agent(
model=OpenAIChat(id="gpt-4o"),
model=OpenAIChat(id="gpt-4o-mini"),
add_history_to_messages=True,
num_history_responses=5,
markdown=True,
Expand Down Expand Up @@ -100,7 +101,7 @@ class MovieScript(BaseModel):
plot: str = Field(..., description="Brief plot summary")

agent = Agent(
model=OpenAIChat(id="gpt-4o"),
model=OpenAIChat(id="gpt-4o-mini"),
response_model=MovieScript,
)

Expand All @@ -115,7 +116,7 @@ class MovieScript(BaseModel):

def test_image_input():
agent = Agent(
model=OpenAIChat(id="gpt-4o"),
model=OpenAIChat(id="gpt-4o-mini"),
tools=[DuckDuckGoTools()],
markdown=True,
)
Expand All @@ -126,3 +127,19 @@ def test_image_input():
)

assert "golden" in response.content.lower()

def test_history_grows_exponentially():
    """Regression test: with add_history_to_messages, the message list must grow
    linearly (exactly 2 messages — one user, one assistant — per run), not
    compound by re-appending prior history each turn.
    """
    db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        storage=PostgresAgentStorage(table_name="agent_sessions", db_url=db_url),
        add_history_to_messages=True,
    )
    prompts = ["Hello", "Hello 2", "Hello 3", "Hello 4"]
    for turn, prompt in enumerate(prompts, start=1):
        agent.run(prompt)
        # Turn N should carry all prior history plus this turn's user/assistant pair.
        assert len(agent.run_response.messages) == 2 * turn

0 comments on commit 0f96309

Please sign in to comment.