
Merge branch 'main' into lance-db-improvement
dirkbrnd authored Feb 14, 2025
2 parents 53d1a77 + f66cebd commit fd89fbf
Showing 9 changed files with 224 additions and 130 deletions.
2 changes: 1 addition & 1 deletion cookbook/examples/apps/answer_engine/app.py
@@ -41,8 +41,8 @@ def main() -> None:
     # Model selector
     ####################################################################
     model_options = {
-        "o3-mini": "openai:o3-mini",
         "gpt-4o": "openai:gpt-4o",
+        "o3-mini": "openai:o3-mini",
         "gemini-2.0-flash-exp": "google:gemini-2.0-flash-exp",
         "claude-3-5-sonnet": "anthropic:claude-3-5-sonnet-20241022",
         "llama-3.3-70b": "groq:llama-3.3-70b-versatile",
14 changes: 14 additions & 0 deletions cookbook/models/ollama_tools/async_tool_use_stream.py
@@ -0,0 +1,14 @@
"""Run `pip install duckduckgo-search` to install dependencies."""

from agno.agent import Agent
from agno.models.ollama import OllamaTools
from agno.tools.duckduckgo import DuckDuckGoTools
import asyncio

agent = Agent(
    model=OllamaTools(id="llama3.1:8b"),
    tools=[DuckDuckGoTools()],
    show_tool_calls=True,
    markdown=True,
)
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
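For reference, a synchronous variant of this new cookbook example could look like the sketch below. This is not part of the commit; it reuses the `agent` defined above and assumes `print_response` is the blocking counterpart of `aprint_response`.

# Hypothetical synchronous sketch, reusing the agent defined in the file above.
agent.print_response("Whats happening in France?", stream=True)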
45 changes: 27 additions & 18 deletions cookbook/playground/multimodal_agents.py
@@ -57,6 +57,32 @@
),
)

ml_music_agent = Agent(
    name="ModelsLab Music Agent",
    agent_id="ml_music_agent",
    model=OpenAIChat(id="gpt-4o"),
    tools=[ModelsLabTools(wait_for_completion=True, file_type=FileType.MP3)],
    description="You are an AI agent that can generate music using the ModelsLabs API.",
    instructions=[
        "When generating music, use the `generate_media` tool with detailed prompts that specify:",
        "- The genre and style of music (e.g., classical, jazz, electronic)",
        "- The instruments and sounds to include",
        "- The tempo, mood and emotional qualities",
        "- The structure (intro, verses, chorus, bridge, etc.)",
        "Create rich, descriptive prompts that capture the desired musical elements.",
        "Focus on generating high-quality, complete instrumental pieces.",
        "Keep responses simple and only confirm when music is generated successfully.",
        "Do not include any file names, URLs or technical details in responses.",
    ],
    markdown=True,
    debug_mode=True,
    add_history_to_messages=True,
    add_datetime_to_instructions=True,
    storage=SqliteAgentStorage(
        table_name="ml_music_agent", db_file=image_agent_storage_file
    ),
)
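Outside the Playground UI, the new music agent could be exercised directly along the lines of the sketch below. This is illustrative only and not part of the commit; the prompt text and the need for OpenAI and ModelsLab API keys in the environment are assumptions.

# Hypothetical direct invocation of the new agent; the prompt is made up for illustration.
ml_music_agent.print_response(
    "Generate a short, upbeat jazz piece with piano, upright bass and brushed drums."
)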

ml_video_agent = Agent(
name="ModelsLab Video Agent",
agent_id="ml_video_agent",
@@ -142,33 +168,16 @@
),
)

image_to_image_agent = Agent(
    name="Image to Image Agent",
    agent_id="image_to_image_agent",
    model=OpenAIChat(id="gpt-4o"),
    tools=[FalTools()],
    markdown=True,
    debug_mode=True,
    instructions=[
        "You have to use the `image_to_image` tool to generate the image.",
        "You are an AI agent that can generate images using the Fal AI API.",
        "You will be given a prompt and an image URL.",
        "Don't provide the URL of the image in the response. Only describe what image was generated.",
    ],
    storage=SqliteAgentStorage(
        table_name="image_to_image_agent", db_file=image_agent_storage_file
    ),
)

app = Playground(
    agents=[
        image_agent,
        ml_gif_agent,
        ml_music_agent,
        ml_video_agent,
        fal_agent,
        gif_agent,
        audio_agent,
        image_to_image_agent,
    ]
).get_app(use_async=False)

66 changes: 33 additions & 33 deletions libs/agno/agno/agent/agent.py
@@ -881,7 +881,6 @@ def run(
import time

time.sleep(delay)

if last_exception is not None:
raise Exception(
f"Failed after {num_attempts} attempts. Last error using {last_exception.model_name}({last_exception.model_id}): {str(last_exception)}"
@@ -1942,40 +1941,41 @@ def get_system_message(self) -> Optional[Message]:
                 f"<transfer_instructions>\n{self.get_transfer_instructions().strip()}\n</transfer_instructions>\n\n"
             )
         # 3.3.11 Then add memories to the system prompt
-        if self.memory.create_user_memories:
-            if self.memory.memories and len(self.memory.memories) > 0:
-                system_message_content += (
-                    "You have access to memories from previous interactions with the user that you can use:\n\n"
-                )
-                system_message_content += "<memories_from_previous_interactions>"
-                for _memory in self.memory.memories:
-                    system_message_content += f"\n- {_memory.memory}"
-                system_message_content += "\n</memories_from_previous_interactions>\n\n"
-                system_message_content += (
-                    "Note: this information is from previous interactions and may be updated in this conversation. "
-                    "You should always prefer information from this conversation over the past memories.\n\n"
-                )
-            else:
-                system_message_content += (
-                    "You have the capability to retain memories from previous interactions with the user, "
-                    "but have not had any interactions with the user yet.\n"
-                    "If the user asks about previous memories, you can let them know that you dont have any memory about the user because you haven't had any interactions yet.\n\n"
-                )
-            system_message_content += (
-                "You can add new memories using the `update_memory` tool.\n"
-                "If you use the `update_memory` tool, remember to pass on the response to the user.\n\n"
-            )
-        # 3.3.12 Then add a summary of the interaction to the system prompt
-        if self.memory.create_session_summary:
-            if self.memory.summary is not None:
-                system_message_content += "Here is a brief summary of your previous interactions if it helps:\n\n"
-                system_message_content += "<summary_of_previous_interactions>\n"
-                system_message_content += str(self.memory.summary)
-                system_message_content += "\n</summary_of_previous_interactions>\n\n"
-                system_message_content += (
-                    "Note: this information is from previous interactions and may be outdated. "
-                    "You should ALWAYS prefer information from this conversation over the past summary.\n\n"
-                )
+        if self.memory:
+            if self.memory.create_user_memories:
+                if self.memory.memories and len(self.memory.memories) > 0:
+                    system_message_content += (
+                        "You have access to memories from previous interactions with the user that you can use:\n\n"
+                    )
+                    system_message_content += "<memories_from_previous_interactions>"
+                    for _memory in self.memory.memories:
+                        system_message_content += f"\n- {_memory.memory}"
+                    system_message_content += "\n</memories_from_previous_interactions>\n\n"
+                    system_message_content += (
+                        "Note: this information is from previous interactions and may be updated in this conversation. "
+                        "You should always prefer information from this conversation over the past memories.\n\n"
+                    )
+                else:
+                    system_message_content += (
+                        "You have the capability to retain memories from previous interactions with the user, "
+                        "but have not had any interactions with the user yet.\n"
+                        "If the user asks about previous memories, you can let them know that you dont have any memory about the user because you haven't had any interactions yet.\n\n"
+                    )
+                system_message_content += (
+                    "You can add new memories using the `update_memory` tool.\n"
+                    "If you use the `update_memory` tool, remember to pass on the response to the user.\n\n"
+                )
+            # 3.3.12 Then add a summary of the interaction to the system prompt
+            if self.memory.create_session_summary:
+                if self.memory.summary is not None:
+                    system_message_content += "Here is a brief summary of your previous interactions if it helps:\n\n"
+                    system_message_content += "<summary_of_previous_interactions>\n"
+                    system_message_content += str(self.memory.summary)
+                    system_message_content += "\n</summary_of_previous_interactions>\n\n"
+                    system_message_content += (
+                        "Note: this information is from previous interactions and may be outdated. "
+                        "You should ALWAYS prefer information from this conversation over the past summary.\n\n"
+                    )
 
         # Add the JSON output prompt if response_model is provided and structured_outputs is False
         if self.response_model is not None and not self.structured_outputs:
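The agent.py change above wraps the memory and session-summary sections of the system message in an `if self.memory:` guard, so the prompt can also be built for agents that have no memory object configured. Below is a minimal, simplified sketch of that pattern, not the actual class; it assumes `memory` may be `None`.

# Simplified illustration of the guard; names and structure are illustrative only.
class AgentSketch:
    def __init__(self, memory=None):
        self.memory = memory  # may be None when no memory backend is configured

    def build_memory_section(self) -> str:
        content = ""
        if self.memory:  # guard: skip the whole block when memory is not configured
            if self.memory.create_user_memories:
                content += "<memories_from_previous_interactions>...</memories_from_previous_interactions>\n"
            if self.memory.create_session_summary:
                content += "<summary_of_previous_interactions>...</summary_of_previous_interactions>\n"
        return content

print(AgentSketch().build_memory_section())  # prints an empty string when no memory is set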
4 changes: 2 additions & 2 deletions libs/agno/agno/models/base.py
@@ -768,7 +768,7 @@ def run_function_calls(
             )
             yield ModelResponse(
                 content=f"{fc.get_call_str()} completed in {function_call_timer.elapsed:.4f}s.",
-                tool_calls=[function_call_result.to_fc_result()],
+                tool_calls=[function_call_result.to_function_call_dict()],
                 event=ModelResponseEvent.tool_call_completed.value,
             )

@@ -871,7 +871,7 @@ async def arun_function_calls(self, function_calls: List[FunctionCall], function
             )
             yield ModelResponse(
                 content=f"{fc.get_call_str()} completed in {function_call_timer.elapsed:.4f}s.",
-                tool_calls=[function_call_result.to_fc_result()],
+                tool_calls=[function_call_result.to_function_call_dict()],
                 event=ModelResponseEvent.tool_call_completed.value,
             )

20 changes: 18 additions & 2 deletions libs/agno/agno/models/message.py
@@ -206,7 +206,23 @@ def serialize_for_model(self) -> Dict[str, Any]:
 
     def to_dict(self) -> Dict[str, Any]:
         """Returns the message as a dictionary."""
-        message_dict = self.model_dump(exclude_none=True)
+        message_dict = {
+            "content": self.content,
+            "reasoning_content": self.reasoning_content,
+            "from_history": self.from_history,
+            "stop_after_tool_call": self.stop_after_tool_call,
+            "role": self.role,
+            "name": self.name,
+            "tool_call_id": self.tool_call_id,
+            "tool_name": self.tool_name,
+            "tool_args": self.tool_args,
+            "tool_call_error": self.tool_call_error,
+            "tool_calls": self.tool_calls,
+        }
+        # Filter out None and empty collections
+        message_dict = {
+            k: v for k, v in message_dict.items() if v is not None and not (isinstance(v, (list, dict)) and len(v) == 0)
+        }
 
         # Convert media objects to dictionaries
         if self.images:
@@ -228,7 +244,7 @@ def to_dict(self) -> Dict[str, Any]:
         message_dict["created_at"] = self.created_at
         return message_dict
 
-    def to_fc_result(self) -> Dict[str, Any]:
+    def to_function_call_dict(self) -> Dict[str, Any]:
         return {
             "content": self.content,
             "tool_call_id": self.tool_call_id,
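The `to_fc_result` → `to_function_call_dict` rename above is mirrored at the two call sites in libs/agno/agno/models/base.py shown earlier. A rough usage sketch follows; the field values are made up for illustration, the import path is assumed from the file location, and the full set of keys in the returned dict is not shown in this diff.

# Hypothetical call on a tool-result message; values are illustrative only.
from agno.models.message import Message  # import path assumed from libs/agno/agno/models/message.py

result_message = Message(role="tool", content="42", tool_call_id="call_123")
print(result_message.to_function_call_dict())
# Expected to contain at least: {"content": "42", "tool_call_id": "call_123", ...}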
1 change: 1 addition & 0 deletions libs/agno/agno/models/response.py
@@ -38,3 +38,4 @@ class ModelResponse:
 class FileType(str, Enum):
     MP4 = "mp4"
     GIF = "gif"
+    MP3 = "mp3"