diff --git a/config/feature_flags.yaml b/config/feature_flags.yaml
index 8db3b18e..28cb27b5 100644
--- a/config/feature_flags.yaml
+++ b/config/feature_flags.yaml
@@ -44,7 +44,7 @@ flags:
# Agent Zero - AI agent orchestration framework
agent_zero:
- enabled: false
+ enabled: true
description: "Agent Zero - Autonomous AI agents for task automation"
type: release
diff --git a/ushadow/backend/main.py b/ushadow/backend/main.py
index bbab13e7..b175254d 100644
--- a/ushadow/backend/main.py
+++ b/ushadow/backend/main.py
@@ -21,12 +21,13 @@
from src.routers import health, wizard, chronicle, auth, feature_flags
from src.routers import services, deployments, providers, instances, chat
-from src.routers import kubernetes, tailscale, unodes, docker
+from src.routers import kubernetes, tailscale, unodes, docker, agent_zero
from src.routers import settings as settings_api
from src.middleware import setup_middleware
from src.services.unode_manager import init_unode_manager, get_unode_manager
from src.services.deployment_manager import init_deployment_manager
from src.services.kubernetes_manager import init_kubernetes_manager
+from src.services.agent_zero import init_agent_zero_service
from src.services.feature_flags import create_feature_flag_service, set_feature_flag_service
from src.services.mcp_server import setup_mcp_server
from src.config.omegaconf_settings import get_settings_store
@@ -141,6 +142,10 @@ def send_telemetry():
await init_kubernetes_manager(db)
logger.info("✓ Kubernetes manager initialized")
+ # Initialize Agent Zero service
+ await init_agent_zero_service(db)
+ logger.info("✓ Agent Zero service initialized")
+
# Start background task for stale u-node checking
stale_check_task = asyncio.create_task(check_stale_unodes_task())
@@ -178,6 +183,7 @@ def send_telemetry():
app.include_router(providers.router, prefix="/api/providers", tags=["providers"])
app.include_router(instances.router, tags=["instances"])
app.include_router(chat.router, prefix="/api/chat", tags=["chat"])
+app.include_router(agent_zero.router, prefix="/api/agent-zero", tags=["agent-zero"])
app.include_router(deployments.router, tags=["deployments"])
app.include_router(tailscale.router, tags=["tailscale"])
diff --git a/ushadow/backend/src/models/agent.py b/ushadow/backend/src/models/agent.py
new file mode 100644
index 00000000..dd41320a
--- /dev/null
+++ b/ushadow/backend/src/models/agent.py
@@ -0,0 +1,177 @@
+"""
+Agent models for Agent Zero - autonomous AI agents for task automation.
+
+Agents are created from natural language descriptions in chat and can be
+triggered based on conversation context.
+"""
+
+from datetime import datetime
+from enum import Enum
+from typing import Dict, List, Optional, Any
+
+from pydantic import BaseModel, Field
+
+
+class AgentStatus(str, Enum):
+ """Status of an agent."""
+ ACTIVE = "active" # Agent is active and will respond to triggers
+ INACTIVE = "inactive" # Agent is paused/disabled
+ DRAFT = "draft" # Agent is being configured
+
+
+class AgentTrigger(BaseModel):
+ """
+ Defines when an agent should be activated.
+
+ Agents can be triggered by:
+ - Keywords/phrases in the conversation
+ - Explicit invocation by name
+ - Context matching (semantic similarity)
+ """
+ type: str = Field(
+ default="keyword",
+ description="Trigger type: keyword, context, explicit"
+ )
+ keywords: List[str] = Field(
+ default_factory=list,
+ description="Keywords/phrases that trigger the agent"
+ )
+ context_description: Optional[str] = Field(
+ default=None,
+ description="Description of context when agent should activate"
+ )
+ threshold: float = Field(
+ default=0.7,
+ description="Similarity threshold for context matching (0-1)"
+ )
+
+
+class AgentOutput(BaseModel):
+ """
+ Defines how an agent should structure its output.
+ """
+ format: str = Field(
+ default="markdown",
+ description="Output format: markdown, json, plain"
+ )
+ sections: List[str] = Field(
+ default_factory=list,
+ description="Required sections in the output"
+ )
+ include_sources: bool = Field(
+ default=False,
+ description="Whether to include sources/citations"
+ )
+
+
+class Agent(BaseModel):
+ """
+ An autonomous AI agent for task automation.
+
+ Agents are created from natural language descriptions and can be
+ triggered based on conversation context to perform specific tasks.
+ """
+ id: str = Field(..., description="Unique agent ID")
+ name: str = Field(..., description="Agent display name")
+ description: str = Field(..., description="What the agent does")
+
+ # What triggers this agent
+ trigger: AgentTrigger = Field(
+ default_factory=AgentTrigger,
+ description="When this agent should activate"
+ )
+
+ # What the agent does
+ system_prompt: str = Field(
+ default="",
+ description="System prompt for the agent's LLM"
+ )
+ instructions: str = Field(
+ default="",
+ description="Detailed instructions for the agent"
+ )
+
+ # Output configuration
+ output: AgentOutput = Field(
+ default_factory=AgentOutput,
+ description="How the agent should format output"
+ )
+
+ # Status
+ status: AgentStatus = Field(
+ default=AgentStatus.ACTIVE,
+ description="Current agent status"
+ )
+
+ # Metadata
+ created_at: Optional[datetime] = None
+ updated_at: Optional[datetime] = None
+ created_by: Optional[str] = None
+ last_used_at: Optional[datetime] = None
+ use_count: int = Field(default=0, description="Number of times agent was invoked")
+
+ # Tags for organization
+ tags: List[str] = Field(default_factory=list)
+ metadata: Dict[str, Any] = Field(default_factory=dict)
+
+ class Config:
+ use_enum_values = True
+
+
+class AgentCreate(BaseModel):
+ """Request to create a new agent."""
+ name: str = Field(..., min_length=1, max_length=100)
+ description: str = Field(..., min_length=1, max_length=500)
+ trigger: Optional[AgentTrigger] = None
+ system_prompt: Optional[str] = None
+ instructions: Optional[str] = None
+ output: Optional[AgentOutput] = None
+ tags: List[str] = Field(default_factory=list)
+
+
+class AgentUpdate(BaseModel):
+ """Request to update an agent."""
+ name: Optional[str] = None
+ description: Optional[str] = None
+ trigger: Optional[AgentTrigger] = None
+ system_prompt: Optional[str] = None
+ instructions: Optional[str] = None
+ output: Optional[AgentOutput] = None
+ status: Optional[AgentStatus] = None
+ tags: Optional[List[str]] = None
+
+
+class AgentFromChat(BaseModel):
+ """
+ Request to create an agent from natural language in chat.
+
+ The LLM will parse this to extract agent configuration.
+ """
+ user_request: str = Field(..., description="The user's natural language request")
+ conversation_context: Optional[List[Dict[str, str]]] = Field(
+ default=None,
+ description="Previous conversation messages for context"
+ )
+
+
+class AgentInvocation(BaseModel):
+ """
+ Record of an agent being invoked.
+ """
+ id: str = Field(..., description="Unique invocation ID")
+ agent_id: str = Field(..., description="The agent that was invoked")
+ trigger_type: str = Field(..., description="How the agent was triggered")
+ input_context: str = Field(..., description="The input that triggered the agent")
+ output: str = Field(..., description="The agent's response")
+ created_at: datetime = Field(default_factory=datetime.utcnow)
+ user_id: Optional[str] = None
+
+
+class AgentExecuteRequest(BaseModel):
+ """Request to execute an agent with specific input."""
+ agent_id: str = Field(..., description="Agent to execute")
+ input_text: str = Field(..., description="Input for the agent")
+ additional_context: Optional[Dict[str, Any]] = Field(
+ default=None,
+ description="Additional context to provide to the agent"
+ )
diff --git a/ushadow/backend/src/routers/agent_zero.py b/ushadow/backend/src/routers/agent_zero.py
new file mode 100644
index 00000000..efff9132
--- /dev/null
+++ b/ushadow/backend/src/routers/agent_zero.py
@@ -0,0 +1,227 @@
+"""
+Agent Zero Router - API endpoints for autonomous AI agents.
+
+Provides endpoints for:
+- Agent CRUD operations
+- Creating agents from natural language
+- Executing agents
+- Agent status and monitoring
+"""
+
+import logging
+from typing import List, Optional, Dict, Any
+
+from fastapi import APIRouter, HTTPException
+
+from src.models.agent import (
+ Agent,
+ AgentCreate,
+ AgentUpdate,
+ AgentStatus,
+ AgentFromChat,
+ AgentExecuteRequest,
+)
+from src.services.agent_zero import get_agent_zero_service
+
+logger = logging.getLogger(__name__)
+router = APIRouter()
+
+
+# =============================================================================
+# Status
+# =============================================================================
+
+@router.get("/status")
+async def get_status() -> Dict[str, Any]:
+ """Get Agent Zero service status."""
+ service = get_agent_zero_service()
+ if not service:
+ return {
+ "connected": False,
+ "error": "Agent Zero service not initialized",
+ }
+
+ return await service.get_status()
+
+
+# =============================================================================
+# Agent CRUD
+# =============================================================================
+
+@router.get("/agents", response_model=List[Agent])
+async def list_agents(
+ status: Optional[str] = None,
+) -> List[Agent]:
+ """List all agents, optionally filtered by status."""
+ service = get_agent_zero_service()
+ if not service:
+ raise HTTPException(status_code=503, detail="Agent Zero service not available")
+
+ agent_status = None
+ if status:
+ try:
+ agent_status = AgentStatus(status)
+ except ValueError:
+            raise HTTPException(status_code=400, detail=f"Invalid status: {status}") from None
+
+ return await service.list_agents(status=agent_status)
+
+
+@router.get("/agents/{agent_id}", response_model=Agent)
+async def get_agent(agent_id: str) -> Agent:
+ """Get an agent by ID."""
+ service = get_agent_zero_service()
+ if not service:
+ raise HTTPException(status_code=503, detail="Agent Zero service not available")
+
+ agent = await service.get_agent(agent_id)
+ if not agent:
+ raise HTTPException(status_code=404, detail=f"Agent not found: {agent_id}")
+
+ return agent
+
+
+@router.post("/agents", response_model=Agent)
+async def create_agent(data: AgentCreate) -> Agent:
+ """Create a new agent."""
+ service = get_agent_zero_service()
+ if not service:
+ raise HTTPException(status_code=503, detail="Agent Zero service not available")
+
+ return await service.create_agent(data)
+
+
+@router.put("/agents/{agent_id}", response_model=Agent)
+async def update_agent(agent_id: str, data: AgentUpdate) -> Agent:
+ """Update an agent."""
+ service = get_agent_zero_service()
+ if not service:
+ raise HTTPException(status_code=503, detail="Agent Zero service not available")
+
+ agent = await service.update_agent(agent_id, data)
+ if not agent:
+ raise HTTPException(status_code=404, detail=f"Agent not found: {agent_id}")
+
+ return agent
+
+
+@router.delete("/agents/{agent_id}")
+async def delete_agent(agent_id: str) -> Dict[str, Any]:
+ """Delete an agent."""
+ service = get_agent_zero_service()
+ if not service:
+ raise HTTPException(status_code=503, detail="Agent Zero service not available")
+
+ deleted = await service.delete_agent(agent_id)
+ if not deleted:
+ raise HTTPException(status_code=404, detail=f"Agent not found: {agent_id}")
+
+ return {"success": True, "message": f"Agent {agent_id} deleted"}
+
+
+# =============================================================================
+# Natural Language Agent Creation
+# =============================================================================
+
+@router.post("/agents/from-chat")
+async def create_agent_from_chat(data: AgentFromChat) -> Dict[str, Any]:
+ """
+ Create an agent from a natural language description.
+
+ The LLM will parse the user's request and create an appropriate agent.
+ """
+ service = get_agent_zero_service()
+ if not service:
+ raise HTTPException(status_code=503, detail="Agent Zero service not available")
+
+ agent, message = await service.create_agent_from_chat(
+ data.user_request,
+ data.conversation_context,
+ )
+
+ if agent:
+ return {
+ "success": True,
+ "agent": agent.model_dump(),
+ "message": message,
+ }
+ else:
+ return {
+ "success": False,
+ "agent": None,
+ "message": message,
+ }
+
+
+# =============================================================================
+# Agent Execution
+# =============================================================================
+
+@router.post("/agents/{agent_id}/execute")
+async def execute_agent(agent_id: str, data: AgentExecuteRequest) -> Dict[str, Any]:
+ """Execute an agent with the given input."""
+ service = get_agent_zero_service()
+ if not service:
+ raise HTTPException(status_code=503, detail="Agent Zero service not available")
+
+ output, invocation = await service.execute_agent(
+ agent_id,
+ data.input_text,
+ data.additional_context,
+ )
+
+ return {
+ "success": invocation is not None,
+ "output": output,
+ "invocation_id": invocation.id if invocation else None,
+ }
+
+
+@router.post("/process-message")
+async def process_chat_message(
+ message: str,
+ conversation_context: Optional[List[Dict[str, str]]] = None,
+) -> Dict[str, Any]:
+ """
+ Process a chat message to detect agent creation requests or triggers.
+
+ This endpoint is used by the chat system to integrate agent functionality.
+ """
+ service = get_agent_zero_service()
+ if not service:
+ return {"action": None}
+
+ result = await service.process_chat_message(message, conversation_context)
+ return {"action": result}
+
+
+# =============================================================================
+# Agent Activation
+# =============================================================================
+
+@router.post("/agents/{agent_id}/activate")
+async def activate_agent(agent_id: str) -> Dict[str, Any]:
+ """Activate an agent (set status to active)."""
+ service = get_agent_zero_service()
+ if not service:
+ raise HTTPException(status_code=503, detail="Agent Zero service not available")
+
+ agent = await service.update_agent(agent_id, AgentUpdate(status=AgentStatus.ACTIVE))
+ if not agent:
+ raise HTTPException(status_code=404, detail=f"Agent not found: {agent_id}")
+
+ return {"success": True, "agent": agent.model_dump()}
+
+
+@router.post("/agents/{agent_id}/deactivate")
+async def deactivate_agent(agent_id: str) -> Dict[str, Any]:
+ """Deactivate an agent (set status to inactive)."""
+ service = get_agent_zero_service()
+ if not service:
+ raise HTTPException(status_code=503, detail="Agent Zero service not available")
+
+ agent = await service.update_agent(agent_id, AgentUpdate(status=AgentStatus.INACTIVE))
+ if not agent:
+ raise HTTPException(status_code=404, detail=f"Agent not found: {agent_id}")
+
+ return {"success": True, "agent": agent.model_dump()}
diff --git a/ushadow/backend/src/routers/chat.py b/ushadow/backend/src/routers/chat.py
index 4937d34c..b70fc6d9 100644
--- a/ushadow/backend/src/routers/chat.py
+++ b/ushadow/backend/src/routers/chat.py
@@ -5,6 +5,7 @@
- Uses the selected LLM provider via LiteLLM
- Optionally enriches context with OpenMemory
- Streams responses using Server-Sent Events (SSE)
+- Integrates with Agent Zero for autonomous agents
The streaming format is compatible with assistant-ui's data stream protocol.
"""
@@ -20,6 +21,7 @@
from pydantic import BaseModel
from src.services.llm_client import get_llm_client
+from src.services.agent_zero import get_agent_zero_service
from src.config.omegaconf_settings import get_settings_store
logger = logging.getLogger(__name__)
@@ -42,6 +44,7 @@ class ChatRequest(BaseModel):
messages: List[ChatMessage]
system: Optional[str] = None # System prompt
use_memory: bool = True # Whether to fetch context from OpenMemory
+ use_agents: bool = True # Whether to check for Agent Zero triggers
user_id: Optional[str] = None # User ID for memory lookup
temperature: Optional[float] = None
max_tokens: Optional[int] = None
@@ -53,6 +56,8 @@ class ChatStatus(BaseModel):
provider: Optional[str] = None
model: Optional[str] = None
memory_available: bool = False
+ agents_available: bool = False
+ active_agents: int = 0
error: Optional[str] = None
@@ -125,6 +130,49 @@ async def check_memory_available() -> bool:
return False
+async def check_agents_available() -> tuple[bool, int]:
+ """Check if Agent Zero service is available and get active agent count."""
+ service = get_agent_zero_service()
+ if not service:
+ return False, 0
+
+ try:
+ status = await service.get_status()
+ return status.get("connected", False), status.get("active_agents", 0)
+ except Exception:
+ return False, 0
+
+
+# =============================================================================
+# Agent Zero Integration
+# =============================================================================
+
+async def process_agent_action(
+ user_message: str,
+ conversation_context: Optional[List[Dict[str, str]]] = None,
+ user_id: Optional[str] = None,
+) -> Optional[Dict[str, Any]]:
+ """
+ Check if the user message should trigger an agent action.
+
+ Returns:
+ Dict with agent action info, or None if no action needed.
+ """
+ service = get_agent_zero_service()
+ if not service:
+ return None
+
+ try:
+ return await service.process_chat_message(
+ user_message,
+ conversation_context,
+ user_id,
+ )
+ except Exception as e:
+ logger.warning(f"Agent processing error: {e}")
+ return None
+
+
# =============================================================================
# Streaming Helpers
# =============================================================================
@@ -148,6 +196,12 @@ def format_finish_message(finish_reason: str = "stop") -> str:
return f"d:{json.dumps({'finishReason': finish_reason})}\n"
+def format_agent_event(event_type: str, data: Dict[str, Any]) -> str:
+ """Format an agent event in AI SDK data stream format."""
+ # Use custom event type 'a' for agent events
+ return f"a:{json.dumps({'type': event_type, **data})}\n"
+
+
# =============================================================================
# Endpoints
# =============================================================================
@@ -165,12 +219,15 @@ async def get_chat_status() -> ChatStatus:
config = await llm.get_llm_config()
is_configured = await llm.is_configured()
memory_available = await check_memory_available()
+ agents_available, active_agents = await check_agents_available()
return ChatStatus(
configured=is_configured,
provider=config.get("provider_id"),
model=config.get("model"),
- memory_available=memory_available
+ memory_available=memory_available,
+ agents_available=agents_available,
+ active_agents=active_agents,
)
except Exception as e:
logger.error(f"Error getting chat status: {e}")
@@ -187,6 +244,10 @@ async def chat(request: ChatRequest):
Accepts messages and returns a streaming response compatible with
assistant-ui's data stream protocol.
+
+ Integrates with Agent Zero to:
+ - Detect agent creation requests and create agents
+ - Trigger existing agents based on conversation context
"""
llm = get_llm_client()
@@ -197,6 +258,26 @@ async def chat(request: ChatRequest):
detail="LLM not configured. Please set up an LLM provider in settings."
)
+ # Get the last user message for agent processing
+ last_user_message = next(
+ (m.content for m in reversed(request.messages) if m.role == "user"),
+ None
+ )
+
+ # Check for agent actions if enabled
+ agent_action = None
+ if request.use_agents and last_user_message:
+ # Build conversation context for agent processing
+ conversation_context = [
+ {"role": m.role, "content": m.content}
+ for m in request.messages[:-1] # Exclude the last message
+ ]
+ agent_action = await process_agent_action(
+ last_user_message,
+ conversation_context,
+ request.user_id,
+ )
+
# Build messages list
messages: List[Dict[str, str]] = []
@@ -208,10 +289,6 @@ async def chat(request: ChatRequest):
memory_context = []
if request.use_memory and request.messages:
user_id = request.user_id or "default"
- last_user_message = next(
- (m.content for m in reversed(request.messages) if m.role == "user"),
- None
- )
if last_user_message:
memory_context = await fetch_memory_context(
last_user_message,
@@ -238,6 +315,50 @@ async def chat(request: ChatRequest):
async def generate():
"""Stream response chunks."""
try:
+ # Handle agent actions
+ if agent_action:
+ action_type = agent_action.get("type")
+
+ if action_type == "agent_created":
+ # Emit agent created event
+ yield format_agent_event("agent_created", {
+ "agent": agent_action.get("agent"),
+ })
+ # Stream the confirmation message
+ message = agent_action.get("message", "Agent created!")
+ for chunk in message.split(" "):
+ yield format_text_delta(chunk + " ")
+ yield format_finish_message("stop")
+ return
+
+ elif action_type == "agent_triggered":
+ # Emit agent triggered event
+ yield format_agent_event("agent_triggered", {
+ "agent": agent_action.get("agent"),
+ "confidence": agent_action.get("confidence"),
+ })
+ # Stream the agent's output
+ output = agent_action.get("output", "")
+ # Stream word by word for a natural feel
+ words = output.split(" ")
+ for i, word in enumerate(words):
+ if i < len(words) - 1:
+ yield format_text_delta(word + " ")
+ else:
+ yield format_text_delta(word)
+ yield format_finish_message("stop")
+ return
+
+ elif action_type == "agent_creation_failed":
+ # Stream the error message but continue with normal chat
+ message = agent_action.get("message", "")
+ if message:
+ for chunk in message.split(" "):
+ yield format_text_delta(chunk + " ")
+ yield format_finish_message("stop")
+ return
+
+ # Normal chat flow
async for chunk in llm.stream_completion(
messages=messages,
temperature=request.temperature,
diff --git a/ushadow/backend/src/services/agent_zero.py b/ushadow/backend/src/services/agent_zero.py
new file mode 100644
index 00000000..fc37a3d5
--- /dev/null
+++ b/ushadow/backend/src/services/agent_zero.py
@@ -0,0 +1,555 @@
+"""
+Agent Zero Service - Autonomous AI agents for task automation.
+
+This service manages the lifecycle of agents:
+- Creating agents from natural language descriptions
+- Detecting when agents should be triggered
+- Executing agents with appropriate context
+- Storing agent definitions and invocations
+"""
+
+import json
+import logging
+import re
+import uuid
+from datetime import datetime
+from typing import Dict, List, Optional, Any, Tuple
+
+from motor.motor_asyncio import AsyncIOMotorDatabase
+
+from src.models.agent import (
+ Agent,
+ AgentCreate,
+ AgentUpdate,
+ AgentStatus,
+ AgentTrigger,
+ AgentOutput,
+ AgentInvocation,
+)
+from src.services.llm_client import get_llm_client
+
+logger = logging.getLogger(__name__)
+
+# System prompt for parsing agent creation requests
+AGENT_CREATION_PARSER_PROMPT = """You are an AI assistant that helps create autonomous agents from natural language descriptions.
+
+When a user describes what they want an agent to do, extract the following information and return it as JSON:
+
+{
+ "should_create_agent": true/false, // Is the user actually requesting an agent?
+ "name": "Agent name",
+ "description": "Brief description of what the agent does",
+ "trigger_keywords": ["keyword1", "keyword2"], // Words/phrases that should trigger this agent
+ "trigger_context": "Description of the context when this agent should activate",
+ "system_prompt": "The system prompt for the agent's LLM",
+ "instructions": "Detailed step-by-step instructions for the agent",
+ "output_sections": ["section1", "section2"], // What sections the output should have
+ "output_format": "markdown" // or "json", "plain"
+}
+
+Guidelines:
+- Set should_create_agent to false if the user is just asking a question or not requesting an agent
+- Extract meaningful trigger keywords from phrases like "when I am having...", "whenever...", "during..."
+- Create a clear, specific system prompt that defines the agent's role
+- Break down the task into clear instructions
+- Identify output sections based on what the user wants to see
+
+Example input: "when I am having a book review club then I want a summary of the main plot points of the book and a synopsis of the characters and motivations"
+
+Example output:
+{
+ "should_create_agent": true,
+ "name": "Book Review Club Assistant",
+ "description": "Provides book summaries with plot points, character synopses, and character motivations for book club discussions",
+ "trigger_keywords": ["book review club", "book club", "book discussion"],
+ "trigger_context": "When the user mentions having a book review club or book discussion session",
+ "system_prompt": "You are a literary analysis assistant specializing in book reviews. Your role is to provide comprehensive yet concise summaries that facilitate book club discussions.",
+ "instructions": "1. Identify the book being discussed\\n2. Summarize the main plot points without spoiling key twists\\n3. Create character profiles with their motivations\\n4. Highlight themes for discussion",
+ "output_sections": ["Main Plot Points", "Character Synopsis", "Character Motivations", "Discussion Themes"],
+ "output_format": "markdown"
+}
+
+Return only valid JSON, no other text."""
+
+# System prompt for detecting if an agent should be triggered
+AGENT_TRIGGER_DETECTION_PROMPT = """You are an AI that determines if a user's message should trigger a specific agent.
+
+Given:
+- The user's message
+- An agent's trigger keywords and context description
+
+Respond with JSON:
+{
+ "should_trigger": true/false,
+ "confidence": 0.0-1.0,
+ "reason": "Brief explanation"
+}
+
+Be conservative - only trigger if there's a clear match. Return only valid JSON."""
+
+
+class AgentZeroService:
+ """
+ Service for managing Agent Zero agents.
+
+ Provides CRUD operations for agents, natural language agent creation,
+ trigger detection, and agent execution.
+ """
+
+ def __init__(self, db: AsyncIOMotorDatabase):
+ self.db = db
+ self.agents_collection = db["agents"]
+ self.invocations_collection = db["agent_invocations"]
+ self._llm = get_llm_client()
+
+ # =========================================================================
+ # CRUD Operations
+ # =========================================================================
+
+ async def create_agent(self, data: AgentCreate, user_id: Optional[str] = None) -> Agent:
+ """Create a new agent from structured data."""
+ agent_id = str(uuid.uuid4())[:8]
+ now = datetime.utcnow()
+
+ agent = Agent(
+ id=agent_id,
+ name=data.name,
+ description=data.description,
+ trigger=data.trigger or AgentTrigger(),
+ system_prompt=data.system_prompt or "",
+ instructions=data.instructions or "",
+ output=data.output or AgentOutput(),
+ status=AgentStatus.ACTIVE,
+ created_at=now,
+ updated_at=now,
+ created_by=user_id,
+ tags=data.tags,
+ )
+
+ await self.agents_collection.insert_one(agent.model_dump())
+ logger.info(f"Created agent: {agent.name} (id={agent_id})")
+ return agent
+
+ async def get_agent(self, agent_id: str) -> Optional[Agent]:
+ """Get an agent by ID."""
+ doc = await self.agents_collection.find_one({"id": agent_id})
+ return Agent(**doc) if doc else None
+
+ async def list_agents(
+ self,
+ status: Optional[AgentStatus] = None,
+ user_id: Optional[str] = None,
+ ) -> List[Agent]:
+ """List all agents, optionally filtered by status or user."""
+ query: Dict[str, Any] = {}
+ if status:
+ query["status"] = status.value if isinstance(status, AgentStatus) else status
+ if user_id:
+ query["created_by"] = user_id
+
+ cursor = self.agents_collection.find(query).sort("created_at", -1)
+        docs = await cursor.to_list(length=None)
+ return [Agent(**doc) for doc in docs]
+
+ async def update_agent(self, agent_id: str, data: AgentUpdate) -> Optional[Agent]:
+ """Update an agent."""
+ update_data = data.model_dump(exclude_unset=True)
+ if not update_data:
+ return await self.get_agent(agent_id)
+
+ update_data["updated_at"] = datetime.utcnow()
+
+ result = await self.agents_collection.update_one(
+ {"id": agent_id},
+ {"$set": update_data}
+ )
+
+        if result.matched_count == 0:
+ return None
+
+ return await self.get_agent(agent_id)
+
+ async def delete_agent(self, agent_id: str) -> bool:
+ """Delete an agent."""
+ result = await self.agents_collection.delete_one({"id": agent_id})
+ return result.deleted_count > 0
+
+ # =========================================================================
+ # Natural Language Agent Creation
+ # =========================================================================
+
+ async def create_agent_from_chat(
+ self,
+ user_request: str,
+ conversation_context: Optional[List[Dict[str, str]]] = None,
+ user_id: Optional[str] = None,
+ ) -> Tuple[Optional[Agent], str]:
+ """
+ Create an agent from a natural language description.
+
+ Returns:
+ Tuple of (Agent if created, explanation message)
+ """
+ # Build messages for the LLM
+ messages = [
+ {"role": "system", "content": AGENT_CREATION_PARSER_PROMPT},
+ ]
+
+ # Add conversation context if provided
+ if conversation_context:
+ context_str = "\n".join(
+ f"{m['role']}: {m['content']}" for m in conversation_context[-5:]
+ )
+ messages.append({
+ "role": "user",
+ "content": f"Previous conversation context:\n{context_str}\n\nUser's agent request:\n{user_request}"
+ })
+ else:
+ messages.append({"role": "user", "content": user_request})
+
+ try:
+ # Get LLM response
+ response = await self._llm.completion(
+ messages=messages,
+ temperature=0.3, # Lower temperature for more consistent parsing
+ max_tokens=1000,
+ )
+
+ content = response.choices[0].message.content.strip()
+
+ # Parse JSON from response
+ # Try to extract JSON if it's wrapped in markdown code blocks
+ json_match = re.search(r'```(?:json)?\s*([\s\S]*?)\s*```', content)
+ if json_match:
+ content = json_match.group(1)
+
+ parsed = json.loads(content)
+
+ if not parsed.get("should_create_agent", False):
+ return None, "I didn't detect a request to create an agent. Could you describe what you'd like the agent to do?"
+
+ # Create the agent
+ agent_data = AgentCreate(
+ name=parsed.get("name", "Unnamed Agent"),
+ description=parsed.get("description", user_request[:200]),
+ trigger=AgentTrigger(
+ type="keyword",
+ keywords=parsed.get("trigger_keywords", []),
+ context_description=parsed.get("trigger_context"),
+ ),
+ system_prompt=parsed.get("system_prompt", ""),
+ instructions=parsed.get("instructions", ""),
+ output=AgentOutput(
+ format=parsed.get("output_format", "markdown"),
+ sections=parsed.get("output_sections", []),
+ ),
+ )
+
+ agent = await self.create_agent(agent_data, user_id)
+
+ # Build confirmation message
+ trigger_info = ""
+ if agent.trigger.keywords:
+ trigger_info = f" It will activate when you mention: {', '.join(agent.trigger.keywords)}."
+
+ return agent, (
+ f"I've created the **{agent.name}** agent for you.{trigger_info}\n\n"
+ f"**What it does:** {agent.description}\n\n"
+ f"**Output sections:** {', '.join(agent.output.sections) if agent.output.sections else 'Free-form response'}"
+ )
+
+ except json.JSONDecodeError as e:
+ logger.error(f"Failed to parse agent creation response: {e}")
+ return None, "I had trouble understanding your request. Could you describe the agent you want in more detail?"
+ except Exception as e:
+ logger.error(f"Error creating agent from chat: {e}")
+ return None, f"Sorry, I encountered an error creating the agent: {str(e)}"
+
+ # =========================================================================
+ # Trigger Detection
+ # =========================================================================
+
+ async def detect_triggered_agents(
+ self,
+ user_message: str,
+ conversation_context: Optional[List[Dict[str, str]]] = None,
+ ) -> List[Tuple[Agent, float]]:
+ """
+ Detect which agents should be triggered by a user message.
+
+ Returns:
+ List of (Agent, confidence) tuples, sorted by confidence descending.
+ """
+ # Get all active agents
+ agents = await self.list_agents(status=AgentStatus.ACTIVE)
+ if not agents:
+ return []
+
+ triggered: List[Tuple[Agent, float]] = []
+
+ # Check each agent
+ for agent in agents:
+ # First, quick keyword check
+ message_lower = user_message.lower()
+ keyword_match = any(
+ kw.lower() in message_lower
+ for kw in agent.trigger.keywords
+ )
+
+ if keyword_match:
+ # Keyword match - high confidence
+ triggered.append((agent, 0.9))
+ continue
+
+ # If no keyword match but agent has context description,
+ # use LLM for semantic matching
+ if agent.trigger.context_description:
+ confidence = await self._check_context_trigger(
+ user_message,
+ agent,
+ conversation_context,
+ )
+ if confidence >= agent.trigger.threshold:
+ triggered.append((agent, confidence))
+
+ # Sort by confidence
+ triggered.sort(key=lambda x: x[1], reverse=True)
+ return triggered
+
+ async def _check_context_trigger(
+ self,
+ user_message: str,
+ agent: Agent,
+ conversation_context: Optional[List[Dict[str, str]]] = None,
+ ) -> float:
+ """Check if a message semantically matches an agent's trigger context."""
+ messages = [
+ {"role": "system", "content": AGENT_TRIGGER_DETECTION_PROMPT},
+ {
+ "role": "user",
+ "content": f"""User message: "{user_message}"
+
+Agent trigger keywords: {agent.trigger.keywords}
+Agent trigger context: {agent.trigger.context_description}
+
+Should this agent be triggered?"""
+ }
+ ]
+
+ try:
+ response = await self._llm.completion(
+ messages=messages,
+ temperature=0.1,
+ max_tokens=200,
+ )
+
+ content = response.choices[0].message.content.strip()
+ json_match = re.search(r'```(?:json)?\s*([\s\S]*?)\s*```', content)
+ if json_match:
+ content = json_match.group(1)
+
+ parsed = json.loads(content)
+ if parsed.get("should_trigger", False):
+ return parsed.get("confidence", 0.5)
+ return 0.0
+
+ except Exception as e:
+ logger.warning(f"Error checking trigger context: {e}")
+ return 0.0
+
+ # =========================================================================
+ # Agent Execution
+ # =========================================================================
+
+ async def execute_agent(
+ self,
+ agent_id: str,
+ input_text: str,
+ additional_context: Optional[Dict[str, Any]] = None,
+ user_id: Optional[str] = None,
+ ) -> Tuple[str, Optional[AgentInvocation]]:
+ """
+ Execute an agent with the given input.
+
+ Returns:
+ Tuple of (response text, invocation record)
+ """
+ agent = await self.get_agent(agent_id)
+ if not agent:
+ return "Agent not found.", None
+
+ if agent.status != AgentStatus.ACTIVE:
+ return f"Agent '{agent.name}' is not active.", None
+
+ # Build the agent's prompt
+ system_content = agent.system_prompt or f"You are an AI assistant named '{agent.name}'."
+
+ if agent.instructions:
+ system_content += f"\n\nInstructions:\n{agent.instructions}"
+
+ if agent.output.sections:
+ system_content += f"\n\nYour response should include the following sections: {', '.join(agent.output.sections)}"
+
+ if agent.output.format == "json":
+ system_content += "\n\nRespond with valid JSON."
+ elif agent.output.format == "markdown":
+ system_content += "\n\nUse markdown formatting in your response."
+
+ messages = [
+ {"role": "system", "content": system_content},
+ {"role": "user", "content": input_text},
+ ]
+
+ # Add additional context if provided
+ if additional_context:
+ context_str = "\n".join(f"- {k}: {v}" for k, v in additional_context.items())
+ messages[0]["content"] += f"\n\nAdditional context:\n{context_str}"
+
+ try:
+ response = await self._llm.completion(
+ messages=messages,
+ temperature=0.7,
+ max_tokens=2000,
+ )
+
+ output = response.choices[0].message.content
+
+ # Record the invocation
+ invocation = AgentInvocation(
+ id=str(uuid.uuid4())[:8],
+ agent_id=agent_id,
+ trigger_type="explicit",
+ input_context=input_text[:500],
+ output=output[:2000],
+ user_id=user_id,
+ )
+
+ await self.invocations_collection.insert_one(invocation.model_dump())
+
+ # Update agent usage stats
+ await self.agents_collection.update_one(
+ {"id": agent_id},
+ {
+ "$set": {"last_used_at": datetime.utcnow()},
+ "$inc": {"use_count": 1}
+ }
+ )
+
+ return output, invocation
+
+ except Exception as e:
+ logger.error(f"Error executing agent {agent_id}: {e}")
+ return f"Error executing agent: {str(e)}", None
+
+ # =========================================================================
+ # Chat Integration
+ # =========================================================================
+
+ async def process_chat_message(
+ self,
+ user_message: str,
+ conversation_context: Optional[List[Dict[str, str]]] = None,
+ user_id: Optional[str] = None,
+ ) -> Optional[Dict[str, Any]]:
+ """
+ Process a chat message to check for agent creation requests or triggers.
+
+ Returns:
+ Dict with 'type' ('agent_created' or 'agent_triggered') and relevant data,
+ or None if no agent action needed.
+ """
+ message_lower = user_message.lower()
+
+ # Check for agent creation intent
+ creation_phrases = [
+ "create an agent",
+ "make an agent",
+ "i want an agent",
+ "when i am",
+ "whenever i",
+ "when i'm",
+ "i want a",
+ "create a helper",
+ "make a helper",
+ "add an agent",
+ ]
+
+ if any(phrase in message_lower for phrase in creation_phrases):
+ agent, message = await self.create_agent_from_chat(
+ user_message,
+ conversation_context,
+ user_id,
+ )
+ if agent:
+ return {
+ "type": "agent_created",
+ "agent": agent.model_dump(),
+ "message": message,
+ }
+ else:
+ return {
+ "type": "agent_creation_failed",
+ "message": message,
+ }
+
+ # Check for triggered agents
+ triggered = await self.detect_triggered_agents(
+ user_message,
+ conversation_context,
+ )
+
+ if triggered:
+ # Execute the highest confidence agent
+ agent, confidence = triggered[0]
+ output, invocation = await self.execute_agent(
+ agent.id,
+ user_message,
+ user_id=user_id,
+ )
+
+ return {
+ "type": "agent_triggered",
+ "agent": agent.model_dump(),
+ "confidence": confidence,
+ "output": output,
+ "invocation_id": invocation.id if invocation else None,
+ }
+
+ return None
+
+ async def get_status(self) -> Dict[str, Any]:
+ """Get Agent Zero service status."""
+ agent_count = await self.agents_collection.count_documents({})
+ active_count = await self.agents_collection.count_documents(
+ {"status": AgentStatus.ACTIVE.value}
+ )
+
+ return {
+ "connected": True,
+ "agent_count": agent_count,
+ "active_agents": active_count,
+ }
+
+
# Module-level singleton, published by init_agent_zero_service() at startup.
_agent_service: Optional[AgentZeroService] = None


async def init_agent_zero_service(db: AsyncIOMotorDatabase) -> AgentZeroService:
    """Initialize the Agent Zero service with database connection."""
    global _agent_service
    service = AgentZeroService(db)
    _agent_service = service

    # Best-effort sanity check: log agent counts, but never fail startup on it.
    try:
        status = await service.get_status()
        logger.info(f"Agent Zero service initialized - {status.get('agent_count', 0)} agents, {status.get('active_agents', 0)} active")
    except Exception as e:
        logger.warning(f"Agent Zero service initialized but status check failed: {e}")

    return service


def get_agent_zero_service() -> Optional[AgentZeroService]:
    """Return the singleton Agent Zero service, or None before initialization."""
    return _agent_service
diff --git a/ushadow/frontend/src/pages/AgentZeroPage.tsx b/ushadow/frontend/src/pages/AgentZeroPage.tsx
index 232cfe1f..ad2807c7 100644
--- a/ushadow/frontend/src/pages/AgentZeroPage.tsx
+++ b/ushadow/frontend/src/pages/AgentZeroPage.tsx
@@ -1,8 +1,86 @@
-import { Bot, Plus, Cpu } from 'lucide-react'
+import { useState, useEffect } from 'react'
+import { Bot, Plus, Cpu, Play, Pause, Trash2, MessageSquare, RefreshCw, Clock, Zap } from 'lucide-react'
+import { Link } from 'react-router-dom'
import { useTheme } from '../contexts/ThemeContext'
+import { agentZeroApi } from '../services/api'
+import type { Agent, AgentZeroStatus } from '../services/api'
export default function AgentZeroPage() {
const { isDark } = useTheme()
+ const [status, setStatus] = useState
+ {status.agent_count}
+
+ {status.active_agents}
+
- Create and deploy autonomous agents to handle complex tasks and workflows.
-
+ {agent.description}
+
- Automate complex multi-step tasks with intelligent agents
+ Describe what you want in chat and agents are created automatically
- Agents maintain context across conversations and sessions
+ Agents automatically activate when relevant keywords appear in your chat
- Connect agents with MCP servers and external tools
+ Agents produce organized responses with customizable output sections
- Track agent performance and task completion
+ Monitor how often each agent is used and when it was last active
- Autonomous agent orchestration and management
+ Autonomous agents that activate based on your conversations
- No Active Agents
-
-
+ Your Agents
+
+ {agents.map(agent => (
+
+ {agent.name}
+
+
+ {agent.status}
+
+