From 0466d710a9c92fb1eb503094f305a5b3767c2322 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Tue, 21 Oct 2025 18:01:58 +0800
Subject: [PATCH 01/30] feat: add schedule configuration for recurring tasks
and related functionality
---
python/valuecell/core/coordinate/models.py | 5 +-
python/valuecell/core/coordinate/planner.py | 5 ++
.../core/coordinate/planner_prompts.py | 65 ++++++++++++++++++-
python/valuecell/core/coordinate/temporal.py | 49 ++++++++++++++
python/valuecell/core/task/models.py | 14 ++++
5 files changed, 135 insertions(+), 3 deletions(-)
create mode 100644 python/valuecell/core/coordinate/temporal.py
diff --git a/python/valuecell/core/coordinate/models.py b/python/valuecell/core/coordinate/models.py
index 6ff3269f8..6cff4792b 100644
--- a/python/valuecell/core/coordinate/models.py
+++ b/python/valuecell/core/coordinate/models.py
@@ -3,7 +3,7 @@
from pydantic import BaseModel, Field
from valuecell.core.task import Task
-from valuecell.core.task.models import TaskPattern
+from valuecell.core.task.models import ScheduleConfig, TaskPattern
class ExecutionPlan(BaseModel):
@@ -39,6 +39,9 @@ class _TaskBrief(BaseModel):
pattern: TaskPattern = Field(
default=TaskPattern.ONCE, description="Task execution pattern"
)
+ schedule_config: Optional[ScheduleConfig] = Field(
+ None, description="Schedule configuration for recurring tasks"
+ )
class PlannerInput(BaseModel):
diff --git a/python/valuecell/core/coordinate/planner.py b/python/valuecell/core/coordinate/planner.py
index af6015c96..246cb454c 100644
--- a/python/valuecell/core/coordinate/planner.py
+++ b/python/valuecell/core/coordinate/planner.py
@@ -25,6 +25,7 @@
PLANNER_INSTRUCTION,
)
from valuecell.core.task import Task, TaskPattern, TaskStatus
+from valuecell.core.task.models import ScheduleConfig
from valuecell.core.types import UserInput
from valuecell.utils import generate_uuid
from valuecell.utils.env import agent_debug_mode_enabled
@@ -228,6 +229,7 @@ async def _analyze_input_and_create_tasks(
conversation_id=user_input.meta.conversation_id,
thread_id=thread_id,
pattern=task.pattern,
+ schedule_config=task.schedule_config,
handoff_from_super_agent=(not user_input.target_agent_name),
)
for task in plan_raw.tasks
@@ -241,6 +243,7 @@ def _create_task(
conversation_id: str | None = None,
thread_id: str | None = None,
pattern: TaskPattern = TaskPattern.ONCE,
+ schedule_config: Optional[ScheduleConfig] = None,
handoff_from_super_agent: bool = False,
) -> Task:
"""
@@ -252,6 +255,7 @@ def _create_task(
agent_name: Name of the agent to execute the task
query: Query/prompt for the agent
pattern: Execution pattern (once or recurring)
+ schedule_config: Schedule configuration for recurring tasks
Returns:
Task: Configured task ready for execution.
@@ -268,6 +272,7 @@ def _create_task(
status=TaskStatus.PENDING,
query=query,
pattern=pattern,
+ schedule_config=schedule_config,
handoff_from_super_agent=handoff_from_super_agent,
)
diff --git a/python/valuecell/core/coordinate/planner_prompts.py b/python/valuecell/core/coordinate/planner_prompts.py
index f135bf64e..09e40b8a5 100644
--- a/python/valuecell/core/coordinate/planner_prompts.py
+++ b/python/valuecell/core/coordinate/planner_prompts.py
@@ -30,7 +30,13 @@
- If the query suggests recurring monitoring or periodic updates, DO NOT create tasks yet. Return `adequate: false` and ask for confirmation in `reason` (e.g., "Do you want regular updates on this, or a one-time analysis?").
- After explicit confirmation, create a single task with `pattern: recurring` and keep the original query unchanged.
-5) Agent targeting policy
+5) Schedule configuration for recurring tasks
+- If the user specifies a time interval (e.g., "every hour", "every 30 minutes"), set `schedule_config.interval_minutes` accordingly.
+- If the user specifies a daily time (e.g., "every day at 9 AM", "daily at 14:00"), set `schedule_config.daily_time` in HH:MM format (24-hour).
+- Only one of `interval_minutes` or `daily_time` should be set, not both.
+- If no schedule is specified for a recurring task, leave `schedule_config` as null (system will use default behavior).
+
+6) Agent targeting policy
- Trust the specified agent's capabilities; do not over-validate or split into multiple tasks.
"""
@@ -41,6 +47,7 @@
- Default to pass-through: create a single task addressed to the provided `target_agent_name`, or to the best-fit agent identified via `tool_get_enabled_agents` when the target is unspecified (fall back to "ResearchAgent" only if no clear match is found).
- Set `pattern` to `once` unless the user explicitly confirms recurring intent.
+- For recurring tasks, parse schedule information from the query and populate `schedule_config` if time interval or daily time is specified.
- Avoid query optimization and task splitting.
@@ -60,7 +67,11 @@
{
"query": "User's original query, unchanged",
"agent_name": "target_agent_name (or best-fit agent selected via tool_get_enabled_agents when not provided)",
- "pattern": "once" | "recurring"
+ "pattern": "once" | "recurring",
+ "schedule_config": {
+ "interval_minutes": ,
+ "daily_time": ""
+ } (optional, only for recurring tasks with explicit schedule)
}
],
"adequate": true/false,
@@ -171,5 +182,55 @@
}
+
+Input:
+{
+ "target_agent_name": "ResearchAgent",
+ "query": "Check Tesla stock price every hour and alert me if there's significant change"
+}
+
+Output:
+{
+ "tasks": [
+ {
+ "query": "Check Tesla stock price every hour and alert me if there's significant change",
+ "agent_name": "ResearchAgent",
+ "pattern": "recurring",
+ "schedule_config": {
+ "interval_minutes": 60,
+ "daily_time": null
+ }
+ }
+ ],
+ "adequate": true,
+ "reason": "Created recurring task with hourly interval as specified."
+}
+
+
+
+Input:
+{
+ "target_agent_name": "ResearchAgent",
+ "query": "Analyze market trends every day at 9 AM"
+}
+
+Output:
+{
+ "tasks": [
+ {
+ "query": "Analyze market trends every day at 9 AM",
+ "agent_name": "ResearchAgent",
+ "pattern": "recurring",
+ "schedule_config": {
+ "interval_minutes": null,
+ "daily_time": "09:00"
+ }
+ }
+ ],
+ "adequate": true,
+ "reason": "Created recurring task scheduled for 9 AM daily."
+}
+
+
"""
diff --git a/python/valuecell/core/coordinate/temporal.py b/python/valuecell/core/coordinate/temporal.py
new file mode 100644
index 000000000..b80e534a0
--- /dev/null
+++ b/python/valuecell/core/coordinate/temporal.py
@@ -0,0 +1,49 @@
+from datetime import datetime, timedelta
+
+from typing import Optional
+from loguru import logger
+
+
+def calculate_next_execution_delay(schedule_config) -> Optional[float]:
+ """Calculate the delay in seconds until the next scheduled execution.
+
+ Args:
+ schedule_config: ScheduleConfig with interval_minutes or daily_time.
+
+ Returns:
+ Delay in seconds until next execution, or None if no schedule configured.
+ """
+ if not schedule_config:
+ return None
+
+ now = datetime.now()
+
+ # Interval-based scheduling
+ if schedule_config.interval_minutes:
+ return schedule_config.interval_minutes * 60
+
+ # Daily time-based scheduling
+ if schedule_config.daily_time:
+ try:
+ # Parse HH:MM format
+ target_hour, target_minute = map(int, schedule_config.daily_time.split(":"))
+
+ # Create target datetime for today
+ target_time = now.replace(
+ hour=target_hour, minute=target_minute, second=0, microsecond=0
+ )
+
+ # If target time has passed today, schedule for tomorrow
+ if target_time <= now:
+ target_time += timedelta(days=1)
+
+ # Calculate delay in seconds
+ delay = (target_time - now).total_seconds()
+ return delay
+ except (ValueError, AttributeError) as e:
+ logger.error(
+ f"Invalid daily_time format: {schedule_config.daily_time}, error: {e}"
+ )
+ return None
+
+ return None
diff --git a/python/valuecell/core/task/models.py b/python/valuecell/core/task/models.py
index 8e28ca524..c379987ea 100644
--- a/python/valuecell/core/task/models.py
+++ b/python/valuecell/core/task/models.py
@@ -25,6 +25,17 @@ class TaskPattern(str, Enum):
RECURRING = "recurring" # Recurring task
+class ScheduleConfig(BaseModel):
+ """Schedule configuration for recurring tasks"""
+
+ interval_minutes: Optional[int] = Field(
+ None, description="Interval in minutes for recurring execution (e.g., 60 for every hour)"
+ )
+ daily_time: Optional[str] = Field(
+ None, description="Daily execution time in HH:MM format (e.g., '09:00' for 9 AM)"
+ )
+
+
class Task(BaseModel):
"""Task data model"""
@@ -50,6 +61,9 @@ class Task(BaseModel):
pattern: TaskPattern = Field(
default=TaskPattern.ONCE, description="Task execution pattern"
)
+ schedule_config: Optional[ScheduleConfig] = Field(
+ None, description="Schedule configuration for recurring tasks"
+ )
handoff_from_super_agent: bool = Field(
False,
description="Indicates if the task was handed over from a super agent",
From 5c6fb3955e4794878711f029c2eeb9f3ec17c195 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Tue, 21 Oct 2025 18:02:05 +0800
Subject: [PATCH 02/30] feat: implement task execution with optional scheduling
support
---
.../valuecell/core/coordinate/orchestrator.py | 155 ++++++++++++------
1 file changed, 103 insertions(+), 52 deletions(-)
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index 291ba8d34..7bfc5a035 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -35,6 +35,7 @@
SuperAgentDecision,
SuperAgentOutcome,
)
+from valuecell.core.coordinate.temporal import calculate_next_execution_delay
from valuecell.core.task import Task, TaskManager
from valuecell.core.task.models import TaskPattern
from valuecell.core.types import (
@@ -625,6 +626,72 @@ async def _cleanup_expired_contexts(
# ==================== Plan and Task Execution Methods ====================
+ async def _execute_single_task_run(
+ self, task: Task, metadata: dict
+ ) -> AsyncGenerator[BaseResponse, None]:
+ """Execute a single run of a task (may be called multiple times for scheduled tasks).
+
+ Args:
+ task: The task to execute
+ metadata: Execution metadata
+
+ Yields:
+ BaseResponse objects from task execution
+ """
+ conversation_id = task.conversation_id
+ thread_id = task.thread_id
+ task_id = task.task_id
+
+ # Get agent connection
+ agent_name = task.agent_name
+ client = await self.agent_connections.get_client(agent_name)
+ if not client:
+ raise RuntimeError(f"Could not connect to agent {agent_name}")
+ # agent_card = await self.agent_connections.start_agent(
+ # agent_name,
+ # with_listener=False,
+ # )
+ # streaming = agent_card.supports_streaming
+
+ # Send message to agent
+ remote_response = await client.send_message(
+ task.query,
+ conversation_id=conversation_id,
+ metadata=metadata,
+ )
+
+ # Process streaming responses
+ async for remote_task, event in remote_response:
+ if event is None and remote_task.status.state == TaskState.submitted:
+ task.remote_task_ids.append(remote_task.id)
+ yield self._response_factory.task_started(
+ conversation_id=conversation_id,
+ thread_id=thread_id,
+ task_id=task_id,
+ )
+ continue
+
+ if isinstance(event, TaskStatusUpdateEvent):
+ result: RouteResult = await handle_status_update(
+ self._response_factory, task, thread_id, event
+ )
+ for r in result.responses:
+ r = self._response_buffer.annotate(r)
+ yield r
+ # Apply side effects
+ for eff in result.side_effects:
+ if eff.kind == SideEffectKind.FAIL_TASK:
+ await self.task_manager.fail_task(task_id, eff.reason or "")
+ if result.done:
+ return
+ continue
+
+ if isinstance(event, TaskArtifactUpdateEvent):
+ logger.info(
+ f"Received unexpected artifact update for task {task_id}: {event}"
+ )
+ continue
+
async def _execute_plan_with_input_support(
self,
plan: ExecutionPlan,
@@ -709,11 +776,14 @@ async def _execute_task_with_input_support(
self, task: Task, thread_id: str, metadata: Optional[dict] = None
) -> AsyncGenerator[BaseResponse, None]:
"""
- Execute a single task with user input interruption support.
+ Execute a single task with user input interruption support and optional scheduling.
+
+ For tasks with schedule_config, this method will repeatedly execute the task
+ according to the configured interval or daily time.
Args:
- task: The task to execute
- query: The query/prompt for the task
+ task: The task to execute (may include schedule_config for recurring execution)
+ thread_id: Thread ID for conversation tracking
metadata: Execution metadata
"""
try:
@@ -722,25 +792,18 @@ async def _execute_task_with_input_support(
conversation_id = task.conversation_id
await self.task_manager.start_task(task_id)
- # Get agent connection
- agent_name = task.agent_name
- agent_card = await self.agent_connections.start_agent(
- agent_name,
- with_listener=False,
- )
- client = await self.agent_connections.get_client(agent_name)
- if not client:
- raise RuntimeError(f"Could not connect to agent {agent_name}")
# Configure A2A metadata
metadata = metadata or {}
if task.pattern != TaskPattern.ONCE:
metadata["notify"] = True
- # Configure Agno metadata, reference: https://docs.agno.com/examples/concepts/agent/other/agent_run_metadata#agent-run-metadata
+ # Configure Agno metadata
+ # reference: https://docs.agno.com/examples/concepts/agent/other/agent_run_metadata#agent-run-metadata
metadata[METADATA] = {}
- # Configure Agno dependencies, reference: https://docs.agno.com/concepts/teams/dependencies#dependencies
+ # Configure Agno dependencies
+ # reference: https://docs.agno.com/concepts/teams/dependencies#dependencies
metadata[DEPENDENCIES] = {
USER_PROFILE: {},
CURRENT_CONTEXT: {},
@@ -748,45 +811,33 @@ async def _execute_task_with_input_support(
TIMEZONE: get_current_timezone(),
}
- # Send message to agent
- remote_response = await client.send_message(
- task.query,
- conversation_id=conversation_id,
- metadata=metadata,
- streaming=agent_card.capabilities.streaming,
- )
-
- # Process streaming responses
- async for remote_task, event in remote_response:
- if event is None and remote_task.status.state == TaskState.submitted:
- task.remote_task_ids.append(remote_task.id)
- yield self._response_factory.task_started(
- conversation_id=conversation_id,
- thread_id=thread_id,
- task_id=task_id,
- )
- continue
+ # Execute task with optional scheduling loop
+ while True:
+ # Execute a single run of the task
+ async for response in self._execute_single_task_run(task, metadata):
+ yield response
- if isinstance(event, TaskStatusUpdateEvent):
- result: RouteResult = await handle_status_update(
- self._response_factory, task, thread_id, event
- )
- for r in result.responses:
- r = self._response_buffer.annotate(r)
- yield r
- # Apply side effects
- for eff in result.side_effects:
- if eff.kind == SideEffectKind.FAIL_TASK:
- await self.task_manager.fail_task(task_id, eff.reason or "")
- if result.done:
- return
- continue
-
- if isinstance(event, TaskArtifactUpdateEvent):
- logger.info(
- f"Received unexpected artifact update for task {task_id}: {event}"
- )
- continue
+ # Check if this is a scheduled recurring task
+ if not task.schedule_config:
+ break # One-time task, exit loop
+
+ delay = calculate_next_execution_delay(task.schedule_config)
+ if not delay:
+ break # No valid schedule, exit loop
+
+ # Schedule next execution
+ logger.info(f"Task {task_id} scheduled to run again in {delay} seconds")
+ # TODO: yield scheduled task waiting message
+ # next_run_time = datetime.now() + timedelta(seconds=delay)
+ # yield self._response_factory.message_response_general(
+ # event="scheduled_task_waiting",
+ # conversation_id=conversation_id,
+ # thread_id=thread_id,
+ # task_id=task_id,
+ # content=f"Next execution scheduled at {next_run_time.strftime('%Y-%m-%d %H:%M:%S')}",
+ # )
+ # Wait for the next scheduled execution
+ await asyncio.sleep(delay)
# Complete task successfully
await self.task_manager.complete_task(task_id)
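The new control flow in `_execute_task_with_input_support` reduces to the skeleton below. This is a sketch only; `run_once` and `task` are placeholders standing in for the orchestrator's single-run coroutine and Task object, not its actual API.

    import asyncio

    from valuecell.core.coordinate.temporal import calculate_next_execution_delay

    async def run_with_optional_schedule(task, run_once):
        # Mirrors the while-loop wrapped around _execute_single_task_run:
        # run once, then keep re-running on the configured cadence.
        while True:
            await run_once(task)
            if not task.schedule_config:
                break  # one-time task: stop after the first run
            delay = calculate_next_execution_delay(task.schedule_config)
            if not delay:
                break  # no valid schedule: stop
            await asyncio.sleep(delay)  # wait until the next scheduled run
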
From be340fe8a463d59a46dea96fbb6e6417723514d6 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Tue, 21 Oct 2025 18:02:11 +0800
Subject: [PATCH 03/30] fix: change default streaming parameter to True in
send_message method
---
python/valuecell/core/agent/client.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/python/valuecell/core/agent/client.py b/python/valuecell/core/agent/client.py
index 6bb386c34..4cb8d2e0c 100644
--- a/python/valuecell/core/agent/client.py
+++ b/python/valuecell/core/agent/client.py
@@ -74,7 +74,7 @@ async def send_message(
query: str,
conversation_id: str = None,
metadata: dict = None,
- streaming: bool = False,
+ streaming: bool = True,
) -> AsyncIterator[RemoteAgentResponse]:
"""Send a message to the remote agent and return an async iterator.
From 12793568e1dd014cb6807d8061097661f1c5f4bf Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Tue, 21 Oct 2025 18:21:11 +0800
Subject: [PATCH 04/30] feat: add guidance message support for inadequate
execution plans
---
python/valuecell/core/coordinate/models.py | 7 +++++
.../valuecell/core/coordinate/orchestrator.py | 14 ++++++++++
python/valuecell/core/coordinate/planner.py | 26 +++++++++++-------
.../core/coordinate/planner_prompts.py | 27 ++++++++++++++++---
4 files changed, 61 insertions(+), 13 deletions(-)
diff --git a/python/valuecell/core/coordinate/models.py b/python/valuecell/core/coordinate/models.py
index 6cff4792b..89ed4ac3b 100644
--- a/python/valuecell/core/coordinate/models.py
+++ b/python/valuecell/core/coordinate/models.py
@@ -24,6 +24,9 @@ class ExecutionPlan(BaseModel):
)
tasks: List[Task] = Field(default_factory=list, description="Tasks to execute")
created_at: str = Field(..., description="Plan creation timestamp")
+ guidance_message: Optional[str] = Field(
+ None, description="Guidance message to user when plan is inadequate or requires clarification"
+ )
class _TaskBrief(BaseModel):
@@ -74,3 +77,7 @@ class PlannerResponse(BaseModel):
description="true if information is adequate for task execution, false if more input is needed",
)
reason: str = Field(..., description="Reason for the planning decision")
+ guidance_message: Optional[str] = Field(
+ None,
+ description="User-friendly guidance message when adequate is false or tasks is empty. Should provide clear direction on what is needed."
+ )
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index 7bfc5a035..ca1f7e46c 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -713,6 +713,20 @@ async def _execute_plan_with_input_support(
Streaming `BaseResponse` objects produced by each task execution.
"""
+ # Check if plan has guidance message (inadequate plan)
+ if plan.guidance_message:
+ # Send guidance message to user and return
+ response = self._response_factory.message_response_general(
+ event=StreamResponseEvent.MESSAGE_CHUNK,
+ conversation_id=conversation_id,
+ thread_id=thread_id,
+ task_id=generate_task_id(),
+ content=plan.guidance_message,
+ )
+ await self._persist_from_buffer(response)
+ yield response
+ return
+
for task in plan.tasks:
subagent_conversation_item_id = generate_item_id()
if task.handoff_from_super_agent:
diff --git a/python/valuecell/core/coordinate/planner.py b/python/valuecell/core/coordinate/planner.py
index 246cb454c..0148806af 100644
--- a/python/valuecell/core/coordinate/planner.py
+++ b/python/valuecell/core/coordinate/planner.py
@@ -123,13 +123,14 @@ async def create_plan(
)
# Analyze input and create appropriate tasks
- tasks = await self._analyze_input_and_create_tasks(
+ tasks, guidance_message = await self._analyze_input_and_create_tasks(
user_input,
conversation_id,
user_input_callback,
thread_id,
)
plan.tasks = tasks
+ plan.guidance_message = guidance_message
return plan
@@ -139,7 +140,7 @@ async def _analyze_input_and_create_tasks(
conversation_id: str,
user_input_callback: Callable,
thread_id: str,
- ) -> List[Task]:
+ ) -> tuple[List[Task], Optional[str]]:
"""
Analyze user input and produce a list of `Task` objects.
@@ -154,7 +155,8 @@ async def _analyze_input_and_create_tasks(
user_input_callback: Async callback used for Human-in-the-Loop.
Returns:
- A list of `Task` objects derived from the planner response.
+ A tuple of (list of Task objects, optional guidance message).
+ If plan is inadequate, returns empty list with guidance message.
"""
# Create planning agent with appropriate tools and instructions
agent = Agent(
@@ -177,6 +179,7 @@ async def _analyze_input_and_create_tasks(
add_history_to_context=True,
num_history_runs=3,
read_chat_history=True,
+ session_id=conversation_id,
enable_session_summaries=True,
)
@@ -215,13 +218,16 @@ async def _analyze_input_and_create_tasks(
# Parse planning result and create tasks
plan_raw = run_response.content
logger.info(f"Planner produced plan: {plan_raw}")
+
+ # Check if plan is inadequate or has no tasks
if not plan_raw.adequate or not plan_raw.tasks:
- # If information is still inadequate, return empty task list
- raise ValueError(
- "Planner indicated information is inadequate or produced no tasks."
- f" Reason: {plan_raw.reason}"
- )
- return [
+ # Use guidance_message from planner, or fall back to reason
+ guidance_message = plan_raw.guidance_message or plan_raw.reason
+ logger.info(f"Planner needs user guidance: {guidance_message}")
+ return [], guidance_message # Return empty task list with guidance
+
+ # Create tasks from planner response
+ tasks = [
self._create_task(
user_input.meta.user_id,
task.agent_name,
@@ -234,6 +240,8 @@ async def _analyze_input_and_create_tasks(
)
for task in plan_raw.tasks
]
+
+ return tasks, None # Return tasks with no guidance message
def _create_task(
self,
diff --git a/python/valuecell/core/coordinate/planner_prompts.py b/python/valuecell/core/coordinate/planner_prompts.py
index 09e40b8a5..4cd223330 100644
--- a/python/valuecell/core/coordinate/planner_prompts.py
+++ b/python/valuecell/core/coordinate/planner_prompts.py
@@ -52,8 +52,9 @@
-- If the request is clearly unusable (illegal content or impossible instruction), return `adequate: false` with a short reason and no tasks.
-- If the request suggests recurring monitoring, return `adequate: false` with a confirmation question; after explicit confirmation, create a single `recurring` task with the original query unchanged.
+- If the request is clearly unusable (illegal content or impossible instruction), return `adequate: false` with a short reason and no tasks. Provide a `guidance_message` explaining why the request cannot be processed.
+- If the request suggests recurring monitoring, return `adequate: false` with a confirmation question in `guidance_message`; after explicit confirmation, create a single `recurring` task with the original query unchanged.
+- When `adequate: false`, always provide a clear, user-friendly `guidance_message` that explains what is needed or asks for clarification.
@@ -75,7 +76,8 @@
}
],
"adequate": true/false,
- "reason": "Brief explanation of planning decision"
+ "reason": "Brief explanation of planning decision",
+ "guidance_message": "User-friendly message when adequate is false (optional, required when adequate is false)"
}
@@ -158,7 +160,8 @@
{
"tasks": [],
"adequate": false,
- "reason": "This suggests recurring monitoring. Do you want regular updates on this, or a one-time analysis?"
+ "reason": "This suggests recurring monitoring. Need user confirmation.",
+ "guidance_message": "I understand you want to monitor Apple's quarterly earnings. Do you want me to set up a recurring task that checks for updates regularly, or would you prefer a one-time analysis of their latest earnings?"
}
// Step 2: user confirms
@@ -232,5 +235,21 @@
}
+
+Input:
+{
+ "target_agent_name": null,
+ "query": "Help me hack into someone's account"
+}
+
+Output:
+{
+ "tasks": [],
+ "adequate": false,
+ "reason": "Request involves illegal activity.",
+ "guidance_message": "I cannot assist with requests that involve illegal activities such as unauthorized access to accounts. If you have a legitimate security concern, please consider contacting the appropriate authorities or the account owner directly."
+}
+
+
"""
From 71f7f842ea6856a89d76d4b5ece3284a3d38ef77 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Tue, 21 Oct 2025 19:06:09 +0800
Subject: [PATCH 05/30] refactor: comment out notify metadata configuration for
non-ONCE task patterns
---
python/valuecell/core/coordinate/orchestrator.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index ca1f7e46c..aa05e7fca 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -809,8 +809,8 @@ async def _execute_task_with_input_support(
# Configure A2A metadata
metadata = metadata or {}
- if task.pattern != TaskPattern.ONCE:
- metadata["notify"] = True
+ # if task.pattern != TaskPattern.ONCE:
+ # metadata["notify"] = True
# Configure Agno metadata
# reference: https://docs.agno.com/examples/concepts/agent/other/agent_run_metadata#agent-run-metadata
From 7cdae478778f83ceb009eb590e45f1b6c79e59b4 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Tue, 21 Oct 2025 19:06:17 +0800
Subject: [PATCH 06/30] feat: enhance ExecutionPlanner with persistent agent
configuration and improved guidance for recurring tasks
---
python/valuecell/core/coordinate/planner.py | 58 +++++-----
.../core/coordinate/planner_prompts.py | 100 +++++++++++++++---
2 files changed, 113 insertions(+), 45 deletions(-)
diff --git a/python/valuecell/core/coordinate/planner.py b/python/valuecell/core/coordinate/planner.py
index 0148806af..33e1d7eb0 100644
--- a/python/valuecell/core/coordinate/planner.py
+++ b/python/valuecell/core/coordinate/planner.py
@@ -89,6 +89,28 @@ def __init__(
agent_connections: RemoteConnections,
):
self.agent_connections = agent_connections
+ self.planner_agent = Agent(
+ model=get_model("PLANNER_MODEL_ID"),
+ tools=[
+ # TODO: enable UserControlFlowTools when stable
+ # UserControlFlowTools(),
+ self.tool_get_enabled_agents,
+ ],
+ debug_mode=agent_debug_mode_enabled(),
+ instructions=[PLANNER_INSTRUCTION],
+ # output format
+ markdown=False,
+ use_json_mode=True,
+ output_schema=PlannerResponse,
+ expected_output=PLANNER_EXPECTED_OUTPUT,
+ # context
+ db=InMemoryDb(),
+ add_datetime_to_context=True,
+ add_history_to_context=True,
+ num_history_runs=5,
+ read_chat_history=True,
+ enable_session_summaries=True,
+ )
async def create_plan(
self,
@@ -158,38 +180,14 @@ async def _analyze_input_and_create_tasks(
A tuple of (list of Task objects, optional guidance message).
If plan is inadequate, returns empty list with guidance message.
"""
- # Create planning agent with appropriate tools and instructions
- agent = Agent(
- model=get_model("PLANNER_MODEL_ID"),
- tools=[
- # TODO: enable UserControlFlowTools when stable
- # UserControlFlowTools(),
- self.tool_get_enabled_agents,
- ],
- debug_mode=agent_debug_mode_enabled(),
- instructions=[PLANNER_INSTRUCTION],
- # output format
- markdown=False,
- use_json_mode=True,
- output_schema=PlannerResponse,
- expected_output=PLANNER_EXPECTED_OUTPUT,
- # context
- db=InMemoryDb(),
- add_datetime_to_context=True,
- add_history_to_context=True,
- num_history_runs=3,
- read_chat_history=True,
- session_id=conversation_id,
- enable_session_summaries=True,
- )
-
# Execute planning with the agent
- run_response = agent.run(
+ run_response = self.planner_agent.run(
PlannerInput(
target_agent_name=user_input.target_agent_name,
query=user_input.query,
),
session_id=conversation_id,
+ user_id=user_input.meta.user_id,
)
# Handle user input requests through Human-in-the-Loop workflow
@@ -206,7 +204,7 @@ async def _analyze_input_and_create_tasks(
field.value = user_value
# Continue agent execution with updated inputs
- run_response = agent.continue_run(
+ run_response = self.planner_agent.continue_run(
# TODO: rollback to `run_id=run_response.run_id` when bug fixed by Agno
run_response=run_response,
updated_tools=run_response.tools,
@@ -218,14 +216,14 @@ async def _analyze_input_and_create_tasks(
# Parse planning result and create tasks
plan_raw = run_response.content
logger.info(f"Planner produced plan: {plan_raw}")
-
+
# Check if plan is inadequate or has no tasks
if not plan_raw.adequate or not plan_raw.tasks:
# Use guidance_message from planner, or fall back to reason
guidance_message = plan_raw.guidance_message or plan_raw.reason
logger.info(f"Planner needs user guidance: {guidance_message}")
return [], guidance_message # Return empty task list with guidance
-
+
# Create tasks from planner response
tasks = [
self._create_task(
@@ -240,7 +238,7 @@ async def _analyze_input_and_create_tasks(
)
for task in plan_raw.tasks
]
-
+
return tasks, None # Return tasks with no guidance message
def _create_task(
diff --git a/python/valuecell/core/coordinate/planner_prompts.py b/python/valuecell/core/coordinate/planner_prompts.py
index 4cd223330..8d48f5181 100644
--- a/python/valuecell/core/coordinate/planner_prompts.py
+++ b/python/valuecell/core/coordinate/planner_prompts.py
@@ -24,11 +24,24 @@
- Only block when the request is clearly unusable (e.g., illegal content or impossible instruction). In that case, return `adequate: false` with a short reason and no tasks.
3) Contextual and preference statements
-- Treat short/contextual replies (e.g., "Go on", "yes", "tell me more") and user preferences/rules (e.g., "do not provide investment advice") as valid inputs; forward them unchanged as a single task.
-
-4) Recurring intent confirmation
-- If the query suggests recurring monitoring or periodic updates, DO NOT create tasks yet. Return `adequate: false` and ask for confirmation in `reason` (e.g., "Do you want regular updates on this, or a one-time analysis?").
-- After explicit confirmation, create a single task with `pattern: recurring` and keep the original query unchanged.
+- Treat short/contextual replies (e.g., "Go on", "tell me more") and user preferences/rules (e.g., "do not provide investment advice") as valid inputs; forward them unchanged as a single task.
+- IMPORTANT: If the previous interaction was waiting for user confirmation (adequate: false with guidance_message asking for confirmation), then treat confirmation responses (e.g., "yes", "confirm", "ok", "proceed") as confirmations, NOT as contextual statements to be forwarded.
+
+4) Recurring intent and schedule confirmation
+- If the query suggests recurring monitoring WITHOUT a specific schedule, return `adequate: false` with a confirmation question in `guidance_message`.
+- If the query explicitly specifies a schedule (e.g., "every hour", "daily at 9 AM"), you MUST confirm with the user first:
+ * Return `adequate: false` with a clear confirmation request in `guidance_message`
+ * The message should describe the task and the exact schedule being set up
+ * Store the original query in session history for reference
+ * After user confirms (e.g., "yes", "confirm", "ok", "proceed"), extract the CORE task requirement from the original query, removing time-related phrases
+ * IMPORTANT: The task `query` field should contain ONLY the core task description WITHOUT time/schedule information
+ * CRITICAL: Convert the query into a SINGLE-EXECUTION form that the remote agent can complete independently:
+ - Remove words suggesting continuous monitoring or notification: "alert", "notify", "remind", "inform", "send notification", "let me know", "tell me when"
+ - Transform into a direct query or analysis request: "Check X and report significant changes" → "Check X for significant changes"
+ - The query should be actionable in one execution cycle without requiring the agent to establish ongoing monitoring
+ * Schedule information should be stored in `schedule_config` separately, NOT in the query text
+ * The confirmation response itself should NOT be used as the task query
+ * If user declines or provides corrections, adjust the plan accordingly
5) Schedule configuration for recurring tasks
- If the user specifies a time interval (e.g., "every hour", "every 30 minutes"), set `schedule_config.interval_minutes` accordingly.
@@ -47,13 +60,18 @@
- Default to pass-through: create a single task addressed to the provided `target_agent_name`, or to the best-fit agent identified via `tool_get_enabled_agents` when the target is unspecified (fall back to "ResearchAgent" only if no clear match is found).
- Set `pattern` to `once` unless the user explicitly confirms recurring intent.
-- For recurring tasks, parse schedule information from the query and populate `schedule_config` if time interval or daily time is specified.
-- Avoid query optimization and task splitting.
+- For recurring tasks with schedules: extract the core task requirement and transform it into a single-execution form:
+ * Remove time-related phrases (these go into `schedule_config`)
+ * Remove notification/monitoring verbs: "alert", "notify", "remind", "inform", "send notification", "let me know", "tell me when"
+ * Convert to direct action: "Monitor X and notify if Y" → "Check X for Y"
+ * The query should be executable once without implying ongoing monitoring
+- Avoid query optimization and task splitting, but DO transform queries for scheduled tasks into single-execution form.
- If the request is clearly unusable (illegal content or impossible instruction), return `adequate: false` with a short reason and no tasks. Provide a `guidance_message` explaining why the request cannot be processed.
-- If the request suggests recurring monitoring, return `adequate: false` with a confirmation question in `guidance_message`; after explicit confirmation, create a single `recurring` task with the original query unchanged.
+- If the request suggests recurring monitoring or scheduled tasks, return `adequate: false` with a confirmation question in `guidance_message`.
+- When waiting for confirmation: check conversation history to detect if the previous response was a confirmation request. If yes, and user responds with confirmation words (yes/ok/confirm/proceed), use the ORIGINAL query from history to create the task, NOT the confirmation response itself.
- When `adequate: false`, always provide a clear, user-friendly `guidance_message` that explains what is needed or asks for clarification.
@@ -128,6 +146,7 @@
+// Normal contextual continuation (NOT a confirmation scenario)
Input:
{
"target_agent_name": "ResearchAgent",
@@ -164,7 +183,8 @@
"guidance_message": "I understand you want to monitor Apple's quarterly earnings. Do you want me to set up a recurring task that checks for updates regularly, or would you prefer a one-time analysis of their latest earnings?"
}
-// Step 2: user confirms
+// Step 2: user confirms with simple "yes"
+// IMPORTANT: Use conversation history to retrieve the ORIGINAL query, not "Yes, set up regular updates"
Input:
{
"target_agent_name": "ResearchAgent",
@@ -175,28 +195,46 @@
{
"tasks": [
{
- "query": "Yes, set up regular updates",
+ "query": "Monitor Apple's quarterly earnings and notify me each time they release results",
"agent_name": "ResearchAgent",
"pattern": "recurring"
}
],
"adequate": true,
- "reason": "User confirmed recurring intent; created a single recurring task with the original query."
+ "reason": "User confirmed recurring intent; created recurring task with the ORIGINAL query from history."
}
+// Step 1: Detect schedule and request confirmation
Input:
{
"target_agent_name": "ResearchAgent",
"query": "Check Tesla stock price every hour and alert me if there's significant change"
}
+Output:
+{
+ "tasks": [],
+ "adequate": false,
+ "reason": "Scheduled task requires user confirmation.",
+ "guidance_message": "I understand you want to check Tesla's stock price every hour and get alerts on significant changes. This will set up a recurring task that runs automatically every 60 minutes. Should I proceed with this scheduled task?"
+}
+
+// Step 2: User confirms
+// IMPORTANT: Extract core task WITHOUT time phrases AND convert to single-execution form.
+// Remove "alert me" (notification intent) - agent should just check and report findings.
+Input:
+{
+ "target_agent_name": "ResearchAgent",
+ "query": "Yes, please proceed"
+}
+
Output:
{
"tasks": [
{
- "query": "Check Tesla stock price every hour and alert me if there's significant change",
+ "query": "Check Tesla stock price for significant changes",
"agent_name": "ResearchAgent",
"pattern": "recurring",
"schedule_config": {
@@ -206,22 +244,39 @@
}
],
"adequate": true,
- "reason": "Created recurring task with hourly interval as specified."
+ "reason": "User confirmed scheduled task. Created recurring task with single-execution query (removed 'every hour' and 'alert me')."
}
+// Step 1: Detect daily schedule and request confirmation
Input:
{
"target_agent_name": "ResearchAgent",
"query": "Analyze market trends every day at 9 AM"
}
+Output:
+{
+ "tasks": [],
+ "adequate": false,
+ "reason": "Scheduled task requires user confirmation.",
+ "guidance_message": "I understand you want to analyze market trends every day at 9:00 AM. This will set up a recurring task that runs automatically at the same time each day. Should I proceed with this scheduled task?"
+}
+
+// Step 2: User confirms
+// IMPORTANT: Extract core task WITHOUT time phrases. "every day at 9 AM" goes to schedule_config, not query.
+Input:
+{
+ "target_agent_name": "ResearchAgent",
+ "query": "Yes, set it up"
+}
+
Output:
{
"tasks": [
{
- "query": "Analyze market trends every day at 9 AM",
+ "query": "Analyze market trends",
"agent_name": "ResearchAgent",
"pattern": "recurring",
"schedule_config": {
@@ -231,10 +286,25 @@
}
],
"adequate": true,
- "reason": "Created recurring task scheduled for 9 AM daily."
+ "reason": "User confirmed scheduled task. Created recurring task with core requirement only (removed 'every day at 9 AM' from query)."
}
+
+// Examples of transforming queries into single-execution form for scheduled tasks:
+// Original: "Monitor AAPL stock and notify me if it drops below $150"
+// Transformed: "Check AAPL stock price relative to $150 threshold"
+//
+// Original: "Keep track of Bitcoin price and let me know when it reaches $50k"
+// Transformed: "Check Bitcoin price relative to $50k target"
+//
+// Original: "Watch for new AI research papers and alert me about important ones"
+// Transformed: "Find and evaluate new AI research papers for importance"
+//
+// Original: "Send me a reminder to review my portfolio"
+// Transformed: "Review portfolio and provide analysis"
+
+
Input:
{
From fb1c806718203dec2e3c1f709c277548ac7ab1bb Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Wed, 22 Oct 2025 10:06:08 +0800
Subject: [PATCH 07/30] feat: implement task cancellation support in
orchestrator and planner, enhance task management capabilities
---
.../valuecell/core/coordinate/orchestrator.py | 23 ++++--
python/valuecell/core/coordinate/planner.py | 75 +++++++++++++++++++
.../core/coordinate/planner_prompts.py | 28 +++++++
python/valuecell/core/task/manager.py | 8 ++
4 files changed, 129 insertions(+), 5 deletions(-)
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index a51aab770..b2ecbf8ea 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -160,8 +160,8 @@ def __init__(self):
# Initialize execution context management
self._execution_contexts: Dict[str, ExecutionContext] = {}
- # Initialize planner
- self.planner = ExecutionPlanner(self.agent_connections)
+ # Initialize planner with task_manager for task cancellation support
+ self.planner = ExecutionPlanner(self.agent_connections, self.task_manager)
# Initialize Super Agent (triage/frontline agent)
self.super_agent = SuperAgent()
@@ -670,7 +670,7 @@ async def _execute_single_task_run(
conversation_id=conversation_id,
thread_id=thread_id,
task_id=task_id,
- agent_name=agent_name
+ agent_name=agent_name,
)
continue
@@ -837,6 +837,11 @@ async def _execute_task_with_input_support(
# Execute task with optional scheduling loop
while True:
+ # Check if task was cancelled
+ if task.is_finished():
+ logger.info(f"Task {task_id} was cancelled, stopping execution")
+ break
+
# Execute a single run of the task
async for response in self._execute_single_task_run(task, metadata):
yield response
@@ -860,8 +865,16 @@ async def _execute_task_with_input_support(
# task_id=task_id,
# content=f"Next execution scheduled at {next_run_time.strftime('%Y-%m-%d %H:%M:%S')}",
# )
- # Wait for the next scheduled execution
- await asyncio.sleep(delay)
+ # Wait for the next scheduled execution (check cancellation periodically)
+ for _ in range(int(delay / ASYNC_SLEEP_INTERVAL)):
+ if task.is_finished():
+ logger.info(f"Task {task_id} was cancelled during sleep")
+ break
+ await asyncio.sleep(ASYNC_SLEEP_INTERVAL)
+
+ # Final check after sleep
+ if task.is_finished():
+ break
# Complete task successfully
await self.task_manager.complete_task(task_id)
diff --git a/python/valuecell/core/coordinate/planner.py b/python/valuecell/core/coordinate/planner.py
index 33e1d7eb0..65becb1de 100644
--- a/python/valuecell/core/coordinate/planner.py
+++ b/python/valuecell/core/coordinate/planner.py
@@ -25,6 +25,7 @@
PLANNER_INSTRUCTION,
)
from valuecell.core.task import Task, TaskPattern, TaskStatus
+from valuecell.core.task.manager import TaskManager
from valuecell.core.task.models import ScheduleConfig
from valuecell.core.types import UserInput
from valuecell.utils import generate_uuid
@@ -87,14 +88,18 @@ class ExecutionPlanner:
def __init__(
self,
agent_connections: RemoteConnections,
+ task_manager: TaskManager,
):
self.agent_connections = agent_connections
+ self.task_manager = task_manager
self.planner_agent = Agent(
model=get_model("PLANNER_MODEL_ID"),
tools=[
# TODO: enable UserControlFlowTools when stable
# UserControlFlowTools(),
self.tool_get_enabled_agents,
+ self.tool_get_active_tasks,
+ self.tool_cancel_task,
],
debug_mode=agent_debug_mode_enabled(),
instructions=[PLANNER_INSTRUCTION],
@@ -313,6 +318,76 @@ def tool_get_enabled_agents(self) -> str:
parts.append((f"{agent_name}>\n"))
return "\n".join(parts)
+ def tool_get_active_tasks(self, conversation_id: str) -> str:
+ """
+ Get all active (non-finished) tasks for a conversation.
+
+ This function returns a list of active tasks that can be cancelled by the user.
+ Use this when the user wants to view or cancel existing tasks.
+
+ Args:
+ conversation_id: The conversation ID to get tasks for
+
+ Returns:
+ str: A formatted list of active tasks with their details
+ """
+ if not self.task_manager:
+ return "Task manager not available."
+
+ tasks = self.task_manager.get_active_tasks(conversation_id)
+ if not tasks:
+ return "No active tasks found for this conversation."
+
+ parts = ["Active tasks:"]
+ for i, task in enumerate(tasks, 1):
+ schedule_info = ""
+ if task.schedule_config:
+ if task.schedule_config.interval_minutes:
+ schedule_info = (
+ f" (every {task.schedule_config.interval_minutes} min)"
+ )
+ elif task.schedule_config.daily_time:
+ schedule_info = f" (daily at {task.schedule_config.daily_time})"
+
+ parts.append(
+ f"{i}. Task ID: {task.task_id}\n"
+ f" Agent: {task.agent_name}\n"
+ f" Query: {task.query}\n"
+ f" Pattern: {task.pattern.value}{schedule_info}\n"
+ f" Status: {task.status.value}"
+ )
+ return "\n".join(parts)
+
+ async def tool_cancel_task(self, task_id: str) -> str:
+ """
+ Cancel a specific task by its task ID.
+
+ Use this when the user explicitly requests to cancel or stop a task.
+
+ Args:
+ task_id: The ID of the task to cancel
+
+ Returns:
+ str: Confirmation message indicating success or failure
+ """
+ if not self.task_manager:
+ return "Task manager not available."
+
+ task = self.task_manager._get_task(task_id)
+ if not task:
+ return f"Task {task_id} not found."
+
+ if task.is_finished():
+ return f"Task {task_id} is already finished and cannot be cancelled."
+
+ # Mark task for cancellation (the actual cancellation will be handled by orchestrator)
+ # We just mark it here and return success
+ task.cancel_task()
+ task.updated_at = datetime.now()
+ return (
+ f"Successfully cancelled task {task_id} ({task.agent_name}: {task.query})."
+ )
+
def agentcard_to_prompt(card: AgentCard):
"""Convert an AgentCard to an LLM-friendly prompt string.
diff --git a/python/valuecell/core/coordinate/planner_prompts.py b/python/valuecell/core/coordinate/planner_prompts.py
index 8d48f5181..81f6a4a41 100644
--- a/python/valuecell/core/coordinate/planner_prompts.py
+++ b/python/valuecell/core/coordinate/planner_prompts.py
@@ -51,6 +51,13 @@
6) Agent targeting policy
- Trust the specified agent's capabilities; do not over-validate or split into multiple tasks.
+
+7) Task cancellation
+- If the user requests to cancel, stop, or terminate a task, use `tool_get_active_tasks` to retrieve active tasks for the current conversation.
+- If the user specifies which task to cancel (by description or context), identify the matching task and use `tool_cancel_task` with the task_id.
+- If multiple active tasks exist and the user doesn't specify which one, list the active tasks in `guidance_message` and ask for clarification.
+- After successfully cancelling a task, return `adequate: false` with a confirmation message in `guidance_message` (no new tasks needed).
+- Cancellation keywords: "cancel", "stop", "terminate", "kill", "abort", "remove" followed by task/job context.
"""
@@ -305,6 +312,27 @@
// Transformed: "Review portfolio and provide analysis"
+
+// User wants to cancel a scheduled task
+Input:
+{
+ "target_agent_name": null,
+ "query": "Cancel the Tesla stock monitoring task"
+}
+
+// Step 1: Get active tasks and cancel the matching one
+// tool_get_active_tasks returns: Task ID abc123, Agent: ResearchAgent, Query: "Check Tesla stock price for significant changes", Pattern: recurring (every 60 min)
+// tool_cancel_task(abc123) returns: "Successfully cancelled task abc123."
+
+Output:
+{
+ "tasks": [],
+ "adequate": false,
+ "reason": "Task cancellation completed.",
+ "guidance_message": "I've cancelled the Tesla stock monitoring task (Task ID: abc123). The task will no longer run."
+}
+
+
Input:
{
diff --git a/python/valuecell/core/task/manager.py b/python/valuecell/core/task/manager.py
index 0b48426b1..8625d2c88 100644
--- a/python/valuecell/core/task/manager.py
+++ b/python/valuecell/core/task/manager.py
@@ -26,6 +26,14 @@ async def update_task(self, task: Task) -> None:
def _get_task(self, task_id: str) -> Task | None:
return self._tasks.get(task_id)
+ def get_active_tasks(self, conversation_id: str) -> list[Task]:
+ """Get all active (non-finished) tasks for a conversation"""
+ return [
+ task
+ for task in self._tasks.values()
+ if task.conversation_id == conversation_id and not task.is_finished()
+ ]
+
# Task status management
async def start_task(self, task_id: str) -> bool:
"""Start task execution"""
From 9cc7d6269de26ef7894673aaf9f59a989a3082b8 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Wed, 22 Oct 2025 13:33:22 +0800
Subject: [PATCH 08/30] Revert "feat: implement task cancellation support in
orchestrator and planner, enhance task management capabilities"
This reverts commit fb1c806718203dec2e3c1f709c277548ac7ab1bb.
---
.../valuecell/core/coordinate/orchestrator.py | 23 ++----
python/valuecell/core/coordinate/planner.py | 75 -------------------
.../core/coordinate/planner_prompts.py | 28 -------
python/valuecell/core/task/manager.py | 8 --
4 files changed, 5 insertions(+), 129 deletions(-)
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index b2ecbf8ea..a51aab770 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -160,8 +160,8 @@ def __init__(self):
# Initialize execution context management
self._execution_contexts: Dict[str, ExecutionContext] = {}
- # Initialize planner with task_manager for task cancellation support
- self.planner = ExecutionPlanner(self.agent_connections, self.task_manager)
+ # Initialize planner
+ self.planner = ExecutionPlanner(self.agent_connections)
# Initialize Super Agent (triage/frontline agent)
self.super_agent = SuperAgent()
@@ -670,7 +670,7 @@ async def _execute_single_task_run(
conversation_id=conversation_id,
thread_id=thread_id,
task_id=task_id,
- agent_name=agent_name,
+ agent_name=agent_name
)
continue
@@ -837,11 +837,6 @@ async def _execute_task_with_input_support(
# Execute task with optional scheduling loop
while True:
- # Check if task was cancelled
- if task.is_finished():
- logger.info(f"Task {task_id} was cancelled, stopping execution")
- break
-
# Execute a single run of the task
async for response in self._execute_single_task_run(task, metadata):
yield response
@@ -865,16 +860,8 @@ async def _execute_task_with_input_support(
# task_id=task_id,
# content=f"Next execution scheduled at {next_run_time.strftime('%Y-%m-%d %H:%M:%S')}",
# )
- # Wait for the next scheduled execution (check cancellation periodically)
- for _ in range(int(delay / ASYNC_SLEEP_INTERVAL)):
- if task.is_finished():
- logger.info(f"Task {task_id} was cancelled during sleep")
- break
- await asyncio.sleep(ASYNC_SLEEP_INTERVAL)
-
- # Final check after sleep
- if task.is_finished():
- break
+ # Wait for the next scheduled execution
+ await asyncio.sleep(delay)
# Complete task successfully
await self.task_manager.complete_task(task_id)
diff --git a/python/valuecell/core/coordinate/planner.py b/python/valuecell/core/coordinate/planner.py
index 65becb1de..33e1d7eb0 100644
--- a/python/valuecell/core/coordinate/planner.py
+++ b/python/valuecell/core/coordinate/planner.py
@@ -25,7 +25,6 @@
PLANNER_INSTRUCTION,
)
from valuecell.core.task import Task, TaskPattern, TaskStatus
-from valuecell.core.task.manager import TaskManager
from valuecell.core.task.models import ScheduleConfig
from valuecell.core.types import UserInput
from valuecell.utils import generate_uuid
@@ -88,18 +87,14 @@ class ExecutionPlanner:
def __init__(
self,
agent_connections: RemoteConnections,
- task_manager: TaskManager,
):
self.agent_connections = agent_connections
- self.task_manager = task_manager
self.planner_agent = Agent(
model=get_model("PLANNER_MODEL_ID"),
tools=[
# TODO: enable UserControlFlowTools when stable
# UserControlFlowTools(),
self.tool_get_enabled_agents,
- self.tool_get_active_tasks,
- self.tool_cancel_task,
],
debug_mode=agent_debug_mode_enabled(),
instructions=[PLANNER_INSTRUCTION],
@@ -318,76 +313,6 @@ def tool_get_enabled_agents(self) -> str:
parts.append((f"{agent_name}>\n"))
return "\n".join(parts)
- def tool_get_active_tasks(self, conversation_id: str) -> str:
- """
- Get all active (non-finished) tasks for a conversation.
-
- This function returns a list of active tasks that can be cancelled by the user.
- Use this when the user wants to view or cancel existing tasks.
-
- Args:
- conversation_id: The conversation ID to get tasks for
-
- Returns:
- str: A formatted list of active tasks with their details
- """
- if not self.task_manager:
- return "Task manager not available."
-
- tasks = self.task_manager.get_active_tasks(conversation_id)
- if not tasks:
- return "No active tasks found for this conversation."
-
- parts = ["Active tasks:"]
- for i, task in enumerate(tasks, 1):
- schedule_info = ""
- if task.schedule_config:
- if task.schedule_config.interval_minutes:
- schedule_info = (
- f" (every {task.schedule_config.interval_minutes} min)"
- )
- elif task.schedule_config.daily_time:
- schedule_info = f" (daily at {task.schedule_config.daily_time})"
-
- parts.append(
- f"{i}. Task ID: {task.task_id}\n"
- f" Agent: {task.agent_name}\n"
- f" Query: {task.query}\n"
- f" Pattern: {task.pattern.value}{schedule_info}\n"
- f" Status: {task.status.value}"
- )
- return "\n".join(parts)
-
- async def tool_cancel_task(self, task_id: str) -> str:
- """
- Cancel a specific task by its task ID.
-
- Use this when the user explicitly requests to cancel or stop a task.
-
- Args:
- task_id: The ID of the task to cancel
-
- Returns:
- str: Confirmation message indicating success or failure
- """
- if not self.task_manager:
- return "Task manager not available."
-
- task = self.task_manager._get_task(task_id)
- if not task:
- return f"Task {task_id} not found."
-
- if task.is_finished():
- return f"Task {task_id} is already finished and cannot be cancelled."
-
- # Mark task for cancellation (the actual cancellation will be handled by orchestrator)
- # We just mark it here and return success
- task.cancel_task()
- task.updated_at = datetime.now()
- return (
- f"Successfully cancelled task {task_id} ({task.agent_name}: {task.query})."
- )
-
def agentcard_to_prompt(card: AgentCard):
"""Convert an AgentCard to an LLM-friendly prompt string.
diff --git a/python/valuecell/core/coordinate/planner_prompts.py b/python/valuecell/core/coordinate/planner_prompts.py
index 81f6a4a41..8d48f5181 100644
--- a/python/valuecell/core/coordinate/planner_prompts.py
+++ b/python/valuecell/core/coordinate/planner_prompts.py
@@ -51,13 +51,6 @@
6) Agent targeting policy
- Trust the specified agent's capabilities; do not over-validate or split into multiple tasks.
-
-7) Task cancellation
-- If the user requests to cancel, stop, or terminate a task, use `tool_get_active_tasks` to retrieve active tasks for the current conversation.
-- If the user specifies which task to cancel (by description or context), identify the matching task and use `tool_cancel_task` with the task_id.
-- If multiple active tasks exist and the user doesn't specify which one, list the active tasks in `guidance_message` and ask for clarification.
-- After successfully cancelling a task, return `adequate: false` with a confirmation message in `guidance_message` (no new tasks needed).
-- Cancellation keywords: "cancel", "stop", "terminate", "kill", "abort", "remove" followed by task/job context.
"""
@@ -312,27 +305,6 @@
// Transformed: "Review portfolio and provide analysis"
-
-// User wants to cancel a scheduled task
-Input:
-{
- "target_agent_name": null,
- "query": "Cancel the Tesla stock monitoring task"
-}
-
-// Step 1: Get active tasks and cancel the matching one
-// tool_get_active_tasks returns: Task ID abc123, Agent: ResearchAgent, Query: "Check Tesla stock price for significant changes", Pattern: recurring (every 60 min)
-// tool_cancel_task(abc123) returns: "Successfully cancelled task abc123."
-
-Output:
-{
- "tasks": [],
- "adequate": false,
- "reason": "Task cancellation completed.",
- "guidance_message": "I've cancelled the Tesla stock monitoring task (Task ID: abc123). The task will no longer run."
-}
-
-
Input:
{
diff --git a/python/valuecell/core/task/manager.py b/python/valuecell/core/task/manager.py
index 8625d2c88..0b48426b1 100644
--- a/python/valuecell/core/task/manager.py
+++ b/python/valuecell/core/task/manager.py
@@ -26,14 +26,6 @@ async def update_task(self, task: Task) -> None:
def _get_task(self, task_id: str) -> Task | None:
return self._tasks.get(task_id)
- def get_active_tasks(self, conversation_id: str) -> list[Task]:
- """Get all active (non-finished) tasks for a conversation"""
- return [
- task
- for task in self._tasks.values()
- if task.conversation_id == conversation_id and not task.is_finished()
- ]
-
# Task status management
async def start_task(self, task_id: str) -> bool:
"""Start task execution"""
From 4a4cb25b763330b9633db42a6482b287e6fdd771 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Wed, 22 Oct 2025 13:34:09 +0800
Subject: [PATCH 09/30] feat: add task cancellation checks during execution and
sleep intervals
---
.../valuecell/core/coordinate/orchestrator.py | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index a51aab770..16911d95c 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -837,6 +837,11 @@ async def _execute_task_with_input_support(
# Execute task with optional scheduling loop
while True:
+ # Check if task was cancelled
+ if task.is_finished():
+ logger.info(f"Task {task_id} was cancelled, stopping execution")
+ break
+
# Execute a single run of the task
async for response in self._execute_single_task_run(task, metadata):
yield response
@@ -860,8 +865,16 @@ async def _execute_task_with_input_support(
# task_id=task_id,
# content=f"Next execution scheduled at {next_run_time.strftime('%Y-%m-%d %H:%M:%S')}",
# )
- # Wait for the next scheduled execution
- await asyncio.sleep(delay)
+ # Wait for the next scheduled execution (check cancellation periodically)
+ for _ in range(int(delay / ASYNC_SLEEP_INTERVAL)):
+ if task.is_finished():
+ logger.info(f"Task {task_id} was cancelled during sleep")
+ break
+ await asyncio.sleep(ASYNC_SLEEP_INTERVAL)
+
+ # Final check after sleep
+ if task.is_finished():
+ break
# Complete task successfully
await self.task_manager.complete_task(task_id)
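
The patch above replaces one long sleep with an interruptible wait so a cancelled task stops promptly. Below is a minimal, self-contained sketch of that pattern, assuming a simplified stand-in Task with an is_finished() flag and an illustrative ASYNC_SLEEP_INTERVAL value; it is not the orchestrator code itself.

import asyncio
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

ASYNC_SLEEP_INTERVAL = 1.0  # seconds between cancellation checks (assumed value)


class DummyTask:
    """Stand-in for the orchestrator Task; only models the finished flag."""

    def __init__(self, task_id: str) -> None:
        self.task_id = task_id
        self._finished = False

    def is_finished(self) -> bool:
        return self._finished

    def cancel(self) -> None:
        self._finished = True


async def sleep_until_next_run(task: DummyTask, delay: float) -> bool:
    """Sleep for `delay` seconds in short slices; return False if the task was cancelled."""
    for _ in range(int(delay / ASYNC_SLEEP_INTERVAL)):
        if task.is_finished():
            logger.info("Task %s was cancelled during sleep", task.task_id)
            return False
        await asyncio.sleep(ASYNC_SLEEP_INTERVAL)
    return not task.is_finished()


async def main() -> None:
    task = DummyTask("demo")
    # Cancel the task from "outside" after ~2.5 seconds.
    asyncio.get_running_loop().call_later(2.5, task.cancel)
    should_continue = await sleep_until_next_run(task, delay=10.0)
    print("continue scheduling:", should_continue)


if __name__ == "__main__":
    asyncio.run(main())

The same idea appears again later in the series when the commented-out waiting message is removed: the wait itself is unchanged, only the TODO block around it goes away.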
From 7bc644d8d38c41b4d1646fd6ce4d056141bbc03a Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Wed, 22 Oct 2025 15:03:33 +0800
Subject: [PATCH 10/30] feat: add title field to tasks and enhance scheduled
task handling in orchestrator and planner
---
python/valuecell/core/coordinate/models.py | 3 ++
.../valuecell/core/coordinate/orchestrator.py | 19 ++++++++-
python/valuecell/core/coordinate/planner.py | 41 +++++++++----------
.../core/coordinate/planner_prompts.py | 8 ++++
python/valuecell/core/task/models.py | 10 ++++-
python/valuecell/core/types.py | 11 +++++
6 files changed, 67 insertions(+), 25 deletions(-)
diff --git a/python/valuecell/core/coordinate/models.py b/python/valuecell/core/coordinate/models.py
index 89ed4ac3b..f740a01e6 100644
--- a/python/valuecell/core/coordinate/models.py
+++ b/python/valuecell/core/coordinate/models.py
@@ -37,6 +37,9 @@ class _TaskBrief(BaseModel):
before being converted to a full Task object.
"""
+ title: str = Field(
+ ..., description="A concise task title or summary (<=10 words or characters)"
+ )
query: str = Field(..., description="The task to be performed")
agent_name: str = Field(..., description="Name of the agent executing this task")
pattern: TaskPattern = Field(
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index 16911d95c..13e5ccd84 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -41,6 +41,7 @@
BaseResponse,
ComponentType,
ConversationItemEvent,
+ ScheduledTaskComponentContent,
StreamResponseEvent,
SubagentConversationPhase,
UserInput,
@@ -670,7 +671,7 @@ async def _execute_single_task_run(
conversation_id=conversation_id,
thread_id=thread_id,
task_id=task_id,
- agent_name=agent_name
+ agent_name=agent_name,
)
continue
@@ -835,6 +836,22 @@ async def _execute_task_with_input_support(
TIMEZONE: get_current_timezone(),
}
+ if task.schedule_config:
+ yield self._response_factory.component_generator(
+ conversation_id=conversation_id,
+ thread_id=thread_id,
+ task_id=task_id,
+ content=ScheduledTaskComponentContent(
+ task_id=task_id,
+ task_title=task.title,
+ ).model_dump_json(exclude_none=True),
+ component_type=ComponentType.SCHEDULED_TASK_CONTROLLER.value,
+ agent_name=task.agent_name,
+ )
+ yield self._response_factory.done(
+ conversation_id=conversation_id, thread_id=thread_id
+ )
+
# Execute task with optional scheduling loop
while True:
# Check if task was cancelled
diff --git a/python/valuecell/core/coordinate/planner.py b/python/valuecell/core/coordinate/planner.py
index 33e1d7eb0..bec8ffc2f 100644
--- a/python/valuecell/core/coordinate/planner.py
+++ b/python/valuecell/core/coordinate/planner.py
@@ -24,8 +24,7 @@
PLANNER_EXPECTED_OUTPUT,
PLANNER_INSTRUCTION,
)
-from valuecell.core.task import Task, TaskPattern, TaskStatus
-from valuecell.core.task.models import ScheduleConfig
+from valuecell.core.task import Task, TaskStatus
from valuecell.core.types import UserInput
from valuecell.utils import generate_uuid
from valuecell.utils.env import agent_debug_mode_enabled
@@ -225,31 +224,26 @@ async def _analyze_input_and_create_tasks(
return [], guidance_message # Return empty task list with guidance
# Create tasks from planner response
- tasks = [
- self._create_task(
- user_input.meta.user_id,
- task.agent_name,
- task.query,
- conversation_id=user_input.meta.conversation_id,
- thread_id=thread_id,
- pattern=task.pattern,
- schedule_config=task.schedule_config,
- handoff_from_super_agent=(not user_input.target_agent_name),
+ tasks = []
+ for t in plan_raw.tasks:
+ tasks.append(
+ self._create_task(
+ t,
+ user_input.meta.user_id,
+ conversation_id=user_input.meta.conversation_id,
+ thread_id=thread_id,
+ handoff_from_super_agent=(not user_input.target_agent_name),
+ )
)
- for task in plan_raw.tasks
- ]
return tasks, None # Return tasks with no guidance message
def _create_task(
self,
+ task_brief,
user_id: str,
- agent_name: str,
- query: str,
conversation_id: str | None = None,
thread_id: str | None = None,
- pattern: TaskPattern = TaskPattern.ONCE,
- schedule_config: Optional[ScheduleConfig] = None,
handoff_from_super_agent: bool = False,
) -> Task:
"""
@@ -266,6 +260,8 @@ def _create_task(
Returns:
Task: Configured task ready for execution.
"""
+ # task_brief is a _TaskBrief model instance
+
if handoff_from_super_agent:
conversation_id = generate_conversation_id()
thread_id = generate_thread_id()
@@ -274,11 +270,12 @@ def _create_task(
conversation_id=conversation_id,
thread_id=thread_id,
user_id=user_id,
- agent_name=agent_name,
+ agent_name=task_brief.agent_name,
status=TaskStatus.PENDING,
- query=query,
- pattern=pattern,
- schedule_config=schedule_config,
+ title=task_brief.title,
+ query=task_brief.query,
+ pattern=task_brief.pattern,
+ schedule_config=task_brief.schedule_config,
handoff_from_super_agent=handoff_from_super_agent,
)
diff --git a/python/valuecell/core/coordinate/planner_prompts.py b/python/valuecell/core/coordinate/planner_prompts.py
index 8d48f5181..3f3fa1638 100644
--- a/python/valuecell/core/coordinate/planner_prompts.py
+++ b/python/valuecell/core/coordinate/planner_prompts.py
@@ -60,6 +60,7 @@
- Default to pass-through: create a single task addressed to the provided `target_agent_name`, or to the best-fit agent identified via `tool_get_enabled_agents` when the target is unspecified (fall back to "ResearchAgent" only if no clear match is found).
- Set `pattern` to `once` unless the user explicitly confirms recurring intent.
+- For each task, also provide a concise `title` summarizing the task. Keep it short: no more than 10 words (if space-delimited) or 10 characters (for CJK/no-space text).
- For recurring tasks with schedules: extract the core task requirement and transform it into a single-execution form:
* Remove time-related phrases (these go into `schedule_config`)
* Remove notification/monitoring verbs: "alert", "notify", "remind", "inform", "send notification", "let me know", "tell me when"
@@ -84,6 +85,7 @@
{
"tasks": [
{
+ "title": "Short task title (<= 10 words or characters)",
"query": "User's original query, unchanged",
"agent_name": "target_agent_name (or best-fit agent selected via tool_get_enabled_agents when not provided)",
"pattern": "once" | "recurring",
@@ -114,6 +116,7 @@
{
"tasks": [
{
+ "title": "Tesla Q3 revenue",
"query": "What was Tesla's Q3 2024 revenue?",
"agent_name": "ResearchAgent",
"pattern": "once"
@@ -135,6 +138,7 @@
{
"tasks": [
{
+ "title": "Market trends",
"query": "Analyze the latest market trends",
"agent_name": "ResearchAgent",
"pattern": "once"
@@ -157,6 +161,7 @@
{
"tasks": [
{
+ "title": "Go on",
"query": "Go on",
"agent_name": "ResearchAgent",
"pattern": "once"
@@ -195,6 +200,7 @@
{
"tasks": [
{
+ "title": "Apple earnings monitor",
"query": "Monitor Apple's quarterly earnings and notify me each time they release results",
"agent_name": "ResearchAgent",
"pattern": "recurring"
@@ -234,6 +240,7 @@
{
"tasks": [
{
+ "title": "Tesla price check",
"query": "Check Tesla stock price for significant changes",
"agent_name": "ResearchAgent",
"pattern": "recurring",
@@ -276,6 +283,7 @@
{
"tasks": [
{
+ "title": "Market trends",
"query": "Analyze market trends",
"agent_name": "ResearchAgent",
"pattern": "recurring",
diff --git a/python/valuecell/core/task/models.py b/python/valuecell/core/task/models.py
index c379987ea..b23bdd612 100644
--- a/python/valuecell/core/task/models.py
+++ b/python/valuecell/core/task/models.py
@@ -29,10 +29,12 @@ class ScheduleConfig(BaseModel):
"""Schedule configuration for recurring tasks"""
interval_minutes: Optional[int] = Field(
- None, description="Interval in minutes for recurring execution (e.g., 60 for every hour)"
+ None,
+ description="Interval in minutes for recurring execution (e.g., 60 for every hour)",
)
daily_time: Optional[str] = Field(
- None, description="Daily execution time in HH:MM format (e.g., '09:00' for 9 AM)"
+ None,
+ description="Daily execution time in HH:MM format (e.g., '09:00' for 9 AM)",
)
@@ -46,6 +48,10 @@ class Task(BaseModel):
default_factory=list,
description="Task identifier determined by the remote agent after submission",
)
+ title: str = Field(
+ ...,
+ description="A concise task title or summary (<=10 words or characters)",
+ )
query: str = Field(..., description="The task to be performed")
conversation_id: str = Field(
..., description="Conversation ID this task belongs to"
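
ScheduleConfig only carries the raw interval_minutes / daily_time values; converting them into a concrete sleep delay happens in temporal.py, which is not shown in this excerpt. The sketch below is one plausible way such a delay could be derived and is an assumption for illustration, not the project's actual calculate_next_execution_delay.

from datetime import datetime, timedelta
from typing import Optional

from pydantic import BaseModel, Field


class ScheduleConfig(BaseModel):
    """Schedule configuration for recurring tasks (fields as in this patch)."""

    interval_minutes: Optional[int] = Field(
        None,
        description="Interval in minutes for recurring execution (e.g., 60 for every hour)",
    )
    daily_time: Optional[str] = Field(
        None,
        description="Daily execution time in HH:MM format (e.g., '09:00' for 9 AM)",
    )


def next_delay_seconds(config: ScheduleConfig, now: Optional[datetime] = None) -> Optional[float]:
    """Hypothetical helper: translate a ScheduleConfig into a sleep delay in seconds."""
    now = now or datetime.now()
    if config.interval_minutes:
        return config.interval_minutes * 60.0
    if config.daily_time:
        hour, minute = map(int, config.daily_time.split(":"))
        target = now.replace(hour=hour, minute=minute, second=0, microsecond=0)
        if target <= now:
            target += timedelta(days=1)  # today's slot already passed; run tomorrow
        return (target - now).total_seconds()
    return None  # no schedule configured: treat as a one-shot task


if __name__ == "__main__":
    print(next_delay_seconds(ScheduleConfig(interval_minutes=60)))   # 3600.0
    print(next_delay_seconds(ScheduleConfig(daily_time="09:00")))    # seconds until the next 09:00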
diff --git a/python/valuecell/core/types.py b/python/valuecell/core/types.py
index cdfbe8b3b..d45ec70a5 100644
--- a/python/valuecell/core/types.py
+++ b/python/valuecell/core/types.py
@@ -152,10 +152,21 @@ class ComponentType(str, Enum):
REPORT = "report"
PROFILE = "profile"
SUBAGENT_CONVERSATION = "subagent_conversation"
+ SCHEDULED_TASK_CONTROLLER = "scheduled_task_controller"
+ SCHEDULED_TASK_RESULT = "scheduled_task_result"
FILTERED_LINE_CHART = "filtered_line_chart"
FILTERED_CARD_PUSH_NOTIFICATION = "filtered_card_push_notification"
+class ScheduledTaskComponentContent(BaseModel):
+ """Scheduled task component extended content."""
+
+ task_id: Optional[str] = Field(None, description="The scheduled task ID")
+ task_title: Optional[str] = Field(None, description="The scheduled task title")
+ result: Optional[str] = Field(None, description="The scheduled task result")
+ create_time: Optional[str] = Field(None, description="The scheduled task created time")
+
+
class SubagentConversationPhase(str, Enum):
"""Phases for subagent conversation component."""
From dc83345ffab26beb457f213f9621a7ecd4635f3d Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Wed, 22 Oct 2025 15:49:14 +0800
Subject: [PATCH 11/30] feat: refactor EventPredicates methods for consistency
and add ScheduledTaskResultAccumulator for task output handling
---
python/valuecell/core/agent/responses.py | 20 +++---
.../valuecell/core/coordinate/orchestrator.py | 72 ++++++++++++++++++-
2 files changed, 80 insertions(+), 12 deletions(-)
diff --git a/python/valuecell/core/agent/responses.py b/python/valuecell/core/agent/responses.py
index 21c89f7ca..690eef12f 100644
--- a/python/valuecell/core/agent/responses.py
+++ b/python/valuecell/core/agent/responses.py
@@ -225,7 +225,7 @@ class EventPredicates:
"""
@staticmethod
- def is_task_completed(response_type) -> bool:
+ def is_task_completed(response_event) -> bool:
"""Check if the response type indicates task completion.
Args:
@@ -234,12 +234,12 @@ def is_task_completed(response_type) -> bool:
Returns:
True if the event indicates task completion
"""
- return response_type in {
+ return response_event in {
TaskStatusEvent.TASK_COMPLETED,
}
@staticmethod
- def is_task_failed(response_type) -> bool:
+ def is_task_failed(response_event) -> bool:
"""Check if the response type indicates task failure.
Args:
@@ -248,12 +248,12 @@ def is_task_failed(response_type) -> bool:
Returns:
True if the event indicates task failure
"""
- return response_type in {
+ return response_event in {
TaskStatusEvent.TASK_FAILED,
}
@staticmethod
- def is_tool_call(response_type) -> bool:
+ def is_tool_call(response_event) -> bool:
"""Check if the response type indicates a tool call event.
Args:
@@ -262,13 +262,13 @@ def is_tool_call(response_type) -> bool:
Returns:
True if the event is related to tool calls
"""
- return response_type in {
+ return response_event in {
StreamResponseEvent.TOOL_CALL_STARTED,
StreamResponseEvent.TOOL_CALL_COMPLETED,
}
@staticmethod
- def is_reasoning(response_type) -> bool:
+ def is_reasoning(response_event) -> bool:
"""Check if the response type indicates a reasoning event.
Args:
@@ -277,14 +277,14 @@ def is_reasoning(response_type) -> bool:
Returns:
True if the event is related to reasoning
"""
- return response_type in {
+ return response_event in {
StreamResponseEvent.REASONING_STARTED,
StreamResponseEvent.REASONING,
StreamResponseEvent.REASONING_COMPLETED,
}
@staticmethod
- def is_message(response_type) -> bool:
+ def is_message(response_event) -> bool:
"""Check if the response type indicates a message event.
Args:
@@ -293,7 +293,7 @@ def is_message(response_type) -> bool:
Returns:
True if the event is a message-related event
"""
- return response_type in {
+ return response_event in {
StreamResponseEvent.MESSAGE_CHUNK,
NotifyResponseEvent.MESSAGE,
}
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index 13e5ccd84..df9292e58 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -1,11 +1,13 @@
import asyncio
import json
import logging
-from typing import AsyncGenerator, Dict, Optional
+from datetime import datetime, timezone
+from typing import AsyncGenerator, Dict, Iterable, List, Optional
from a2a.types import TaskArtifactUpdateEvent, TaskState, TaskStatusUpdateEvent
from valuecell.core.agent.connect import RemoteConnections
+from valuecell.core.agent.responses import EventPredicates
from valuecell.core.constants import (
CURRENT_CONTEXT,
DEPENDENCIES,
@@ -94,6 +96,67 @@ def get_metadata(self, key: str, default=None):
return self.metadata.get(key, default)
+class ScheduledTaskResultAccumulator:
+ """Collect streaming output for a scheduled task run."""
+
+ def __init__(self, task: Task):
+ self._task = task
+ self._buffer: List[str] = []
+
+ @property
+ def enabled(self) -> bool:
+ return self._task.schedule_config is not None
+
+ def consume(self, responses: Iterable[BaseResponse]) -> List[BaseResponse]:
+ if not self.enabled:
+ return list(responses)
+
+ passthrough: List[BaseResponse] = []
+ for resp in responses:
+ event = resp.event
+
+ if EventPredicates.is_message(event):
+ payload = resp.data.payload
+ content = payload.content if payload else None
+ if content:
+ self._buffer.append(content)
+ continue
+
+ if EventPredicates.is_reasoning(event):
+ continue
+
+ if EventPredicates.is_tool_call(event):
+ continue
+
+ # passthrough.append(resp)
+
+ return passthrough
+
+ def finalize(self, response_factory: ResponseFactory) -> Optional[BaseResponse]:
+ if not self.enabled:
+ return None
+
+ content = "".join(self._buffer).strip()
+ if not content:
+ content = "Task completed without output."
+
+ component_payload = ScheduledTaskComponentContent(
+ task_id=self._task.task_id,
+ task_title=self._task.title,
+ result=content,
+ create_time=datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S"),
+ ).model_dump_json(exclude_none=True)
+
+ return response_factory.component_generator(
+ conversation_id=self._task.conversation_id,
+ thread_id=self._task.thread_id,
+ task_id=self._task.task_id,
+ content=component_payload,
+ component_type=ComponentType.SCHEDULED_TASK_RESULT.value,
+ agent_name=self._task.agent_name,
+ )
+
+
class UserInputManager:
"""Manage pending Human-in-the-Loop user input requests.
@@ -644,6 +707,7 @@ async def _execute_single_task_run(
conversation_id = task.conversation_id
thread_id = task.thread_id
task_id = task.task_id
+ aggregator = ScheduledTaskResultAccumulator(task)
# Get agent connection
agent_name = task.agent_name
@@ -679,7 +743,7 @@ async def _execute_single_task_run(
result: RouteResult = await handle_status_update(
self._response_factory, task, thread_id, event
)
- for r in result.responses:
+ for r in aggregator.consume(result.responses):
r = self._response_buffer.annotate(r)
yield r
# Apply side effects
@@ -696,6 +760,10 @@ async def _execute_single_task_run(
)
continue
+ final_component = aggregator.finalize(self._response_factory)
+ if final_component is not None:
+ yield final_component
+
async def _execute_plan_with_input_support(
self,
plan: ExecutionPlan,
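
A stripped-down sketch of the accumulate-then-finalize flow this patch introduces with ScheduledTaskResultAccumulator: per run, message chunks are buffered and reasoning/tool-call noise is dropped, then a single summary is produced at the end. The Chunk/Event shapes below are simplified stand-ins, not the real BaseResponse types.

from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional


class Event(str, Enum):
    MESSAGE_CHUNK = "message_chunk"
    REASONING = "reasoning"
    TOOL_CALL = "tool_call"


@dataclass
class Chunk:
    event: Event
    content: Optional[str] = None


@dataclass
class RunAccumulator:
    """Buffer message content for one scheduled run and emit a single result."""

    enabled: bool = True
    _buffer: List[str] = field(default_factory=list)

    def consume(self, chunks: List[Chunk]) -> List[Chunk]:
        if not self.enabled:
            return list(chunks)  # non-scheduled tasks stream everything through
        for chunk in chunks:
            if chunk.event is Event.MESSAGE_CHUNK and chunk.content:
                self._buffer.append(chunk.content)
            # reasoning and tool-call chunks are intentionally swallowed
        return []  # scheduled runs stream nothing; output arrives via finalize()

    def finalize(self) -> Optional[str]:
        if not self.enabled:
            return None
        return "".join(self._buffer).strip() or "Task completed without output."


if __name__ == "__main__":
    acc = RunAccumulator()
    acc.consume([Chunk(Event.REASONING, "thinking..."), Chunk(Event.MESSAGE_CHUNK, "TSLA is ")])
    acc.consume([Chunk(Event.MESSAGE_CHUNK, "up 2% today.")])
    print(acc.finalize())  # -> "TSLA is up 2% today."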
From f048097b59e819cacf47446fc81273dedbf3a65c Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Wed, 22 Oct 2025 15:50:08 +0800
Subject: [PATCH 12/30] fix format
---
.../valuecell/core/conversation/tests/test_conv_manager.py | 4 +++-
python/valuecell/core/coordinate/models.py | 7 ++++---
python/valuecell/core/coordinate/temporal.py | 2 +-
python/valuecell/core/types.py | 4 +++-
4 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/python/valuecell/core/conversation/tests/test_conv_manager.py b/python/valuecell/core/conversation/tests/test_conv_manager.py
index 8d2cc2dd3..a7606245e 100644
--- a/python/valuecell/core/conversation/tests/test_conv_manager.py
+++ b/python/valuecell/core/conversation/tests/test_conv_manager.py
@@ -41,7 +41,9 @@ async def test_create_conversation_minimal(self):
manager = ConversationManager()
user_id = "user-123"
- with patch("valuecell.core.conversation.manager.generate_conversation_id") as mock_uuid:
+ with patch(
+ "valuecell.core.conversation.manager.generate_conversation_id"
+ ) as mock_uuid:
mock_uuid.return_value = "conv-generated-123"
result = await manager.create_conversation(user_id)
diff --git a/python/valuecell/core/coordinate/models.py b/python/valuecell/core/coordinate/models.py
index f740a01e6..f647e092e 100644
--- a/python/valuecell/core/coordinate/models.py
+++ b/python/valuecell/core/coordinate/models.py
@@ -25,7 +25,8 @@ class ExecutionPlan(BaseModel):
tasks: List[Task] = Field(default_factory=list, description="Tasks to execute")
created_at: str = Field(..., description="Plan creation timestamp")
guidance_message: Optional[str] = Field(
- None, description="Guidance message to user when plan is inadequate or requires clarification"
+ None,
+ description="Guidance message to user when plan is inadequate or requires clarification",
)
@@ -81,6 +82,6 @@ class PlannerResponse(BaseModel):
)
reason: str = Field(..., description="Reason for the planning decision")
guidance_message: Optional[str] = Field(
- None,
- description="User-friendly guidance message when adequate is false or tasks is empty. Should provide clear direction on what is needed."
+ None,
+ description="User-friendly guidance message when adequate is false or tasks is empty. Should provide clear direction on what is needed.",
)
diff --git a/python/valuecell/core/coordinate/temporal.py b/python/valuecell/core/coordinate/temporal.py
index b80e534a0..55ccd06ac 100644
--- a/python/valuecell/core/coordinate/temporal.py
+++ b/python/valuecell/core/coordinate/temporal.py
@@ -1,6 +1,6 @@
from datetime import datetime, timedelta
-
from typing import Optional
+
from loguru import logger
diff --git a/python/valuecell/core/types.py b/python/valuecell/core/types.py
index d45ec70a5..9d77654d5 100644
--- a/python/valuecell/core/types.py
+++ b/python/valuecell/core/types.py
@@ -164,7 +164,9 @@ class ScheduledTaskComponentContent(BaseModel):
task_id: Optional[str] = Field(None, description="The scheduled task ID")
task_title: Optional[str] = Field(None, description="The scheduled task title")
result: Optional[str] = Field(None, description="The scheduled task result")
- create_time: Optional[str] = Field(None, description="The scheduled task created time")
+ create_time: Optional[str] = Field(
+ None, description="The scheduled task created time"
+ )
class SubagentConversationPhase(str, Enum):
From 9b63dd582deffa70e2b76143c1ce20c206342cb1 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Wed, 22 Oct 2025 15:52:30 +0800
Subject: [PATCH 13/30] refactor: remove unused scheduled task waiting message
code
---
python/valuecell/core/coordinate/orchestrator.py | 9 ---------
1 file changed, 9 deletions(-)
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index df9292e58..f23aaaba5 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -941,15 +941,6 @@ async def _execute_task_with_input_support(
# Schedule next execution
logger.info(f"Task {task_id} scheduled to run again in {delay} seconds")
- # TODO: yield scheduled task waiting message
- # next_run_time = datetime.now() + timedelta(seconds=delay)
- # yield self._response_factory.message_response_general(
- # event="scheduled_task_waiting",
- # conversation_id=conversation_id,
- # thread_id=thread_id,
- # task_id=task_id,
- # content=f"Next execution scheduled at {next_run_time.strftime('%Y-%m-%d %H:%M:%S')}",
- # )
# Wait for the next scheduled execution (check cancellation periodically)
for _ in range(int(delay / ASYNC_SLEEP_INTERVAL)):
if task.is_finished():
From ca614f1ad0d1e09c900801be8693e701ceb9a636 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Wed, 22 Oct 2025 23:57:04 +0800
Subject: [PATCH 14/30] feat: refactor user input processing to use a
background task for response streaming
---
.../valuecell/core/coordinate/orchestrator.py | 159 ++++++++++++------
1 file changed, 109 insertions(+), 50 deletions(-)
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index f23aaaba5..0b1e40daa 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -240,62 +240,47 @@ async def process_user_input(
self, user_input: UserInput
) -> AsyncGenerator[BaseResponse, None]:
"""
- Main entry point for processing user input with optional
- Human-in-the-Loop interactions.
+ Stream responses for a user input, decoupled from the caller's lifetime.
- The orchestrator yields streaming `BaseResponse` objects that callers
- (for example, an HTTP SSE endpoint or WebSocket) can forward to the
- client. This method handles:
- - Starting new plans when no execution context exists
- - Resuming paused executions when conversation state requires input
- - Directly providing responses to existing pending prompts
-
- Args:
- user_input: The user's input, including conversation metadata.
-
- Yields:
- BaseResponse instances representing streaming chunks, status,
- or terminal messages for the request.
+ This function now spawns a background producer task that runs the
+ planning/execution pipeline and emits responses. The async generator
+ here simply consumes from a local queue. If the consumer disconnects,
+ the background task continues, ensuring scheduled tasks and long-running
+ plans proceed independently of the SSE connection.
"""
- conversation_id = user_input.meta.conversation_id
- user_id = user_input.meta.user_id
+ # Per-invocation queue and active flag
+ queue: asyncio.Queue[Optional[BaseResponse]] = asyncio.Queue()
+ active = {"value": True}
- try:
- # Ensure conversation exists
- conversation = await self.conversation_manager.get_conversation(
- conversation_id
- )
- if not conversation:
- await self.conversation_manager.create_conversation(
- user_id, conversation_id=conversation_id
- )
- conversation = await self.conversation_manager.get_conversation(
- conversation_id
- )
- yield self._response_factory.conversation_started(
- conversation_id=conversation_id
- )
+ async def emit(item: Optional[BaseResponse]):
+ # Drop emissions if the consumer has gone away
+ if not active["value"]:
+ return
+ try:
+ await queue.put(item)
+ except Exception:
+ # Never fail producer due to queue issues; just drop
+ pass
- # Handle conversation continuation vs new request
- if conversation.status == ConversationStatus.REQUIRE_USER_INPUT:
- async for response in self._handle_conversation_continuation(
- user_input
- ):
- yield response
- else:
- async for response in self._handle_new_request(user_input):
- yield response
+ # Start background producer
+ asyncio.create_task(self._run_session(user_input, emit))
- except Exception as e:
- logger.exception(
- f"Error processing user input for conversation {conversation_id}"
- )
- yield self._response_factory.system_failed(
- conversation_id,
- f"(Error) Error processing request: {str(e)}",
- )
+ try:
+ while True:
+ item = await queue.get()
+ if item is None:
+ break
+ yield item
+ except asyncio.CancelledError:
+ # Consumer cancelled; mark inactive so producer stops enqueuing
+ active["value"] = False
+ # Do not cancel producer; it should continue independently
+ raise
finally:
- yield self._response_factory.done(conversation_id)
+ # Mark inactive to stop further enqueues
+ active["value"] = False
+ # Best-effort: if producer already finished, nothing to do
+ # We deliberately do not cancel the producer to keep execution alive
async def provide_user_input(self, conversation_id: str, response: str):
"""Submit a user's response to a pending input request.
@@ -376,6 +361,80 @@ async def cleanup(self):
# ==================== Private Helper Methods ====================
+ async def _run_session(
+ self,
+ user_input: UserInput,
+ emit: callable,
+ ):
+ """Background session runner that produces responses and emits them.
+
+ It wraps the original processing pipeline and forwards each response to
+ the provided emitter. Completion is signaled with a final None.
+ """
+ try:
+ async for response in self._generate_responses(user_input):
+ await emit(response)
+ except Exception as e:
+ # The underlying pipeline already emits system_failed + done, so this
+ # path should be rare; still, don't crash the background task.
+ logger.exception(
+ f"Unhandled error in session runner for conversation {user_input.meta.conversation_id}: {e}"
+ )
+ finally:
+ # Signal completion to the consumer (if any)
+ try:
+ await emit(None)
+ except Exception:
+ pass
+
+ async def _generate_responses(
+ self, user_input: UserInput
+ ) -> AsyncGenerator[BaseResponse, None]:
+ """Generate responses for a user input (original pipeline extracted).
+
+ This contains the previous body of process_user_input unchanged in
+ behavior, yielding the same responses in the same order.
+ """
+ conversation_id = user_input.meta.conversation_id
+ user_id = user_input.meta.user_id
+
+ try:
+ # Ensure conversation exists
+ conversation = await self.conversation_manager.get_conversation(
+ conversation_id
+ )
+ if not conversation:
+ await self.conversation_manager.create_conversation(
+ user_id, conversation_id=conversation_id
+ )
+ conversation = await self.conversation_manager.get_conversation(
+ conversation_id
+ )
+ yield self._response_factory.conversation_started(
+ conversation_id=conversation_id
+ )
+
+ # Handle conversation continuation vs new request
+ if conversation.status == ConversationStatus.REQUIRE_USER_INPUT:
+ async for response in self._handle_conversation_continuation(
+ user_input
+ ):
+ yield response
+ else:
+ async for response in self._handle_new_request(user_input):
+ yield response
+
+ except Exception as e:
+ logger.exception(
+ f"Error processing user input for conversation {conversation_id}"
+ )
+ yield self._response_factory.system_failed(
+ conversation_id,
+ f"(Error) Error processing request: {str(e)}",
+ )
+ finally:
+ yield self._response_factory.done(conversation_id)
+
async def _handle_user_input_request(self, request: UserInputRequest):
"""Register an incoming `UserInputRequest` produced by the planner.
From a3b898b27d33110a9e6f1651425f2b68e65af68a Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Thu, 23 Oct 2025 16:46:52 +0800
Subject: [PATCH 15/30] refactor: break orchestrator into conversation, plan,
response, super_agent and task services
---
.../valuecell/core/conversation/__init__.py | 2 +
python/valuecell/core/conversation/service.py | 112 +++
python/valuecell/core/coordinate/__init__.py | 6 +-
.../valuecell/core/coordinate/orchestrator.py | 717 +++---------------
python/valuecell/core/coordinate/services.py | 89 +++
.../coordinate/tests/test_e2e_persistence.py | 8 +-
.../coordinate/tests/test_orchestrator.py | 285 ++-----
python/valuecell/core/plan/__init__.py | 8 +
.../core/{coordinate => plan}/models.py | 0
.../core/{coordinate => plan}/planner.py | 8 +-
.../planner_prompts.py => plan/prompts.py} | 0
python/valuecell/core/plan/service.py | 86 +++
python/valuecell/core/plan/tests/__init__.py | 0
.../tests/test_planner.py | 13 +-
python/valuecell/core/response/__init__.py | 7 +
.../response_buffer.py => response/buffer.py} | 0
.../response.py => response/factory.py} | 0
.../response_router.py => response/router.py} | 2 +-
python/valuecell/core/response/service.py | 94 +++
.../valuecell/core/response/tests/__init__.py | 0
.../tests/test_component_id.py | 2 +-
.../tests/test_response_buffer.py | 4 +-
.../tests/test_response_factory.py | 2 +-
.../tests/test_response_router.py | 4 +-
python/valuecell/core/super_agent/__init__.py | 10 +
.../super_agent.py => super_agent/core.py} | 2 +-
.../prompts.py} | 0
python/valuecell/core/super_agent/service.py | 21 +
.../core/super_agent/tests/__init__.py | 0
.../tests/test_super_agent.py | 2 +-
python/valuecell/core/task/__init__.py | 2 +
python/valuecell/core/task/executor.py | 355 +++++++++
python/valuecell/core/task/manager.py | 10 +-
python/valuecell/core/task/models.py | 10 +-
python/valuecell/core/task/service.py | 37 +
.../core/{coordinate => task}/temporal.py | 0
.../valuecell/core/task/tests/test_models.py | 8 +-
37 files changed, 1032 insertions(+), 874 deletions(-)
create mode 100644 python/valuecell/core/conversation/service.py
create mode 100644 python/valuecell/core/coordinate/services.py
create mode 100644 python/valuecell/core/plan/__init__.py
rename python/valuecell/core/{coordinate => plan}/models.py (100%)
rename python/valuecell/core/{coordinate => plan}/planner.py (99%)
rename python/valuecell/core/{coordinate/planner_prompts.py => plan/prompts.py} (100%)
create mode 100644 python/valuecell/core/plan/service.py
create mode 100644 python/valuecell/core/plan/tests/__init__.py
rename python/valuecell/core/{coordinate => plan}/tests/test_planner.py (91%)
create mode 100644 python/valuecell/core/response/__init__.py
rename python/valuecell/core/{coordinate/response_buffer.py => response/buffer.py} (100%)
rename python/valuecell/core/{coordinate/response.py => response/factory.py} (100%)
rename python/valuecell/core/{coordinate/response_router.py => response/router.py} (98%)
create mode 100644 python/valuecell/core/response/service.py
create mode 100644 python/valuecell/core/response/tests/__init__.py
rename python/valuecell/core/{coordinate => response}/tests/test_component_id.py (99%)
rename python/valuecell/core/{coordinate => response}/tests/test_response_buffer.py (99%)
rename python/valuecell/core/{coordinate => response}/tests/test_response_factory.py (98%)
rename python/valuecell/core/{coordinate => response}/tests/test_response_router.py (99%)
create mode 100644 python/valuecell/core/super_agent/__init__.py
rename python/valuecell/core/{coordinate/super_agent.py => super_agent/core.py} (97%)
rename python/valuecell/core/{coordinate/super_agent_prompts.py => super_agent/prompts.py} (100%)
create mode 100644 python/valuecell/core/super_agent/service.py
create mode 100644 python/valuecell/core/super_agent/tests/__init__.py
rename python/valuecell/core/{coordinate => super_agent}/tests/test_super_agent.py (95%)
create mode 100644 python/valuecell/core/task/executor.py
create mode 100644 python/valuecell/core/task/service.py
rename python/valuecell/core/{coordinate => task}/temporal.py (100%)
diff --git a/python/valuecell/core/conversation/__init__.py b/python/valuecell/core/conversation/__init__.py
index c55a4a75f..5369528ca 100644
--- a/python/valuecell/core/conversation/__init__.py
+++ b/python/valuecell/core/conversation/__init__.py
@@ -4,6 +4,7 @@
from .item_store import InMemoryItemStore, ItemStore, SQLiteItemStore
from .manager import ConversationManager
from .models import Conversation, ConversationStatus
+from .service import ConversationService
__all__ = [
# Models
@@ -11,6 +12,7 @@
"ConversationStatus",
# Conversation management
"ConversationManager",
+ "ConversationService",
# Conversation storage
"ConversationStore",
"InMemoryConversationStore",
diff --git a/python/valuecell/core/conversation/service.py b/python/valuecell/core/conversation/service.py
new file mode 100644
index 000000000..9b90e7610
--- /dev/null
+++ b/python/valuecell/core/conversation/service.py
@@ -0,0 +1,112 @@
+"""High-level service wrapper for conversation operations."""
+
+from __future__ import annotations
+
+from typing import List, Optional, Tuple
+
+from valuecell.core.conversation.manager import ConversationManager
+from valuecell.core.conversation.models import Conversation, ConversationStatus
+from valuecell.core.types import (
+ ConversationItem,
+ ConversationItemEvent,
+ ResponsePayload,
+ Role,
+)
+
+
+class ConversationService:
+ """Expose conversation operations without tying them to the orchestrator."""
+
+ def __init__(self, manager: ConversationManager) -> None:
+ self._manager = manager
+
+ @property
+ def manager(self) -> ConversationManager:
+ return self._manager
+
+ async def ensure_conversation(
+ self,
+ user_id: str,
+ conversation_id: str,
+ title: Optional[str] = None,
+ ) -> Tuple[Conversation, bool]:
+ """Return the conversation, creating it if it does not exist."""
+
+ conversation = await self._manager.get_conversation(conversation_id)
+ created = False
+ if conversation is None:
+ conversation = await self._manager.create_conversation(
+ user_id=user_id,
+ title=title,
+ conversation_id=conversation_id,
+ )
+ created = True
+ return conversation, created
+
+ async def get_conversation(self, conversation_id: str) -> Optional[Conversation]:
+ return await self._manager.get_conversation(conversation_id)
+
+ async def activate(self, conversation_id: str) -> bool:
+ conversation = await self._manager.get_conversation(conversation_id)
+ if not conversation:
+ return False
+ conversation.activate()
+ await self._manager.update_conversation(conversation)
+ return True
+
+ async def require_user_input(self, conversation_id: str) -> bool:
+ conversation = await self._manager.get_conversation(conversation_id)
+ if not conversation:
+ return False
+ conversation.require_user_input()
+ await self._manager.update_conversation(conversation)
+ return True
+
+ async def set_status(
+ self, conversation_id: str, status: ConversationStatus
+ ) -> bool:
+ conversation = await self._manager.get_conversation(conversation_id)
+ if not conversation:
+ return False
+ conversation.set_status(status)
+ await self._manager.update_conversation(conversation)
+ return True
+
+ async def add_item(
+ self,
+ *,
+ role: Role,
+ event: ConversationItemEvent,
+ conversation_id: str,
+ thread_id: Optional[str] = None,
+ task_id: Optional[str] = None,
+ payload: ResponsePayload = None,
+ item_id: Optional[str] = None,
+ agent_name: Optional[str] = None,
+ ) -> Optional[ConversationItem]:
+ """Persist a conversation item via the underlying manager."""
+
+ return await self._manager.add_item(
+ role=role,
+ event=event,
+ conversation_id=conversation_id,
+ thread_id=thread_id,
+ task_id=task_id,
+ payload=payload,
+ item_id=item_id,
+ agent_name=agent_name,
+ )
+
+ async def get_conversation_items(
+ self,
+ conversation_id: Optional[str] = None,
+ event: Optional[ConversationItemEvent] = None,
+ component_type: Optional[str] = None,
+ ) -> List[ConversationItem]:
+ """Load conversation items with optional filtering."""
+
+ return await self._manager.get_conversation_items(
+ conversation_id=conversation_id,
+ event=event,
+ component_type=component_type,
+ )
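
A short usage sketch for the new ConversationService wrapper, runnable only inside the repo and assuming the no-argument ConversationManager construction used in the tests earlier in the series (production code wires in a SQLiteItemStore instead). It shows the create-or-fetch behavior of ensure_conversation and a status round trip.

import asyncio

from valuecell.core.conversation import ConversationManager, ConversationService


async def main() -> None:
    service = ConversationService(ConversationManager())

    # First call creates the conversation, second call only fetches it.
    _, created = await service.ensure_conversation(
        user_id="user-123", conversation_id="conv-demo"
    )
    print(created)  # True

    _, created_again = await service.ensure_conversation(
        user_id="user-123", conversation_id="conv-demo"
    )
    print(created_again)  # False

    # Flip the conversation into the "waiting for user input" state and back.
    await service.require_user_input("conv-demo")
    await service.activate("conv-demo")


if __name__ == "__main__":
    asyncio.run(main())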
diff --git a/python/valuecell/core/coordinate/__init__.py b/python/valuecell/core/coordinate/__init__.py
index 1d9190417..42651890a 100644
--- a/python/valuecell/core/coordinate/__init__.py
+++ b/python/valuecell/core/coordinate/__init__.py
@@ -1,9 +1,7 @@
-from .models import ExecutionPlan
from .orchestrator import AgentOrchestrator
-from .planner import ExecutionPlanner
+from .services import AgentServiceBundle
__all__ = [
"AgentOrchestrator",
- "ExecutionPlanner",
- "ExecutionPlan",
+ "AgentServiceBundle",
]
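
AgentServiceBundle is exported here and composed in the orchestrator constructor further below, but services.py itself is not included in this excerpt. The sketch that follows is only a generic illustration of the compose-with-optional-overrides pattern that usage implies, with placeholder services; it is not the project's actual bundle.

from dataclasses import dataclass
from typing import Optional


class GreeterService:
    """Placeholder service purely for illustration."""

    def greet(self, name: str) -> str:
        return f"hello {name}"


class EchoService:
    """Placeholder service purely for illustration."""

    def echo(self, text: str) -> str:
        return text


@dataclass
class ServiceBundle:
    greeter: GreeterService
    echo: EchoService

    @classmethod
    def compose(
        cls,
        greeter: Optional[GreeterService] = None,
        echo: Optional[EchoService] = None,
    ) -> "ServiceBundle":
        # Any service not supplied by the caller gets a default instance,
        # so tests can inject fakes while production code composes defaults.
        return cls(
            greeter=greeter or GreeterService(),
            echo=echo or EchoService(),
        )


if __name__ == "__main__":
    bundle = ServiceBundle.compose(echo=EchoService())
    print(bundle.greeter.greet("orchestrator"))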
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index 0b1e40daa..4790a8cfa 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -1,58 +1,27 @@
import asyncio
-import json
-import logging
-from datetime import datetime, timezone
-from typing import AsyncGenerator, Dict, Iterable, List, Optional
-
-from a2a.types import TaskArtifactUpdateEvent, TaskState, TaskStatusUpdateEvent
-
-from valuecell.core.agent.connect import RemoteConnections
-from valuecell.core.agent.responses import EventPredicates
-from valuecell.core.constants import (
- CURRENT_CONTEXT,
- DEPENDENCIES,
- LANGUAGE,
- METADATA,
- ORIGINAL_USER_INPUT,
- PLANNING_TASK,
- TIMEZONE,
- USER_PROFILE,
-)
-from valuecell.core.conversation import (
- ConversationManager,
- ConversationStatus,
- SQLiteItemStore,
-)
-from valuecell.core.coordinate.models import ExecutionPlan
-from valuecell.core.coordinate.planner import ExecutionPlanner, UserInputRequest
-from valuecell.core.coordinate.response import ResponseFactory
-from valuecell.core.coordinate.response_buffer import ResponseBuffer, SaveItem
-from valuecell.core.coordinate.response_router import (
- RouteResult,
- SideEffectKind,
- handle_status_update,
-)
-from valuecell.core.coordinate.super_agent import (
- SuperAgent,
+from typing import AsyncGenerator, Dict, Optional
+
+from loguru import logger
+
+from valuecell.core.constants import ORIGINAL_USER_INPUT, PLANNING_TASK
+from valuecell.core.conversation import ConversationService, ConversationStatus
+from valuecell.core.plan import PlanService
+from valuecell.core.response import ResponseService
+from valuecell.core.super_agent import (
SuperAgentDecision,
SuperAgentOutcome,
+ SuperAgentService,
)
-from valuecell.core.coordinate.temporal import calculate_next_execution_delay
-from valuecell.core.task import Task, TaskManager
+from valuecell.core.task import TaskExecutor
from valuecell.core.types import (
BaseResponse,
- ComponentType,
ConversationItemEvent,
- ScheduledTaskComponentContent,
StreamResponseEvent,
- SubagentConversationPhase,
UserInput,
)
-from valuecell.utils import resolve_db_path
-from valuecell.utils.i18n_utils import get_current_language, get_current_timezone
-from valuecell.utils.uuid import generate_item_id, generate_task_id, generate_thread_id
+from valuecell.utils.uuid import generate_task_id, generate_thread_id
-logger = logging.getLogger(__name__)
+from .services import AgentServiceBundle
# Constants for configuration
DEFAULT_CONTEXT_TIMEOUT_SECONDS = 3600 # 1 hour
@@ -96,144 +65,34 @@ def get_metadata(self, key: str, default=None):
return self.metadata.get(key, default)
-class ScheduledTaskResultAccumulator:
- """Collect streaming output for a scheduled task run."""
-
- def __init__(self, task: Task):
- self._task = task
- self._buffer: List[str] = []
-
- @property
- def enabled(self) -> bool:
- return self._task.schedule_config is not None
-
- def consume(self, responses: Iterable[BaseResponse]) -> List[BaseResponse]:
- if not self.enabled:
- return list(responses)
-
- passthrough: List[BaseResponse] = []
- for resp in responses:
- event = resp.event
-
- if EventPredicates.is_message(event):
- payload = resp.data.payload
- content = payload.content if payload else None
- if content:
- self._buffer.append(content)
- continue
-
- if EventPredicates.is_reasoning(event):
- continue
-
- if EventPredicates.is_tool_call(event):
- continue
-
- # passthrough.append(resp)
-
- return passthrough
-
- def finalize(self, response_factory: ResponseFactory) -> Optional[BaseResponse]:
- if not self.enabled:
- return None
-
- content = "".join(self._buffer).strip()
- if not content:
- content = "Task completed without output."
-
- component_payload = ScheduledTaskComponentContent(
- task_id=self._task.task_id,
- task_title=self._task.title,
- result=content,
- create_time=datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S"),
- ).model_dump_json(exclude_none=True)
-
- return response_factory.component_generator(
- conversation_id=self._task.conversation_id,
- thread_id=self._task.thread_id,
- task_id=self._task.task_id,
- content=component_payload,
- component_type=ComponentType.SCHEDULED_TASK_RESULT.value,
- agent_name=self._task.agent_name,
- )
-
-
-class UserInputManager:
- """Manage pending Human-in-the-Loop user input requests.
-
- This simple manager stores `UserInputRequest` objects keyed by
- `conversation_id`. Callers can add requests, query for prompts and provide
- responses which will wake any awaiting tasks.
- """
-
- def __init__(self):
- self._pending_requests: Dict[str, UserInputRequest] = {}
-
- def add_request(self, conversation_id: str, request: UserInputRequest):
- """Register a pending user input request for a conversation."""
- self._pending_requests[conversation_id] = request
-
- def has_pending_request(self, conversation_id: str) -> bool:
- """Check if there's a pending request for the conversation"""
- return conversation_id in self._pending_requests
-
- def get_request_prompt(self, conversation_id: str) -> Optional[str]:
- """Return the prompt text for a pending request, or None if none found."""
- request = self._pending_requests.get(conversation_id)
- return request.prompt if request else None
-
- def provide_response(self, conversation_id: str, response: str) -> bool:
- """Supply the user's response to a pending request and complete it.
-
- Returns True when the response was accepted and the pending request
- removed; False when no pending request existed for the conversation.
- """
- if conversation_id not in self._pending_requests:
- return False
-
- request = self._pending_requests[conversation_id]
- request.provide_response(response)
- del self._pending_requests[conversation_id]
- return True
-
- def clear_request(self, conversation_id: str):
- """Clear a pending request"""
- self._pending_requests.pop(conversation_id, None)
-
-
class AgentOrchestrator:
- """
- Orchestrates execution of user requests through multiple agents with Human-in-the-Loop support.
+ """Coordinate planning, execution, and persistence across services."""
- This class manages the entire lifecycle of user requests including:
- - Planning phase with user input collection
- - Task execution with interruption support
- - Conversation state management
- - Error handling and recovery
- """
-
- def __init__(self):
- self.conversation_manager = ConversationManager(
- item_store=SQLiteItemStore(resolve_db_path())
+ def __init__(
+ self,
+ conversation_service: ConversationService | None = None,
+ response_service: ResponseService | None = None,
+ plan_service: PlanService | None = None,
+ super_agent_service: SuperAgentService | None = None,
+ task_executor: TaskExecutor | None = None,
+ ) -> None:
+ services = AgentServiceBundle.compose(
+ conversation_service=conversation_service,
+ response_service=response_service,
+ plan_service=plan_service,
+ super_agent_service=super_agent_service,
+ task_executor=task_executor,
)
- self.task_manager = TaskManager()
- self.agent_connections = RemoteConnections()
- # Initialize user input management
- self.user_input_manager = UserInputManager()
+ self.conversation_service = services.conversation_service
+ self.response_service = services.response_service
+ self.super_agent_service = services.super_agent_service
+ self.plan_service = services.plan_service
+ self.task_executor = services.task_executor
- # Initialize execution context management
+ # Execution contexts keep track of paused planner runs.
self._execution_contexts: Dict[str, ExecutionContext] = {}
- # Initialize planner
- self.planner = ExecutionPlanner(self.agent_connections)
-
- # Initialize Super Agent (triage/frontline agent)
- self.super_agent = SuperAgent()
-
- self._response_factory = ResponseFactory()
- # Buffer for streaming responses -> persisted ConversationItems
- self._response_buffer = ResponseBuffer()
-
# ==================== Public API Methods ====================
async def process_user_input(
@@ -282,52 +141,6 @@ async def emit(item: Optional[BaseResponse]):
# Best-effort: if producer already finished, nothing to do
# We deliberately do not cancel the producer to keep execution alive
- async def provide_user_input(self, conversation_id: str, response: str):
- """Submit a user's response to a pending input request.
-
- When a planner has requested clarification (Human-in-the-Loop), the
- orchestrator stores a `UserInputRequest`. Calling this method provides
- the response, updates the conversation state to active, and wakes any
- awaiting planner logic.
-
- Args:
- conversation_id: Conversation where a pending input request exists.
- response: The textual response supplied by the user.
- """
- if self.user_input_manager.provide_response(conversation_id, response):
- # Update conversation status to active
- conversation = await self.conversation_manager.get_conversation(
- conversation_id
- )
- if conversation:
- conversation.activate()
- await self.conversation_manager.update_conversation(conversation)
-
- def has_pending_user_input(self, conversation_id: str) -> bool:
- """Return True if the conversation currently awaits user input."""
- return self.user_input_manager.has_pending_request(conversation_id)
-
- def get_user_input_prompt(self, conversation_id: str) -> Optional[str]:
- """Return the prompt text for a pending user-input request, or None.
-
- This is useful for displaying the outstanding prompt to the user or
- embedding it into UI flows.
- """
- return self.user_input_manager.get_request_prompt(conversation_id)
-
- async def close_conversation(self, conversation_id: str):
- """Close a conversation and clean up resources.
-
- This cancels any running tasks for the conversation, clears execution
- contexts and pending user-input requests, and resets conversation
- status to active when appropriate.
- """
- # Cancel any running tasks for this conversation
- await self.task_manager.cancel_conversation_tasks(conversation_id)
-
- # Clean up execution context
- await self._cancel_execution(conversation_id)
-
async def get_conversation_history(
self,
conversation_id: Optional[str] = None,
@@ -345,19 +158,11 @@ async def get_conversation_history(
A list of `BaseResponse` instances reconstructed from persisted
ConversationItems.
"""
- items = await self.conversation_manager.get_conversation_items(
- conversation_id=conversation_id, event=event, component_type=component_type
+ return await self.response_service.get_conversation_history(
+ conversation_id=conversation_id,
+ event=event,
+ component_type=component_type,
)
- return [self._response_factory.from_conversation_item(it) for it in items]
-
- async def cleanup(self):
- """Perform graceful cleanup of orchestrator-managed resources.
-
- This will remove expired execution contexts and stop all remote agent
- connections/listeners managed by the orchestrator.
- """
- await self._cleanup_expired_contexts()
- await self.agent_connections.stop_all()
# ==================== Private Helper Methods ====================
@@ -399,22 +204,16 @@ async def _generate_responses(
user_id = user_input.meta.user_id
try:
- # Ensure conversation exists
- conversation = await self.conversation_manager.get_conversation(
- conversation_id
+ conversation, created = await self.conversation_service.ensure_conversation(
+ user_id=user_id, conversation_id=conversation_id
)
- if not conversation:
- await self.conversation_manager.create_conversation(
- user_id, conversation_id=conversation_id
- )
- conversation = await self.conversation_manager.get_conversation(
- conversation_id
- )
- yield self._response_factory.conversation_started(
+
+ if created:
+ started = self.response_service.factory.conversation_started(
conversation_id=conversation_id
)
+ yield await self.response_service.emit(started)
- # Handle conversation continuation vs new request
if conversation.status == ConversationStatus.REQUIRE_USER_INPUT:
async for response in self._handle_conversation_continuation(
user_input
@@ -428,25 +227,12 @@ async def _generate_responses(
logger.exception(
f"Error processing user input for conversation {conversation_id}"
)
- yield self._response_factory.system_failed(
- conversation_id,
- f"(Error) Error processing request: {str(e)}",
+ failure = self.response_service.factory.system_failed(
+ conversation_id, f"(Error) Error processing request: {str(e)}"
)
+ yield await self.response_service.emit(failure)
finally:
- yield self._response_factory.done(conversation_id)
-
- async def _handle_user_input_request(self, request: UserInputRequest):
- """Register an incoming `UserInputRequest` produced by the planner.
-
- The planner may emit UserInputRequest objects when it requires
- clarification. This helper extracts the `conversation_id` from the
- request and registers it with the `UserInputManager` so callers can
- later provide the response.
- """
- # Extract conversation_id from request context
- conversation_id = getattr(request, "conversation_id", None)
- if conversation_id:
- self.user_input_manager.add_request(conversation_id, request)
+ yield self.response_service.factory.done(conversation_id)
async def _handle_conversation_continuation(
self, user_input: UserInput
@@ -465,36 +251,38 @@ async def _handle_conversation_continuation(
# Validate execution context exists
if conversation_id not in self._execution_contexts:
- yield self._response_factory.system_failed(
+ failure = self.response_service.factory.system_failed(
conversation_id,
"No execution context found for this conversation. The conversation may have expired.",
)
+ yield await self.response_service.emit(failure)
return
context = self._execution_contexts[conversation_id]
# Validate context integrity and user consistency
if not self._validate_execution_context(context, user_id):
- yield self._response_factory.system_failed(
+ failure = self.response_service.factory.system_failed(
conversation_id,
"Invalid execution context or user mismatch.",
)
+ yield await self.response_service.emit(failure)
await self._cancel_execution(conversation_id)
return
thread_id = generate_thread_id()
- response = self._response_factory.thread_started(
+ response = self.response_service.factory.thread_started(
conversation_id=conversation_id,
thread_id=thread_id,
user_query=user_input.query,
)
- await self._persist_from_buffer(response)
- yield response
+ yield await self.response_service.emit(response)
# Provide user response and resume execution
# If we are in an execution stage, store the pending response for resume
context.add_metadata(pending_response=user_input.query)
- await self.provide_user_input(conversation_id, user_input.query)
+ if self.plan_service.provide_user_response(conversation_id, user_input.query):
+ await self.conversation_service.activate(conversation_id)
context.thread_id = thread_id
# Resume based on execution stage
@@ -505,10 +293,11 @@ async def _handle_conversation_continuation(
yield response
# Resuming execution stage is not yet supported
else:
- yield self._response_factory.system_failed(
+ failure = self.response_service.factory.system_failed(
conversation_id,
"Resuming execution stage is not yet supported.",
)
+ yield await self.response_service.emit(failure)
async def _handle_new_request(
self, user_input: UserInput
@@ -520,28 +309,28 @@ async def _handle_new_request(
"""
conversation_id = user_input.meta.conversation_id
thread_id = generate_thread_id()
- response = self._response_factory.thread_started(
+ response = self.response_service.factory.thread_started(
conversation_id=conversation_id,
thread_id=thread_id,
user_query=user_input.query,
)
- await self._persist_from_buffer(response)
- yield response
+ yield await self.response_service.emit(response)
# 1) Super Agent triage phase (pre-planning) - skip if target agent is specified
- if user_input.target_agent_name == self.super_agent.name:
- super_outcome: SuperAgentOutcome = await self.super_agent.run(user_input)
+ if user_input.target_agent_name == self.super_agent_service.name:
+ super_outcome: SuperAgentOutcome = await self.super_agent_service.run(
+ user_input
+ )
if super_outcome.decision == SuperAgentDecision.ANSWER:
- ans = self._response_factory.message_response_general(
+ ans = self.response_service.factory.message_response_general(
StreamResponseEvent.MESSAGE_CHUNK,
conversation_id,
thread_id,
task_id=generate_task_id(),
content=super_outcome.answer_content,
- agent_name=self.super_agent.name,
+ agent_name=self.super_agent_service.name,
)
- await self._persist_from_buffer(ans)
- yield ans
+ yield await self.response_service.emit(ans)
return
if super_outcome.decision == SuperAgentDecision.HANDOFF_TO_PLANNER:
@@ -551,8 +340,8 @@ async def _handle_new_request(
# 2) Planner phase (existing logic)
# Create planning task with user input callback
context_aware_callback = self._create_context_aware_callback(conversation_id)
- planning_task = asyncio.create_task(
- self.planner.create_plan(user_input, context_aware_callback, thread_id)
+ planning_task = self.plan_service.start_planning_task(
+ user_input, thread_id, context_aware_callback
)
# Monitor planning progress
@@ -572,7 +361,7 @@ def _create_context_aware_callback(self, conversation_id: str):
async def context_aware_handle(request):
request.conversation_id = conversation_id
- await self._handle_user_input_request(request)
+ self.plan_service.register_user_input(conversation_id, request)
return context_aware_handle
@@ -596,7 +385,7 @@ async def _monitor_planning_task(
# Wait for planning completion or user input request
while not planning_task.done():
- if self.has_pending_user_input(conversation_id):
+ if self.plan_service.has_pending_request(conversation_id):
# Save planning context
context = ExecutionContext(
"planning", conversation_id, thread_id, user_id
@@ -609,32 +398,23 @@ async def _monitor_planning_task(
self._execution_contexts[conversation_id] = context
# Update conversation status and send user input request
- await self._request_user_input(conversation_id)
- response = self._response_factory.plan_require_user_input(
+ await self.conversation_service.require_user_input(conversation_id)
+ prompt = self.plan_service.get_request_prompt(conversation_id) or ""
+ response = self.response_service.factory.plan_require_user_input(
conversation_id,
thread_id,
- self.get_user_input_prompt(conversation_id),
+ prompt,
)
- await self._persist_from_buffer(response)
- yield response
+ yield await self.response_service.emit(response)
return
await asyncio.sleep(ASYNC_SLEEP_INTERVAL)
# Planning completed, execute plan
plan = await planning_task
- async for response in self._execute_plan_with_input_support(
- plan, conversation_id, thread_id
- ):
+ async for response in self.task_executor.execute_plan(plan, thread_id):
yield response
- async def _request_user_input(self, conversation_id: str):
- """Set conversation to require user input and send the request"""
- conversation = await self.conversation_manager.get_conversation(conversation_id)
- if conversation:
- conversation.require_user_input()
- await self.conversation_manager.update_conversation(conversation)
-
def _validate_execution_context(
self, context: ExecutionContext, user_id: str
) -> bool:
@@ -668,26 +448,26 @@ async def _continue_planning(
original_user_input = context.get_metadata(ORIGINAL_USER_INPUT)
if not all([planning_task, original_user_input]):
- yield self._response_factory.plan_failed(
+ failure = self.response_service.factory.plan_failed(
conversation_id,
thread_id,
"Invalid planning context - missing required data",
)
+ yield await self.response_service.emit(failure)
await self._cancel_execution(conversation_id)
return
# Continue monitoring planning task
while not planning_task.done():
- if self.has_pending_user_input(conversation_id):
+ if self.plan_service.has_pending_request(conversation_id):
# Still need more user input, send request
- prompt = self.get_user_input_prompt(conversation_id)
+ prompt = self.plan_service.get_request_prompt(conversation_id) or ""
# Ensure conversation is set to require user input again for repeated prompts
- await self._request_user_input(conversation_id)
- response = self._response_factory.plan_require_user_input(
+ await self.conversation_service.require_user_input(conversation_id)
+ response = self.response_service.factory.plan_require_user_input(
conversation_id, thread_id, prompt
)
- await self._persist_from_buffer(response)
- yield response
+ yield await self.response_service.emit(response)
return
await asyncio.sleep(ASYNC_SLEEP_INTERVAL)
@@ -696,9 +476,7 @@ async def _continue_planning(
plan = await planning_task
del self._execution_contexts[conversation_id]
- async for response in self._execute_plan_with_input_support(
- plan, conversation_id, thread_id
- ):
+ async for response in self.task_executor.execute_plan(plan, thread_id):
yield response
async def _cancel_execution(self, conversation_id: str):
@@ -709,25 +487,14 @@ async def _cancel_execution(self, conversation_id: str):
context and clears any pending user input. It also resets the
conversation's status back to active.
"""
- # Clean up execution context
if conversation_id in self._execution_contexts:
- context = self._execution_contexts[conversation_id]
-
- # Cancel planning task if it exists and is not done
+ context = self._execution_contexts.pop(conversation_id)
planning_task = context.get_metadata(PLANNING_TASK)
if planning_task and not planning_task.done():
planning_task.cancel()
- del self._execution_contexts[conversation_id]
-
- # Clear pending user input
- self.user_input_manager.clear_request(conversation_id)
-
- # Reset conversation status
- conversation = await self.conversation_manager.get_conversation(conversation_id)
- if conversation:
- conversation.activate()
- await self.conversation_manager.update_conversation(conversation)
+ self.plan_service.clear_pending_request(conversation_id)
+ await self.conversation_service.activate(conversation_id)
async def _cleanup_expired_contexts(
self, max_age_seconds: int = DEFAULT_CONTEXT_TIMEOUT_SECONDS
@@ -748,311 +515,3 @@ async def _cleanup_expired_contexts(
logger.warning(
f"Cleaned up expired execution context for conversation {conversation_id}"
)
-
- # ==================== Plan and Task Execution Methods ====================
-
- async def _execute_single_task_run(
- self, task: Task, metadata: dict
- ) -> AsyncGenerator[BaseResponse, None]:
- """Execute a single run of a task (may be called multiple times for scheduled tasks).
-
- Args:
- task: The task to execute
- metadata: Execution metadata
-
- Yields:
- BaseResponse objects from task execution
- """
- conversation_id = task.conversation_id
- thread_id = task.thread_id
- task_id = task.task_id
- aggregator = ScheduledTaskResultAccumulator(task)
-
- # Get agent connection
- agent_name = task.agent_name
- client = await self.agent_connections.get_client(agent_name)
- if not client:
- raise RuntimeError(f"Could not connect to agent {agent_name}")
- # agent_card = await self.agent_connections.start_agent(
- # agent_name,
- # with_listener=False,
- # )
- # streaming = agent_card.supports_streaming
-
- # Send message to agent
- remote_response = await client.send_message(
- task.query,
- conversation_id=conversation_id,
- metadata=metadata,
- )
-
- # Process streaming responses
- async for remote_task, event in remote_response:
- if event is None and remote_task.status.state == TaskState.submitted:
- task.remote_task_ids.append(remote_task.id)
- yield self._response_factory.task_started(
- conversation_id=conversation_id,
- thread_id=thread_id,
- task_id=task_id,
- agent_name=agent_name,
- )
- continue
-
- if isinstance(event, TaskStatusUpdateEvent):
- result: RouteResult = await handle_status_update(
- self._response_factory, task, thread_id, event
- )
- for r in aggregator.consume(result.responses):
- r = self._response_buffer.annotate(r)
- yield r
- # Apply side effects
- for eff in result.side_effects:
- if eff.kind == SideEffectKind.FAIL_TASK:
- await self.task_manager.fail_task(task_id, eff.reason or "")
- if result.done:
- return
- continue
-
- if isinstance(event, TaskArtifactUpdateEvent):
- logger.info(
- f"Received unexpected artifact update for task {task_id}: {event}"
- )
- continue
-
- final_component = aggregator.finalize(self._response_factory)
- if final_component is not None:
- yield final_component
-
- async def _execute_plan_with_input_support(
- self,
- plan: ExecutionPlan,
- conversation_id: str,
- thread_id: str,
- metadata: Optional[dict] = None,
- ) -> AsyncGenerator[BaseResponse, None]:
- """
- Execute an execution plan with Human-in-the-Loop support.
-
- This method streams execution results and handles user input interruptions
- during task execution.
-
- Args:
- plan: The execution plan containing tasks to execute.
- metadata: Optional execution metadata containing conversation and user info.
-
- Yields:
- Streaming `BaseResponse` objects produced by each task execution.
- """
-
- # Check if plan has guidance message (inadequate plan)
- if plan.guidance_message:
- # Send guidance message to user and return
- response = self._response_factory.message_response_general(
- event=StreamResponseEvent.MESSAGE_CHUNK,
- conversation_id=conversation_id,
- thread_id=thread_id,
- task_id=generate_task_id(),
- content=plan.guidance_message,
- )
- await self._persist_from_buffer(response)
- yield response
- return
-
- for task in plan.tasks:
- subagent_conversation_item_id = generate_item_id()
- subagent_component_content_dict = {
- "conversation_id": task.conversation_id,
- "agent_name": task.agent_name,
- "phase": SubagentConversationPhase.START.value,
- }
- await self.conversation_manager.create_conversation(
- plan.user_id, conversation_id=task.conversation_id
- )
- if task.handoff_from_super_agent:
- yield self._response_factory.component_generator(
- conversation_id=conversation_id,
- thread_id=thread_id,
- task_id=task.task_id,
- content=json.dumps(subagent_component_content_dict),
- component_type=ComponentType.SUBAGENT_CONVERSATION.value,
- component_id=subagent_conversation_item_id,
- agent_name=task.agent_name,
- )
- yield self._response_factory.thread_started(
- conversation_id=task.conversation_id,
- thread_id=thread_id,
- user_query=task.query,
- )
- try:
- # Register the task with TaskManager (persist in-memory)
- await self.task_manager.update_task(task)
-
- # Execute task with input support
- async for response in self._execute_task_with_input_support(
- task, thread_id, metadata
- ):
- # Ensure buffered events carry a stable paragraph item_id
- annotated = self._response_buffer.annotate(response)
- # Accumulate based on event
- yield annotated
-
- # Persist via ResponseBuffer
- await self._persist_from_buffer(annotated)
-
- except Exception as e:
- error_msg = f"(Error) Error executing {task.task_id}: {str(e)}"
- logger.exception(f"Task execution failed: {error_msg}")
- yield self._response_factory.task_failed(
- conversation_id,
- thread_id,
- task.task_id,
- error_msg,
- agent_name=task.agent_name,
- )
- finally:
- if task.handoff_from_super_agent:
- subagent_component_content_dict["phase"] = (
- SubagentConversationPhase.END.value
- )
- yield self._response_factory.component_generator(
- conversation_id=conversation_id,
- thread_id=thread_id,
- task_id=task.task_id,
- content=json.dumps(subagent_component_content_dict),
- component_type=ComponentType.SUBAGENT_CONVERSATION.value,
- component_id=subagent_conversation_item_id,
- agent_name=task.agent_name,
- )
-
- async def _execute_task_with_input_support(
- self, task: Task, thread_id: str, metadata: Optional[dict] = None
- ) -> AsyncGenerator[BaseResponse, None]:
- """
- Execute a single task with user input interruption support and optional scheduling.
-
- For tasks with schedule_config, this method will repeatedly execute the task
- according to the configured interval or daily time.
-
- Args:
- task: The task to execute (may include schedule_config for recurring execution)
- thread_id: Thread ID for conversation tracking
- metadata: Execution metadata
- """
- try:
- # Start task execution
- task_id = task.task_id
- conversation_id = task.conversation_id
-
- await self.task_manager.start_task(task_id)
-
- # Configure A2A metadata
- metadata = metadata or {}
- # if task.pattern != TaskPattern.ONCE:
- # metadata["notify"] = True
-
- # Configure Agno metadata
- # reference: https://docs.agno.com/examples/concepts/agent/other/agent_run_metadata#agent-run-metadata
- metadata[METADATA] = {}
-
- # Configure Agno dependencies
- # reference: https://docs.agno.com/concepts/teams/dependencies#dependencies
- metadata[DEPENDENCIES] = {
- USER_PROFILE: {},
- CURRENT_CONTEXT: {},
- LANGUAGE: get_current_language(),
- TIMEZONE: get_current_timezone(),
- }
-
- if task.schedule_config:
- yield self._response_factory.component_generator(
- conversation_id=conversation_id,
- thread_id=thread_id,
- task_id=task_id,
- content=ScheduledTaskComponentContent(
- task_id=task_id,
- task_title=task.title,
- ).model_dump_json(exclude_none=True),
- component_type=ComponentType.SCHEDULED_TASK_CONTROLLER.value,
- agent_name=task.agent_name,
- )
- yield self._response_factory.done(
- conversation_id=conversation_id, thread_id=thread_id
- )
-
- # Execute task with optional scheduling loop
- while True:
- # Check if task was cancelled
- if task.is_finished():
- logger.info(f"Task {task_id} was cancelled, stopping execution")
- break
-
- # Execute a single run of the task
- async for response in self._execute_single_task_run(task, metadata):
- yield response
-
- # Check if this is a scheduled recurring task
- if not task.schedule_config:
- break # One-time task, exit loop
-
- delay = calculate_next_execution_delay(task.schedule_config)
- if not delay:
- break # No valid schedule, exit loop
-
- # Schedule next execution
- logger.info(f"Task {task_id} scheduled to run again in {delay} seconds")
- # Wait for the next scheduled execution (check cancellation periodically)
- for _ in range(int(delay / ASYNC_SLEEP_INTERVAL)):
- if task.is_finished():
- logger.info(f"Task {task_id} was cancelled during sleep")
- break
- await asyncio.sleep(ASYNC_SLEEP_INTERVAL)
-
- # Final check after sleep
- if task.is_finished():
- break
-
- # Complete task successfully
- await self.task_manager.complete_task(task_id)
- yield self._response_factory.task_completed(
- conversation_id=conversation_id,
- thread_id=thread_id,
- task_id=task_id,
- agent_name=task.agent_name,
- )
- # Finalize buffered aggregates for this task (explicit flush at task end)
- items = self._response_buffer.flush_task(
- conversation_id=conversation_id,
- thread_id=thread_id,
- task_id=task_id,
- )
- await self._persist_items(items)
-
- except Exception as e:
- # On failure, finalize any buffered aggregates for this task
- items = self._response_buffer.flush_task(
- conversation_id=conversation_id,
- thread_id=thread_id,
- task_id=task_id,
- )
- await self._persist_items(items)
- await self.task_manager.fail_task(task_id, str(e))
- raise e
-
- async def _persist_from_buffer(self, response: BaseResponse):
- """Ingest a response into the buffer and persist any SaveMessages produced."""
- items = self._response_buffer.ingest(response)
- await self._persist_items(items)
-
- async def _persist_items(self, items: list[SaveItem]):
- """Persist a list of SaveItems to the conversation manager."""
- for it in items:
- await self.conversation_manager.add_item(
- role=it.role,
- event=it.event,
- conversation_id=it.conversation_id,
- thread_id=it.thread_id,
- task_id=it.task_id,
- payload=it.payload,
- item_id=it.item_id,
- agent_name=it.agent_name,
- )
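After the hunks above, the orchestrator no longer buffers or persists responses itself: planning is started through `plan_service`, every outbound response passes through `response_service.emit`, and plan execution is delegated to `task_executor.execute_plan`. A minimal caller-side sketch, assuming the orchestrator module path stays `valuecell.core.coordinate.orchestrator` and using the same `UserInput`/`UserInputMetadata` types exercised in the tests below; the query text and identifiers are made up:

    import asyncio

    from valuecell.core.coordinate.orchestrator import AgentOrchestrator
    from valuecell.core.types import UserInput, UserInputMetadata

    async def main() -> None:
        orchestrator = AgentOrchestrator()
        user_input = UserInput(
            query="Summarize AAPL earnings",
            target_agent_name="",
            meta=UserInputMetadata(conversation_id="conv-1", user_id="user-1"),
        )
        # Responses are already annotated and persisted by ResponseService.emit
        # inside the orchestrator; the caller only consumes the stream.
        async for response in orchestrator.process_user_input(user_input):
            print(response.event)

    if __name__ == "__main__":
        asyncio.run(main())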
diff --git a/python/valuecell/core/coordinate/services.py b/python/valuecell/core/coordinate/services.py
new file mode 100644
index 000000000..69ee32226
--- /dev/null
+++ b/python/valuecell/core/coordinate/services.py
@@ -0,0 +1,89 @@
+"""Helper utilities for composing orchestrator service dependencies."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Optional
+
+from valuecell.core.agent.connect import RemoteConnections
+from valuecell.core.conversation import ConversationManager, SQLiteItemStore
+from valuecell.core.conversation.service import ConversationService
+from valuecell.core.plan.service import PlanService
+from valuecell.core.response.service import ResponseService
+from valuecell.core.super_agent import SuperAgentService
+from valuecell.core.task.executor import TaskExecutor
+from valuecell.core.task.service import TaskService
+from valuecell.utils import resolve_db_path
+
+
+@dataclass(frozen=True)
+class AgentServiceBundle:
+ """Aggregate all services required by ``AgentOrchestrator``.
+
+ The bundle guarantees that conversation-oriented objects share the same
+ ``ConversationManager`` instance so that persistence is consistent even
+ when individual services are overridden by callers. This also centralises
+ the default construction logic, reducing the amount of dependency wiring
+ inside the orchestrator itself.
+ """
+
+ agent_connections: RemoteConnections
+ conversation_service: ConversationService
+ response_service: ResponseService
+ task_service: TaskService
+ plan_service: PlanService
+ super_agent_service: SuperAgentService
+ task_executor: TaskExecutor
+
+ @property
+ def conversation_manager(self) -> ConversationManager:
+ """Expose the shared conversation manager used by all services."""
+
+ return self.conversation_service.manager
+
+ @classmethod
+ def compose(
+ cls,
+ *,
+ conversation_service: Optional[ConversationService] = None,
+ response_service: Optional[ResponseService] = None,
+ plan_service: Optional[PlanService] = None,
+ super_agent_service: Optional[SuperAgentService] = None,
+ task_executor: Optional[TaskExecutor] = None,
+ ) -> "AgentServiceBundle":
+ """Create a bundle, constructing any missing services with defaults."""
+
+ connections = RemoteConnections()
+
+ if conversation_service is not None:
+ conv_service = conversation_service
+ elif response_service is not None:
+ conv_service = response_service.conversation_service
+ else:
+ base_manager = ConversationManager(
+ item_store=SQLiteItemStore(resolve_db_path())
+ )
+ conv_service = ConversationService(manager=base_manager)
+
+ resp_service = response_service or ResponseService(
+ conversation_service=conv_service
+ )
+ t_service = TaskService()
+ p_service = plan_service or PlanService(connections)
+ sa_service = super_agent_service or SuperAgentService()
+ executor = task_executor or TaskExecutor(
+ agent_connections=connections,
+ task_service=t_service,
+ response_service=resp_service,
+ conversation_service=conv_service,
+ )
+
+ return cls(
+ agent_connections=connections,
+ conversation_service=conv_service,
+ response_service=resp_service,
+ task_service=t_service,
+ plan_service=p_service,
+ super_agent_service=sa_service,
+ task_executor=executor,
+ )
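A minimal composition sketch for the new bundle, assuming a caller only needs to override the conversation layer; every name used below appears in the file above:

    from valuecell.core.conversation import ConversationManager, SQLiteItemStore
    from valuecell.core.conversation.service import ConversationService
    from valuecell.core.coordinate.services import AgentServiceBundle
    from valuecell.utils import resolve_db_path

    # One ConversationManager is shared by every service in the bundle.
    manager = ConversationManager(item_store=SQLiteItemStore(resolve_db_path()))
    bundle = AgentServiceBundle.compose(
        conversation_service=ConversationService(manager=manager)
    )

    assert bundle.conversation_manager is manager
    assert bundle.response_service.conversation_service is bundle.conversation_service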
diff --git a/python/valuecell/core/coordinate/tests/test_e2e_persistence.py b/python/valuecell/core/coordinate/tests/test_e2e_persistence.py
index 3a3e4b762..4089b0e05 100644
--- a/python/valuecell/core/coordinate/tests/test_e2e_persistence.py
+++ b/python/valuecell/core/coordinate/tests/test_e2e_persistence.py
@@ -35,12 +35,14 @@ async def test_orchestrator_buffer_store_e2e(tmp_path, monkeypatch):
pass
# Verify persistence: at least 1 message exists for conversation
- msgs = await orch.conversation_manager.get_conversation_items(conversation_id)
+ msgs = await orch.conversation_service.manager.get_conversation_items(
+ conversation_id
+ )
assert isinstance(msgs, list)
assert len(msgs) >= 1
# Also verify we can count and fetch latest
- cnt = await orch.conversation_manager.get_item_count(conversation_id)
+ cnt = await orch.conversation_service.manager.get_item_count(conversation_id)
assert cnt == len(msgs)
- latest = await orch.conversation_manager.get_latest_item(conversation_id)
+ latest = await orch.conversation_service.manager.get_latest_item(conversation_id)
assert latest is not None
diff --git a/python/valuecell/core/coordinate/tests/test_orchestrator.py b/python/valuecell/core/coordinate/tests/test_orchestrator.py
index b6b288187..a25eb6ae4 100644
--- a/python/valuecell/core/coordinate/tests/test_orchestrator.py
+++ b/python/valuecell/core/coordinate/tests/test_orchestrator.py
@@ -25,17 +25,24 @@
TextPart,
)
-from valuecell.core.coordinate.models import ExecutionPlan
+from valuecell.core.agent.connect import RemoteConnections
+from valuecell.core.conversation import ConversationStatus
+from valuecell.core.conversation.service import ConversationService
from valuecell.core.coordinate.orchestrator import AgentOrchestrator
-from valuecell.core.coordinate.super_agent import (
+from valuecell.core.plan.models import ExecutionPlan
+from valuecell.core.plan.service import PlanService
+from valuecell.core.response.service import ResponseService
+from valuecell.core.super_agent import (
SuperAgentDecision,
SuperAgentOutcome,
+ SuperAgentService,
)
-from valuecell.core.conversation import ConversationStatus
-from valuecell.core.task import Task, TaskStatus as CoreTaskStatus
+from valuecell.core.task import Task
+from valuecell.core.task import TaskStatus as CoreTaskStatus
+from valuecell.core.task.executor import TaskExecutor
+from valuecell.core.task.service import TaskService
from valuecell.core.types import UserInput, UserInputMetadata
-
# -------------------------
# Fixtures
# -------------------------
@@ -178,13 +185,47 @@ def _mock_planner(sample_plan: ExecutionPlan) -> Mock:
@pytest.fixture(name="orchestrator")
def _orchestrator(
- mock_conversation_manager: Mock, mock_task_manager: Mock, mock_planner: Mock
+ mock_conversation_manager: Mock,
+ mock_task_manager: Mock,
+ mock_planner: Mock,
+ monkeypatch: pytest.MonkeyPatch,
) -> AgentOrchestrator:
- o = AgentOrchestrator()
- o.conversation_manager = mock_conversation_manager
- o.task_manager = mock_task_manager
- o.planner = mock_planner
- return o
+ agent_connections = Mock(spec=RemoteConnections)
+ agent_connections.get_client = AsyncMock()
+ agent_connections.start_agent = AsyncMock()
+
+ conversation_service = ConversationService(manager=mock_conversation_manager)
+ response_service = ResponseService(conversation_service=conversation_service)
+ task_service = TaskService(manager=mock_task_manager)
+ plan_service = PlanService(
+ agent_connections=agent_connections, execution_planner=mock_planner
+ )
+ super_agent_service = SuperAgentService()
+ task_executor = TaskExecutor(
+ agent_connections=agent_connections,
+ task_service=task_service,
+ response_service=response_service,
+ conversation_service=conversation_service,
+ )
+
+ bundle = SimpleNamespace(
+ agent_connections=agent_connections,
+ conversation_service=conversation_service,
+ response_service=response_service,
+ task_service=task_service,
+ plan_service=plan_service,
+ super_agent_service=super_agent_service,
+ task_executor=task_executor,
+ )
+
+ monkeypatch.setattr(
+ "valuecell.core.coordinate.orchestrator.AgentServiceBundle.compose",
+ Mock(return_value=bundle),
+ )
+
+ orchestrator = AgentOrchestrator()
+ orchestrator._testing_bundle = bundle # type: ignore[attr-defined]
+ return orchestrator
# -------------------------
@@ -249,13 +290,12 @@ async def test_happy_path_streaming(
mock_agent_client: Mock,
mock_agent_card_streaming: AgentCard,
sample_user_input: UserInput,
+ mock_task_manager: Mock,
):
- # Inject agent connections mock
- ac = Mock()
- ac.start_agent = AsyncMock(return_value=mock_agent_card_streaming)
- ac.get_client = AsyncMock(return_value=mock_agent_client)
- ac.stop_all = AsyncMock()
- orchestrator.agent_connections = ac
+ bundle = orchestrator._testing_bundle # type: ignore[attr-defined]
+ bundle.agent_connections.start_agent.return_value = mock_agent_card_streaming
+ bundle.agent_connections.get_client.return_value = mock_agent_client
+ bundle.agent_connections.stop_all = AsyncMock()
mock_agent_client.send_message.return_value = _make_streaming_response(
["Hello", " World"]
@@ -267,10 +307,9 @@ async def test_happy_path_streaming(
out.append(chunk)
# Minimal assertions
- orchestrator.task_manager.update_task.assert_called_once()
- orchestrator.task_manager.start_task.assert_called_once()
- ac.start_agent.assert_called_once()
- ac.get_client.assert_called_once_with("TestAgent")
+ mock_task_manager.update_task.assert_called_once()
+ mock_task_manager.start_task.assert_called_once()
+ bundle.agent_connections.get_client.assert_awaited_once_with("TestAgent")
mock_agent_client.send_message.assert_called_once()
# Should at least yield something (content or final)
assert len(out) >= 1
@@ -282,12 +321,12 @@ async def test_happy_path_non_streaming(
mock_agent_client: Mock,
mock_agent_card_non_streaming: AgentCard,
sample_user_input: UserInput,
+ mock_task_manager: Mock,
):
- ac = Mock()
- ac.start_agent = AsyncMock(return_value=mock_agent_card_non_streaming)
- ac.get_client = AsyncMock(return_value=mock_agent_client)
- ac.stop_all = AsyncMock()
- orchestrator.agent_connections = ac
+ bundle = orchestrator._testing_bundle # type: ignore[attr-defined]
+ bundle.agent_connections.start_agent.return_value = mock_agent_card_non_streaming
+ bundle.agent_connections.get_client.return_value = mock_agent_client
+ bundle.agent_connections.stop_all = AsyncMock()
mock_agent_client.send_message.return_value = _make_non_streaming_response()
@@ -295,8 +334,9 @@ async def test_happy_path_non_streaming(
async for chunk in orchestrator.process_user_input(sample_user_input):
out.append(chunk)
- orchestrator.task_manager.start_task.assert_called_once()
- orchestrator.task_manager.complete_task.assert_called_once()
+ mock_task_manager.start_task.assert_called_once()
+ mock_task_manager.complete_task.assert_called_once()
+ bundle.agent_connections.get_client.assert_awaited_once_with("TestAgent")
assert len(out) >= 1
@@ -304,10 +344,9 @@ async def test_happy_path_non_streaming(
async def test_planner_error(
orchestrator: AgentOrchestrator, sample_user_input: UserInput
):
- orchestrator.planner.create_plan.side_effect = RuntimeError("Planning failed")
-
- # Need agent connections to exist but won't be used
- orchestrator.agent_connections = Mock()
+ orchestrator.plan_service.planner.create_plan.side_effect = RuntimeError(
+ "Planning failed"
+ )
out = []
async for chunk in orchestrator.process_user_input(sample_user_input):
@@ -324,10 +363,11 @@ async def test_agent_connection_error(
sample_user_input: UserInput,
mock_agent_card_streaming: AgentCard,
):
- ac = Mock()
- ac.start_agent = AsyncMock(return_value=mock_agent_card_streaming)
- ac.get_client = AsyncMock(return_value=None) # Simulate connection failure
- orchestrator.agent_connections = ac
+ bundle = orchestrator._testing_bundle # type: ignore[attr-defined]
+ bundle.agent_connections.start_agent.return_value = mock_agent_card_streaming
+ bundle.agent_connections.get_client.return_value = (
+ None # Simulate connection failure
+ )
out = []
async for chunk in orchestrator.process_user_input(sample_user_input):
@@ -336,102 +376,6 @@ async def test_agent_connection_error(
assert any("(Error)" in c.data.payload.content for c in out if c.data.payload)
-@pytest.mark.asyncio
-async def test_continue_planning_metadata_retrieval(
- orchestrator: AgentOrchestrator, conversation_id: str, sample_user_input: UserInput
-):
- """Test that _continue_planning correctly retrieves metadata from context."""
- from valuecell.core.coordinate.orchestrator import ExecutionContext
- from valuecell.core.constants import PLANNING_TASK, ORIGINAL_USER_INPUT
-
- # Create a real asyncio.Task-like object that can be awaited
- import asyncio
-
- async def mock_plan_coroutine():
- return Mock() # Mock ExecutionPlan
-
- # Create actual task from coroutine, but mark it as done with a result
- mock_planning_task = asyncio.create_task(mock_plan_coroutine())
- # Wait a bit to let it complete
- await asyncio.sleep(0.01)
-
- # Create execution context with required metadata
- context = ExecutionContext("planning", conversation_id, "thread-1", "user-1")
- context.add_metadata(
- **{PLANNING_TASK: mock_planning_task, ORIGINAL_USER_INPUT: sample_user_input}
- )
-
- # Set up execution context in orchestrator
- orchestrator._execution_contexts[conversation_id] = context
-
- # Mock dependencies
- orchestrator._response_factory.plan_failed = Mock()
-
- async def mock_execute_plan(*args):
- yield Mock()
-
- # Mock the async generator method directly
- orchestrator._execute_plan_with_input_support = Mock(
- return_value=mock_execute_plan()
- )
-
- # Call the method to trigger metadata retrieval (lines 507-508)
- results = []
- async for response in orchestrator._continue_planning(
- conversation_id, "thread-1", context
- ):
- results.append(response)
-
- # Verify that the method executed successfully
- # The fact that we got here without errors means metadata was retrieved correctly
- assert (
- conversation_id not in orchestrator._execution_contexts
- ) # Context should be cleaned up
- assert mock_planning_task.done() # Task should be completed
- assert len(results) >= 1 # Should have yielded at least one response
-
-
-@pytest.mark.asyncio
-async def test_cancel_execution_with_planning_task(
- orchestrator: AgentOrchestrator, conversation_id: str
-):
- """Test that _cancel_execution correctly retrieves planning_task metadata."""
- from valuecell.core.coordinate.orchestrator import ExecutionContext
- from valuecell.core.constants import PLANNING_TASK
-
- # Create mock planning task
- mock_planning_task = Mock()
- mock_planning_task.done.return_value = False
- mock_planning_task.cancel = Mock()
-
- # Create execution context with planning task
- context = ExecutionContext("planning", conversation_id, "thread-1", "user-1")
- context.add_metadata(**{PLANNING_TASK: mock_planning_task})
-
- # Set up execution context in orchestrator
- orchestrator._execution_contexts[conversation_id] = context
-
- # Mock user input manager
- orchestrator.user_input_manager.clear_request = Mock()
-
- # Mock conversation manager
- mock_conversation = _stub_conversation()
- orchestrator.conversation_manager.get_conversation.return_value = mock_conversation
- orchestrator.conversation_manager.update_conversation = AsyncMock()
-
- # Call _cancel_execution to trigger
- await orchestrator._cancel_execution(conversation_id)
-
- # Verify planning task was retrieved and cancelled
- mock_planning_task.cancel.assert_called_once()
-
- # Verify context cleanup
- assert conversation_id not in orchestrator._execution_contexts
- orchestrator.user_input_manager.clear_request.assert_called_once_with(
- conversation_id
- )
-
-
@pytest.mark.asyncio
async def test_super_agent_answer_short_circuits_planner(
orchestrator: AgentOrchestrator,
@@ -442,14 +386,14 @@ async def test_super_agent_answer_short_circuits_planner(
enriched_query=None,
reason="Handled directly",
)
- orchestrator.super_agent = SimpleNamespace(
+ orchestrator.super_agent_service = SimpleNamespace(
name="ValueCellAgent",
run=AsyncMock(return_value=outcome),
)
user_input = UserInput(
query="What is 2+2?",
- target_agent_name=orchestrator.super_agent.name,
+ target_agent_name=orchestrator.super_agent_service.name,
meta=UserInputMetadata(conversation_id="conv-answer", user_id="user-answer"),
)
@@ -457,81 +401,10 @@ async def test_super_agent_answer_short_circuits_planner(
async for resp in orchestrator.process_user_input(user_input):
responses.append(resp)
- orchestrator.planner.create_plan.assert_not_called()
+ orchestrator.plan_service.planner.create_plan.assert_not_called()
payload_contents = [
getattr(resp.data.payload, "content", "")
for resp in responses
if getattr(resp, "data", None) and getattr(resp.data, "payload", None)
]
assert any("Concise reply" in content for content in payload_contents)
-
-
-@pytest.mark.asyncio
-async def test_super_agent_handoff_creates_component_events(
- orchestrator: AgentOrchestrator,
-):
- outcome = SuperAgentOutcome(
- decision=SuperAgentDecision.HANDOFF_TO_PLANNER,
- answer_content=None,
- enriched_query="Updated question",
- reason="Needs planner",
- )
- orchestrator.super_agent = SimpleNamespace(
- name="ValueCellAgent",
- run=AsyncMock(return_value=outcome),
- )
-
- handoff_task = Task(
- conversation_id="sub-conv",
- user_id="user-1",
- agent_name="ResearchAgent",
- query="Updated question",
- status=CoreTaskStatus.PENDING,
- handoff_from_super_agent=True,
- )
- plan = ExecutionPlan(
- plan_id="plan-handoff",
- conversation_id="conv-handoff",
- user_id="user-1",
- orig_query="Updated question",
- tasks=[handoff_task],
- created_at="2025-10-20T00:00:00",
- )
- orchestrator.planner.create_plan = AsyncMock(return_value=plan)
-
- def _empty_task_runner(*args, **kwargs):
- async def _gen():
- if False:
- yield None
-
- return _gen()
-
- orchestrator._execute_task_with_input_support = Mock(side_effect=_empty_task_runner)
- orchestrator._response_buffer.annotate = Mock(side_effect=lambda r: r)
- orchestrator._persist_from_buffer = AsyncMock()
- orchestrator._response_buffer.flush_task = Mock(return_value=[])
- orchestrator._persist_items = AsyncMock()
-
- user_input = UserInput(
- query="Original question",
- target_agent_name=orchestrator.super_agent.name,
- meta=UserInputMetadata(conversation_id="conv-handoff", user_id="user-1"),
- )
-
- responses = []
- async for resp in orchestrator.process_user_input(user_input):
- responses.append(resp)
-
- orchestrator.planner.create_plan.assert_awaited_once()
- assert user_input.target_agent_name == ""
- assert user_input.query == "Updated question"
-
- component_payloads = [
- getattr(resp.data.payload, "content", "")
- for resp in responses
- if getattr(resp, "data", None)
- and getattr(resp.data, "payload", None)
- and getattr(resp.data.payload, "component_type", "") == "subagent_conversation"
- ]
- assert any('"phase": "start"' in payload for payload in component_payloads)
- assert any('"phase": "end"' in payload for payload in component_payloads)
diff --git a/python/valuecell/core/plan/__init__.py b/python/valuecell/core/plan/__init__.py
new file mode 100644
index 000000000..75374d595
--- /dev/null
+++ b/python/valuecell/core/plan/__init__.py
@@ -0,0 +1,8 @@
+"""Planning service public exports."""
+
+from .service import PlanService, UserInputRegistry
+
+__all__ = [
+ "PlanService",
+ "UserInputRegistry",
+]
diff --git a/python/valuecell/core/coordinate/models.py b/python/valuecell/core/plan/models.py
similarity index 100%
rename from python/valuecell/core/coordinate/models.py
rename to python/valuecell/core/plan/models.py
diff --git a/python/valuecell/core/coordinate/planner.py b/python/valuecell/core/plan/planner.py
similarity index 99%
rename from python/valuecell/core/coordinate/planner.py
rename to python/valuecell/core/plan/planner.py
index bec8ffc2f..22e97c4ea 100644
--- a/python/valuecell/core/coordinate/planner.py
+++ b/python/valuecell/core/plan/planner.py
@@ -20,10 +20,6 @@
from agno.db.in_memory import InMemoryDb
from valuecell.core.agent.connect import RemoteConnections
-from valuecell.core.coordinate.planner_prompts import (
- PLANNER_EXPECTED_OUTPUT,
- PLANNER_INSTRUCTION,
-)
from valuecell.core.task import Task, TaskStatus
from valuecell.core.types import UserInput
from valuecell.utils import generate_uuid
@@ -32,6 +28,10 @@
from valuecell.utils.uuid import generate_conversation_id, generate_thread_id
from .models import ExecutionPlan, PlannerInput, PlannerResponse
+from .prompts import (
+ PLANNER_EXPECTED_OUTPUT,
+ PLANNER_INSTRUCTION,
+)
logger = logging.getLogger(__name__)
diff --git a/python/valuecell/core/coordinate/planner_prompts.py b/python/valuecell/core/plan/prompts.py
similarity index 100%
rename from python/valuecell/core/coordinate/planner_prompts.py
rename to python/valuecell/core/plan/prompts.py
diff --git a/python/valuecell/core/plan/service.py b/python/valuecell/core/plan/service.py
new file mode 100644
index 000000000..48b0729b2
--- /dev/null
+++ b/python/valuecell/core/plan/service.py
@@ -0,0 +1,86 @@
+"""Planning service coordinating planner and user input lifecycle."""
+
+from __future__ import annotations
+
+import asyncio
+from typing import Awaitable, Callable, Dict, Optional
+
+from valuecell.core.agent.connect import RemoteConnections
+from valuecell.core.plan.planner import (
+ ExecutionPlanner,
+ UserInputRequest,
+)
+from valuecell.core.types import UserInput
+
+
+class UserInputRegistry:
+ """In-memory store for pending planner-driven user input requests."""
+
+ def __init__(self) -> None:
+ self._pending: Dict[str, UserInputRequest] = {}
+
+ def add_request(self, conversation_id: str, request: UserInputRequest) -> None:
+ self._pending[conversation_id] = request
+
+ def has_request(self, conversation_id: str) -> bool:
+ return conversation_id in self._pending
+
+ def get_prompt(self, conversation_id: str) -> Optional[str]:
+ request = self._pending.get(conversation_id)
+ return request.prompt if request else None
+
+ def provide_response(self, conversation_id: str, response: str) -> bool:
+ if conversation_id not in self._pending:
+ return False
+ request = self._pending.pop(conversation_id)
+ request.provide_response(response)
+ return True
+
+ def clear(self, conversation_id: str) -> None:
+ self._pending.pop(conversation_id, None)
+
+
+class PlanService:
+ """Encapsulate plan creation and Human-in-the-Loop state."""
+
+ def __init__(
+ self,
+ agent_connections: RemoteConnections,
+ execution_planner: ExecutionPlanner | None = None,
+ user_input_registry: UserInputRegistry | None = None,
+ ) -> None:
+ self._planner = execution_planner or ExecutionPlanner(agent_connections)
+ self._input_registry = user_input_registry or UserInputRegistry()
+
+ @property
+ def planner(self) -> ExecutionPlanner:
+ return self._planner
+
+ def register_user_input(
+ self, conversation_id: str, request: UserInputRequest
+ ) -> None:
+ self._input_registry.add_request(conversation_id, request)
+
+ def has_pending_request(self, conversation_id: str) -> bool:
+ return self._input_registry.has_request(conversation_id)
+
+ def get_request_prompt(self, conversation_id: str) -> Optional[str]:
+ return self._input_registry.get_prompt(conversation_id)
+
+ def provide_user_response(self, conversation_id: str, response: str) -> bool:
+ return self._input_registry.provide_response(conversation_id, response)
+
+ def clear_pending_request(self, conversation_id: str) -> None:
+ self._input_registry.clear(conversation_id)
+
+ def start_planning_task(
+ self,
+ user_input: UserInput,
+ thread_id: str,
+ callback: Callable[[UserInputRequest], Awaitable[None]],
+ ) -> asyncio.Task:
+        """Kick off asynchronous planning and return the pending asyncio.Task."""
+
+ return asyncio.create_task(
+ self._planner.create_plan(user_input, callback, thread_id)
+ )
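A minimal sketch of the Human-in-the-Loop round trip this service manages; the `request` value is left abstract because the `UserInputRequest` constructor lives in `valuecell.core.plan.planner` and is not shown in this patch:

    registry = UserInputRegistry()

    # Planner side: the planner's callback parks a request for the conversation.
    registry.add_request("conv-1", request)      # request: UserInputRequest from the planner
    assert registry.has_request("conv-1")
    prompt = registry.get_prompt("conv-1")       # relayed to the user by the orchestrator

    # User side: the reply is handed back to the planner waiting on the request.
    delivered = registry.provide_response("conv-1", "Yes, recurring updates please")
    assert delivered and not registry.has_request("conv-1")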
diff --git a/python/valuecell/core/plan/tests/__init__.py b/python/valuecell/core/plan/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/valuecell/core/coordinate/tests/test_planner.py b/python/valuecell/core/plan/tests/test_planner.py
similarity index 91%
rename from python/valuecell/core/coordinate/tests/test_planner.py
rename to python/valuecell/core/plan/tests/test_planner.py
index 821911426..ac9da8668 100644
--- a/python/valuecell/core/coordinate/tests/test_planner.py
+++ b/python/valuecell/core/plan/tests/test_planner.py
@@ -4,9 +4,9 @@
import pytest
-from valuecell.core.coordinate import planner as planner_mod
-from valuecell.core.coordinate.planner import ExecutionPlanner
-from valuecell.core.coordinate.models import PlannerResponse
+import valuecell.core.plan.planner as planner_mod
+from valuecell.core.plan.models import PlannerResponse
+from valuecell.core.plan.planner import ExecutionPlanner
from valuecell.core.types import UserInput, UserInputMetadata
@@ -32,11 +32,14 @@ async def test_create_plan_handles_paused_run(monkeypatch: pytest.MonkeyPatch):
"reason": "ok",
"tasks": [
{
+ "title": "Research task",
"query": "Run research",
"agent_name": "ResearchAgent",
"pattern": "once",
+ "schedule_config": None,
}
],
+ "guidance_message": None,
}
)
@@ -127,8 +130,8 @@ def run(self, *args, **kwargs):
async def callback(request):
raise AssertionError("callback should not be invoked")
- with pytest.raises(ValueError):
- await planner.create_plan(user_input, callback, "thread-55")
+ plan = await planner.create_plan(user_input, callback, "thread-55")
+ assert plan.guidance_message
def test_tool_get_enabled_agents_formats_cards():
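The assertion change above reflects a behavioural shift: an inadequate plan no longer raises `ValueError`, it comes back with `guidance_message` set, and `TaskExecutor.execute_plan` (introduced later in this patch) short-circuits on that field. A hedged sketch of the branch callers can rely on; `handle_guidance` and `forward` are hypothetical helpers:

    plan = await planner.create_plan(user_input, callback, thread_id)
    if plan.guidance_message:
        # No tasks were created; relay the clarification question to the user.
        handle_guidance(plan.guidance_message)   # hypothetical helper
    else:
        async for response in task_executor.execute_plan(plan, thread_id):
            forward(response)                    # hypothetical helper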
diff --git a/python/valuecell/core/response/__init__.py b/python/valuecell/core/response/__init__.py
new file mode 100644
index 000000000..7913b6b72
--- /dev/null
+++ b/python/valuecell/core/response/__init__.py
@@ -0,0 +1,7 @@
+"""Response module exports."""
+
+from .service import ResponseService
+
+__all__ = [
+ "ResponseService",
+]
diff --git a/python/valuecell/core/coordinate/response_buffer.py b/python/valuecell/core/response/buffer.py
similarity index 100%
rename from python/valuecell/core/coordinate/response_buffer.py
rename to python/valuecell/core/response/buffer.py
diff --git a/python/valuecell/core/coordinate/response.py b/python/valuecell/core/response/factory.py
similarity index 100%
rename from python/valuecell/core/coordinate/response.py
rename to python/valuecell/core/response/factory.py
diff --git a/python/valuecell/core/coordinate/response_router.py b/python/valuecell/core/response/router.py
similarity index 98%
rename from python/valuecell/core/coordinate/response_router.py
rename to python/valuecell/core/response/router.py
index 2f7966bd5..85c9a7dd3 100644
--- a/python/valuecell/core/coordinate/response_router.py
+++ b/python/valuecell/core/response/router.py
@@ -7,7 +7,7 @@
from a2a.utils import get_message_text
from valuecell.core.agent.responses import EventPredicates
-from valuecell.core.coordinate.response import ResponseFactory
+from valuecell.core.response.factory import ResponseFactory
from valuecell.core.task import Task
from valuecell.core.types import (
BaseResponse,
diff --git a/python/valuecell/core/response/service.py b/python/valuecell/core/response/service.py
new file mode 100644
index 000000000..9192aa15e
--- /dev/null
+++ b/python/valuecell/core/response/service.py
@@ -0,0 +1,94 @@
+"""Response service consolidating factory, buffering, and persistence."""
+
+from __future__ import annotations
+
+from typing import Iterable
+
+from valuecell.core.conversation.service import ConversationService
+from valuecell.core.response.buffer import ResponseBuffer, SaveItem
+from valuecell.core.response.factory import ResponseFactory
+from valuecell.core.response.router import RouteResult, handle_status_update
+from valuecell.core.task import Task
+from valuecell.core.types import BaseResponse, ConversationItemEvent
+
+
+class ResponseService:
+ """Provide a single entry point for response creation and persistence."""
+
+ def __init__(
+ self,
+ conversation_service: ConversationService,
+ response_factory: ResponseFactory | None = None,
+ response_buffer: ResponseBuffer | None = None,
+ ) -> None:
+ self._conversation_service = conversation_service
+ self._factory = response_factory or ResponseFactory()
+ self._buffer = response_buffer or ResponseBuffer()
+
+ @property
+ def factory(self) -> ResponseFactory:
+ return self._factory
+
+ @property
+ def conversation_service(self) -> ConversationService:
+ return self._conversation_service
+
+ async def emit(self, response: BaseResponse) -> BaseResponse:
+ """Annotate, persist, and return the response."""
+
+ annotated = self._buffer.annotate(response)
+ await self._persist_from_buffer(annotated)
+ return annotated
+
+ async def emit_many(self, responses: Iterable[BaseResponse]) -> list[BaseResponse]:
+ """Persist a batch of responses in order."""
+
+ out: list[BaseResponse] = []
+ for resp in responses:
+ out.append(await self.emit(resp))
+ return out
+
+ async def flush_task(
+ self, conversation_id: str, thread_id: str | None, task_id: str | None
+ ) -> None:
+ """Force-flush buffered paragraphs for a task context."""
+
+ items = self._buffer.flush_task(conversation_id, thread_id, task_id)
+ await self._persist_items(items)
+
+ async def get_conversation_history(
+ self,
+ conversation_id: str | None = None,
+ event: ConversationItemEvent | None = None,
+ component_type: str | None = None,
+ ) -> list[BaseResponse]:
+ """Load persisted conversation items and rebuild responses."""
+
+ items = await self._conversation_service.get_conversation_items(
+ conversation_id=conversation_id,
+ event=event,
+ component_type=component_type,
+ )
+ return [self._factory.from_conversation_item(item) for item in items]
+
+ async def route_task_status(self, task: Task, thread_id: str, event) -> RouteResult:
+        """Route a task status update; side effects are returned to the caller, not applied."""
+
+ return await handle_status_update(self._factory, task, thread_id, event)
+
+ async def _persist_from_buffer(self, response: BaseResponse) -> None:
+ items = self._buffer.ingest(response)
+ await self._persist_items(items)
+
+ async def _persist_items(self, items: list[SaveItem]) -> None:
+ for item in items:
+ await self._conversation_service.add_item(
+ role=item.role,
+ event=item.event,
+ conversation_id=item.conversation_id,
+ thread_id=item.thread_id,
+ task_id=item.task_id,
+ payload=item.payload,
+ item_id=item.item_id,
+ agent_name=item.agent_name,
+ )
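A minimal usage sketch for the consolidated service, assuming a ready `ConversationService`; the factory call mirrors `message_response_general` as used by the executor later in this patch:

    from valuecell.core.response.service import ResponseService
    from valuecell.core.types import StreamResponseEvent

    response_service = ResponseService(conversation_service=conversation_service)

    async def stream_chunk(conversation_id: str, thread_id: str, task_id: str):
        raw = response_service.factory.message_response_general(
            event=StreamResponseEvent.MESSAGE_CHUNK,
            conversation_id=conversation_id,
            thread_id=thread_id,
            task_id=task_id,
            content="Hello",
        )
        # emit() annotates the response via the buffer and persists any SaveItems
        # it produces before returning the annotated response to the caller.
        yield await response_service.emit(raw)
        # Flush buffered paragraphs explicitly once the task context is done.
        await response_service.flush_task(conversation_id, thread_id, task_id)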
diff --git a/python/valuecell/core/response/tests/__init__.py b/python/valuecell/core/response/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/valuecell/core/coordinate/tests/test_component_id.py b/python/valuecell/core/response/tests/test_component_id.py
similarity index 99%
rename from python/valuecell/core/coordinate/tests/test_component_id.py
rename to python/valuecell/core/response/tests/test_component_id.py
index 190f2ed27..87010fa16 100644
--- a/python/valuecell/core/coordinate/tests/test_component_id.py
+++ b/python/valuecell/core/response/tests/test_component_id.py
@@ -1,7 +1,7 @@
"""Tests for component_id override functionality."""
from valuecell.core.agent.responses import streaming, notification
-from valuecell.core.coordinate.response import ResponseFactory
+from valuecell.core.response.factory import ResponseFactory
from valuecell.core.types import CommonResponseEvent
diff --git a/python/valuecell/core/coordinate/tests/test_response_buffer.py b/python/valuecell/core/response/tests/test_response_buffer.py
similarity index 99%
rename from python/valuecell/core/coordinate/tests/test_response_buffer.py
rename to python/valuecell/core/response/tests/test_response_buffer.py
index 3cb68cf97..1e028061d 100644
--- a/python/valuecell/core/coordinate/tests/test_response_buffer.py
+++ b/python/valuecell/core/response/tests/test_response_buffer.py
@@ -1,12 +1,12 @@
"""
-Unit tests for valuecell.core.coordinate.response_buffer module
+Unit tests for valuecell.core.response.buffer module
"""
import time
import pytest
-from valuecell.core.coordinate.response_buffer import (
+from valuecell.core.response.buffer import (
BufferEntry,
ResponseBuffer,
SaveItem,
diff --git a/python/valuecell/core/coordinate/tests/test_response_factory.py b/python/valuecell/core/response/tests/test_response_factory.py
similarity index 98%
rename from python/valuecell/core/coordinate/tests/test_response_factory.py
rename to python/valuecell/core/response/tests/test_response_factory.py
index f4825e87e..2bd6b61ce 100644
--- a/python/valuecell/core/coordinate/tests/test_response_factory.py
+++ b/python/valuecell/core/response/tests/test_response_factory.py
@@ -1,5 +1,5 @@
import pytest
-from valuecell.core.coordinate.response import ResponseFactory
+from valuecell.core.response.factory import ResponseFactory
from valuecell.core.types import (
BaseResponseDataPayload,
CommonResponseEvent,
diff --git a/python/valuecell/core/coordinate/tests/test_response_router.py b/python/valuecell/core/response/tests/test_response_router.py
similarity index 99%
rename from python/valuecell/core/coordinate/tests/test_response_router.py
rename to python/valuecell/core/response/tests/test_response_router.py
index ea0ee1a35..334dad858 100644
--- a/python/valuecell/core/coordinate/tests/test_response_router.py
+++ b/python/valuecell/core/response/tests/test_response_router.py
@@ -1,5 +1,5 @@
"""
-Unit tests for valuecell.core.coordinate.response_router module
+Unit tests for valuecell.core.response.router module
"""
from unittest.mock import MagicMock, patch
@@ -14,7 +14,7 @@
Role,
)
-from valuecell.core.coordinate.response_router import (
+from valuecell.core.response.router import (
RouteResult,
SideEffect,
SideEffectKind,
diff --git a/python/valuecell/core/super_agent/__init__.py b/python/valuecell/core/super_agent/__init__.py
new file mode 100644
index 000000000..646063bf2
--- /dev/null
+++ b/python/valuecell/core/super_agent/__init__.py
@@ -0,0 +1,10 @@
+"""Super agent service exports."""
+
+from .core import SuperAgentDecision, SuperAgentOutcome
+from .service import SuperAgentService
+
+__all__ = [
+ "SuperAgentDecision",
+ "SuperAgentOutcome",
+ "SuperAgentService",
+]
diff --git a/python/valuecell/core/coordinate/super_agent.py b/python/valuecell/core/super_agent/core.py
similarity index 97%
rename from python/valuecell/core/coordinate/super_agent.py
rename to python/valuecell/core/super_agent/core.py
index 8399b0924..d9b7b03bb 100644
--- a/python/valuecell/core/coordinate/super_agent.py
+++ b/python/valuecell/core/super_agent/core.py
@@ -7,7 +7,7 @@
from agno.tools.crawl4ai import Crawl4aiTools
from pydantic import BaseModel, Field
-from valuecell.core.coordinate.super_agent_prompts import (
+from valuecell.core.super_agent.prompts import (
SUPER_AGENT_EXPECTED_OUTPUT,
SUPER_AGENT_INSTRUCTION,
)
diff --git a/python/valuecell/core/coordinate/super_agent_prompts.py b/python/valuecell/core/super_agent/prompts.py
similarity index 100%
rename from python/valuecell/core/coordinate/super_agent_prompts.py
rename to python/valuecell/core/super_agent/prompts.py
diff --git a/python/valuecell/core/super_agent/service.py b/python/valuecell/core/super_agent/service.py
new file mode 100644
index 000000000..d1d31e2fa
--- /dev/null
+++ b/python/valuecell/core/super_agent/service.py
@@ -0,0 +1,21 @@
+"""Service façade for the super agent orchestration stage."""
+
+from __future__ import annotations
+
+from valuecell.core.types import UserInput
+
+from .core import SuperAgent, SuperAgentOutcome
+
+
+class SuperAgentService:
+ """Thin wrapper to expose SuperAgent behaviour as a service."""
+
+ def __init__(self, super_agent: SuperAgent | None = None) -> None:
+ self._super_agent = super_agent or SuperAgent()
+
+ @property
+ def name(self) -> str:
+ return self._super_agent.name
+
+ async def run(self, user_input: UserInput) -> SuperAgentOutcome:
+ return await self._super_agent.run(user_input)
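A minimal sketch of how the façade is consumed; only `HANDOFF_TO_PLANNER` appears elsewhere in this patch, so the direct-answer branch is illustrative:

    from valuecell.core.super_agent import SuperAgentDecision, SuperAgentService

    service = SuperAgentService()
    outcome = await service.run(user_input)   # user_input: valuecell.core.types.UserInput
    if outcome.decision == SuperAgentDecision.HANDOFF_TO_PLANNER:
        # Planner path: adopt the enriched query and clear the target agent.
        user_input.query = outcome.enriched_query or user_input.query
        user_input.target_agent_name = ""
    else:
        # Direct-answer path: outcome.answer_content is streamed back to the user.
        reply = outcome.answer_content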
diff --git a/python/valuecell/core/super_agent/tests/__init__.py b/python/valuecell/core/super_agent/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/valuecell/core/coordinate/tests/test_super_agent.py b/python/valuecell/core/super_agent/tests/test_super_agent.py
similarity index 95%
rename from python/valuecell/core/coordinate/tests/test_super_agent.py
rename to python/valuecell/core/super_agent/tests/test_super_agent.py
index 746bcc76d..6226857fb 100644
--- a/python/valuecell/core/coordinate/tests/test_super_agent.py
+++ b/python/valuecell/core/super_agent/tests/test_super_agent.py
@@ -6,7 +6,7 @@
import pytest
from valuecell.core.coordinate import super_agent as super_agent_mod
-from valuecell.core.coordinate.super_agent import SuperAgent, SuperAgentDecision
+from valuecell.core.super_agent.core import SuperAgent, SuperAgentDecision
from valuecell.core.types import UserInput, UserInputMetadata
diff --git a/python/valuecell/core/task/__init__.py b/python/valuecell/core/task/__init__.py
index 2053a1bad..829af751e 100644
--- a/python/valuecell/core/task/__init__.py
+++ b/python/valuecell/core/task/__init__.py
@@ -1,5 +1,6 @@
"""Task module public API"""
+from .executor import TaskExecutor
from .manager import TaskManager
from .models import Task, TaskPattern, TaskStatus
@@ -8,4 +9,5 @@
"TaskStatus",
"TaskPattern",
"TaskManager",
+ "TaskExecutor",
]
diff --git a/python/valuecell/core/task/executor.py b/python/valuecell/core/task/executor.py
new file mode 100644
index 000000000..774c6719d
--- /dev/null
+++ b/python/valuecell/core/task/executor.py
@@ -0,0 +1,355 @@
+import asyncio
+import json
+from datetime import datetime, timezone
+from typing import AsyncGenerator, Iterable, Optional
+
+from a2a.types import TaskArtifactUpdateEvent, TaskState, TaskStatusUpdateEvent
+from loguru import logger
+
+from valuecell.core.agent.connect import RemoteConnections
+from valuecell.core.agent.responses import EventPredicates
+from valuecell.core.constants import (
+ CURRENT_CONTEXT,
+ DEPENDENCIES,
+ LANGUAGE,
+ METADATA,
+ TIMEZONE,
+ USER_PROFILE,
+)
+from valuecell.core.conversation.service import ConversationService
+from valuecell.core.plan.models import ExecutionPlan
+from valuecell.core.response.factory import ResponseFactory
+from valuecell.core.response.router import RouteResult, SideEffectKind
+from valuecell.core.response.service import ResponseService
+from valuecell.core.task.models import Task
+from valuecell.core.task.service import DEFAULT_EXECUTION_POLL_INTERVAL, TaskService
+from valuecell.core.task.temporal import calculate_next_execution_delay
+from valuecell.core.types import (
+ BaseResponse,
+ ComponentType,
+ ScheduledTaskComponentContent,
+ StreamResponseEvent,
+ SubagentConversationPhase,
+)
+from valuecell.utils.i18n_utils import get_current_language, get_current_timezone
+from valuecell.utils.uuid import generate_item_id, generate_task_id
+
+
+class ScheduledTaskResultAccumulator:
+ """Collect streaming output for a scheduled task run."""
+
+ def __init__(self, task: Task) -> None:
+ self._task = task
+ self._buffer: list[str] = []
+
+ @property
+ def enabled(self) -> bool:
+ return self._task.schedule_config is not None
+
+ def consume(self, responses: Iterable[BaseResponse]) -> list[BaseResponse]:
+ if not self.enabled:
+ return list(responses)
+
+ passthrough: list[BaseResponse] = []
+ for resp in responses:
+ event = resp.event
+
+ if EventPredicates.is_message(event):
+ payload = resp.data.payload
+ content = payload.content if payload else None
+ if content:
+ self._buffer.append(content)
+ continue
+
+ if EventPredicates.is_reasoning(event):
+ continue
+
+ if EventPredicates.is_tool_call(event):
+ continue
+
+ passthrough.append(resp)
+
+ return passthrough
+
+ def finalize(self, response_factory: ResponseFactory) -> Optional[BaseResponse]:
+ if not self.enabled:
+ return None
+
+ content = "".join(self._buffer).strip()
+ if not content:
+ content = "Task completed without output."
+
+ component_payload = ScheduledTaskComponentContent(
+ task_id=self._task.task_id,
+ task_title=self._task.title,
+ result=content,
+ create_time=datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S"),
+ )
+ component_payload_json = component_payload.model_dump_json(exclude_none=True)
+
+ return response_factory.component_generator(
+ conversation_id=self._task.conversation_id,
+ thread_id=self._task.thread_id,
+ task_id=self._task.task_id,
+ content=component_payload_json,
+ component_type=ComponentType.SCHEDULED_TASK_RESULT.value,
+ agent_name=self._task.agent_name,
+ )
+
+
+class TaskExecutor:
+ """Execute tasks and plans while persisting streamed output."""
+
+ def __init__(
+ self,
+ agent_connections: RemoteConnections,
+ task_service: TaskService,
+ response_service: ResponseService,
+ conversation_service: ConversationService,
+ poll_interval: float = DEFAULT_EXECUTION_POLL_INTERVAL,
+ ) -> None:
+ self._agent_connections = agent_connections
+ self._task_service = task_service
+ self._response_service = response_service
+ self._conversation_service = conversation_service
+ self._poll_interval = poll_interval
+
+ async def execute_plan(
+ self,
+ plan: ExecutionPlan,
+ thread_id: str,
+ metadata: Optional[dict] = None,
+ ) -> AsyncGenerator[BaseResponse, None]:
+ if plan.guidance_message:
+ response = self._response_service.factory.message_response_general(
+ event=StreamResponseEvent.MESSAGE_CHUNK,
+ conversation_id=plan.conversation_id,
+ thread_id=thread_id,
+ task_id=generate_task_id(),
+ content=plan.guidance_message,
+ )
+ yield await self._response_service.emit(response)
+ return
+
+ for task in plan.tasks:
+ subagent_component_id = generate_item_id()
+ if task.handoff_from_super_agent:
+ await self._conversation_service.ensure_conversation(
+ user_id=plan.user_id,
+ conversation_id=task.conversation_id,
+ )
+ component_payload = json.dumps(
+ {
+ "conversation_id": task.conversation_id,
+ "agent_name": task.agent_name,
+ "phase": SubagentConversationPhase.START.value,
+ }
+ )
+ component = self._response_service.factory.component_generator(
+ conversation_id=plan.conversation_id,
+ thread_id=thread_id,
+ task_id=task.task_id,
+ content=component_payload,
+ component_type=ComponentType.SUBAGENT_CONVERSATION.value,
+ component_id=subagent_component_id,
+ agent_name=task.agent_name,
+ )
+ yield await self._response_service.emit(component)
+
+ thread_started = self._response_service.factory.thread_started(
+ conversation_id=task.conversation_id,
+ thread_id=thread_id,
+ user_query=task.query,
+ )
+ yield await self._response_service.emit(thread_started)
+
+ try:
+ await self._task_service.update_task(task)
+ async for response in self._execute_task(task, thread_id, metadata):
+ yield response
+ except Exception as exc: # pragma: no cover - defensive logging
+ error_msg = f"(Error) Error executing {task.task_id}: {exc}"
+ logger.exception(error_msg)
+ failure = self._response_service.factory.task_failed(
+ conversation_id=plan.conversation_id,
+ thread_id=thread_id,
+ task_id=task.task_id,
+ content=error_msg,
+ agent_name=task.agent_name,
+ )
+ yield await self._response_service.emit(failure)
+ finally:
+ if task.handoff_from_super_agent:
+ component_payload = json.dumps(
+ {
+ "conversation_id": task.conversation_id,
+ "agent_name": task.agent_name,
+ "phase": SubagentConversationPhase.END.value,
+ }
+ )
+ component = self._response_service.factory.component_generator(
+ conversation_id=plan.conversation_id,
+ thread_id=thread_id,
+ task_id=task.task_id,
+ content=component_payload,
+ component_type=ComponentType.SUBAGENT_CONVERSATION.value,
+ component_id=subagent_component_id,
+ agent_name=task.agent_name,
+ )
+ yield await self._response_service.emit(component)
+
+ async def _execute_task(
+ self,
+ task: Task,
+ thread_id: str,
+ metadata: Optional[dict] = None,
+ ) -> AsyncGenerator[BaseResponse, None]:
+ task_id = task.task_id
+ conversation_id = task.conversation_id
+
+ await self._task_service.start_task(task_id)
+
+ exec_metadata = dict(metadata or {})
+ exec_metadata.setdefault(METADATA, {})
+ exec_metadata.setdefault(
+ DEPENDENCIES,
+ {
+ USER_PROFILE: {},
+ CURRENT_CONTEXT: {},
+ LANGUAGE: get_current_language(),
+ TIMEZONE: get_current_timezone(),
+ },
+ )
+
+ if task.schedule_config:
+ controller = self._response_service.factory.component_generator(
+ conversation_id=conversation_id,
+ thread_id=thread_id,
+ task_id=task_id,
+ content=ScheduledTaskComponentContent(
+ task_id=task_id,
+ task_title=task.title,
+ ).model_dump_json(exclude_none=True),
+ component_type=ComponentType.SCHEDULED_TASK_CONTROLLER.value,
+ agent_name=task.agent_name,
+ )
+ yield await self._response_service.emit(controller)
+ yield await self._response_service.emit(
+ self._response_service.factory.done(
+ conversation_id=conversation_id,
+ thread_id=thread_id,
+ )
+ )
+
+ accumulator = ScheduledTaskResultAccumulator(task)
+
+ try:
+ while True:
+ async for response in self._execute_single_task_run(
+ task, thread_id, exec_metadata, accumulator
+ ):
+ yield response
+
+ if not task.schedule_config:
+ break
+
+ delay = calculate_next_execution_delay(task.schedule_config)
+ if not delay:
+ break
+ logger.info(
+ f"Scheduled task `{task.title}` ({task_id}) will re-execute in {delay} seconds."
+ )
+
+ await self._sleep_with_cancellation(task, delay)
+
+ if task.is_finished():
+ break
+
+ await self._task_service.complete_task(task_id)
+ completed = self._response_service.factory.task_completed(
+ conversation_id=conversation_id,
+ thread_id=thread_id,
+ task_id=task_id,
+ agent_name=task.agent_name,
+ )
+ yield await self._response_service.emit(completed)
+ except Exception as exc:
+ await self._task_service.fail_task(task_id, str(exc))
+ raise
+ finally:
+ await self._response_service.flush_task(
+ conversation_id=conversation_id,
+ thread_id=thread_id,
+ task_id=task_id,
+ )
+
+ async def _execute_single_task_run(
+ self,
+ task: Task,
+ thread_id: str,
+ metadata: dict,
+ accumulator: ScheduledTaskResultAccumulator,
+ ) -> AsyncGenerator[BaseResponse, None]:
+ agent_name = task.agent_name
+ client = await self._agent_connections.get_client(agent_name)
+ if not client:
+ raise RuntimeError(f"Could not connect to agent {agent_name}")
+
+ remote_response = await client.send_message(
+ task.query,
+ conversation_id=task.conversation_id,
+ metadata=metadata,
+ )
+
+ async for remote_task, event in remote_response:
+ if event is None and remote_task.status.state == TaskState.submitted:
+ task.remote_task_ids.append(remote_task.id)
+ started = self._response_service.factory.task_started(
+ conversation_id=task.conversation_id,
+ thread_id=thread_id,
+ task_id=task.task_id,
+ agent_name=agent_name,
+ )
+ yield await self._response_service.emit(started)
+ continue
+
+ if isinstance(event, TaskStatusUpdateEvent):
+ route_result: RouteResult = (
+ await self._response_service.route_task_status(
+ task, thread_id, event
+ )
+ )
+ responses = accumulator.consume(route_result.responses)
+ for resp in responses:
+ yield await self._response_service.emit(resp)
+ for side_effect in route_result.side_effects:
+ if side_effect.kind == SideEffectKind.FAIL_TASK:
+ await self._task_service.fail_task(
+ task.task_id, side_effect.reason or ""
+ )
+ if route_result.done:
+ return
+ continue
+
+ if isinstance(event, TaskArtifactUpdateEvent):
+ logger.info(
+ "Received unexpected artifact update for task %s: %s",
+ task.task_id,
+ event,
+ )
+ continue
+
+ final_component = accumulator.finalize(self._response_service.factory)
+ if final_component is not None:
+ yield await self._response_service.emit(final_component)
+
+ return
+
+ async def _sleep_with_cancellation(self, task: Task, delay: float) -> None:
+ remaining = delay
+ while remaining > 0:
+ if task.is_finished():
+ return
+ sleep_for = min(self._poll_interval, remaining)
+ await asyncio.sleep(sleep_for)
+ remaining -= sleep_for
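The loop above relies on `calculate_next_execution_delay` for just two behaviors: a positive delay (in seconds) when another run is due, and a falsy value when the schedule is exhausted. A minimal sketch of such a helper, assuming a hypothetical interval-based `ScheduleConfig` (the real fields live in `task/models.py` / `task/temporal.py` and are not shown in this hunk):

```python
from datetime import datetime
from typing import Optional

from pydantic import BaseModel


class IntervalScheduleConfig(BaseModel):
    """Hypothetical schedule shape used only for this illustration."""

    interval_seconds: float  # assumed field: seconds between runs
    end_at: Optional[datetime] = None  # assumed field: stop after this time


def next_execution_delay(config: IntervalScheduleConfig) -> Optional[float]:
    """Return the delay before the next run, or None when the schedule is done."""
    if config.end_at is not None and datetime.now() >= config.end_at:
        return None  # falsy -> the executor breaks out of its re-execution loop
    return config.interval_seconds
```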
diff --git a/python/valuecell/core/task/manager.py b/python/valuecell/core/task/manager.py
index 0b48426b1..720532813 100644
--- a/python/valuecell/core/task/manager.py
+++ b/python/valuecell/core/task/manager.py
@@ -33,7 +33,7 @@ async def start_task(self, task_id: str) -> bool:
if not task or task.status != TaskStatus.PENDING:
return False
- task.start_task()
+ task.start()
await self.update_task(task)
return True
@@ -43,7 +43,7 @@ async def complete_task(self, task_id: str) -> bool:
if not task or task.is_finished():
return False
- task.complete_task()
+ task.complete()
await self.update_task(task)
return True
@@ -53,7 +53,7 @@ async def fail_task(self, task_id: str, error_message: str) -> bool:
if not task or task.is_finished():
return False
- task.fail_task(error_message)
+ task.fail(error_message)
await self.update_task(task)
return True
@@ -63,7 +63,7 @@ async def cancel_task(self, task_id: str) -> bool:
if not task or task.is_finished():
return False
- task.cancel_task()
+ task.cancel()
await self.update_task(task)
return True
@@ -77,7 +77,7 @@ async def cancel_conversation_tasks(self, conversation_id: str) -> int:
for task in tasks:
if not task.is_finished():
- task.cancel_task()
+ task.cancel()
await self.update_task(task)
cancelled_count += 1
diff --git a/python/valuecell/core/task/models.py b/python/valuecell/core/task/models.py
index b23bdd612..19c000631 100644
--- a/python/valuecell/core/task/models.py
+++ b/python/valuecell/core/task/models.py
@@ -49,7 +49,7 @@ class Task(BaseModel):
description="Task identifier determined by the remote agent after submission",
)
title: str = Field(
- ...,
+ default="",
description="A concise task title or summary (<=10 words or characters)",
)
query: str = Field(..., description="The task to be performed")
@@ -93,19 +93,19 @@ class Task(BaseModel):
class Config:
json_encoders = {datetime: lambda v: v.isoformat() if v else None}
- def start_task(self) -> None:
+ def start(self) -> None:
"""Start task execution"""
self.status = TaskStatus.RUNNING
self.started_at = datetime.now()
self.updated_at = datetime.now()
- def complete_task(self) -> None:
+ def complete(self) -> None:
"""Complete the task"""
self.status = TaskStatus.COMPLETED
self.completed_at = datetime.now()
self.updated_at = datetime.now()
- def fail_task(self, error_message: str) -> None:
+ def fail(self, error_message: str) -> None:
"""Mark task as failed"""
self.status = TaskStatus.FAILED
self.completed_at = datetime.now()
@@ -113,7 +113,7 @@ def fail_task(self, error_message: str) -> None:
self.error_message = error_message
# TODO: cancel agent remote task
- def cancel_task(self) -> None:
+ def cancel(self) -> None:
"""Cancel the task"""
self.status = TaskStatus.CANCELLED
self.completed_at = datetime.now()
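The renamed lifecycle methods behave exactly as before; only the redundant `_task` suffix is gone. For illustration, a helper that drives a pending task to FAILED using the methods defined above (constructing a `Task` is omitted because its full field set is outside this hunk):

```python
from valuecell.core.task.models import Task, TaskStatus


def mark_failed(task: Task, reason: str) -> None:
    """Transition a task to FAILED via the renamed lifecycle methods."""
    if task.status == TaskStatus.PENDING:
        task.start()      # PENDING -> RUNNING; stamps started_at/updated_at
    task.fail(reason)     # -> FAILED; stamps completed_at and error_message
    assert task.is_finished()
```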
diff --git a/python/valuecell/core/task/service.py b/python/valuecell/core/task/service.py
new file mode 100644
index 000000000..ec8970c1c
--- /dev/null
+++ b/python/valuecell/core/task/service.py
@@ -0,0 +1,37 @@
+"""Task services covering task management and execution."""
+
+from __future__ import annotations
+
+from valuecell.core.task.manager import TaskManager
+from valuecell.core.task.models import Task
+
+DEFAULT_EXECUTION_POLL_INTERVAL = 0.1
+
+
+class TaskService:
+ """Expose task management independent of the orchestrator."""
+
+ def __init__(self, manager: TaskManager | None = None) -> None:
+ self._manager = manager or TaskManager()
+
+ @property
+ def manager(self) -> TaskManager:
+ return self._manager
+
+ async def update_task(self, task: Task) -> None:
+ await self._manager.update_task(task)
+
+ async def start_task(self, task_id: str) -> bool:
+ return await self._manager.start_task(task_id)
+
+ async def complete_task(self, task_id: str) -> bool:
+ return await self._manager.complete_task(task_id)
+
+ async def fail_task(self, task_id: str, reason: str) -> bool:
+ return await self._manager.fail_task(task_id, reason)
+
+ async def cancel_task(self, task_id: str) -> bool:
+ return await self._manager.cancel_task(task_id)
+
+ async def cancel_conversation_tasks(self, conversation_id: str) -> int:
+ return await self._manager.cancel_conversation_tasks(conversation_id)
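`TaskService` is a thin façade over `TaskManager`, so callers only need a `task_id` to drive lifecycle transitions. A minimal usage sketch (the actual work between start and complete is elided):

```python
from valuecell.core.task.service import TaskService


async def run_with_lifecycle(task_id: str) -> None:
    """Drive a task through start/complete/fail via the service façade."""
    service = TaskService()  # builds a default TaskManager when none is injected
    if not await service.start_task(task_id):
        return  # unknown task, or not PENDING
    try:
        ...  # perform the actual work here
        await service.complete_task(task_id)
    except Exception as exc:
        await service.fail_task(task_id, str(exc))
```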
diff --git a/python/valuecell/core/coordinate/temporal.py b/python/valuecell/core/task/temporal.py
similarity index 100%
rename from python/valuecell/core/coordinate/temporal.py
rename to python/valuecell/core/task/temporal.py
diff --git a/python/valuecell/core/task/tests/test_models.py b/python/valuecell/core/task/tests/test_models.py
index 72fcee85e..e28f43329 100644
--- a/python/valuecell/core/task/tests/test_models.py
+++ b/python/valuecell/core/task/tests/test_models.py
@@ -120,7 +120,7 @@ def test_start_task(self):
start_time = datetime(2023, 1, 1, 12, 1, 0)
mock_datetime.now.return_value = start_time
- task.start_task()
+ task.start()
assert task.status == TaskStatus.RUNNING
assert task.started_at == start_time
@@ -141,7 +141,7 @@ def test_complete_task(self):
complete_time = datetime(2023, 1, 1, 12, 5, 0)
mock_datetime.now.return_value = complete_time
- task.complete_task()
+ task.complete()
assert task.status == TaskStatus.COMPLETED
assert task.completed_at == complete_time
@@ -162,7 +162,7 @@ def test_fail_task(self):
fail_time = datetime(2023, 1, 1, 12, 5, 0)
mock_datetime.now.return_value = fail_time
- task.fail_task("Test error message")
+ task.fail("Test error message")
assert task.status == TaskStatus.FAILED
assert task.completed_at == fail_time
@@ -184,7 +184,7 @@ def test_cancel_task(self):
cancel_time = datetime(2023, 1, 1, 12, 5, 0)
mock_datetime.now.return_value = cancel_time
- task.cancel_task()
+ task.cancel()
assert task.status == TaskStatus.CANCELLED
assert task.completed_at == cancel_time
From d0a2d0575be3fdcd45347e9bee8df54efaee531b Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Fri, 24 Oct 2025 11:17:36 +0800
Subject: [PATCH 16/30] refactor: rename ResponseService to
 EventResponseService
---
python/valuecell/core/coordinate/orchestrator.py | 4 ++--
python/valuecell/core/coordinate/services.py | 8 ++++----
.../core/coordinate/tests/test_orchestrator.py | 4 ++--
python/valuecell/core/event/__init__.py | 7 +++++++
python/valuecell/core/{response => event}/buffer.py | 0
python/valuecell/core/{response => event}/factory.py | 0
python/valuecell/core/{response => event}/router.py | 2 +-
python/valuecell/core/{response => event}/service.py | 10 +++++-----
.../core/{response => event}/tests/__init__.py | 0
.../{response => event}/tests/test_component_id.py | 2 +-
.../{response => event}/tests/test_response_buffer.py | 2 +-
.../{response => event}/tests/test_response_factory.py | 2 +-
.../{response => event}/tests/test_response_router.py | 2 +-
python/valuecell/core/response/__init__.py | 7 -------
python/valuecell/core/task/executor.py | 10 +++++-----
15 files changed, 30 insertions(+), 30 deletions(-)
create mode 100644 python/valuecell/core/event/__init__.py
rename python/valuecell/core/{response => event}/buffer.py (100%)
rename python/valuecell/core/{response => event}/factory.py (100%)
rename python/valuecell/core/{response => event}/router.py (98%)
rename python/valuecell/core/{response => event}/service.py (92%)
rename python/valuecell/core/{response => event}/tests/__init__.py (100%)
rename python/valuecell/core/{response => event}/tests/test_component_id.py (99%)
rename python/valuecell/core/{response => event}/tests/test_response_buffer.py (99%)
rename python/valuecell/core/{response => event}/tests/test_response_factory.py (98%)
rename python/valuecell/core/{response => event}/tests/test_response_router.py (99%)
delete mode 100644 python/valuecell/core/response/__init__.py
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index 4790a8cfa..09ae1a3d1 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -6,7 +6,7 @@
from valuecell.core.constants import ORIGINAL_USER_INPUT, PLANNING_TASK
from valuecell.core.conversation import ConversationService, ConversationStatus
from valuecell.core.plan import PlanService
-from valuecell.core.response import ResponseService
+from valuecell.core.event import EventResponseService
from valuecell.core.super_agent import (
SuperAgentDecision,
SuperAgentOutcome,
@@ -71,7 +71,7 @@ class AgentOrchestrator:
def __init__(
self,
conversation_service: ConversationService | None = None,
- response_service: ResponseService | None = None,
+ response_service: EventResponseService | None = None,
plan_service: PlanService | None = None,
super_agent_service: SuperAgentService | None = None,
task_executor: TaskExecutor | None = None,
diff --git a/python/valuecell/core/coordinate/services.py b/python/valuecell/core/coordinate/services.py
index 69ee32226..1ae68ac8c 100644
--- a/python/valuecell/core/coordinate/services.py
+++ b/python/valuecell/core/coordinate/services.py
@@ -9,7 +9,7 @@
from valuecell.core.conversation import ConversationManager, SQLiteItemStore
from valuecell.core.conversation.service import ConversationService
from valuecell.core.plan.service import PlanService
-from valuecell.core.response.service import ResponseService
+from valuecell.core.event.service import EventResponseService
from valuecell.core.super_agent import SuperAgentService
from valuecell.core.task.executor import TaskExecutor
from valuecell.core.task.service import TaskService
@@ -29,7 +29,7 @@ class AgentServiceBundle:
agent_connections: RemoteConnections
conversation_service: ConversationService
- response_service: ResponseService
+ response_service: EventResponseService
task_service: TaskService
plan_service: PlanService
super_agent_service: SuperAgentService
@@ -46,7 +46,7 @@ def compose(
cls,
*,
conversation_service: Optional[ConversationService] = None,
- response_service: Optional[ResponseService] = None,
+ response_service: Optional[EventResponseService] = None,
plan_service: Optional[PlanService] = None,
super_agent_service: Optional[SuperAgentService] = None,
task_executor: Optional[TaskExecutor] = None,
@@ -65,7 +65,7 @@ def compose(
)
conv_service = ConversationService(manager=base_manager)
- resp_service = response_service or ResponseService(
+ resp_service = response_service or EventResponseService(
conversation_service=conv_service
)
t_service = TaskService()
diff --git a/python/valuecell/core/coordinate/tests/test_orchestrator.py b/python/valuecell/core/coordinate/tests/test_orchestrator.py
index a25eb6ae4..2b85a9b1c 100644
--- a/python/valuecell/core/coordinate/tests/test_orchestrator.py
+++ b/python/valuecell/core/coordinate/tests/test_orchestrator.py
@@ -31,7 +31,7 @@
from valuecell.core.coordinate.orchestrator import AgentOrchestrator
from valuecell.core.plan.models import ExecutionPlan
from valuecell.core.plan.service import PlanService
-from valuecell.core.response.service import ResponseService
+from valuecell.core.event.service import EventResponseService
from valuecell.core.super_agent import (
SuperAgentDecision,
SuperAgentOutcome,
@@ -195,7 +195,7 @@ def _orchestrator(
agent_connections.start_agent = AsyncMock()
conversation_service = ConversationService(manager=mock_conversation_manager)
- response_service = ResponseService(conversation_service=conversation_service)
+ response_service = EventResponseService(conversation_service=conversation_service)
task_service = TaskService(manager=mock_task_manager)
plan_service = PlanService(
agent_connections=agent_connections, execution_planner=mock_planner
diff --git a/python/valuecell/core/event/__init__.py b/python/valuecell/core/event/__init__.py
new file mode 100644
index 000000000..e8581cd63
--- /dev/null
+++ b/python/valuecell/core/event/__init__.py
@@ -0,0 +1,7 @@
+"""Response module exports."""
+
+from .service import EventResponseService
+
+__all__ = [
+ "EventResponseService",
+]
diff --git a/python/valuecell/core/response/buffer.py b/python/valuecell/core/event/buffer.py
similarity index 100%
rename from python/valuecell/core/response/buffer.py
rename to python/valuecell/core/event/buffer.py
diff --git a/python/valuecell/core/response/factory.py b/python/valuecell/core/event/factory.py
similarity index 100%
rename from python/valuecell/core/response/factory.py
rename to python/valuecell/core/event/factory.py
diff --git a/python/valuecell/core/response/router.py b/python/valuecell/core/event/router.py
similarity index 98%
rename from python/valuecell/core/response/router.py
rename to python/valuecell/core/event/router.py
index 85c9a7dd3..277c26f3e 100644
--- a/python/valuecell/core/response/router.py
+++ b/python/valuecell/core/event/router.py
@@ -7,7 +7,7 @@
from a2a.utils import get_message_text
from valuecell.core.agent.responses import EventPredicates
-from valuecell.core.response.factory import ResponseFactory
+from valuecell.core.event.factory import ResponseFactory
from valuecell.core.task import Task
from valuecell.core.types import (
BaseResponse,
diff --git a/python/valuecell/core/response/service.py b/python/valuecell/core/event/service.py
similarity index 92%
rename from python/valuecell/core/response/service.py
rename to python/valuecell/core/event/service.py
index 9192aa15e..019dc0bcf 100644
--- a/python/valuecell/core/response/service.py
+++ b/python/valuecell/core/event/service.py
@@ -5,14 +5,14 @@
from typing import Iterable
from valuecell.core.conversation.service import ConversationService
-from valuecell.core.response.buffer import ResponseBuffer, SaveItem
-from valuecell.core.response.factory import ResponseFactory
-from valuecell.core.response.router import RouteResult, handle_status_update
+from valuecell.core.event.buffer import ResponseBuffer, SaveItem
+from valuecell.core.event.factory import ResponseFactory
+from valuecell.core.event.router import RouteResult, handle_status_update
from valuecell.core.task import Task
from valuecell.core.types import BaseResponse, ConversationItemEvent
-class ResponseService:
+class EventResponseService:
"""Provide a single entry point for response creation and persistence."""
def __init__(
@@ -48,7 +48,7 @@ async def emit_many(self, responses: Iterable[BaseResponse]) -> list[BaseRespons
out.append(await self.emit(resp))
return out
- async def flush_task(
+ async def flush_task_response(
self, conversation_id: str, thread_id: str | None, task_id: str | None
) -> None:
"""Force-flush buffered paragraphs for a task context."""
diff --git a/python/valuecell/core/response/tests/__init__.py b/python/valuecell/core/event/tests/__init__.py
similarity index 100%
rename from python/valuecell/core/response/tests/__init__.py
rename to python/valuecell/core/event/tests/__init__.py
diff --git a/python/valuecell/core/response/tests/test_component_id.py b/python/valuecell/core/event/tests/test_component_id.py
similarity index 99%
rename from python/valuecell/core/response/tests/test_component_id.py
rename to python/valuecell/core/event/tests/test_component_id.py
index 87010fa16..c0c251b69 100644
--- a/python/valuecell/core/response/tests/test_component_id.py
+++ b/python/valuecell/core/event/tests/test_component_id.py
@@ -1,7 +1,7 @@
"""Tests for component_id override functionality."""
from valuecell.core.agent.responses import streaming, notification
-from valuecell.core.response.factory import ResponseFactory
+from valuecell.core.event.factory import ResponseFactory
from valuecell.core.types import CommonResponseEvent
diff --git a/python/valuecell/core/response/tests/test_response_buffer.py b/python/valuecell/core/event/tests/test_response_buffer.py
similarity index 99%
rename from python/valuecell/core/response/tests/test_response_buffer.py
rename to python/valuecell/core/event/tests/test_response_buffer.py
index 1e028061d..a8bdc2693 100644
--- a/python/valuecell/core/response/tests/test_response_buffer.py
+++ b/python/valuecell/core/event/tests/test_response_buffer.py
@@ -6,7 +6,7 @@
import pytest
-from valuecell.core.response.buffer import (
+from valuecell.core.event.buffer import (
BufferEntry,
ResponseBuffer,
SaveItem,
diff --git a/python/valuecell/core/response/tests/test_response_factory.py b/python/valuecell/core/event/tests/test_response_factory.py
similarity index 98%
rename from python/valuecell/core/response/tests/test_response_factory.py
rename to python/valuecell/core/event/tests/test_response_factory.py
index 2bd6b61ce..64097f963 100644
--- a/python/valuecell/core/response/tests/test_response_factory.py
+++ b/python/valuecell/core/event/tests/test_response_factory.py
@@ -1,5 +1,5 @@
import pytest
-from valuecell.core.response.factory import ResponseFactory
+from valuecell.core.event.factory import ResponseFactory
from valuecell.core.types import (
BaseResponseDataPayload,
CommonResponseEvent,
diff --git a/python/valuecell/core/response/tests/test_response_router.py b/python/valuecell/core/event/tests/test_response_router.py
similarity index 99%
rename from python/valuecell/core/response/tests/test_response_router.py
rename to python/valuecell/core/event/tests/test_response_router.py
index 334dad858..5861ce147 100644
--- a/python/valuecell/core/response/tests/test_response_router.py
+++ b/python/valuecell/core/event/tests/test_response_router.py
@@ -14,7 +14,7 @@
Role,
)
-from valuecell.core.response.router import (
+from valuecell.core.event.router import (
RouteResult,
SideEffect,
SideEffectKind,
diff --git a/python/valuecell/core/response/__init__.py b/python/valuecell/core/response/__init__.py
deleted file mode 100644
index 7913b6b72..000000000
--- a/python/valuecell/core/response/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""Response module exports."""
-
-from .service import ResponseService
-
-__all__ = [
- "ResponseService",
-]
diff --git a/python/valuecell/core/task/executor.py b/python/valuecell/core/task/executor.py
index 774c6719d..7bd62f2d8 100644
--- a/python/valuecell/core/task/executor.py
+++ b/python/valuecell/core/task/executor.py
@@ -18,9 +18,9 @@
)
from valuecell.core.conversation.service import ConversationService
from valuecell.core.plan.models import ExecutionPlan
-from valuecell.core.response.factory import ResponseFactory
-from valuecell.core.response.router import RouteResult, SideEffectKind
-from valuecell.core.response.service import ResponseService
+from valuecell.core.event.factory import ResponseFactory
+from valuecell.core.event.router import RouteResult, SideEffectKind
+from valuecell.core.event.service import EventResponseService
from valuecell.core.task.models import Task
from valuecell.core.task.service import DEFAULT_EXECUTION_POLL_INTERVAL, TaskService
from valuecell.core.task.temporal import calculate_next_execution_delay
@@ -104,7 +104,7 @@ def __init__(
self,
agent_connections: RemoteConnections,
task_service: TaskService,
- response_service: ResponseService,
+ response_service: EventResponseService,
conversation_service: ConversationService,
poll_interval: float = DEFAULT_EXECUTION_POLL_INTERVAL,
) -> None:
@@ -277,7 +277,7 @@ async def _execute_task(
await self._task_service.fail_task(task_id, str(exc))
raise
finally:
- await self._response_service.flush_task(
+ await self._response_service.flush_task_response(
conversation_id=conversation_id,
thread_id=thread_id,
task_id=task_id,
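For downstream code this rename is a pure import-path change (plus `flush_task` becoming `flush_task_response`); construction and the `factory` attribute are untouched. A sketch of the migrated wiring:

```python
# Old paths removed by this patch:
#   from valuecell.core.response import ResponseService
#   from valuecell.core.response.factory import ResponseFactory
from valuecell.core.conversation.service import ConversationService
from valuecell.core.event import EventResponseService


def build_response_service(conversation_service: ConversationService) -> EventResponseService:
    """Same constructor as the old ResponseService; only the module path changed."""
    return EventResponseService(conversation_service=conversation_service)
```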
From c9c9eed9ccbf9693738978078cdd662f48441960 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Fri, 24 Oct 2025 11:32:53 +0800
Subject: [PATCH 17/30] fix: resolve circular imports
---
.../valuecell/core/coordinate/tests/test_orchestrator.py | 4 ++--
python/valuecell/core/event/router.py | 2 +-
python/valuecell/core/event/service.py | 2 +-
python/valuecell/core/event/tests/test_response_router.py | 8 ++++----
python/valuecell/core/plan/models.py | 3 +--
python/valuecell/core/plan/planner.py | 2 +-
6 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/python/valuecell/core/coordinate/tests/test_orchestrator.py b/python/valuecell/core/coordinate/tests/test_orchestrator.py
index 2b85a9b1c..367947532 100644
--- a/python/valuecell/core/coordinate/tests/test_orchestrator.py
+++ b/python/valuecell/core/coordinate/tests/test_orchestrator.py
@@ -29,17 +29,17 @@
from valuecell.core.conversation import ConversationStatus
from valuecell.core.conversation.service import ConversationService
from valuecell.core.coordinate.orchestrator import AgentOrchestrator
+from valuecell.core.event.service import EventResponseService
from valuecell.core.plan.models import ExecutionPlan
from valuecell.core.plan.service import PlanService
-from valuecell.core.event.service import EventResponseService
from valuecell.core.super_agent import (
SuperAgentDecision,
SuperAgentOutcome,
SuperAgentService,
)
-from valuecell.core.task import Task
from valuecell.core.task import TaskStatus as CoreTaskStatus
from valuecell.core.task.executor import TaskExecutor
+from valuecell.core.task.models import Task
from valuecell.core.task.service import TaskService
from valuecell.core.types import UserInput, UserInputMetadata
diff --git a/python/valuecell/core/event/router.py b/python/valuecell/core/event/router.py
index 277c26f3e..776765936 100644
--- a/python/valuecell/core/event/router.py
+++ b/python/valuecell/core/event/router.py
@@ -8,7 +8,7 @@
from valuecell.core.agent.responses import EventPredicates
from valuecell.core.event.factory import ResponseFactory
-from valuecell.core.task import Task
+from valuecell.core.task.models import Task
from valuecell.core.types import (
BaseResponse,
CommonResponseEvent,
diff --git a/python/valuecell/core/event/service.py b/python/valuecell/core/event/service.py
index 019dc0bcf..eb824002f 100644
--- a/python/valuecell/core/event/service.py
+++ b/python/valuecell/core/event/service.py
@@ -8,7 +8,7 @@
from valuecell.core.event.buffer import ResponseBuffer, SaveItem
from valuecell.core.event.factory import ResponseFactory
from valuecell.core.event.router import RouteResult, handle_status_update
-from valuecell.core.task import Task
+from valuecell.core.task.models import Task
from valuecell.core.types import BaseResponse, ConversationItemEvent
diff --git a/python/valuecell/core/event/tests/test_response_router.py b/python/valuecell/core/event/tests/test_response_router.py
index 5861ce147..151cd43fd 100644
--- a/python/valuecell/core/event/tests/test_response_router.py
+++ b/python/valuecell/core/event/tests/test_response_router.py
@@ -6,12 +6,12 @@
import pytest
from a2a.types import (
+ Message,
+ Role,
TaskState,
- TaskStatusUpdateEvent,
TaskStatus,
- Message,
+ TaskStatusUpdateEvent,
TextPart,
- Role,
)
from valuecell.core.event.router import (
@@ -20,7 +20,7 @@
SideEffectKind,
handle_status_update,
)
-from valuecell.core.task import Task
+from valuecell.core.task.models import Task
class TestSideEffectKind:
diff --git a/python/valuecell/core/plan/models.py b/python/valuecell/core/plan/models.py
index f647e092e..9e4d1690c 100644
--- a/python/valuecell/core/plan/models.py
+++ b/python/valuecell/core/plan/models.py
@@ -2,8 +2,7 @@
from pydantic import BaseModel, Field
-from valuecell.core.task import Task
-from valuecell.core.task.models import ScheduleConfig, TaskPattern
+from valuecell.core.task.models import ScheduleConfig, Task, TaskPattern
class ExecutionPlan(BaseModel):
diff --git a/python/valuecell/core/plan/planner.py b/python/valuecell/core/plan/planner.py
index 22e97c4ea..71909591b 100644
--- a/python/valuecell/core/plan/planner.py
+++ b/python/valuecell/core/plan/planner.py
@@ -20,7 +20,7 @@
from agno.db.in_memory import InMemoryDb
from valuecell.core.agent.connect import RemoteConnections
-from valuecell.core.task import Task, TaskStatus
+from valuecell.core.task.models import Task, TaskStatus
from valuecell.core.types import UserInput
from valuecell.utils import generate_uuid
from valuecell.utils.env import agent_debug_mode_enabled
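The fix follows one pattern throughout: import the leaf module that defines a class instead of the package `__init__`, whose re-exports (e.g. `TaskExecutor`) pull `valuecell.core.event` back in at import time. Schematically (the cycle description is inferred from the changed imports):

```python
# Before (import cycle): the `task` package __init__ re-exports TaskExecutor,
# which imports valuecell.core.event, which imported the `task` package again.
#   from valuecell.core.task import Task

# After (no cycle): depend only on the module that actually defines the class.
from valuecell.core.task.models import Task
```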
From 1db7f8a975e3904eeaed410ec018718994a30e36 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Fri, 24 Oct 2025 13:00:16 +0800
Subject: [PATCH 18/30] refactor: remove unused get_conversation_history method
and update ConversationService to use CoreConversationService
---
.../valuecell/core/coordinate/orchestrator.py | 24 -------------------
.../server/services/conversation_service.py | 23 ++++++++++++++----
2 files changed, 18 insertions(+), 29 deletions(-)
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index 09ae1a3d1..b2bd63ede 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -15,7 +15,6 @@
from valuecell.core.task import TaskExecutor
from valuecell.core.types import (
BaseResponse,
- ConversationItemEvent,
StreamResponseEvent,
UserInput,
)
@@ -141,29 +140,6 @@ async def emit(item: Optional[BaseResponse]):
# Best-effort: if producer already finished, nothing to do
# We deliberately do not cancel the producer to keep execution alive
- async def get_conversation_history(
- self,
- conversation_id: Optional[str] = None,
- event: Optional[ConversationItemEvent] = None,
- component_type: Optional[str] = None,
- ) -> list[BaseResponse]:
- """Return the persisted conversation history as a list of responses.
-
- Args:
- conversation_id: The conversation to retrieve history for.
- event: Optional filter to include only items with this event type.
- component_type: Optional filter to include only items with this component type.
-
- Returns:
- A list of `BaseResponse` instances reconstructed from persisted
- ConversationItems.
- """
- return await self.response_service.get_conversation_history(
- conversation_id=conversation_id,
- event=event,
- component_type=component_type,
- )
-
# ==================== Private Helper Methods ====================
async def _run_session(
diff --git a/python/valuecell/server/services/conversation_service.py b/python/valuecell/server/services/conversation_service.py
index 8702e4489..316e06223 100644
--- a/python/valuecell/server/services/conversation_service.py
+++ b/python/valuecell/server/services/conversation_service.py
@@ -7,7 +7,10 @@
SQLiteConversationStore,
SQLiteItemStore,
)
-from valuecell.core.coordinate.orchestrator import AgentOrchestrator
+from valuecell.core.conversation.service import (
+ ConversationService as CoreConversationService,
+)
+from valuecell.core.event.factory import ResponseFactory
from valuecell.server.api.schemas.conversation import (
ConversationDeleteData,
ConversationHistoryData,
@@ -31,7 +34,10 @@ def __init__(self):
self.conversation_manager = ConversationManager(
conversation_store=conversation_store, item_store=self.item_store
)
- self.orchestrator = AgentOrchestrator()
+ self.core_conversation_service = CoreConversationService(
+ manager=self.conversation_manager
+ )
+ self.response_factory = ResponseFactory()
async def get_conversation_list(
self, user_id: Optional[str] = None, limit: int = 10, offset: int = 0
@@ -90,11 +96,18 @@ async def get_conversation_history(
if not conversation:
raise ValueError(f"Conversation {conversation_id} not found")
- # Get conversation history using orchestrator's method
- base_responses = await self.orchestrator.get_conversation_history(
- conversation_id=conversation_id
+ # Retrieve persisted conversation items and rebuild responses
+ conversation_items = (
+ await self.core_conversation_service.get_conversation_items(
+ conversation_id=conversation_id
+ )
)
+ base_responses = [
+ self.response_factory.from_conversation_item(item)
+ for item in conversation_items
+ ]
+
# Convert BaseResponse objects to ConversationHistoryItem objects
history_items = []
for response in base_responses:
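With the orchestrator helper gone, history reconstruction needs only the two collaborators used above: the core `ConversationService` for persisted items and a `ResponseFactory` to rebuild typed responses. Condensed into one function:

```python
from valuecell.core.conversation.service import ConversationService
from valuecell.core.event.factory import ResponseFactory
from valuecell.core.types import BaseResponse


async def load_history(
    conversation_service: ConversationService, conversation_id: str
) -> list[BaseResponse]:
    """Rebuild typed responses from persisted conversation items."""
    factory = ResponseFactory()
    items = await conversation_service.get_conversation_items(
        conversation_id=conversation_id
    )
    return [factory.from_conversation_item(item) for item in items]
```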
From dfa2747673fdf3068db6676a4431771ce62343a3 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Fri, 24 Oct 2025 14:02:00 +0800
Subject: [PATCH 19/30] refactor: enhance CORE_ARCHITECTURE documentation with
Super Agent triage and updated service interactions
---
docs/CORE_ARCHITECTURE.md | 207 +++++++++++++++++++++++++++++---------
1 file changed, 159 insertions(+), 48 deletions(-)
diff --git a/docs/CORE_ARCHITECTURE.md b/docs/CORE_ARCHITECTURE.md
index 1cab813a6..4bc0c308e 100644
--- a/docs/CORE_ARCHITECTURE.md
+++ b/docs/CORE_ARCHITECTURE.md
@@ -4,34 +4,103 @@ This document explains how the modules under `valuecell/core/` collaborate at ru
## Highlights
-- Async, re-entrant orchestrator: `process_user_input` is a streaming async entrypoint that can pause for HITL and resume safely.
-- Planner with HITL: pauses on missing info/risky steps via `UserInputRequest` (asyncio.Event), resumes after user feedback to produce an adequate plan.
-- Streaming pipeline: `Response` → `ResponseBuffer` (buffered vs immediate) → `ResponseRouter` to UI and Store, with stable item IDs for partial aggregation.
+- Super Agent triage ahead of planning: a lightweight "Super Agent" analyzes the user input first and either answers directly or hands off an enriched query to the planner.
+- Async, re-entrant orchestrator: `process_user_input` streams responses and now runs planning/execution in a background producer so long-running work continues even if the client disconnects.
+- Planner with HITL: pauses on missing info/risky steps via `UserInputRequest`, resumes after user feedback to produce an adequate plan.
+- Streaming pipeline: A2A status events → `ResponseRouter` (map to BaseResponse) → `ResponseBuffer` (annotate/aggregate) → persisted to Store and streamed to UI, with stable item IDs for partial aggregation.
- Agent2Agent (A2A) integration: tasks call remote agents via `a2a-sdk`; status events drive routing; agents can be wrapped by lightweight decorators/servers.
- Conversation memory: in-memory/SQLite stores enable reproducible history, fast "resume from last", and auditability.
- Robustness: typed errors, side-effects (e.g., fail task) from router, and room for retry/backoff policies where appropriate.
+## Services interaction overview
+
+The diagram below focuses on how the orchestrator collaborates with the core services. It reflects the current code structure under `coordinate/`, `super_agent/`, `plan/`, `task/`, `event/`, and `conversation/`.
+
+```mermaid
+flowchart LR
+ subgraph UI[UI / Client]
+ end
+
+ O[Orchestrator]
+
+ CS[ConversationService]
+ SA[SuperAgentService]
+ PS[PlanService]
+ TE[TaskExecutor]
+ RC[RemoteConnections]
+
+ subgraph ES[EventResponseService]
+ ROuter[event → responses]
+ RBuf[annotate/aggregate]
+ end
+
+ Store[(Conversation Store)]
+
+ %% Entry & conversation lifecycle
+ UI -->|user_input| O
+ O -->|ensure/load| CS
+
+ %% Super Agent triage
+ O -->|run| SA
+ SA -- ANSWER --> O
+ O -->|emit message| ES
+ ES --> RBuf
+ RBuf --> Store
+ O --> UI
+
+ SA -- HANDOFF(enriched) --> PS
+
+ %% Planner + HITL
+ O -->|start_planning_task| PS
+ PS -- UserInputRequest --> O
+ O -->|require_user_input / activate| CS
+ PS -- ExecutionPlan --> O
+
+ %% Execution
+ O -->|execute_plan| TE
+ TE -->|send_message| RC
+ RC -- TaskStatusUpdateEvent --> TE
+ TE -->|route_task_status| ES
+ ES --> ROuter
+ ROuter --> ES
+ ES --> RBuf
+ RBuf --> Store
+ O -->|stream annotated responses| UI
+```
+
+Key points:
+
+- Orchestrator is the hub: it calls SuperAgentService, PlanService, TaskExecutor, and uses ConversationService to manage statuses.
+- EventResponseService performs two roles:
+ - Routing: maps remote task status events to typed BaseResponses via ResponseRouter.
+ - Buffering & persistence: annotates with stable item IDs via ResponseBuffer and writes to the conversation store.
+- Super Agent can short-circuit with a direct answer; otherwise it hands off an enriched query to the planner.
+
## High-level flow
-The orchestration loop ingests a user input, plans next steps, optionally requests human input to resolve ambiguity, and then executes tasks via remote agents (Agent2Agent, A2A). Responses stream back incrementally and are routed to the appropriate sinks (UI, logs, stores).
+The orchestration loop ingests a user input, lets the Super Agent triage and possibly answer or enrich the request, then plans next steps (with HITL when needed) and executes tasks via remote agents (A2A). Responses stream back incrementally and are routed to the appropriate sinks (UI, logs, stores).
```mermaid
flowchart TD
- U[User Input] --> O[Orchestrator
- process_user_input]
- O -->|analyze input + context| P[Planner]
+ U[User Input] --> O[Orchestrator process_user_input]
+ O --> SA[Super Agent triage]
+ SA -->|answer directly| SR1[Responses]
+ SR1 --> RB1[ResponseBuffer]
+ RB1 --> UI
+ RB1 --> ST[Store]
+ SA -->|handoff enriched| P[Planner]
P -->|adequate plan| PL[Plan]
P -->|needs clarification| HITL[HITL: clarification / approval]
- HITL --> UI[UI / Operator]
+ HITL --> UI
UI -->|feedback| P
- PL --> T[Tasks]
+ PL --> T[Task Executor]
T --> A2A[A2A calls]
A2A --> RA[Remote Agents]
- RA --> SR[Streamed Responses]
- SR --> RB[ResponseBuffer]
- RB --> RR[ResponseRouter]
- RR --> UI
- RR --> ST[Store]
+ RA --> RR2[ResponseRouter]
+ RR2 --> SR2[Responses]
+ SR2 --> RB2[ResponseBuffer]
+ RB2 --> UI
+ RB2 --> ST[Store]
```
### Sequence: async and reentrancy
@@ -41,6 +110,7 @@ sequenceDiagram
autonumber
participant U as User/UI
participant O as Orchestrator
+ participant SA as Super Agent
participant CS as ConversationStore/ItemStore
participant P as Planner
participant RB as ResponseBuffer
@@ -50,42 +120,53 @@ sequenceDiagram
participant RA as Remote Agent
U->>O: user_input(query, meta)
- O->>CS: load conversation context
+ O->>CS: ensure/load conversation
CS-->>O: context/items
- O->>P: create_plan(user_input, callback)
- alt needs clarification
- P-->>O: UserInputRequest(prompt)
- O-->>U: PLAN_REQUIRE_USER_INPUT(prompt)
- U->>O: provide_user_input(response)
- O->>P: resume with response
+ O->>SA: run(user_input)
+ alt Super Agent answers
+ SA-->>O: decision=ANSWER, content
+ O->>RB: annotate/ingest(message)
+ RB-->>ST: persist SaveItem(s)
+ O-->>U: stream
+ O-->>U: done
+ else Super Agent handoff
+ SA-->>O: decision=HANDOFF_TO_PLANNER, enriched_query
+ O->>P: create_plan(enriched_query, callback)
+ alt needs clarification
+ P-->>O: UserInputRequest(prompt)
+ O-->>U: PLAN_REQUIRE_USER_INPUT(prompt)
+ U->>O: provide_user_input(response)
+ O->>P: resume with response
+ end
+ P-->>O: ExecutionPlan(tasks)
+ loop each task
+ O->>A2A: execute(task)
+ A2A->>RA: request(stream)
+ RA-->>O: TaskStatusUpdateEvent (streaming)
+ O->>RR: route(status→responses)
+ RR-->>O: BaseResponse(s)
+ O->>RB: annotate/ingest(responses)
+ RB-->>ST: persist SaveItem(s)
+ O-->>U: stream to UI
+ end
+ O-->>U: done
end
- P-->>O: ExecutionPlan(tasks)
- loop each task
- O->>A2A: execute(task)
- A2A->>RA: request(stream)
- RA-->>O: TaskStatusUpdateEvent (streaming)
- O->>RB: annotate/ingest(resp)
- RB-->>O: SaveItem(s)
- O->>RR: route(resp)
- RR-->>U: stream to UI
- RR-->>ST: persist SaveItem(s)
- end
- O-->>U: done
```
## Orchestrator: process_user_input
-The orchestrator entrypoint (conceptually `process_user_input`) receives a user message (plus context IDs) and coordinates the entire lifecycle:
+The orchestrator entrypoint (`coordinate/orchestrator.py::AgentOrchestrator.process_user_input`) receives a user message (plus context IDs) and coordinates the entire lifecycle:
-1. Delegate to the Planner to derive an actionable plan
-2. If the plan needs confirmation or extra parameters, trigger Human-in-the-Loop (HITL)
-3. Execute the plan as one or more tasks
+1. Delegate to the Super Agent to triage the request: directly answer simple queries or enrich the query and hand off to planning
+2. Run the Planner to derive an actionable plan; if the plan needs confirmation or extra parameters, trigger Human-in-the-Loop (HITL)
+3. Execute the plan via the Task Executor
4. Stream partial responses while executing
5. Persist results and emit final responses
-The orchestrator is async and re-entrant:
+The orchestrator is async and re-entrant, and now decouples producers/consumers:
- All I/O boundaries (`await`) are explicit to support concurrency
+- A background producer continues planning/execution even if the client disconnects; the async generator simply drains a per-call queue
- If a human confirmation is required, the orchestrator can pause, surface a checkpoint, and resume later when feedback arrives
- Reentrancy is supported by idempotent response buffering and conversation state: resuming continues from the last acknowledged step
@@ -93,15 +174,33 @@ The orchestrator is async and re-entrant:
Responses are produced incrementally while tasks execute:
-- `Response` represents typed chunks (tokens, tool results, notifications)
-- `ResponseBuffer` accumulates and aggregates partials into stable snapshots
-- `ResponseRouter` fans out to multiple sinks (UI streams, logs, stores)
+- Remote agent status events are first mapped by `ResponseRouter` into typed `Response` objects (message chunks, reasoning, tool results, components)
+- `ResponseBuffer` annotates with stable item IDs and aggregates partials, and `EventResponseService` persists them to the conversation store
+- The orchestrator streams the annotated responses to the UI; persistence and streaming are decoupled from the client connection
This allows the UI to render partial progress while long-running steps (such as remote agent calls) are still in flight.
+## Super Agent: triage before planning
+
+The Super Agent performs a quick, tool-augmented triage of the user input to decide whether it can answer directly or should hand off to the planner.
+
+Responsibilities:
+
+- Detect simple Q&A or retrieval-style requests that can be answered immediately
+- Optionally enrich/normalize the query and provide a concise restatement for planning
+- Record minimal rationale for auditability
+
+Under the hood:
+
+- `super_agent/core.py` defines the `SuperAgent`, decision schema (`SuperAgentOutcome`) and tool wiring
+- `super_agent/prompts.py` contains the instruction and expected output schema
+- `super_agent/service.py` exposes a simple façade used by the orchestrator
+
+If the decision is ANSWER, the orchestrator streams the content and returns. If the decision is HANDOFF_TO_PLANNER, the enriched query is passed to the planner.
+
## Planner: intent → plan (with HITL)
-The Planner turns a natural-language user input into an executable plan. Its responsibilities include:
+The Planner turns a natural-language user input (often enriched by the Super Agent) into an executable plan. Its responsibilities include:
- Interpreting the user’s goal and available agent capabilities
- Identifying missing parameters and ambiguities
@@ -115,19 +214,26 @@ Human-in-the-loop is integrated into planning:
Under the hood:
-- `planner.py` encapsulates the decision logic
-- `planner_prompts.py` centralizes prompt templates (when LLM-based planning is used)
-- `coordinate/models.py` defines plan/step data models used by both planner and orchestrator
+- `plan/planner.py` encapsulates the decision logic (`ExecutionPlanner` and `UserInputRequest`)
+- `plan/prompts.py` centralizes prompt templates (when LLM-based planning is used)
+- `plan/models.py` defines plan/step data models, consumed by the orchestrator and executor
+- `plan/service.py` manages the planner lifecycle and the pending user-input registry
## Task execution
-After planning, the orchestrator executes each task. A task is an atomic unit that typically invokes a remote agent to perform work.
+After planning, the Task Executor runs each task. A task is an atomic unit that typically invokes a remote agent to perform work. Scheduled tasks are supported and can re-run according to their schedule; streaming output is accumulated and summarized into a final scheduled-task result.
Execution characteristics:
- Tasks are awaited asynchronously; independent tasks may run concurrently when safe
- Each task emits structured responses (tool results, logs, progress) as it runs
- Failures are converted into typed errors and can trigger retries or compensating steps (policy-dependent)
+- When the Super Agent hands off to a specific sub-agent, start/end components are emitted to mark that sub-agent's conversation window
+
+Under the hood:
+
+- `task/executor.py` streams execution, integrates scheduled task accumulation, and routes A2A events through the response service
+- `task/service.py` persists and transitions task state; `task/models.py` defines the task’s shape
The conversation and item stores record inputs/outputs for reproducibility and auditing.
@@ -163,9 +269,11 @@ This memory layer underpins reentrancy and auditability.
## Async & reentrancy details
-- All external calls (planning, remote agents, storage) are awaited
+- All external calls (super-agent triage, planning, remote agents, storage) are awaited
+- A background producer runs independently of the client connection; consumers can cancel without stopping execution
- `ResponseBuffer` enables idempotent aggregation of partial output so a resumed session can safely replay or continue
- Orchestrator checkpoints (HITL) are modeled as explicit yield points; upon resumption, the same context IDs lead the flow to continue from the next step
+- Execution contexts support validation (user consistency, TTL) and cleanup of expired sessions
- Backpressure: routers can apply flow control when sinks are slow
## Error handling & resilience
@@ -173,9 +281,11 @@ This memory layer underpins reentrancy and auditability.
Typical edge cases and policies:
- Missing parameters → HITL clarification
+- Super Agent errors → surfaced as structured failures; fallback to planner handoff can be policy-defined
- Planner errors → structured failure with user-facing guidance
- Agent timeouts → retry/backoff policies; partial results remain in the buffer
- Transport errors → surfaced via typed exceptions; orchestration may retry or abort
+- Invalid or expired execution contexts → cancelled safely with user-facing messages
- Consistency → conversation records ensure inputs/outputs are durable
## Extensibility
@@ -183,8 +293,9 @@ Typical edge cases and policies:
- Add a new agent: create a capability card, implement a decorated async handler, register/connect it
- Add a new store: implement the `ItemStore`/`ConversationStore` interfaces
- Add a new transport: integrate a compatible adapter and update the A2A client wiring
+- Customize the Super Agent: adjust prompts/decision logic or tools; control when to answer vs handoff
- Customize planning: extend planner prompts/logic and enrich plan models
---
-In short, the orchestrator coordinates an async, re-entrant loop of plan → execute → stream, with human checkpoints where appropriate. Tasks talk A2A to remote agents, and the response pipeline keeps users informed in real time while maintaining durable, reproducible state.
+In short, the orchestrator coordinates an async, re-entrant loop of triage → plan → execute → stream, with human checkpoints where appropriate. The Super Agent can answer or enrich before planning, tasks talk A2A to remote agents, and the response pipeline keeps users informed in real time while maintaining durable, reproducible state.
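The producer/consumer decoupling described above boils down to a background task feeding a per-call queue. Below is a minimal asyncio sketch of that pattern; it is illustrative, not the orchestrator's actual implementation (the `produce` callable and the `None` sentinel are assumptions):

```python
import asyncio
from typing import AsyncGenerator, Awaitable, Callable


async def stream_from_background_producer(
    produce: Callable[[asyncio.Queue], Awaitable[None]],
) -> AsyncGenerator[object, None]:
    """Drain a per-call queue while a background producer keeps running.

    `produce` stands in for planning/execution: it puts responses on the
    queue and signals completion with a `None` sentinel.
    """
    queue: asyncio.Queue = asyncio.Queue()
    producer = asyncio.create_task(produce(queue))
    try:
        while True:
            item = await queue.get()
            if item is None:  # sentinel: producer finished
                break
            yield item
    finally:
        # Deliberately leave `producer` running: if the consumer stops early
        # (e.g. client disconnect), planning/execution still completes.
        pass
```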
From 457d5a6194d9cf9f265dde831e798a73817497b0 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Mon, 27 Oct 2025 10:39:58 +0800
Subject: [PATCH 20/30] refactor: update guidance_message for scheduled task
confirmations to enhance clarity and user experience
---
python/valuecell/core/plan/prompts.py | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/python/valuecell/core/plan/prompts.py b/python/valuecell/core/plan/prompts.py
index 3f3fa1638..c879d335a 100644
--- a/python/valuecell/core/plan/prompts.py
+++ b/python/valuecell/core/plan/prompts.py
@@ -51,6 +51,11 @@
6) Agent targeting policy
- Trust the specified agent's capabilities; do not over-validate or split into multiple tasks.
+
+7) Language & tone
+- Always respond in the user's language. Detect language from the user's query if no explicit locale is provided.
+- `guidance_message` MUST be written in the user's language.
+- For Chinese users, use concise, polite phrasing and avoid mixed-language text.
"""
@@ -74,6 +79,13 @@
- If the request suggests recurring monitoring or scheduled tasks, return `adequate: false` with a confirmation question in `guidance_message`.
- When waiting for confirmation: check conversation history to detect if the previous response was a confirmation request. If yes, and user responds with confirmation words (yes/ok/confirm/proceed), use the ORIGINAL query from history to create the task, NOT the confirmation response itself.
- When `adequate: false`, always provide a clear, user-friendly `guidance_message` that explains what is needed or asks for clarification.
+
+
+- When confirming a scheduled/recurring task, the `guidance_message` MUST follow the user's language.
+- Use this template (translate it into the user's language as needed):
+ To better set up the {title} task, please confirm the update frequency: {schedule_config}
+- Keep the message short and clear; do not include code blocks or markdown.
+
@@ -224,7 +236,7 @@
"tasks": [],
"adequate": false,
"reason": "Scheduled task requires user confirmation.",
- "guidance_message": "I understand you want to check Tesla's stock price every hour and get alerts on significant changes. This will set up a recurring task that runs automatically every 60 minutes. Should I proceed with this scheduled task?"
+ "guidance_message": "To better set up the Tesla price check task, please confirm the update frequency: every 60 minutes"
}
// Step 2: User confirms
@@ -268,7 +280,7 @@
"tasks": [],
"adequate": false,
"reason": "Scheduled task requires user confirmation.",
- "guidance_message": "I understand you want to analyze market trends every day at 9:00 AM. This will set up a recurring task that runs automatically at the same time each day. Should I proceed with this scheduled task?"
+ "guidance_message": "To better set up the Market trends task, please confirm the update frequency: daily at 09:00"
}
// Step 2: User confirms
From 5c238ec0a39654edbc139b5de37f000f200f87fb Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Mon, 27 Oct 2025 11:53:39 +0800
Subject: [PATCH 21/30] refactor: add pagination support to
get_conversation_items method in ConversationManager and ConversationService
---
python/valuecell/core/conversation/manager.py | 15 +++++++++++----
python/valuecell/core/conversation/service.py | 14 +++++++++++++-
python/valuecell/core/event/service.py | 17 +----------------
3 files changed, 25 insertions(+), 21 deletions(-)
diff --git a/python/valuecell/core/conversation/manager.py b/python/valuecell/core/conversation/manager.py
index 3173842c1..11f2a54e6 100644
--- a/python/valuecell/core/conversation/manager.py
+++ b/python/valuecell/core/conversation/manager.py
@@ -139,17 +139,24 @@ async def get_conversation_items(
conversation_id: Optional[str] = None,
event: Optional[ConversationItemEvent] = None,
component_type: Optional[str] = None,
+ limit: Optional[int] = None,
+ offset: Optional[int] = None,
) -> List[ConversationItem]:
"""Get items for a conversation with optional filtering and pagination
Args:
conversation_id: Conversation ID
- limit: Maximum number of items to return
- offset: Number of items to skip
- role: Filter by specific role (optional)
+ event: Filter by specific event (optional)
+ component_type: Filter by component type (optional)
+ limit: Maximum number of items to return (optional, default: all)
+ offset: Number of items to skip (optional, default: 0)
"""
return await self.item_store.get_items(
- conversation_id=conversation_id, event=event, component_type=component_type
+ conversation_id=conversation_id,
+ event=event,
+ component_type=component_type,
+ limit=limit,
+ offset=offset or 0,
)
async def get_latest_item(self, conversation_id: str) -> Optional[ConversationItem]:
diff --git a/python/valuecell/core/conversation/service.py b/python/valuecell/core/conversation/service.py
index c0332dc0b..56b6896bf 100644
--- a/python/valuecell/core/conversation/service.py
+++ b/python/valuecell/core/conversation/service.py
@@ -104,11 +104,23 @@ async def get_conversation_items(
conversation_id: Optional[str] = None,
event: Optional[ConversationItemEvent] = None,
component_type: Optional[str] = None,
+ limit: Optional[int] = None,
+ offset: Optional[int] = None,
) -> List[ConversationItem]:
- """Load conversation items with optional filtering."""
+ """Load conversation items with optional filtering and pagination.
+
+ Args:
+ conversation_id: Filter by conversation ID (optional)
+ event: Filter by event type (optional)
+ component_type: Filter by component type (optional)
+ limit: Maximum number of items to return (optional, default: all)
+ offset: Number of items to skip (optional, default: 0)
+ """
return await self._manager.get_conversation_items(
conversation_id=conversation_id,
event=event,
component_type=component_type,
+ limit=limit,
+ offset=offset,
)
diff --git a/python/valuecell/core/event/service.py b/python/valuecell/core/event/service.py
index eb824002f..b4fa81c39 100644
--- a/python/valuecell/core/event/service.py
+++ b/python/valuecell/core/event/service.py
@@ -9,7 +9,7 @@
from valuecell.core.event.factory import ResponseFactory
from valuecell.core.event.router import RouteResult, handle_status_update
from valuecell.core.task.models import Task
-from valuecell.core.types import BaseResponse, ConversationItemEvent
+from valuecell.core.types import BaseResponse
class EventResponseService:
@@ -56,21 +56,6 @@ async def flush_task_response(
items = self._buffer.flush_task(conversation_id, thread_id, task_id)
await self._persist_items(items)
- async def get_conversation_history(
- self,
- conversation_id: str | None = None,
- event: ConversationItemEvent | None = None,
- component_type: str | None = None,
- ) -> list[BaseResponse]:
- """Load persisted conversation items and rebuild responses."""
-
- items = await self._conversation_service.get_conversation_items(
- conversation_id=conversation_id,
- event=event,
- component_type=component_type,
- )
- return [self._factory.from_conversation_item(item) for item in items]
-
async def route_task_status(self, task: Task, thread_id: str, event) -> RouteResult:
"""Route a task status update without side-effects."""
From 9fd8323a656a8298a7611a0f32ea2d7df90d1cb4 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Mon, 27 Oct 2025 15:36:39 +0800
Subject: [PATCH 22/30] refactor: add metadata field to conversation items and
update related components for enhanced data handling
---
.../valuecell/core/conversation/item_store.py | 7 +-
python/valuecell/core/conversation/manager.py | 17 ++-
python/valuecell/core/conversation/service.py | 21 +++-
.../conversation/tests/test_conv_manager.py | 6 +-
.../tests/test_sqlite_item_store.py | 8 ++
python/valuecell/core/event/buffer.py | 8 +-
python/valuecell/core/event/factory.py | 78 ++++++++++++++
python/valuecell/core/event/service.py | 1 +
python/valuecell/core/task/executor.py | 101 +++++++++---------
python/valuecell/core/types.py | 6 ++
.../server/api/schemas/conversation.py | 1 +
python/valuecell/server/db/init_db.py | 1 +
.../server/services/conversation_service.py | 4 +
13 files changed, 199 insertions(+), 60 deletions(-)
diff --git a/python/valuecell/core/conversation/item_store.py b/python/valuecell/core/conversation/item_store.py
index 234a3626a..05dd838c2 100644
--- a/python/valuecell/core/conversation/item_store.py
+++ b/python/valuecell/core/conversation/item_store.py
@@ -133,6 +133,7 @@ async def _ensure_initialized(self) -> None:
task_id TEXT,
payload TEXT,
agent_name TEXT,
+ metadata TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
"""
@@ -157,6 +158,7 @@ def _row_to_item(row: sqlite3.Row) -> ConversationItem:
task_id=row["task_id"],
payload=row["payload"],
agent_name=row["agent_name"],
+ metadata=row["metadata"],
)
async def save_item(self, item: ConversationItem) -> None:
@@ -167,8 +169,8 @@ async def save_item(self, item: ConversationItem) -> None:
await db.execute(
"""
INSERT OR REPLACE INTO conversation_items (
- item_id, role, event, conversation_id, thread_id, task_id, payload, agent_name
- ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+ item_id, role, event, conversation_id, thread_id, task_id, payload, agent_name, metadata
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
(
item.item_id,
@@ -179,6 +181,7 @@ async def save_item(self, item: ConversationItem) -> None:
item.task_id,
item.payload,
item.agent_name,
+ item.metadata,
),
)
await db.commit()
diff --git a/python/valuecell/core/conversation/manager.py b/python/valuecell/core/conversation/manager.py
index 11f2a54e6..7e405e5f5 100644
--- a/python/valuecell/core/conversation/manager.py
+++ b/python/valuecell/core/conversation/manager.py
@@ -1,9 +1,10 @@
from datetime import datetime
-from typing import List, Optional
+from typing import Dict, List, Optional
from valuecell.core.types import (
ConversationItem,
ConversationItemEvent,
+ ResponseMetadata,
ResponsePayload,
Role,
)
@@ -81,9 +82,10 @@ async def add_item(
conversation_id: str,
thread_id: Optional[str] = None,
task_id: Optional[str] = None,
- payload: ResponsePayload = None,
+ payload: Optional[ResponsePayload] = None,
item_id: Optional[str] = None,
agent_name: Optional[str] = None,
+ metadata: Optional[ResponseMetadata] = None,
) -> Optional[ConversationItem]:
"""Add item to conversation
@@ -95,6 +97,8 @@ async def add_item(
task_id: Associated task ID (optional)
payload: Item payload
item_id: Item ID (optional)
+ agent_name: Agent name (optional)
+            metadata: Additional structured metadata (optional)
"""
# Verify conversation exists
conversation = await self.get_conversation(conversation_id)
@@ -114,6 +118,14 @@ async def add_item(
except Exception:
payload_str = None
+ # Serialize metadata to JSON string
+ metadata_str = "{}"
+ if metadata is not None:
+ try:
+ metadata_str = metadata.model_dump_json(exclude_none=True)
+ except Exception:
+ pass
+
item = ConversationItem(
item_id=item_id or generate_item_id(),
role=role,
@@ -123,6 +135,7 @@ async def add_item(
task_id=task_id,
payload=payload_str,
agent_name=agent_name,
+ metadata=metadata_str,
)
# Save item directly to item store
diff --git a/python/valuecell/core/conversation/service.py b/python/valuecell/core/conversation/service.py
index 56b6896bf..cdadefb94 100644
--- a/python/valuecell/core/conversation/service.py
+++ b/python/valuecell/core/conversation/service.py
@@ -2,13 +2,14 @@
from __future__ import annotations
-from typing import List, Optional, Tuple
+from typing import Dict, List, Optional, Tuple
from valuecell.core.conversation.manager import ConversationManager
from valuecell.core.conversation.models import Conversation, ConversationStatus
from valuecell.core.types import (
ConversationItem,
ConversationItemEvent,
+ ResponseMetadata,
ResponsePayload,
Role,
)
@@ -82,11 +83,24 @@ async def add_item(
conversation_id: str,
thread_id: Optional[str] = None,
task_id: Optional[str] = None,
- payload: ResponsePayload = None,
+ payload: Optional[ResponsePayload] = None,
item_id: Optional[str] = None,
agent_name: Optional[str] = None,
+ metadata: Optional[ResponseMetadata] = None,
) -> Optional[ConversationItem]:
- """Persist a conversation item via the underlying manager."""
+ """Persist a conversation item via the underlying manager.
+
+ Args:
+ role: Item role (USER, AGENT, SYSTEM)
+ event: Item event
+ conversation_id: Conversation ID
+ thread_id: Thread ID (optional)
+ task_id: Task ID (optional)
+ payload: Item payload
+ item_id: Item ID (optional)
+ agent_name: Agent name (optional)
+            metadata: Additional structured metadata (optional)
+ """
return await self._manager.add_item(
role=role,
@@ -97,6 +111,7 @@ async def add_item(
payload=payload,
item_id=item_id,
agent_name=agent_name,
+ metadata=metadata,
)
async def get_conversation_items(
diff --git a/python/valuecell/core/conversation/tests/test_conv_manager.py b/python/valuecell/core/conversation/tests/test_conv_manager.py
index a7606245e..3d17ce580 100644
--- a/python/valuecell/core/conversation/tests/test_conv_manager.py
+++ b/python/valuecell/core/conversation/tests/test_conv_manager.py
@@ -371,7 +371,11 @@ async def test_get_conversation_items(self):
assert result == items
manager.item_store.get_items.assert_called_once_with(
- conversation_id="conv-123", event=None, component_type=None
+ conversation_id="conv-123",
+ event=None,
+ component_type=None,
+ limit=None,
+ offset=0,
)
@pytest.mark.asyncio
diff --git a/python/valuecell/core/conversation/tests/test_sqlite_item_store.py b/python/valuecell/core/conversation/tests/test_sqlite_item_store.py
index 65cbcfc3f..b5ea7361a 100644
--- a/python/valuecell/core/conversation/tests/test_sqlite_item_store.py
+++ b/python/valuecell/core/conversation/tests/test_sqlite_item_store.py
@@ -22,6 +22,7 @@ async def test_sqlite_item_store_basic_crud():
thread_id="t1",
task_id=None,
payload='{"a":1}',
+ metadata="{}",
)
i2 = ConversationItem(
item_id="i2",
@@ -31,6 +32,7 @@ async def test_sqlite_item_store_basic_crud():
thread_id="t1",
task_id=None,
payload='{"a":1}',
+ metadata="{}",
)
await store.save_item(i1)
await store.save_item(i2)
@@ -81,6 +83,7 @@ async def test_sqlite_item_store_get_items_all_conversations():
thread_id="t1",
task_id=None,
payload='{"msg": "conv1 item1"}',
+ metadata="{}",
),
ConversationItem(
item_id="c1-i2",
@@ -91,6 +94,7 @@ async def test_sqlite_item_store_get_items_all_conversations():
task_id=None,
payload='{"msg": "conv1 item2"}',
agent_name="agent-alpha",
+ metadata="{}",
),
ConversationItem(
item_id="c2-i1",
@@ -100,6 +104,7 @@ async def test_sqlite_item_store_get_items_all_conversations():
thread_id="t2",
task_id=None,
payload='{"msg": "conv2 item1"}',
+ metadata="{}",
),
]
@@ -142,6 +147,7 @@ async def test_sqlite_item_store_filters_and_pagination():
thread_id="t1",
task_id=None,
payload='{"a":1}',
+ metadata="{}",
),
ConversationItem(
item_id="a2",
@@ -151,6 +157,7 @@ async def test_sqlite_item_store_filters_and_pagination():
thread_id="t1",
task_id=None,
payload='{"component_type":"card","a":2}',
+ metadata="{}",
),
ConversationItem(
item_id="a3",
@@ -160,6 +167,7 @@ async def test_sqlite_item_store_filters_and_pagination():
thread_id="t2",
task_id=None,
payload='{"component_type":"chart","a":3}',
+ metadata="{}",
),
]
diff --git a/python/valuecell/core/event/buffer.py b/python/valuecell/core/event/buffer.py
index 51749929e..98ff9a154 100644
--- a/python/valuecell/core/event/buffer.py
+++ b/python/valuecell/core/event/buffer.py
@@ -9,6 +9,8 @@
BaseResponseDataPayload,
CommonResponseEvent,
NotifyResponseEvent,
+ ResponseMetadata,
+ ResponsePayload,
Role,
StreamResponseEvent,
SystemResponseEvent,
@@ -24,9 +26,10 @@ class SaveItem:
conversation_id: str
thread_id: Optional[str]
task_id: Optional[str]
- payload: Optional[BaseModel]
+ payload: Optional[ResponsePayload]
role: Role = Role.AGENT
agent_name: Optional[str] = None
+ metadata: Optional[ResponseMetadata] = None
# conversation_id, thread_id, task_id, event
@@ -241,6 +244,7 @@ def _finalize_keys(self, keys: List[BufferKey]) -> List[SaveItem]:
payload=payload,
role=entry.role or Role.AGENT,
agent_name=entry.agent_name,
+ metadata=None, # Buffered entries don't have metadata
)
)
if key in self._buffers:
@@ -288,6 +292,7 @@ def _make_save_item_from_response(self, resp: BaseResponse) -> SaveItem:
payload=bm,
role=data.role,
agent_name=data.agent_name,
+ metadata=data.metadata,
)
def _make_save_item(
@@ -306,4 +311,5 @@ def _make_save_item(
payload=payload,
role=data.role,
agent_name=data.agent_name,
+ metadata=data.metadata,
)
diff --git a/python/valuecell/core/event/factory.py b/python/valuecell/core/event/factory.py
index eda8f4638..ab4fd6339 100644
--- a/python/valuecell/core/event/factory.py
+++ b/python/valuecell/core/event/factory.py
@@ -2,11 +2,13 @@
from typing_extensions import Literal
+from valuecell.core.task.models import Task
from valuecell.core.types import (
BaseResponseDataPayload,
CommonResponseEvent,
ComponentGeneratorResponse,
ComponentGeneratorResponseDataPayload,
+ ComponentType,
ConversationItem,
ConversationStartedResponse,
DoneResponse,
@@ -16,6 +18,7 @@
PlanRequireUserInputResponse,
ReasoningResponse,
Role,
+ ScheduledTaskComponentContent,
StreamResponseEvent,
SystemFailedResponse,
SystemResponseEvent,
@@ -87,6 +90,18 @@ def parse_payload_as(model_cls):
except Exception:
return None
+ # Parse metadata
+ def parse_metadata():
+ raw_metadata = item.metadata
+ if not raw_metadata:
+ return None
+ try:
+ import json
+
+ return json.loads(raw_metadata)
+ except Exception:
+ return None
+
# Base UnifiedResponseData builder
def make_data(payload=None):
return UnifiedResponseData(
@@ -97,6 +112,7 @@ def make_data(payload=None):
role=role,
item_id=item.item_id,
agent_name=item.agent_name,
+ metadata=parse_metadata(),
)
# ----- System-level events -----
@@ -484,6 +500,7 @@ def component_generator(
component_type: str,
component_id: Optional[str] = None,
agent_name: Optional[str] = None,
+ metadata: Optional[dict] = None,
) -> ComponentGeneratorResponse:
"""Create a ComponentGeneratorResponse for UI component generation.
@@ -511,5 +528,66 @@ def component_generator(
role=Role.AGENT,
item_id=component_id or generate_item_id(),
agent_name=agent_name,
+ metadata=metadata,
),
)
+
+ def schedule_task_controller_component(
+ self,
+ conversation_id: str,
+ thread_id: str,
+ task: Task,
+ ) -> ComponentGeneratorResponse:
+ """Create a ComponentGeneratorResponse for a task controller component.
+
+ Args:
+ conversation_id: Conversation id.
+ thread_id: Thread id.
+            task: The scheduled Task. Its task_id and title are serialized
+                into the component content, its agent_name is used as the
+                emitting agent, and its title is attached to the response
+                metadata as task_title.
+
+ Returns:
+ ComponentGeneratorResponse wrapping the payload.
+ """
+ return self.component_generator(
+ conversation_id=conversation_id,
+ thread_id=thread_id,
+ task_id=task.task_id,
+ content=ScheduledTaskComponentContent(
+ task_id=task.task_id,
+ task_title=task.title,
+ ).model_dump_json(exclude_none=True),
+ component_type=ComponentType.SCHEDULED_TASK_CONTROLLER.value,
+ agent_name=task.agent_name,
+ metadata={
+ "task_title": task.title,
+ },
+ )
+
+ def schedule_task_result_component(
+ self,
+ task: Task,
+ content: str,
+ ) -> ComponentGeneratorResponse:
+ """Create a ComponentGeneratorResponse for a task result component.
+
+ Args:
+ task: The Task instance.
+            content: Serialized content representing the task result.
+
+ Returns:
+ ComponentGeneratorResponse wrapping the payload.
+ """
+ return self.component_generator(
+ conversation_id=task.conversation_id,
+ thread_id=task.thread_id,
+ task_id=task.task_id,
+ content=content,
+ component_type=ComponentType.SCHEDULED_TASK_RESULT.value,
+ agent_name=task.agent_name,
+ metadata={
+ "task_title": task.title,
+ },
+ )
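A minimal usage sketch for the two new factory helpers. The `Task` fields used here mirror the unit tests added later in this series; the IDs and titles are placeholders:

```python
from valuecell.core.event.factory import ResponseFactory
from valuecell.core.task.models import Task

factory = ResponseFactory()
task = Task(
    task_id="task-123",
    title="Morning report",
    query="run",
    conversation_id="conv-1",
    user_id="user-1",
    agent_name="demo_agent",
)

# Controller component: content carries task_id/title, metadata carries task_title
controller = factory.schedule_task_controller_component("conv-1", "thread-1", task)
assert controller.data.metadata == {"task_title": "Morning report"}

# Result component: the caller supplies the serialized result content
result = factory.schedule_task_result_component(task, content='{"result": 1}')
assert result.data.agent_name == "demo_agent"
```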
diff --git a/python/valuecell/core/event/service.py b/python/valuecell/core/event/service.py
index b4fa81c39..39f8a43e3 100644
--- a/python/valuecell/core/event/service.py
+++ b/python/valuecell/core/event/service.py
@@ -76,4 +76,5 @@ async def _persist_items(self, items: list[SaveItem]) -> None:
payload=item.payload,
item_id=item.item_id,
agent_name=item.agent_name,
+ metadata=item.metadata,
)
diff --git a/python/valuecell/core/task/executor.py b/python/valuecell/core/task/executor.py
index 926d663f3..e9872f5b8 100644
--- a/python/valuecell/core/task/executor.py
+++ b/python/valuecell/core/task/executor.py
@@ -81,20 +81,14 @@ def finalize(self, response_factory: ResponseFactory) -> Optional[BaseResponse]:
content = "Task completed without output."
component_payload = ScheduledTaskComponentContent(
- task_id=self._task.task_id,
- task_title=self._task.title,
result=content,
create_time=datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S"),
)
component_payload_json = component_payload.model_dump_json(exclude_none=True)
- return response_factory.component_generator(
- conversation_id=self._task.conversation_id,
- thread_id=self._task.thread_id,
- task_id=self._task.task_id,
+ return response_factory.schedule_task_result_component(
+ task=self._task,
content=component_payload_json,
- component_type=ComponentType.SCHEDULED_TASK_RESULT.value,
- agent_name=self._task.agent_name,
)
@@ -140,23 +134,15 @@ async def execute_plan(
conversation_id=task.conversation_id,
agent_name=task.agent_name,
)
- component_payload = json.dumps(
- {
- "conversation_id": task.conversation_id,
- "agent_name": task.agent_name,
- "phase": SubagentConversationPhase.START.value,
- }
- )
- component = self._response_service.factory.component_generator(
- conversation_id=plan.conversation_id,
- thread_id=thread_id,
- task_id=task.task_id,
- content=component_payload,
- component_type=ComponentType.SUBAGENT_CONVERSATION.value,
- component_id=subagent_component_id,
- agent_name=task.agent_name,
+
+ # Emit subagent conversation start component
+ yield await self._emit_subagent_conversation_component(
+ plan.conversation_id,
+ thread_id,
+ task,
+ subagent_component_id,
+ SubagentConversationPhase.START,
)
- yield await self._response_service.emit(component)
thread_started = self._response_service.factory.thread_started(
conversation_id=task.conversation_id,
@@ -182,23 +168,41 @@ async def execute_plan(
yield await self._response_service.emit(failure)
finally:
if task.handoff_from_super_agent:
- component_payload = json.dumps(
- {
- "conversation_id": task.conversation_id,
- "agent_name": task.agent_name,
- "phase": SubagentConversationPhase.END.value,
- }
+ # Emit subagent conversation end component
+ yield await self._emit_subagent_conversation_component(
+ plan.conversation_id,
+ thread_id,
+ task,
+ subagent_component_id,
+ SubagentConversationPhase.END,
)
- component = self._response_service.factory.component_generator(
- conversation_id=plan.conversation_id,
- thread_id=thread_id,
- task_id=task.task_id,
- content=component_payload,
- component_type=ComponentType.SUBAGENT_CONVERSATION.value,
- component_id=subagent_component_id,
- agent_name=task.agent_name,
- )
- yield await self._response_service.emit(component)
+
+ async def _emit_subagent_conversation_component(
+ self,
+ super_agent_conversation_id: str,
+ thread_id: str,
+ subagent_task: Task,
+ component_id: str,
+ phase: SubagentConversationPhase,
+ ) -> BaseResponse:
+ """Emit a subagent conversation component with the specified phase."""
+ component_payload = json.dumps(
+ {
+ "conversation_id": subagent_task.conversation_id,
+ "agent_name": subagent_task.agent_name,
+ "phase": phase.value,
+ }
+ )
+ component = self._response_service.factory.component_generator(
+ conversation_id=super_agent_conversation_id,
+ thread_id=thread_id,
+ task_id=subagent_task.task_id,
+ content=component_payload,
+ component_type=ComponentType.SUBAGENT_CONVERSATION.value,
+ component_id=component_id,
+ agent_name=subagent_task.agent_name,
+ )
+ return await self._response_service.emit(component)
async def _execute_task(
self,
@@ -224,18 +228,13 @@ async def _execute_task(
)
if task.schedule_config:
- controller = self._response_service.factory.component_generator(
- conversation_id=conversation_id,
- thread_id=thread_id,
- task_id=task_id,
- content=ScheduledTaskComponentContent(
- task_id=task_id,
- task_title=task.title,
- ).model_dump_json(exclude_none=True),
- component_type=ComponentType.SCHEDULED_TASK_CONTROLLER.value,
- agent_name=task.agent_name,
+ yield await self._response_service.emit(
+ self._response_service.factory.schedule_task_controller_component(
+ conversation_id=conversation_id,
+ thread_id=thread_id,
+ task=task,
+ )
)
- yield await self._response_service.emit(controller)
yield await self._response_service.emit(
self._response_service.factory.done(
conversation_id=conversation_id,
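The executor refactor above funnels the subagent START/END components through one helper. The bracketing pattern it implements looks roughly like this sketch, with a stand-in enum and plain strings in place of the real emit/response types:

```python
import json
from enum import Enum
from typing import AsyncIterator


class Phase(str, Enum):  # stand-in for SubagentConversationPhase; real values may differ
    START = "start"
    END = "end"


async def run_with_brackets(conversation_id: str, agent_name: str) -> AsyncIterator[str]:
    def component(phase: Phase) -> str:
        return json.dumps(
            {"conversation_id": conversation_id, "agent_name": agent_name, "phase": phase.value}
        )

    yield component(Phase.START)
    try:
        yield "task output"  # the actual task execution happens here
    except Exception as exc:  # failures are reported instead of propagated
        yield f"task failed: {exc}"
    finally:
        yield component(Phase.END)  # the END bracket is always emitted
```

In the executor itself the END emission is additionally gated on `task.handoff_from_super_agent`, as the hunk above shows.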
diff --git a/python/valuecell/core/types.py b/python/valuecell/core/types.py
index 9d77654d5..274965410 100644
--- a/python/valuecell/core/types.py
+++ b/python/valuecell/core/types.py
@@ -236,6 +236,8 @@ class FilteredCardPushNotificationComponentData(BaseModel):
ToolCallPayload,
]
+ResponseMetadata = Dict[str, str | int | float]
+
ConversationItemEvent = Union[
StreamResponseEvent,
@@ -275,6 +277,7 @@ class ConversationItem(BaseModel):
None, description="Task ID if associated with a task"
)
payload: str = Field(..., description="The actual message payload")
+ metadata: str = Field("{}", description="Additional metadata for the item")
class UnifiedResponseData(BaseModel):
@@ -295,6 +298,9 @@ class UnifiedResponseData(BaseModel):
payload: Optional[ResponsePayload] = Field(
None, description="The message data payload"
)
+ metadata: Optional[ResponseMetadata] = Field(
+ None, description="Additional metadata for the response"
+ )
role: Role = Field(..., description="The role of the message sender")
item_id: str = Field(default_factory=generate_item_id)
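The patch stores metadata as a JSON string on `ConversationItem` and as a parsed dict on `UnifiedResponseData`. If stricter validation of the `ResponseMetadata` shape were ever wanted, pydantic's `TypeAdapter` could enforce the alias; this is only a sketch, not something the series adds:

```python
from typing import Dict, Union

from pydantic import TypeAdapter

ResponseMetadata = Dict[str, Union[str, int, float]]  # same shape as the alias above

adapter = TypeAdapter(ResponseMetadata)
adapter.validate_python({"task_title": "Daily summary", "run_count": 3})  # accepted
# adapter.validate_python({"nested": {"not": "allowed"}})  # would raise ValidationError
```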
diff --git a/python/valuecell/server/api/schemas/conversation.py b/python/valuecell/server/api/schemas/conversation.py
index c3ba79f0c..13e970cb4 100644
--- a/python/valuecell/server/api/schemas/conversation.py
+++ b/python/valuecell/server/api/schemas/conversation.py
@@ -37,6 +37,7 @@ class MessageData(BaseModel):
role: Optional[str] = Field(None, description="Role for simple event format")
item_id: Optional[str] = Field(None, description="Item ID for simple event format")
agent_name: Optional[str] = Field(None, description="Name of the agent")
+ metadata: Optional[Dict[str, str | int | float]] = Field(None, description="Metadata")
class MessageEvent(BaseModel):
diff --git a/python/valuecell/server/db/init_db.py b/python/valuecell/server/db/init_db.py
index fe7e24290..65aef79c2 100644
--- a/python/valuecell/server/db/init_db.py
+++ b/python/valuecell/server/db/init_db.py
@@ -147,6 +147,7 @@ def create_tables(self) -> bool:
task_id TEXT,
payload TEXT,
agent_name TEXT,
+ metadata TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
""")
diff --git a/python/valuecell/server/services/conversation_service.py b/python/valuecell/server/services/conversation_service.py
index 3b0b7c405..1f122674c 100644
--- a/python/valuecell/server/services/conversation_service.py
+++ b/python/valuecell/server/services/conversation_service.py
@@ -117,6 +117,10 @@ async def get_conversation_history(
role=role_str,
item_id=data.item_id,
)
+ if data.agent_name:
+ message_data_with_meta.agent_name = data.agent_name
+ if data.metadata:
+ message_data_with_meta.metadata = data.metadata
history_item = ConversationHistoryItem(
event=event_str, data=message_data_with_meta
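On the read path, clients can now pick metadata straight off each history item. A rough sketch of consuming it; the event/data layout is assumed from `ConversationHistoryItem` above and the values are placeholders:

```python
from typing import Any, Dict, List


def task_titles_from_history(history: List[Dict[str, Any]]) -> List[str]:
    """Collect task_title values attached by the scheduled-task components."""
    titles = []
    for item in history:
        metadata = (item.get("data") or {}).get("metadata") or {}
        title = metadata.get("task_title")
        if title:
            titles.append(title)
    return titles


history = [
    {"event": "component_generator", "data": {"agent_name": "demo_agent", "metadata": {"task_title": "Morning report"}}},
    {"event": "message", "data": {"agent_name": "demo_agent"}},
]
assert task_titles_from_history(history) == ["Morning report"]
```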
From ad8c56cab7c4c744616126d4797102c6cbe7bfd6 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Mon, 27 Oct 2025 16:41:54 +0800
Subject: [PATCH 23/30] test: add unit tests for thread_id preservation during
agent handoff
---
python/valuecell/core/plan/planner.py | 7 ++-
.../valuecell/tests/test_planner_thread_id.py | 55 +++++++++++++++++++
2 files changed, 60 insertions(+), 2 deletions(-)
create mode 100644 python/valuecell/tests/test_planner_thread_id.py
diff --git a/python/valuecell/core/plan/planner.py b/python/valuecell/core/plan/planner.py
index 71909591b..426907f6e 100644
--- a/python/valuecell/core/plan/planner.py
+++ b/python/valuecell/core/plan/planner.py
@@ -25,7 +25,7 @@
from valuecell.utils import generate_uuid
from valuecell.utils.env import agent_debug_mode_enabled
from valuecell.utils.model import get_model
-from valuecell.utils.uuid import generate_conversation_id, generate_thread_id
+from valuecell.utils.uuid import generate_conversation_id
from .models import ExecutionPlan, PlannerInput, PlannerResponse
from .prompts import (
@@ -262,9 +262,12 @@ def _create_task(
"""
# task_brief is a _TaskBrief model instance
+ # Reuse parent thread_id across subagent handoff.
+ # When handing off from Super Agent, a NEW conversation_id is created for the subagent,
+ # but we PRESERVE the parent thread_id to correlate the entire flow as one interaction.
if handoff_from_super_agent:
conversation_id = generate_conversation_id()
- thread_id = generate_thread_id()
+ # Do NOT override thread_id here (keep the parent's thread_id per Spec A)
return Task(
conversation_id=conversation_id,
diff --git a/python/valuecell/tests/test_planner_thread_id.py b/python/valuecell/tests/test_planner_thread_id.py
new file mode 100644
index 000000000..a62efe7b9
--- /dev/null
+++ b/python/valuecell/tests/test_planner_thread_id.py
@@ -0,0 +1,55 @@
+from types import SimpleNamespace
+
+from valuecell.core.plan.planner import ExecutionPlanner
+from valuecell.core.task.models import TaskPattern
+
+
+def _make_task_brief():
+ return SimpleNamespace(
+ agent_name="demo_agent",
+ title="demo task",
+ query="do something",
+ pattern=TaskPattern.ONCE,
+ schedule_config=None,
+ )
+
+
+def test_handoff_preserves_parent_thread_id_and_new_conversation_id():
+ parent_conversation_id = "conv_parent"
+ parent_thread_id = "thread_parent"
+
+ # Bypass __init__ to avoid heavy dependencies in planner construction
+ planner = ExecutionPlanner.__new__(ExecutionPlanner)
+
+ tb = _make_task_brief()
+ task = planner._create_task(
+ tb,
+ user_id="user-1",
+ conversation_id=parent_conversation_id,
+ thread_id=parent_thread_id,
+ handoff_from_super_agent=True,
+ )
+
+ assert task.handoff_from_super_agent is True
+ assert task.conversation_id != parent_conversation_id # new sub-conversation
+ assert task.thread_id == parent_thread_id # Spec A: reuse parent thread
+
+
+def test_no_handoff_keeps_conversation_and_thread():
+ parent_conversation_id = "conv_parent"
+ parent_thread_id = "thread_parent"
+
+ planner = ExecutionPlanner.__new__(ExecutionPlanner)
+
+ tb = _make_task_brief()
+ task = planner._create_task(
+ tb,
+ user_id="user-1",
+ conversation_id=parent_conversation_id,
+ thread_id=parent_thread_id,
+ handoff_from_super_agent=False,
+ )
+
+ assert task.handoff_from_super_agent is False
+ assert task.conversation_id == parent_conversation_id
+ assert task.thread_id == parent_thread_id
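The new tests bypass the planner constructor, so they can be run in isolation; for example, directly from Python (equivalent to invoking pytest on the new file):

```python
import pytest

# Run only the new thread_id preservation tests; pytest.main returns an exit code (0 on success).
exit_code = pytest.main(["-q", "python/valuecell/tests/test_planner_thread_id.py"])
```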
From 37f89f6145d5244e660f5f97b557768ec43fc959 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Mon, 27 Oct 2025 17:01:57 +0800
Subject: [PATCH 24/30] refactor: rename response_service to event_service for
consistency across orchestrator and service components
---
python/valuecell/core/conversation/manager.py | 2 +-
python/valuecell/core/conversation/service.py | 6 +--
.../valuecell/core/coordinate/orchestrator.py | 52 +++++++++----------
python/valuecell/core/coordinate/services.py | 14 ++---
.../coordinate/tests/test_orchestrator.py | 6 +--
python/valuecell/core/event/factory.py | 4 +-
python/valuecell/core/task/executor.py | 50 +++++++++---------
.../server/api/schemas/conversation.py | 4 +-
8 files changed, 69 insertions(+), 69 deletions(-)
diff --git a/python/valuecell/core/conversation/manager.py b/python/valuecell/core/conversation/manager.py
index 7e405e5f5..22502dc72 100644
--- a/python/valuecell/core/conversation/manager.py
+++ b/python/valuecell/core/conversation/manager.py
@@ -1,5 +1,5 @@
from datetime import datetime
-from typing import Dict, List, Optional
+from typing import List, Optional
from valuecell.core.types import (
ConversationItem,
diff --git a/python/valuecell/core/conversation/service.py b/python/valuecell/core/conversation/service.py
index cdadefb94..4ed06eaba 100644
--- a/python/valuecell/core/conversation/service.py
+++ b/python/valuecell/core/conversation/service.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Dict, List, Optional, Tuple
+from typing import List, Optional, Tuple
from valuecell.core.conversation.manager import ConversationManager
from valuecell.core.conversation.models import Conversation, ConversationStatus
@@ -89,7 +89,7 @@ async def add_item(
metadata: Optional[ResponseMetadata] = None,
) -> Optional[ConversationItem]:
"""Persist a conversation item via the underlying manager.
-
+
Args:
role: Item role (USER, AGENT, SYSTEM)
event: Item event
@@ -123,7 +123,7 @@ async def get_conversation_items(
offset: Optional[int] = None,
) -> List[ConversationItem]:
"""Load conversation items with optional filtering and pagination.
-
+
Args:
conversation_id: Filter by conversation ID (optional)
event: Filter by event type (optional)
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index 80bba9455..398cf608a 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -70,21 +70,21 @@ class AgentOrchestrator:
def __init__(
self,
conversation_service: ConversationService | None = None,
- response_service: EventResponseService | None = None,
+ event_service: EventResponseService | None = None,
plan_service: PlanService | None = None,
super_agent_service: SuperAgentService | None = None,
task_executor: TaskExecutor | None = None,
) -> None:
services = AgentServiceBundle.compose(
conversation_service=conversation_service,
- response_service=response_service,
+ event_service=event_service,
plan_service=plan_service,
super_agent_service=super_agent_service,
task_executor=task_executor,
)
self.conversation_service = services.conversation_service
- self.response_service = services.response_service
+ self.event_service = services.event_service
self.super_agent_service = services.super_agent_service
self.plan_service = services.plan_service
self.task_executor = services.task_executor
@@ -186,10 +186,10 @@ async def _generate_responses(
)
if created:
- started = self.response_service.factory.conversation_started(
+ started = self.event_service.factory.conversation_started(
conversation_id=conversation_id
)
- yield await self.response_service.emit(started)
+ yield await self.event_service.emit(started)
if conversation.status == ConversationStatus.REQUIRE_USER_INPUT:
async for response in self._handle_conversation_continuation(
@@ -204,12 +204,12 @@ async def _generate_responses(
logger.exception(
f"Error processing user input for conversation {conversation_id}"
)
- failure = self.response_service.factory.system_failed(
+ failure = self.event_service.factory.system_failed(
conversation_id, f"(Error) Error processing request: {str(e)}"
)
- yield await self.response_service.emit(failure)
+ yield await self.event_service.emit(failure)
finally:
- yield self.response_service.factory.done(conversation_id)
+ yield self.event_service.factory.done(conversation_id)
async def _handle_conversation_continuation(
self, user_input: UserInput
@@ -228,32 +228,32 @@ async def _handle_conversation_continuation(
# Validate execution context exists
if conversation_id not in self._execution_contexts:
- failure = self.response_service.factory.system_failed(
+ failure = self.event_service.factory.system_failed(
conversation_id,
"No execution context found for this conversation. The conversation may have expired.",
)
- yield await self.response_service.emit(failure)
+ yield await self.event_service.emit(failure)
return
context = self._execution_contexts[conversation_id]
# Validate context integrity and user consistency
if not self._validate_execution_context(context, user_id):
- failure = self.response_service.factory.system_failed(
+ failure = self.event_service.factory.system_failed(
conversation_id,
"Invalid execution context or user mismatch.",
)
- yield await self.response_service.emit(failure)
+ yield await self.event_service.emit(failure)
await self._cancel_execution(conversation_id)
return
thread_id = generate_thread_id()
- response = self.response_service.factory.thread_started(
+ response = self.event_service.factory.thread_started(
conversation_id=conversation_id,
thread_id=thread_id,
user_query=user_input.query,
)
- yield await self.response_service.emit(response)
+ yield await self.event_service.emit(response)
# Provide user response and resume execution
# If we are in an execution stage, store the pending response for resume
@@ -270,11 +270,11 @@ async def _handle_conversation_continuation(
yield response
# Resuming execution stage is not yet supported
else:
- failure = self.response_service.factory.system_failed(
+ failure = self.event_service.factory.system_failed(
conversation_id,
"Resuming execution stage is not yet supported.",
)
- yield await self.response_service.emit(failure)
+ yield await self.event_service.emit(failure)
async def _handle_new_request(
self, user_input: UserInput
@@ -286,12 +286,12 @@ async def _handle_new_request(
"""
conversation_id = user_input.meta.conversation_id
thread_id = generate_thread_id()
- response = self.response_service.factory.thread_started(
+ response = self.event_service.factory.thread_started(
conversation_id=conversation_id,
thread_id=thread_id,
user_query=user_input.query,
)
- yield await self.response_service.emit(response)
+ yield await self.event_service.emit(response)
# 1) Super Agent triage phase (pre-planning) - skip if target agent is specified
if user_input.target_agent_name == self.super_agent_service.name:
@@ -299,7 +299,7 @@ async def _handle_new_request(
user_input
)
if super_outcome.decision == SuperAgentDecision.ANSWER:
- ans = self.response_service.factory.message_response_general(
+ ans = self.event_service.factory.message_response_general(
StreamResponseEvent.MESSAGE_CHUNK,
conversation_id,
thread_id,
@@ -307,7 +307,7 @@ async def _handle_new_request(
content=super_outcome.answer_content,
agent_name=self.super_agent_service.name,
)
- yield await self.response_service.emit(ans)
+ yield await self.event_service.emit(ans)
return
if super_outcome.decision == SuperAgentDecision.HANDOFF_TO_PLANNER:
@@ -377,12 +377,12 @@ async def _monitor_planning_task(
# Update conversation status and send user input request
await self.conversation_service.require_user_input(conversation_id)
prompt = self.plan_service.get_request_prompt(conversation_id) or ""
- response = self.response_service.factory.plan_require_user_input(
+ response = self.event_service.factory.plan_require_user_input(
conversation_id,
thread_id,
prompt,
)
- yield await self.response_service.emit(response)
+ yield await self.event_service.emit(response)
return
await asyncio.sleep(ASYNC_SLEEP_INTERVAL)
@@ -425,12 +425,12 @@ async def _continue_planning(
original_user_input = context.get_metadata(ORIGINAL_USER_INPUT)
if not all([planning_task, original_user_input]):
- failure = self.response_service.factory.plan_failed(
+ failure = self.event_service.factory.plan_failed(
conversation_id,
thread_id,
"Invalid planning context - missing required data",
)
- yield await self.response_service.emit(failure)
+ yield await self.event_service.emit(failure)
await self._cancel_execution(conversation_id)
return
@@ -441,10 +441,10 @@ async def _continue_planning(
prompt = self.plan_service.get_request_prompt(conversation_id) or ""
# Ensure conversation is set to require user input again for repeated prompts
await self.conversation_service.require_user_input(conversation_id)
- response = self.response_service.factory.plan_require_user_input(
+ response = self.event_service.factory.plan_require_user_input(
conversation_id, thread_id, prompt
)
- yield await self.response_service.emit(response)
+ yield await self.event_service.emit(response)
return
await asyncio.sleep(ASYNC_SLEEP_INTERVAL)
diff --git a/python/valuecell/core/coordinate/services.py b/python/valuecell/core/coordinate/services.py
index 4a8be4843..0ddc9b60e 100644
--- a/python/valuecell/core/coordinate/services.py
+++ b/python/valuecell/core/coordinate/services.py
@@ -33,7 +33,7 @@ class AgentServiceBundle:
agent_connections: RemoteConnections
conversation_service: ConversationService
- response_service: EventResponseService
+ event_service: EventResponseService
task_service: TaskService
plan_service: PlanService
super_agent_service: SuperAgentService
@@ -44,7 +44,7 @@ def compose(
cls,
*,
conversation_service: Optional[ConversationService] = None,
- response_service: Optional[EventResponseService] = None,
+ event_service: Optional[EventResponseService] = None,
plan_service: Optional[PlanService] = None,
super_agent_service: Optional[SuperAgentService] = None,
task_executor: Optional[TaskExecutor] = None,
@@ -55,8 +55,8 @@ def compose(
if conversation_service is not None:
conv_service = conversation_service
- elif response_service is not None:
- conv_service = response_service.conversation_service
+ elif event_service is not None:
+ conv_service = event_service.conversation_service
else:
base_manager = ConversationManager(
conversation_store=SQLiteConversationStore(resolve_db_path()),
@@ -64,7 +64,7 @@ def compose(
)
conv_service = ConversationService(manager=base_manager)
- resp_service = response_service or EventResponseService(
+ event_service = event_service or EventResponseService(
conversation_service=conv_service
)
t_service = TaskService()
@@ -73,14 +73,14 @@ def compose(
executor = task_executor or TaskExecutor(
agent_connections=connections,
task_service=t_service,
- response_service=resp_service,
+ event_service=event_service,
conversation_service=conv_service,
)
return cls(
agent_connections=connections,
conversation_service=conv_service,
- response_service=resp_service,
+ event_service=event_service,
task_service=t_service,
plan_service=p_service,
super_agent_service=sa_service,
diff --git a/python/valuecell/core/coordinate/tests/test_orchestrator.py b/python/valuecell/core/coordinate/tests/test_orchestrator.py
index 367947532..c542470db 100644
--- a/python/valuecell/core/coordinate/tests/test_orchestrator.py
+++ b/python/valuecell/core/coordinate/tests/test_orchestrator.py
@@ -195,7 +195,7 @@ def _orchestrator(
agent_connections.start_agent = AsyncMock()
conversation_service = ConversationService(manager=mock_conversation_manager)
- response_service = EventResponseService(conversation_service=conversation_service)
+ event_service = EventResponseService(conversation_service=conversation_service)
task_service = TaskService(manager=mock_task_manager)
plan_service = PlanService(
agent_connections=agent_connections, execution_planner=mock_planner
@@ -204,14 +204,14 @@ def _orchestrator(
task_executor = TaskExecutor(
agent_connections=agent_connections,
task_service=task_service,
- response_service=response_service,
+ event_service=event_service,
conversation_service=conversation_service,
)
bundle = SimpleNamespace(
agent_connections=agent_connections,
conversation_service=conversation_service,
- response_service=response_service,
+ event_service=event_service,
task_service=task_service,
plan_service=plan_service,
super_agent_service=super_agent_service,
diff --git a/python/valuecell/core/event/factory.py b/python/valuecell/core/event/factory.py
index ab4fd6339..098db7ca5 100644
--- a/python/valuecell/core/event/factory.py
+++ b/python/valuecell/core/event/factory.py
@@ -565,7 +565,7 @@ def schedule_task_controller_component(
"task_title": task.title,
},
)
-
+
def schedule_task_result_component(
self,
task: Task,
@@ -576,7 +576,7 @@ def schedule_task_result_component(
Args:
task: The Task instance.
             content: Serialized content representing the task result.
-
+
Returns:
ComponentGeneratorResponse wrapping the payload.
"""
diff --git a/python/valuecell/core/task/executor.py b/python/valuecell/core/task/executor.py
index e9872f5b8..b44ecdff9 100644
--- a/python/valuecell/core/task/executor.py
+++ b/python/valuecell/core/task/executor.py
@@ -99,13 +99,13 @@ def __init__(
self,
agent_connections: RemoteConnections,
task_service: TaskService,
- response_service: EventResponseService,
+ event_service: EventResponseService,
conversation_service: ConversationService,
poll_interval: float = DEFAULT_EXECUTION_POLL_INTERVAL,
) -> None:
self._agent_connections = agent_connections
self._task_service = task_service
- self._response_service = response_service
+ self._event_service = event_service
self._conversation_service = conversation_service
self._poll_interval = poll_interval
@@ -116,14 +116,14 @@ async def execute_plan(
metadata: Optional[dict] = None,
) -> AsyncGenerator[BaseResponse, None]:
if plan.guidance_message:
- response = self._response_service.factory.message_response_general(
+ response = self._event_service.factory.message_response_general(
event=StreamResponseEvent.MESSAGE_CHUNK,
conversation_id=plan.conversation_id,
thread_id=thread_id,
task_id=generate_task_id(),
content=plan.guidance_message,
)
- yield await self._response_service.emit(response)
+ yield await self._event_service.emit(response)
return
for task in plan.tasks:
@@ -144,12 +144,12 @@ async def execute_plan(
SubagentConversationPhase.START,
)
- thread_started = self._response_service.factory.thread_started(
+ thread_started = self._event_service.factory.thread_started(
conversation_id=task.conversation_id,
thread_id=thread_id,
user_query=task.query,
)
- yield await self._response_service.emit(thread_started)
+ yield await self._event_service.emit(thread_started)
try:
await self._task_service.update_task(task)
@@ -158,14 +158,14 @@ async def execute_plan(
except Exception as exc: # pragma: no cover - defensive logging
error_msg = f"(Error) Error executing {task.task_id}: {exc}"
logger.exception(error_msg)
- failure = self._response_service.factory.task_failed(
+ failure = self._event_service.factory.task_failed(
conversation_id=plan.conversation_id,
thread_id=thread_id,
task_id=task.task_id,
content=error_msg,
agent_name=task.agent_name,
)
- yield await self._response_service.emit(failure)
+ yield await self._event_service.emit(failure)
finally:
if task.handoff_from_super_agent:
# Emit subagent conversation end component
@@ -193,7 +193,7 @@ async def _emit_subagent_conversation_component(
"phase": phase.value,
}
)
- component = self._response_service.factory.component_generator(
+ component = self._event_service.factory.component_generator(
conversation_id=super_agent_conversation_id,
thread_id=thread_id,
task_id=subagent_task.task_id,
@@ -202,7 +202,7 @@ async def _emit_subagent_conversation_component(
component_id=component_id,
agent_name=subagent_task.agent_name,
)
- return await self._response_service.emit(component)
+ return await self._event_service.emit(component)
async def _execute_task(
self,
@@ -228,15 +228,15 @@ async def _execute_task(
)
if task.schedule_config:
- yield await self._response_service.emit(
- self._response_service.factory.schedule_task_controller_component(
+ yield await self._event_service.emit(
+ self._event_service.factory.schedule_task_controller_component(
conversation_id=conversation_id,
thread_id=thread_id,
task=task,
)
)
- yield await self._response_service.emit(
- self._response_service.factory.done(
+ yield await self._event_service.emit(
+ self._event_service.factory.done(
conversation_id=conversation_id,
thread_id=thread_id,
)
@@ -267,18 +267,18 @@ async def _execute_task(
break
await self._task_service.complete_task(task_id)
- completed = self._response_service.factory.task_completed(
+ completed = self._event_service.factory.task_completed(
conversation_id=conversation_id,
thread_id=thread_id,
task_id=task_id,
agent_name=task.agent_name,
)
- yield await self._response_service.emit(completed)
+ yield await self._event_service.emit(completed)
except Exception as exc:
await self._task_service.fail_task(task_id, str(exc))
raise
finally:
- await self._response_service.flush_task_response(
+ await self._event_service.flush_task_response(
conversation_id=conversation_id,
thread_id=thread_id,
task_id=task_id,
@@ -305,24 +305,22 @@ async def _execute_single_task_run(
async for remote_task, event in remote_response:
if event is None and remote_task.status.state == TaskState.submitted:
task.remote_task_ids.append(remote_task.id)
- started = self._response_service.factory.task_started(
+ started = self._event_service.factory.task_started(
conversation_id=task.conversation_id,
thread_id=thread_id,
task_id=task.task_id,
agent_name=agent_name,
)
- yield await self._response_service.emit(started)
+ yield await self._event_service.emit(started)
continue
if isinstance(event, TaskStatusUpdateEvent):
- route_result: RouteResult = (
- await self._response_service.route_task_status(
- task, thread_id, event
- )
+ route_result: RouteResult = await self._event_service.route_task_status(
+ task, thread_id, event
)
responses = accumulator.consume(route_result.responses)
for resp in responses:
- yield await self._response_service.emit(resp)
+ yield await self._event_service.emit(resp)
for side_effect in route_result.side_effects:
if side_effect.kind == SideEffectKind.FAIL_TASK:
await self._task_service.fail_task(
@@ -340,9 +338,9 @@ async def _execute_single_task_run(
)
continue
- final_component = accumulator.finalize(self._response_service.factory)
+ final_component = accumulator.finalize(self._event_service.factory)
if final_component is not None:
- yield await self._response_service.emit(final_component)
+ yield await self._event_service.emit(final_component)
return
diff --git a/python/valuecell/server/api/schemas/conversation.py b/python/valuecell/server/api/schemas/conversation.py
index 13e970cb4..3d02f246e 100644
--- a/python/valuecell/server/api/schemas/conversation.py
+++ b/python/valuecell/server/api/schemas/conversation.py
@@ -37,7 +37,9 @@ class MessageData(BaseModel):
role: Optional[str] = Field(None, description="Role for simple event format")
item_id: Optional[str] = Field(None, description="Item ID for simple event format")
agent_name: Optional[str] = Field(None, description="Name of the agent")
- metadata: Optional[Dict[str, str | int | float]] = Field(None, description="Metadata")
+ metadata: Optional[Dict[str, str | int | float]] = Field(
+ None, description="Metadata"
+ )
class MessageEvent(BaseModel):
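After this rename, code that wires the orchestrator by hand passes `event_service` instead of `response_service`. A minimal wiring sketch using a test double for the conversation manager; the remaining services are composed with defaults by `AgentServiceBundle.compose`, which may still require the usual runtime configuration:

```python
from unittest.mock import AsyncMock

from valuecell.core.conversation.service import ConversationService
from valuecell.core.coordinate.orchestrator import AgentOrchestrator
from valuecell.core.event.service import EventResponseService

# An AsyncMock stands in for the real ConversationManager
conversation_service = ConversationService(manager=AsyncMock())
event_service = EventResponseService(conversation_service=conversation_service)

orchestrator = AgentOrchestrator(
    conversation_service=conversation_service,
    event_service=event_service,  # renamed from response_service in this patch
)
```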
From 651f4a33e6812cfaa82ef56398013519d0ce4cfe Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Mon, 27 Oct 2025 17:51:16 +0800
Subject: [PATCH 25/30] fix: update super agent test imports after module move
---
python/valuecell/core/super_agent/tests/test_super_agent.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/python/valuecell/core/super_agent/tests/test_super_agent.py b/python/valuecell/core/super_agent/tests/test_super_agent.py
index 6226857fb..1f72fb771 100644
--- a/python/valuecell/core/super_agent/tests/test_super_agent.py
+++ b/python/valuecell/core/super_agent/tests/test_super_agent.py
@@ -5,8 +5,8 @@
import pytest
-from valuecell.core.coordinate import super_agent as super_agent_mod
-from valuecell.core.super_agent import SuperAgent, SuperAgentDecision
+from valuecell.core.super_agent import core as super_agent_mod
+from valuecell.core.super_agent.core import SuperAgent, SuperAgentDecision
from valuecell.core.types import UserInput, UserInputMetadata
@@ -51,7 +51,7 @@ def __init__(self, *args, **kwargs):
def test_super_agent_prompts_are_non_empty():
- from valuecell.core.coordinate.super_agent_prompts import (
+ from valuecell.core.super_agent.prompts import (
SUPER_AGENT_EXPECTED_OUTPUT,
SUPER_AGENT_INSTRUCTION,
)
From 9744e3a6274638773bd373e02d02ae7ec0ab9498 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Mon, 27 Oct 2025 18:32:51 +0800
Subject: [PATCH 26/30] test: add unit tests to improve coverage of core services
---
.../tests/test_conversation_service.py | 171 ++++++++++++++
.../tests/test_orchestrator_context.py | 217 ++++++++++++++++++
.../tests/test_event_response_service.py | 148 ++++++++++++
.../core/event/tests/test_response_factory.py | 54 +++++
.../valuecell/core/plan/tests/test_service.py | 94 ++++++++
.../super_agent/tests/test_super_agent.py | 21 ++
.../core/task/tests/test_executor.py | 216 +++++++++++++++++
.../core/task/tests/test_service_unit.py | 62 +++++
.../core/task/tests/test_temporal.py | 53 +++++
9 files changed, 1036 insertions(+)
create mode 100644 python/valuecell/core/conversation/tests/test_conversation_service.py
create mode 100644 python/valuecell/core/coordinate/tests/test_orchestrator_context.py
create mode 100644 python/valuecell/core/event/tests/test_event_response_service.py
create mode 100644 python/valuecell/core/plan/tests/test_service.py
create mode 100644 python/valuecell/core/task/tests/test_executor.py
create mode 100644 python/valuecell/core/task/tests/test_service_unit.py
create mode 100644 python/valuecell/core/task/tests/test_temporal.py
diff --git a/python/valuecell/core/conversation/tests/test_conversation_service.py b/python/valuecell/core/conversation/tests/test_conversation_service.py
new file mode 100644
index 000000000..5de546701
--- /dev/null
+++ b/python/valuecell/core/conversation/tests/test_conversation_service.py
@@ -0,0 +1,171 @@
+import pytest
+from types import SimpleNamespace
+from unittest.mock import AsyncMock
+
+from valuecell.core.conversation.models import ConversationStatus
+from valuecell.core.conversation.service import ConversationService
+from valuecell.core.types import NotifyResponseEvent, Role
+
+
+class _ConversationStub(SimpleNamespace):
+ def activate(self) -> None:
+ self.status = ConversationStatus.ACTIVE
+
+ def require_user_input(self) -> None:
+ self.status = ConversationStatus.REQUIRE_USER_INPUT
+
+ def set_status(self, status: ConversationStatus) -> None:
+ self.status = status
+
+
+@pytest.fixture()
+def manager() -> AsyncMock:
+ mgr = AsyncMock()
+ mgr.update_conversation = AsyncMock()
+ mgr.create_conversation = AsyncMock()
+ mgr.get_conversation = AsyncMock()
+ mgr.add_item = AsyncMock()
+ mgr.get_conversation_items = AsyncMock()
+ return mgr
+
+
+@pytest.mark.asyncio
+async def test_ensure_conversation_returns_existing(manager: AsyncMock):
+ existing = _ConversationStub(conversation_id="conv-existing")
+ manager.get_conversation.return_value = existing
+
+ service = ConversationService(manager=manager)
+
+ conversation, created = await service.ensure_conversation(
+ user_id="user-1", conversation_id="conv-existing"
+ )
+
+ assert conversation is existing
+ assert created is False
+ manager.create_conversation.assert_not_awaited()
+
+
+@pytest.mark.asyncio
+async def test_ensure_conversation_creates_when_missing(manager: AsyncMock):
+ manager.get_conversation.return_value = None
+ created_conv = _ConversationStub(conversation_id="conv-new")
+ manager.create_conversation.return_value = created_conv
+
+ service = ConversationService(manager=manager)
+
+ conversation, created = await service.ensure_conversation(
+ user_id="user-1",
+ conversation_id="conv-new",
+ title="Sample",
+ agent_name="assistant",
+ )
+
+ assert conversation is created_conv
+ assert created is True
+ manager.create_conversation.assert_awaited_once_with(
+ user_id="user-1",
+ title="Sample",
+ conversation_id="conv-new",
+ agent_name="assistant",
+ )
+
+
+@pytest.mark.asyncio
+async def test_activate_updates_conversation(manager: AsyncMock):
+ conversation = _ConversationStub(status=ConversationStatus.INACTIVE)
+ manager.get_conversation.return_value = conversation
+
+ service = ConversationService(manager=manager)
+
+ result = await service.activate("conv-1")
+
+ assert result is True
+ assert conversation.status == ConversationStatus.ACTIVE
+ manager.update_conversation.assert_awaited_once_with(conversation)
+
+
+@pytest.mark.asyncio
+async def test_activate_returns_false_when_missing(manager: AsyncMock):
+ manager.get_conversation.return_value = None
+ service = ConversationService(manager=manager)
+
+ assert await service.activate("missing") is False
+ manager.update_conversation.assert_not_awaited()
+
+
+@pytest.mark.asyncio
+async def test_require_user_input_updates_status(manager: AsyncMock):
+ conversation = _ConversationStub(status=ConversationStatus.ACTIVE)
+ manager.get_conversation.return_value = conversation
+
+ service = ConversationService(manager=manager)
+
+ assert await service.require_user_input("conv") is True
+ assert conversation.status == ConversationStatus.REQUIRE_USER_INPUT
+ manager.update_conversation.assert_awaited_once_with(conversation)
+
+
+@pytest.mark.asyncio
+async def test_set_status_handles_missing_conversation(manager: AsyncMock):
+ manager.get_conversation.return_value = None
+ service = ConversationService(manager=manager)
+
+ assert await service.set_status("conv", ConversationStatus.INACTIVE) is False
+
+
+@pytest.mark.asyncio
+async def test_set_status_updates_conversation(manager: AsyncMock):
+ conversation = _ConversationStub(status=ConversationStatus.ACTIVE)
+ manager.get_conversation.return_value = conversation
+
+ service = ConversationService(manager=manager)
+
+ assert await service.set_status("conv", ConversationStatus.INACTIVE) is True
+ assert conversation.status == ConversationStatus.INACTIVE
+ manager.update_conversation.assert_awaited_once_with(conversation)
+
+
+@pytest.mark.asyncio
+async def test_add_item_delegates_to_manager(manager: AsyncMock):
+ service = ConversationService(manager=manager)
+
+ await service.add_item(
+ role=Role.USER,
+ event=NotifyResponseEvent.MESSAGE,
+ conversation_id="conv",
+ item_id="item",
+ payload=None,
+ )
+
+ manager.add_item.assert_awaited_once_with(
+ role=Role.USER,
+ event=NotifyResponseEvent.MESSAGE,
+ conversation_id="conv",
+ thread_id=None,
+ task_id=None,
+ payload=None,
+ item_id="item",
+ agent_name=None,
+ metadata=None,
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_conversation_items_pass_through(manager: AsyncMock):
+ service = ConversationService(manager=manager)
+ manager.get_conversation_items.return_value = ["item"]
+
+ items = await service.get_conversation_items(
+ conversation_id="conv",
+ limit=1,
+ offset=2,
+ )
+
+ assert items == ["item"]
+ manager.get_conversation_items.assert_awaited_once_with(
+ conversation_id="conv",
+ event=None,
+ component_type=None,
+ limit=1,
+ offset=2,
+ )
diff --git a/python/valuecell/core/coordinate/tests/test_orchestrator_context.py b/python/valuecell/core/coordinate/tests/test_orchestrator_context.py
new file mode 100644
index 000000000..8c55f163a
--- /dev/null
+++ b/python/valuecell/core/coordinate/tests/test_orchestrator_context.py
@@ -0,0 +1,217 @@
+import asyncio
+from types import SimpleNamespace
+from unittest.mock import AsyncMock
+
+import pytest
+
+from valuecell.core.coordinate.orchestrator import (
+ ASYNC_SLEEP_INTERVAL,
+ DEFAULT_CONTEXT_TIMEOUT_SECONDS,
+ AgentOrchestrator,
+ ExecutionContext,
+)
+from valuecell.core.event.factory import ResponseFactory
+from valuecell.core.types import SystemResponseEvent
+
+
+class DummyEventService:
+ def __init__(self) -> None:
+ self.factory = ResponseFactory()
+ self.emitted: list = []
+
+ async def emit(self, response):
+ self.emitted.append(response)
+ return response
+
+
+class DummyPlanService:
+ def __init__(self) -> None:
+ self.pending = False
+ self.prompt: str | None = None
+ self.provided: list[tuple[str, str]] = []
+ self.cleared: list[str] = []
+
+ def has_pending_request(self, conversation_id: str) -> bool:
+ return self.pending
+
+ def get_request_prompt(self, conversation_id: str) -> str | None:
+ return self.prompt
+
+ def provide_user_response(self, conversation_id: str, response: str) -> bool:
+ self.provided.append((conversation_id, response))
+ return self.pending
+
+ def register_user_input(self, conversation_id: str, request):
+ pass
+
+ def clear_pending_request(self, conversation_id: str) -> None:
+ self.cleared.append(conversation_id)
+
+
+class DummyConversationService:
+ def __init__(self) -> None:
+ self.activated: list[str] = []
+ self.required: list[str] = []
+
+ async def activate(self, conversation_id: str) -> None:
+ self.activated.append(conversation_id)
+
+ async def require_user_input(self, conversation_id: str) -> None:
+ self.required.append(conversation_id)
+
+
+class DummyTaskExecutor:
+ def __init__(self, event_service: DummyEventService) -> None:
+ self.event_service = event_service
+ self.executed: list[tuple[object, str]] = []
+
+ async def execute_plan(self, plan, thread_id: str):
+ self.executed.append((plan, thread_id))
+ response = self.event_service.factory.done(plan.conversation_id)
+ yield await self.event_service.emit(response)
+
+
+@pytest.fixture()
+def orchestrator(monkeypatch: pytest.MonkeyPatch):
+ event_service = DummyEventService()
+ plan_service = DummyPlanService()
+ conversation_service = DummyConversationService()
+ task_executor = DummyTaskExecutor(event_service)
+
+ bundle = SimpleNamespace(
+ agent_connections=SimpleNamespace(),
+ conversation_service=conversation_service,
+ event_service=event_service,
+ plan_service=plan_service,
+ super_agent_service=SimpleNamespace(name="super", run=AsyncMock()),
+ task_executor=task_executor,
+ )
+
+ monkeypatch.setattr(
+ "valuecell.core.coordinate.orchestrator.AgentServiceBundle.compose",
+ lambda **_: bundle,
+ )
+
+ orch = AgentOrchestrator()
+ return orch, bundle
+
+
+def test_validate_execution_context(orchestrator):
+ orch, _ = orchestrator
+ context = ExecutionContext(
+ stage="planning", conversation_id="conv", thread_id="thread", user_id="user"
+ )
+
+ assert orch._validate_execution_context(context, "user") is True
+
+ context.stage = ""
+ assert orch._validate_execution_context(context, "user") is False
+
+ context.stage = "planning"
+ assert orch._validate_execution_context(context, "other") is False
+
+ context.stage = "planning"
+ context.created_at -= DEFAULT_CONTEXT_TIMEOUT_SECONDS + 1
+ assert orch._validate_execution_context(context, "user") is False
+
+
+@pytest.mark.asyncio
+async def test_continue_planning_invalid_context_triggers_failure(orchestrator):
+ orch, bundle = orchestrator
+ loop = asyncio.get_event_loop()
+ planning_future = loop.create_future()
+
+ context = ExecutionContext(
+ stage="planning", conversation_id="conv", thread_id="thread", user_id="user"
+ )
+ context.add_metadata(planning_task=planning_future)
+
+ orch._execution_contexts["conv"] = context
+
+ outputs = [
+ resp async for resp in orch._continue_planning("conv", "thread", context)
+ ]
+
+ assert outputs
+ assert outputs[0].event == SystemResponseEvent.PLAN_FAILED
+ assert planning_future.cancelled()
+ assert "conv" in bundle.plan_service.cleared
+ assert "conv" in bundle.conversation_service.activated
+ assert "conv" not in orch._execution_contexts
+
+
+@pytest.mark.asyncio
+async def test_continue_planning_pending_request_prompts_user(orchestrator, monkeypatch):
+ orch, bundle = orchestrator
+ loop = asyncio.get_event_loop()
+ planning_future = loop.create_future()
+
+ context = ExecutionContext(
+ stage="planning", conversation_id="conv", thread_id="thread", user_id="user"
+ )
+ context.add_metadata(planning_task=planning_future, original_user_input="query")
+
+ bundle.plan_service.pending = True
+ bundle.plan_service.prompt = "Need info"
+
+ orch._execution_contexts["conv"] = context
+
+ async def fast_sleep(delay):
+ return None
+
+ monkeypatch.setattr(
+ "valuecell.core.coordinate.orchestrator.asyncio.sleep", fast_sleep
+ )
+
+ outputs = [
+ resp async for resp in orch._continue_planning("conv", "thread", context)
+ ]
+
+ assert outputs[0].event == SystemResponseEvent.PLAN_REQUIRE_USER_INPUT
+ assert "conv" in bundle.conversation_service.required
+ assert "conv" in orch._execution_contexts
+
+
+@pytest.mark.asyncio
+async def test_continue_planning_executes_plan_when_ready(orchestrator):
+ orch, bundle = orchestrator
+ loop = asyncio.get_event_loop()
+ planning_future = loop.create_future()
+ plan = SimpleNamespace(conversation_id="conv")
+ planning_future.set_result(plan)
+
+ context = ExecutionContext(
+ stage="planning", conversation_id="conv", thread_id="thread", user_id="user"
+ )
+ context.add_metadata(planning_task=planning_future, original_user_input="query")
+ orch._execution_contexts["conv"] = context
+
+ outputs = [
+ resp async for resp in orch._continue_planning("conv", "thread", context)
+ ]
+
+ assert outputs
+ assert outputs[-1].event == SystemResponseEvent.DONE
+ assert "conv" not in orch._execution_contexts
+ assert bundle.task_executor.executed == [(plan, "thread")]
+
+
+@pytest.mark.asyncio
+async def test_cleanup_expired_contexts(orchestrator):
+ orch, bundle = orchestrator
+ loop = asyncio.get_event_loop()
+ planning_future = loop.create_future()
+
+ context = ExecutionContext(
+ stage="planning", conversation_id="conv", thread_id="thread", user_id="user"
+ )
+ context.add_metadata(planning_task=planning_future, original_user_input="query")
+ context.created_at -= DEFAULT_CONTEXT_TIMEOUT_SECONDS + 1
+ orch._execution_contexts["conv"] = context
+
+ await orch._cleanup_expired_contexts(max_age_seconds=ASYNC_SLEEP_INTERVAL)
+
+ assert planning_future.cancelled()
+ assert "conv" in bundle.conversation_service.activated
+ assert "conv" in bundle.plan_service.cleared
+ assert "conv" not in orch._execution_contexts
diff --git a/python/valuecell/core/event/tests/test_event_response_service.py b/python/valuecell/core/event/tests/test_event_response_service.py
new file mode 100644
index 000000000..b346f25f0
--- /dev/null
+++ b/python/valuecell/core/event/tests/test_event_response_service.py
@@ -0,0 +1,148 @@
+from types import SimpleNamespace
+from unittest.mock import AsyncMock
+
+import pytest
+
+from valuecell.core.event.buffer import SaveItem
+from valuecell.core.event.factory import ResponseFactory
+from valuecell.core.event.service import EventResponseService
+from valuecell.core.types import NotifyResponseEvent, Role
+
+
+class DummyBuffer:
+ def __init__(self):
+ self.annotated = []
+ self.ingested = []
+ self.flushed = []
+
+ def annotate(self, response):
+ self.annotated.append(response)
+ return response
+
+ def ingest(self, response):
+ self.ingested.append(response)
+ return [
+ SaveItem(
+ item_id="item-1",
+ event=response.event,
+ conversation_id=response.data.conversation_id,
+ thread_id=response.data.thread_id,
+ task_id=response.data.task_id,
+ payload=response.data.payload,
+ agent_name=response.data.agent_name,
+ metadata=response.data.metadata,
+ role=response.data.role,
+ )
+ ]
+
+ def flush_task(self, conversation_id, thread_id, task_id):
+ self.flushed.append((conversation_id, thread_id, task_id))
+ return [
+ SaveItem(
+ item_id="item-flush",
+ event=NotifyResponseEvent.MESSAGE,
+ conversation_id=conversation_id,
+ thread_id=thread_id,
+ task_id=task_id,
+ payload=None,
+ agent_name=None,
+ metadata=None,
+ role=Role.AGENT,
+ )
+ ]
+
+
+@pytest.fixture()
+def response_factory() -> ResponseFactory:
+ return ResponseFactory()
+
+
+@pytest.fixture()
+def conversation_service() -> AsyncMock:
+ service = AsyncMock()
+ service.add_item = AsyncMock()
+ return service
+
+
+@pytest.fixture()
+def event_service(response_factory: ResponseFactory, conversation_service: AsyncMock):
+ buffer = DummyBuffer()
+ service = EventResponseService(
+ conversation_service=conversation_service,
+ response_factory=response_factory,
+ response_buffer=buffer,
+ )
+ service._buffer = buffer # type: ignore[attr-defined]
+ return service
+
+
+@pytest.mark.asyncio
+async def test_emit_persists_items(event_service: EventResponseService, conversation_service: AsyncMock):
+ response = event_service.factory.message_response_general(
+ event=NotifyResponseEvent.MESSAGE,
+ conversation_id="conv",
+ thread_id="thread",
+ task_id="task",
+ content="hello",
+ agent_name="agent",
+ )
+
+ result = await event_service.emit(response)
+
+ assert result is response
+ conversation_service.add_item.assert_awaited_once()
+ kwargs = conversation_service.add_item.call_args.kwargs
+ assert kwargs["conversation_id"] == "conv"
+ assert kwargs["event"] == NotifyResponseEvent.MESSAGE
+
+
+@pytest.mark.asyncio
+async def test_emit_many(event_service: EventResponseService, conversation_service: AsyncMock):
+ responses = [
+ event_service.factory.message_response_general(
+ event=NotifyResponseEvent.MESSAGE,
+ conversation_id="conv",
+ thread_id="thread",
+ task_id="task",
+ content="one",
+ ),
+ event_service.factory.message_response_general(
+ event=NotifyResponseEvent.MESSAGE,
+ conversation_id="conv",
+ thread_id="thread",
+ task_id="task",
+ content="two",
+ ),
+ ]
+
+ emitted = await event_service.emit_many(responses)
+
+ assert emitted == responses
+ assert conversation_service.add_item.await_count >= 2
+
+
+@pytest.mark.asyncio
+async def test_flush_task_response(event_service: EventResponseService, conversation_service: AsyncMock):
+ await event_service.flush_task_response("conv", "thread", "task")
+
+ conversation_service.add_item.assert_awaited_once()
+ kwargs = conversation_service.add_item.call_args.kwargs
+ assert kwargs["item_id"] == "item-flush"
+
+
+@pytest.mark.asyncio
+async def test_route_task_status(monkeypatch: pytest.MonkeyPatch, event_service: EventResponseService):
+ sentinel = SimpleNamespace(done=True)
+
+ async def fake_handle(factory, task, thread_id, event):
+ return sentinel
+
+ monkeypatch.setattr(
+ "valuecell.core.event.service.handle_status_update", fake_handle
+ )
+
+ result = await event_service.route_task_status(
+ task=SimpleNamespace(), thread_id="thread", event=SimpleNamespace()
+ )
+
+ assert result is sentinel
diff --git a/python/valuecell/core/event/tests/test_response_factory.py b/python/valuecell/core/event/tests/test_response_factory.py
index 64097f963..3c1cfaa0e 100644
--- a/python/valuecell/core/event/tests/test_response_factory.py
+++ b/python/valuecell/core/event/tests/test_response_factory.py
@@ -1,5 +1,8 @@
+import json
+
import pytest
from valuecell.core.event.factory import ResponseFactory
+from valuecell.core.task.models import Task
from valuecell.core.types import (
BaseResponseDataPayload,
CommonResponseEvent,
@@ -107,3 +110,54 @@ def test_tool_call_completed(factory: ResponseFactory):
resp = factory.from_conversation_item(item)
assert resp.event == StreamResponseEvent.TOOL_CALL_COMPLETED
assert resp.data.payload.tool_name == "search" # type: ignore[attr-defined]
+
+
+def test_from_conversation_item_rejects_unknown_event(factory: ResponseFactory):
+ item = ConversationItem.model_construct(
+ item_id="it-1",
+ role=Role.AGENT,
+ agent_name=None,
+ event="unknown_event",
+ conversation_id="sess-1",
+ thread_id="th-1",
+ task_id="tk-1",
+ payload="{}",
+ metadata="{}",
+ )
+ with pytest.raises(ValueError):
+ factory.from_conversation_item(item)
+
+
+def test_schedule_task_controller_component(factory: ResponseFactory):
+ task = Task(
+ task_id="task-123",
+ title="Morning report",
+ query="run",
+ conversation_id="conv",
+ user_id="user",
+ agent_name="agent",
+ )
+
+ resp = factory.schedule_task_controller_component("conv", "thread", task)
+
+ assert resp.data.agent_name == "agent"
+ assert resp.data.metadata == {"task_title": "Morning report"}
+ payload = json.loads(resp.data.payload.content) # type: ignore[attr-defined]
+ assert payload["task_id"] == "task-123"
+
+
+def test_schedule_task_result_component(factory: ResponseFactory):
+ task = Task(
+ task_id="task-456",
+ title="Daily summary",
+ query="run",
+ conversation_id="conv",
+ user_id="user",
+ agent_name="agent",
+ )
+
+ resp = factory.schedule_task_result_component(task, content="{\"result\":1}")
+
+ assert resp.data.agent_name == "agent"
+ assert resp.data.metadata == {"task_title": "Daily summary"}
+ assert resp.data.payload.content == "{\"result\":1}" # type: ignore[attr-defined]
diff --git a/python/valuecell/core/plan/tests/test_service.py b/python/valuecell/core/plan/tests/test_service.py
new file mode 100644
index 000000000..73065f893
--- /dev/null
+++ b/python/valuecell/core/plan/tests/test_service.py
@@ -0,0 +1,94 @@
+import asyncio
+from types import SimpleNamespace
+from unittest.mock import AsyncMock, Mock
+
+import pytest
+
+from valuecell.core.plan.planner import UserInputRequest
+from valuecell.core.plan.service import PlanService, UserInputRegistry
+from valuecell.core.types import UserInput, UserInputMetadata
+
+
+def test_user_input_registry_lifecycle():
+ registry = UserInputRegistry()
+ request = UserInputRequest(prompt="Need clarification")
+
+ registry.add_request("conv-1", request)
+ assert registry.has_request("conv-1") is True
+ assert registry.get_prompt("conv-1") == "Need clarification"
+
+ provided = registry.provide_response("conv-1", "answer")
+ assert provided is True
+ assert request.response == "answer"
+ assert registry.has_request("conv-1") is False
+ assert registry.get_prompt("conv-1") is None
+
+    # Providing a response again should be rejected once the request is consumed
+ assert registry.provide_response("conv-1", "ignored") is False
+
+ registry.add_request("conv-2", request)
+ registry.clear("conv-2")
+ assert registry.has_request("conv-2") is False
+
+
+@pytest.fixture()
+def plan_service() -> PlanService:
+ fake_planner = SimpleNamespace(create_plan=AsyncMock(return_value="plan"))
+ return PlanService(agent_connections=Mock(), execution_planner=fake_planner)
+
+
+def _make_user_input() -> UserInput:
+ return UserInput(
+ query="please run",
+ target_agent_name="agent-x",
+ meta=UserInputMetadata(conversation_id="conv", user_id="user"),
+ )
+
+
+def test_register_and_prompt(plan_service: PlanService):
+ request = UserInputRequest(prompt="fill this")
+ plan_service.register_user_input("conv", request)
+
+ assert plan_service.has_pending_request("conv") is True
+ assert plan_service.get_request_prompt("conv") == "fill this"
+
+
+def test_provide_user_response(plan_service: PlanService):
+ request = UserInputRequest(prompt="fill this")
+ plan_service.register_user_input("conv", request)
+
+ assert plan_service.provide_user_response("conv", "value") is True
+ assert request.response == "value"
+ assert plan_service.has_pending_request("conv") is False
+
+
+def test_clear_pending_request(plan_service: PlanService):
+ request = UserInputRequest(prompt="fill this")
+ plan_service.register_user_input("conv", request)
+
+ plan_service.clear_pending_request("conv")
+ assert plan_service.has_pending_request("conv") is False
+
+
+@pytest.mark.asyncio
+async def test_start_planning_task_uses_asyncio_create_task(
+ plan_service: PlanService, monkeypatch: pytest.MonkeyPatch
+):
+ scheduled_tasks: list[asyncio.Task] = []
+ original_create_task = asyncio.create_task
+
+ def fake_create_task(coro):
+ task = original_create_task(coro)
+ scheduled_tasks.append(task)
+ return task
+
+ monkeypatch.setattr(asyncio, "create_task", fake_create_task)
+
+ user_input = _make_user_input()
+ callback = AsyncMock()
+
+ task = plan_service.start_planning_task(user_input, "thread-1", callback)
+
+ assert scheduled_tasks, "expected create_task to be invoked"
+ await asyncio.sleep(0)
+ task.cancel()
diff --git a/python/valuecell/core/super_agent/tests/test_super_agent.py b/python/valuecell/core/super_agent/tests/test_super_agent.py
index 1f72fb771..4e6816963 100644
--- a/python/valuecell/core/super_agent/tests/test_super_agent.py
+++ b/python/valuecell/core/super_agent/tests/test_super_agent.py
@@ -7,6 +7,7 @@
from valuecell.core.super_agent import core as super_agent_mod
from valuecell.core.super_agent.core import SuperAgent, SuperAgentDecision
+from valuecell.core.super_agent.service import SuperAgentService
from valuecell.core.types import UserInput, UserInputMetadata
@@ -58,3 +59,23 @@ def test_super_agent_prompts_are_non_empty():
assert "" in SUPER_AGENT_INSTRUCTION
assert '"decision"' in SUPER_AGENT_EXPECTED_OUTPUT
+
+
+@pytest.mark.asyncio
+async def test_super_agent_service_delegates_to_underlying_agent():
+ fake_agent = SimpleNamespace(
+ name="Helper",
+ run=AsyncMock(return_value="result"),
+ )
+ service = SuperAgentService(super_agent=fake_agent)
+ user_input = UserInput(
+ query="test",
+ target_agent_name="Helper",
+ meta=UserInputMetadata(conversation_id="conv", user_id="user"),
+ )
+
+ assert service.name == "Helper"
+ outcome = await service.run(user_input)
+
+ assert outcome == "result"
+ fake_agent.run.assert_awaited_once_with(user_input)
diff --git a/python/valuecell/core/task/tests/test_executor.py b/python/valuecell/core/task/tests/test_executor.py
new file mode 100644
index 000000000..32aab5fb4
--- /dev/null
+++ b/python/valuecell/core/task/tests/test_executor.py
@@ -0,0 +1,216 @@
+import json
+from types import SimpleNamespace
+from unittest.mock import AsyncMock
+
+import pytest
+
+from valuecell.core.event.factory import ResponseFactory
+from valuecell.core.task.executor import ScheduledTaskResultAccumulator, TaskExecutor
+from valuecell.core.task.models import ScheduleConfig, Task
+from valuecell.core.task.service import TaskService
+from valuecell.core.types import (
+ CommonResponseEvent,
+ NotifyResponseEvent,
+ StreamResponseEvent,
+ SubagentConversationPhase,
+)
+
+
+class StubEventService:
+ def __init__(self) -> None:
+ self.factory = ResponseFactory()
+ self.emitted: list = []
+ self.flushed: list[tuple[str, str | None, str | None]] = []
+
+ async def emit(self, response):
+ self.emitted.append(response)
+ return response
+
+ async def flush_task_response(self, conversation_id, thread_id, task_id):
+ self.flushed.append((conversation_id, thread_id, task_id))
+
+
+class StubConversationService:
+ def __init__(self) -> None:
+ self.calls: list[tuple[str, str]] = []
+
+ async def ensure_conversation(self, user_id: str, conversation_id: str, agent_name: str):
+ self.calls.append((user_id, conversation_id))
+
+
+@pytest.fixture()
+def task_service() -> TaskService:
+ svc = TaskService(manager=AsyncMock())
+ svc.manager.start_task = AsyncMock(return_value=True)
+ svc.manager.complete_task = AsyncMock(return_value=True)
+ svc.manager.fail_task = AsyncMock(return_value=True)
+ svc.manager.update_task = AsyncMock()
+ return svc
+
+
+def _make_task(schedule: ScheduleConfig | None = None, **overrides) -> Task:
+ defaults = dict(
+ task_id="task-1",
+ title="My Task",
+ query="do it",
+ conversation_id="conv",
+ user_id="user",
+ agent_name="agent",
+ schedule_config=schedule,
+ )
+ defaults.update(overrides)
+ return Task(**defaults)
+
+
+def test_accumulator_passthrough_when_disabled():
+ task = _make_task(schedule=None)
+ accumulator = ScheduledTaskResultAccumulator(task)
+ factory = ResponseFactory()
+
+ message = factory.message_response_general(
+ event=NotifyResponseEvent.MESSAGE,
+ conversation_id="conv",
+ thread_id="thread",
+ task_id="task",
+ content="hello",
+ )
+
+ out = accumulator.consume([message])
+ assert out == [message]
+ assert accumulator.finalize(factory) is None
+
+
+def test_accumulator_collects_and_finalizes_content():
+ schedule = ScheduleConfig(interval_minutes=10)
+ task = _make_task(schedule=schedule)
+ accumulator = ScheduledTaskResultAccumulator(task)
+ factory = ResponseFactory()
+
+ msg = factory.message_response_general(
+ event=StreamResponseEvent.MESSAGE_CHUNK,
+ conversation_id="conv",
+ thread_id="thread",
+ task_id="task",
+ content="chunk",
+ )
+ reasoning = factory.reasoning(
+ conversation_id="conv",
+ thread_id="thread",
+ task_id="task",
+ event=StreamResponseEvent.REASONING,
+ content="thinking",
+ )
+ tool = factory.tool_call(
+ event=StreamResponseEvent.TOOL_CALL_STARTED,
+ conversation_id="conv",
+ thread_id="thread",
+ task_id="task",
+ tool_call_id="tc",
+ tool_name="tool",
+ )
+
+ out = accumulator.consume([msg, reasoning, tool])
+ assert out == []
+
+ final_component = accumulator.finalize(factory)
+ assert final_component is not None
+ payload = json.loads(final_component.data.payload.content) # type: ignore[attr-defined]
+ assert payload["result"] == "chunk"
+ assert "create_time" in payload
+ assert final_component.data.metadata == {"task_title": "My Task"}
+
+
+def test_accumulator_finalize_default_message():
+ schedule = ScheduleConfig(interval_minutes=5)
+ task = _make_task(schedule=schedule)
+ accumulator = ScheduledTaskResultAccumulator(task)
+ factory = ResponseFactory()
+
+ final_component = accumulator.finalize(factory)
+ assert final_component is not None
+ payload = json.loads(final_component.data.payload.content) # type: ignore[attr-defined]
+ assert payload["result"] == "Task completed without output."
+
+
+@pytest.mark.asyncio
+async def test_execute_plan_guidance_message(task_service: TaskService):
+ event_service = StubEventService()
+ executor = TaskExecutor(
+ agent_connections=SimpleNamespace(),
+ task_service=task_service,
+ event_service=event_service,
+ conversation_service=StubConversationService(),
+ )
+
+ plan = SimpleNamespace(
+ plan_id="plan",
+ conversation_id="conv",
+ user_id="user",
+ guidance_message="Please review",
+ tasks=[],
+ )
+
+ responses = [
+ resp async for resp in executor.execute_plan(plan, thread_id="thread")
+ ]
+
+ assert responses[0].event == StreamResponseEvent.MESSAGE_CHUNK
+ assert responses[0].data.payload.content == "Please review" # type: ignore[attr-defined]
+
+
+@pytest.mark.asyncio
+async def test_emit_subagent_conversation_component(task_service: TaskService):
+ event_service = StubEventService()
+ executor = TaskExecutor(
+ agent_connections=SimpleNamespace(),
+ task_service=task_service,
+ event_service=event_service,
+ conversation_service=StubConversationService(),
+ )
+
+ task = _make_task(handoff_from_super_agent=True)
+ component = await executor._emit_subagent_conversation_component(
+ super_agent_conversation_id="super-conv",
+ thread_id="thread",
+ subagent_task=task,
+ component_id="component",
+ phase=SubagentConversationPhase.START,
+ )
+
+ assert component.event == CommonResponseEvent.COMPONENT_GENERATOR
+ emitted_payload = json.loads(component.data.payload.content) # type: ignore[attr-defined]
+ assert emitted_payload["conversation_id"] == task.conversation_id
+ assert emitted_payload["phase"] == SubagentConversationPhase.START.value
+ assert component.data.item_id == "component"
+
+
+@pytest.mark.asyncio
+async def test_sleep_with_cancellation(monkeypatch: pytest.MonkeyPatch, task_service: TaskService):
+ event_service = StubEventService()
+ executor = TaskExecutor(
+ agent_connections=SimpleNamespace(),
+ task_service=task_service,
+ event_service=event_service,
+ conversation_service=StubConversationService(),
+ poll_interval=0.05,
+ )
+
+ class DummyTask:
+ def __init__(self):
+ self.calls = 0
+
+ def is_finished(self):
+ self.calls += 1
+ return self.calls >= 3
+
+ sleeps: list[float] = []
+
+ async def fake_sleep(duration):
+ sleeps.append(duration)
+ return None
+
+ monkeypatch.setattr("valuecell.core.task.executor.asyncio.sleep", fake_sleep)
+
+ await executor._sleep_with_cancellation(DummyTask(), delay=0.2)
+
+ assert sleeps
diff --git a/python/valuecell/core/task/tests/test_service_unit.py b/python/valuecell/core/task/tests/test_service_unit.py
new file mode 100644
index 000000000..689e12027
--- /dev/null
+++ b/python/valuecell/core/task/tests/test_service_unit.py
@@ -0,0 +1,62 @@
+import pytest
+from unittest.mock import AsyncMock
+
+from valuecell.core.task.models import Task
+from valuecell.core.task.service import TaskService
+
+
+@pytest.fixture()
+def manager() -> AsyncMock:
+ m = AsyncMock()
+ m.update_task = AsyncMock()
+ m.start_task = AsyncMock(return_value=True)
+ m.complete_task = AsyncMock(return_value=True)
+ m.fail_task = AsyncMock(return_value=True)
+ m.cancel_task = AsyncMock(return_value=True)
+ m.cancel_conversation_tasks = AsyncMock(return_value=2)
+ return m
+
+
+def _make_task() -> Task:
+ return Task(
+ task_id="task",
+ query="do something",
+ conversation_id="conv",
+ user_id="user",
+ agent_name="agent",
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_task(manager: AsyncMock):
+ service = TaskService(manager=manager)
+ task = _make_task()
+
+ await service.update_task(task)
+
+ manager.update_task.assert_awaited_once_with(task)
+
+
+@pytest.mark.asyncio
+async def test_start_complete_fail_cancel(manager: AsyncMock):
+ service = TaskService(manager=manager)
+
+ assert await service.start_task("task") is True
+ assert await service.complete_task("task") is True
+ assert await service.fail_task("task", "reason") is True
+ assert await service.cancel_task("task") is True
+
+ manager.start_task.assert_awaited_once_with("task")
+ manager.complete_task.assert_awaited_once_with("task")
+ manager.fail_task.assert_awaited_once_with("task", "reason")
+ manager.cancel_task.assert_awaited_once_with("task")
+
+
+@pytest.mark.asyncio
+async def test_cancel_conversation_tasks(manager: AsyncMock):
+ service = TaskService(manager=manager)
+
+ result = await service.cancel_conversation_tasks("conv")
+
+ assert result == 2
+ manager.cancel_conversation_tasks.assert_awaited_once_with("conv")
diff --git a/python/valuecell/core/task/tests/test_temporal.py b/python/valuecell/core/task/tests/test_temporal.py
new file mode 100644
index 000000000..607e14978
--- /dev/null
+++ b/python/valuecell/core/task/tests/test_temporal.py
@@ -0,0 +1,53 @@
+from datetime import datetime as real_datetime
+
+import pytest
+
+from valuecell.core.task.models import ScheduleConfig
+from valuecell.core.task.temporal import calculate_next_execution_delay
+
+
+def test_no_schedule_returns_none():
+ assert calculate_next_execution_delay(None) is None
+
+
+def test_interval_minutes_converted_to_seconds():
+ cfg = ScheduleConfig(interval_minutes=5)
+ assert calculate_next_execution_delay(cfg) == 300
+
+
+@pytest.mark.parametrize(
+ "current_time,daily_time,expected",
+ [
+ (real_datetime(2025, 1, 1, 8, 0, 0), "09:30", 5400),
+ (real_datetime(2025, 1, 1, 20, 0, 0), "07:15", 11 * 3600 + 15 * 60),
+ ],
+)
+def test_daily_time_calculations(
+ current_time: real_datetime,
+ daily_time: str,
+ expected: int,
+ monkeypatch: pytest.MonkeyPatch,
+):
+ class FixedDatetime(real_datetime):
+ @classmethod
+ def now(cls):
+ return current_time
+
+ monkeypatch.setattr("valuecell.core.task.temporal.datetime", FixedDatetime)
+
+ cfg = ScheduleConfig(daily_time=daily_time)
+ delay = calculate_next_execution_delay(cfg)
+
+ assert int(delay) == expected
+
+
+def test_invalid_daily_time_returns_none(monkeypatch: pytest.MonkeyPatch):
+ class FixedDatetime(real_datetime):
+ @classmethod
+ def now(cls):
+ return real_datetime(2025, 1, 1, 8, 0, 0)
+
+ monkeypatch.setattr("valuecell.core.task.temporal.datetime", FixedDatetime)
+
+ cfg = ScheduleConfig(daily_time="bad-input")
+ assert calculate_next_execution_delay(cfg) is None
From bc80eb3703cbdae88c56ba6d73ef66d08e36bb64 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Mon, 27 Oct 2025 18:34:11 +0800
Subject: [PATCH 27/30] make format
---
.../tests/test_orchestrator_context.py | 4 +++-
.../event/tests/test_event_response_service.py | 16 ++++++++++++----
.../core/event/tests/test_response_factory.py | 4 ++--
.../valuecell/core/task/tests/test_executor.py | 12 +++++++-----
4 files changed, 24 insertions(+), 12 deletions(-)
diff --git a/python/valuecell/core/coordinate/tests/test_orchestrator_context.py b/python/valuecell/core/coordinate/tests/test_orchestrator_context.py
index 8c55f163a..c9e1e5ed8 100644
--- a/python/valuecell/core/coordinate/tests/test_orchestrator_context.py
+++ b/python/valuecell/core/coordinate/tests/test_orchestrator_context.py
@@ -141,7 +141,9 @@ async def test_continue_planning_invalid_context_triggers_failure(orchestrator):
@pytest.mark.asyncio
-async def test_continue_planning_pending_request_prompts_user(orchestrator, monkeypatch):
+async def test_continue_planning_pending_request_prompts_user(
+ orchestrator, monkeypatch
+):
orch, bundle = orchestrator
loop = asyncio.get_event_loop()
planning_future = loop.create_future()
diff --git a/python/valuecell/core/event/tests/test_event_response_service.py b/python/valuecell/core/event/tests/test_event_response_service.py
index b346f25f0..d9da60c89 100644
--- a/python/valuecell/core/event/tests/test_event_response_service.py
+++ b/python/valuecell/core/event/tests/test_event_response_service.py
@@ -77,7 +77,9 @@ def event_service(response_factory: ResponseFactory, conversation_service: Async
@pytest.mark.asyncio
-async def test_emit_persists_items(event_service: EventResponseService, conversation_service: AsyncMock):
+async def test_emit_persists_items(
+ event_service: EventResponseService, conversation_service: AsyncMock
+):
response = event_service.factory.message_response_general(
event=NotifyResponseEvent.MESSAGE,
conversation_id="conv",
@@ -97,7 +99,9 @@ async def test_emit_persists_items(event_service: EventResponseService, conversa
@pytest.mark.asyncio
-async def test_emit_many(event_service: EventResponseService, conversation_service: AsyncMock):
+async def test_emit_many(
+ event_service: EventResponseService, conversation_service: AsyncMock
+):
responses = [
event_service.factory.message_response_general(
event=NotifyResponseEvent.MESSAGE,
@@ -122,7 +126,9 @@ async def test_emit_many(event_service: EventResponseService, conversation_servi
@pytest.mark.asyncio
-async def test_flush_task_response(event_service: EventResponseService, conversation_service: AsyncMock):
+async def test_flush_task_response(
+ event_service: EventResponseService, conversation_service: AsyncMock
+):
await event_service.flush_task_response("conv", "thread", "task")
conversation_service.add_item.assert_awaited_once()
@@ -131,7 +137,9 @@ async def test_flush_task_response(event_service: EventResponseService, conversa
@pytest.mark.asyncio
-async def test_route_task_status(monkeypatch: pytest.MonkeyPatch, event_service: EventResponseService):
+async def test_route_task_status(
+ monkeypatch: pytest.MonkeyPatch, event_service: EventResponseService
+):
sentinel = SimpleNamespace(done=True)
async def fake_handle(factory, task, thread_id, event):
diff --git a/python/valuecell/core/event/tests/test_response_factory.py b/python/valuecell/core/event/tests/test_response_factory.py
index 3c1cfaa0e..7c29e0060 100644
--- a/python/valuecell/core/event/tests/test_response_factory.py
+++ b/python/valuecell/core/event/tests/test_response_factory.py
@@ -156,8 +156,8 @@ def test_schedule_task_result_component(factory: ResponseFactory):
agent_name="agent",
)
- resp = factory.schedule_task_result_component(task, content="{\"result\":1}")
+ resp = factory.schedule_task_result_component(task, content='{"result":1}')
assert resp.data.agent_name == "agent"
assert resp.data.metadata == {"task_title": "Daily summary"}
- assert resp.data.payload.content == "{\"result\":1}" # type: ignore[attr-defined]
+ assert resp.data.payload.content == '{"result":1}' # type: ignore[attr-defined]
diff --git a/python/valuecell/core/task/tests/test_executor.py b/python/valuecell/core/task/tests/test_executor.py
index 32aab5fb4..f8389b9b2 100644
--- a/python/valuecell/core/task/tests/test_executor.py
+++ b/python/valuecell/core/task/tests/test_executor.py
@@ -34,7 +34,9 @@ class StubConversationService:
def __init__(self) -> None:
self.calls: list[tuple[str, str]] = []
- async def ensure_conversation(self, user_id: str, conversation_id: str, agent_name: str):
+ async def ensure_conversation(
+ self, user_id: str, conversation_id: str, agent_name: str
+ ):
self.calls.append((user_id, conversation_id))
@@ -150,9 +152,7 @@ async def test_execute_plan_guidance_message(task_service: TaskService):
tasks=[],
)
- responses = [
- resp async for resp in executor.execute_plan(plan, thread_id="thread")
- ]
+ responses = [resp async for resp in executor.execute_plan(plan, thread_id="thread")]
assert responses[0].event == StreamResponseEvent.MESSAGE_CHUNK
assert responses[0].data.payload.content == "Please review" # type: ignore[attr-defined]
@@ -185,7 +185,9 @@ async def test_emit_subagent_conversation_component(task_service: TaskService):
@pytest.mark.asyncio
-async def test_sleep_with_cancellation(monkeypatch: pytest.MonkeyPatch, task_service: TaskService):
+async def test_sleep_with_cancellation(
+ monkeypatch: pytest.MonkeyPatch, task_service: TaskService
+):
event_service = StubEventService()
executor = TaskExecutor(
agent_connections=SimpleNamespace(),
From a58b646ec59ff57b951f288f7ea853a16d4d3fc4 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Mon, 27 Oct 2025 18:36:59 +0800
Subject: [PATCH 28/30] add type hint
---
python/valuecell/core/task/temporal.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/python/valuecell/core/task/temporal.py b/python/valuecell/core/task/temporal.py
index 55ccd06ac..0ac529b9b 100644
--- a/python/valuecell/core/task/temporal.py
+++ b/python/valuecell/core/task/temporal.py
@@ -3,8 +3,10 @@
from loguru import logger
+from valuecell.core.task.models import ScheduleConfig
-def calculate_next_execution_delay(schedule_config) -> Optional[float]:
+
+def calculate_next_execution_delay(schedule_config: Optional[ScheduleConfig]) -> Optional[float]:
"""Calculate the delay in seconds until the next scheduled execution.
Args:
From 5264b942665e4467a5f4d42e74df4338ca7eb3df Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Tue, 28 Oct 2025 14:52:22 +0800
Subject: [PATCH 29/30] refactor: move tests
---
python/valuecell/{ => core/plan}/tests/test_planner_thread_id.py | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename python/valuecell/{ => core/plan}/tests/test_planner_thread_id.py (100%)
diff --git a/python/valuecell/tests/test_planner_thread_id.py b/python/valuecell/core/plan/tests/test_planner_thread_id.py
similarity index 100%
rename from python/valuecell/tests/test_planner_thread_id.py
rename to python/valuecell/core/plan/tests/test_planner_thread_id.py
From c82ec98ca1513eaaff0cb7f1f61e76f929a8b147 Mon Sep 17 00:00:00 2001
From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com>
Date: Tue, 28 Oct 2025 15:07:39 +0800
Subject: [PATCH 30/30] refactor: disable Crawl4AI tools temporarily
---
python/valuecell/core/super_agent/core.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/python/valuecell/core/super_agent/core.py b/python/valuecell/core/super_agent/core.py
index d9b7b03bb..099f7a235 100644
--- a/python/valuecell/core/super_agent/core.py
+++ b/python/valuecell/core/super_agent/core.py
@@ -4,7 +4,6 @@
from agno.agent import Agent
from agno.db.in_memory import InMemoryDb
-from agno.tools.crawl4ai import Crawl4aiTools
from pydantic import BaseModel, Field
from valuecell.core.super_agent.prompts import (
@@ -46,7 +45,8 @@ class SuperAgent:
def __init__(self) -> None:
self.agent = Agent(
model=get_model("PLANNER_MODEL_ID"),
- tools=[Crawl4aiTools()],
+ # TODO: enable tools when needed
+ # tools=[Crawl4aiTools()],
markdown=False,
debug_mode=agent_debug_mode_enabled(),
instructions=[SUPER_AGENT_INSTRUCTION],