From 88c79d7891edbbc2bb0c035eed70212e66d5e435 Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Tue, 18 Nov 2025 16:38:03 +0800 Subject: [PATCH 01/15] refactor: enhance guidelines for type safety and function design --- AGENTS.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/AGENTS.md b/AGENTS.md index d9791ae37..7bd51f944 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -99,6 +99,7 @@ async def critical_operation() -> None: * Add type hints across public and internal APIs. * Comments and docstrings should be in English and explain why, not only what. * Use Protocols and TypedDict or pydantic models where appropriate. +* Avoid excessive literal dict access (for example, using `obj['key']` everywhere); prefer typed structures such as `dataclass`, pydantic models, or `TypedDict` for clearer contracts and better type safety. ### Error Handling @@ -124,6 +125,7 @@ def parse_payload(raw: str) -> dict: * Avoid nested functions; extract helpers at module level. * Keep functions under 200 lines. Split into well-named helpers. +* Avoid functions with more than 10 parameters; prefer wrapping parameters in a struct or object. * Separate concerns: I/O, parsing, business logic, and orchestration. 
### Strings and Literals From 43f1d8b0a6730e61a4035bc903d2cb41a2497461 Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Tue, 18 Nov 2025 16:49:27 +0800 Subject: [PATCH 02/15] refactor: remove obsolete tests for StrategyAgent --- .../agents/strategy_agent/tests/__init__.py | 1 - .../agents/strategy_agent/tests/test_agent.py | 47 ------------------- 2 files changed, 48 deletions(-) delete mode 100644 python/valuecell/agents/strategy_agent/tests/__init__.py delete mode 100644 python/valuecell/agents/strategy_agent/tests/test_agent.py diff --git a/python/valuecell/agents/strategy_agent/tests/__init__.py b/python/valuecell/agents/strategy_agent/tests/__init__.py deleted file mode 100644 index 616ed7f21..000000000 --- a/python/valuecell/agents/strategy_agent/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Tests for strategy_agent diff --git a/python/valuecell/agents/strategy_agent/tests/test_agent.py b/python/valuecell/agents/strategy_agent/tests/test_agent.py deleted file mode 100644 index ba11f0429..000000000 --- a/python/valuecell/agents/strategy_agent/tests/test_agent.py +++ /dev/null @@ -1,47 +0,0 @@ -import asyncio -import json -import os -from pprint import pprint - -from valuecell.agents.strategy_agent.agent import StrategyAgent - - -# @pytest.mark.asyncio -async def strategy_agent_basic_stream(): - """Test basic functionality of StrategyAgent stream method.""" - agent = StrategyAgent() - - # Prepare a valid JSON query based on UserRequest structure - query = json.dumps( - { - "llm_model_config": { - "provider": "openrouter", - "model_id": "deepseek/deepseek-v3.1-terminus", - "api_key": os.getenv("OPENROUTER_API_KEY"), - }, - "exchange_config": { - "exchange_id": "binance", - "trading_mode": "virtual", - "api_key": "test-exchange-key", - "secret_key": "test-secret-key", - }, - "trading_config": { - "strategy_name": "Test Strategy", - "initial_capital": 10000.0, - "max_leverage": 5.0, - "max_positions": 5, - "symbols": 
["BTC/USDT", "ETH/USDT", "SOL/USDT"], - "decide_interval": 60, - "template_id": "aggressive", - "custom_prompt": "no custom prompt", - }, - } - ) - - async for response in agent.stream(query, "test-conversation", "test-task"): - pprint(response.metadata) - pprint(json.loads(response.content)) - print("\n\n") - - -asyncio.run(strategy_agent_basic_stream()) From 456041e629969ace99f4cbc2aeeb2d4abf552092 Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Tue, 18 Nov 2025 17:44:47 +0800 Subject: [PATCH 03/15] refactor StrategyAgent to be customizable and modular --- .../configs/agent_cards/strategy_agent.json | 2 +- .../agents/strategy_agent/__main__.py | 2 +- .../strategy_agent/_internal/__init__.py | 1 + .../_internal/stream_controller.py | 200 ++++++++++++ .../valuecell/agents/strategy_agent/agent.py | 285 ++++++++++-------- .../valuecell/agents/strategy_agent/core.py | 129 ++------ .../agents/strategy_agent/data/market.py | 20 +- .../strategy_agent/decision/composer.py | 22 +- .../strategy_agent/execution/interfaces.py | 11 + .../strategy_agent/execution/paper_trading.py | 4 + .../features/{simple.py => candle.py} | 4 +- .../strategy_agent/features/interfaces.py | 27 +- .../exchanges.py => features/multimodal.py} | 0 .../{multimodal_analysis.py => news.py} | 0 .../strategy_agent/features/news_analysis.py | 0 .../strategy_agent/features/pipeline.py | 86 ++++++ .../features/technical_indicators.py | 0 .../valuecell/agents/strategy_agent/models.py | 1 - .../agents/strategy_agent/prompt_agent.py | 47 +++ .../agents/strategy_agent/runtime.py | 213 ++++--------- .../trading_history/interfaces.py | 5 + .../trading_history/recorder.py | 8 +- .../valuecell/agents/strategy_agent/utils.py | 63 ++++ 23 files changed, 738 insertions(+), 392 deletions(-) create mode 100644 python/valuecell/agents/strategy_agent/_internal/__init__.py create mode 100644 python/valuecell/agents/strategy_agent/_internal/stream_controller.py rename 
python/valuecell/agents/strategy_agent/features/{simple.py => candle.py} (97%) rename python/valuecell/agents/strategy_agent/{execution/exchanges.py => features/multimodal.py} (100%) rename python/valuecell/agents/strategy_agent/features/{multimodal_analysis.py => news.py} (100%) delete mode 100644 python/valuecell/agents/strategy_agent/features/news_analysis.py create mode 100644 python/valuecell/agents/strategy_agent/features/pipeline.py delete mode 100644 python/valuecell/agents/strategy_agent/features/technical_indicators.py create mode 100644 python/valuecell/agents/strategy_agent/prompt_agent.py diff --git a/python/configs/agent_cards/strategy_agent.json b/python/configs/agent_cards/strategy_agent.json index 4256f8b29..76a78d4ba 100644 --- a/python/configs/agent_cards/strategy_agent.json +++ b/python/configs/agent_cards/strategy_agent.json @@ -26,6 +26,6 @@ "author": "ValueCell Team", "tags": ["strategy", "trading", "llm", "demo"], "notes": "This card is a lightweight example; replace model api_key and tune parameters for production use.", - "local_agent_class": "valuecell.agents.strategy_agent.agent:StrategyAgent" + "local_agent_class": "valuecell.agents.strategy_agent.prompt_agent:StrategyAgent" } } diff --git a/python/valuecell/agents/strategy_agent/__main__.py b/python/valuecell/agents/strategy_agent/__main__.py index 0f6d27cf7..6c2019369 100644 --- a/python/valuecell/agents/strategy_agent/__main__.py +++ b/python/valuecell/agents/strategy_agent/__main__.py @@ -2,7 +2,7 @@ from valuecell.core.agent import create_wrapped_agent -from .agent import StrategyAgent +from .prompt_agent import StrategyAgent if __name__ == "__main__": agent = create_wrapped_agent(StrategyAgent) diff --git a/python/valuecell/agents/strategy_agent/_internal/__init__.py b/python/valuecell/agents/strategy_agent/_internal/__init__.py new file mode 100644 index 000000000..bf4ab9c4c --- /dev/null +++ b/python/valuecell/agents/strategy_agent/_internal/__init__.py @@ -0,0 +1 @@ 
+"""Internal orchestration utilities for strategy agent runtime.""" diff --git a/python/valuecell/agents/strategy_agent/_internal/stream_controller.py b/python/valuecell/agents/strategy_agent/_internal/stream_controller.py new file mode 100644 index 000000000..f6a48b7c2 --- /dev/null +++ b/python/valuecell/agents/strategy_agent/_internal/stream_controller.py @@ -0,0 +1,200 @@ +"""Stream controller for strategy agent lifecycle and persistence orchestration. + +This module encapsulates the stream/persistence/lifecycle logic so that users +developing custom strategies only need to focus on decision logic, data sources, +and features. +""" + +from __future__ import annotations + +import asyncio +from datetime import datetime +from enum import Enum +from typing import TYPE_CHECKING + +from loguru import logger + +from valuecell.server.services import strategy_persistence + +from ..utils import get_current_timestamp_ms + +if TYPE_CHECKING: + from ..core import DecisionCycleResult + from ..runtime import StrategyRuntime + + +class ControllerState(str, Enum): + """Internal state machine for stream controller.""" + + INITIALIZING = "INITIALIZING" + WAITING_RUNNING = "WAITING_RUNNING" + RUNNING = "RUNNING" + STOPPED = "STOPPED" + + +class StreamController: + """Orchestrates strategy lifecycle, streaming, and persistence. 
+ + This controller manages: + - State transitions (INITIALIZING -> WAITING_RUNNING -> RUNNING -> STOPPED) + - Persistence of initial state, cycle results, and finalization + - Waiting for external "running" signal from persistence layer + """ + + def __init__(self, strategy_id: str, timeout_s: int = 300) -> None: + self.strategy_id = strategy_id + self.timeout_s = timeout_s + self._state = ControllerState.INITIALIZING + + @property + def state(self) -> ControllerState: + """Current controller state.""" + return self._state + + def transition_to(self, new_state: ControllerState) -> None: + """Transition to a new state.""" + logger.info( + "StreamController for strategy={}: {} -> {}", + self.strategy_id, + self._state.value, + new_state.value, + ) + self._state = new_state + + async def wait_running(self) -> None: + """Wait until persistence marks strategy as running or timeout. + + Transitions from WAITING_RUNNING to RUNNING when successful. + Swallows exceptions to avoid nested error handling. + """ + self.transition_to(ControllerState.WAITING_RUNNING) + since = datetime.now() + try: + while not strategy_persistence.strategy_running(self.strategy_id): + elapsed = (datetime.now() - since).total_seconds() + if elapsed > self.timeout_s: + logger.warning( + "Timeout waiting for strategy_id={} to be marked as running ({}s)", + self.strategy_id, + self.timeout_s, + ) + break + await asyncio.sleep(1) + logger.info( + "Waiting for strategy_id={} to be marked as running", + self.strategy_id, + ) + except Exception: + logger.exception( + "Error while waiting for strategy {} to be marked running", + self.strategy_id, + ) + self.transition_to(ControllerState.RUNNING) + + def persist_initial_state(self, runtime: StrategyRuntime) -> None: + """Persist initial portfolio snapshot and strategy summary. + + Logs and swallows errors to keep controller resilient. 
+ """ + try: + initial_portfolio = runtime.coordinator.portfolio_service.get_view() + try: + initial_portfolio.strategy_id = self.strategy_id + except Exception: + pass + + ok = strategy_persistence.persist_portfolio_view(initial_portfolio) + if ok: + logger.info( + "Persisted initial portfolio view for strategy={}", self.strategy_id + ) + + timestamp_ms = get_current_timestamp_ms() + initial_summary = runtime.coordinator.build_summary(timestamp_ms, []) + ok = strategy_persistence.persist_strategy_summary(initial_summary) + if ok: + logger.info( + "Persisted initial strategy summary for strategy={}", + self.strategy_id, + ) + except Exception: + logger.exception( + "Failed to persist initial portfolio/summary for {}", self.strategy_id + ) + + def persist_cycle_results(self, result: DecisionCycleResult) -> None: + """Persist trades, portfolio view, and strategy summary for a cycle. + + Errors are logged but not raised to keep the decision loop resilient. + """ + try: + for trade in result.trades: + item = strategy_persistence.persist_trade_history( + self.strategy_id, trade + ) + if item: + logger.info( + "Persisted trade {} for strategy={}", + trade.trade_id, + self.strategy_id, + ) + + ok = strategy_persistence.persist_portfolio_view(result.portfolio_view) + if ok: + logger.info( + "Persisted portfolio view for strategy={}", self.strategy_id + ) + + ok = strategy_persistence.persist_strategy_summary(result.strategy_summary) + if ok: + logger.info( + "Persisted strategy summary for strategy={}", self.strategy_id + ) + except Exception: + logger.exception("Error persisting cycle results for {}", self.strategy_id) + + async def finalize( + self, runtime: StrategyRuntime, reason: str = "normal_exit" + ) -> None: + """Finalize strategy: close resources and mark as stopped. 
+ + Args: + runtime: The strategy runtime to finalize + reason: Reason for stopping (e.g., 'normal_exit', 'cancelled', 'error') + """ + self.transition_to(ControllerState.STOPPED) + # Close runtime resources (e.g., CCXT exchange) + try: + await runtime.coordinator.close() + logger.info( + "Closed runtime coordinator resources for strategy {} (reason: {})", + self.strategy_id, + reason, + ) + except Exception: + logger.exception( + "Failed to close runtime resources for strategy {}", self.strategy_id + ) + + # Mark strategy as stopped in persistence + try: + strategy_persistence.mark_strategy_stopped(self.strategy_id) + logger.info( + "Marked strategy {} as stopped (reason: {})", self.strategy_id, reason + ) + except Exception: + logger.exception( + "Failed to mark strategy stopped for {} (reason: {})", + self.strategy_id, + reason, + ) + + def is_running(self) -> bool: + """Check if strategy is still running according to persistence layer.""" + try: + return strategy_persistence.strategy_running(self.strategy_id) + except Exception: + logger.warning( + "Error checking running status for strategy {}", self.strategy_id + ) + return False diff --git a/python/valuecell/agents/strategy_agent/agent.py b/python/valuecell/agents/strategy_agent/agent.py index 33ddc6b13..de8eaf50d 100644 --- a/python/valuecell/agents/strategy_agent/agent.py +++ b/python/valuecell/agents/strategy_agent/agent.py @@ -1,111 +1,118 @@ from __future__ import annotations import asyncio -from datetime import datetime -from typing import AsyncGenerator, Dict, Optional +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, AsyncGenerator, Dict, Optional from loguru import logger from valuecell.core.agent.responses import streaming from valuecell.core.types import BaseAgent, StreamResponse -from valuecell.server.services import strategy_persistence +from ._internal.stream_controller import StreamController from .models import ( ComponentType, StrategyStatus, StrategyStatusContent, 
UserRequest, ) -from .runtime import create_strategy_runtime_async +from .runtime import create_strategy_runtime +if TYPE_CHECKING: + from .decision.interfaces import Composer + from .features.interfaces import FeaturesPipeline + from .runtime import DecisionCycleResult, StrategyRuntime -class StrategyAgent(BaseAgent): - """Top-level Strategy Agent integrating the decision coordinator.""" - async def _wait_until_marked_running( - self, strategy_id: str, timeout_s: int = 300 - ) -> None: - """Wait until persistence marks the strategy as running or timeout. +class BaseStrategyAgent(BaseAgent, ABC): + """Abstract base class for strategy agents. + + Users should subclass this and implement: + - _build_features_pipeline: Define feature computation logic + - _create_decision_composer: Define decision composer (optional, defaults to LLM) + - _on_start: Custom initialization after runtime creation (optional) + - _on_cycle_result: Hook for post-cycle custom logic (optional) + - _on_stop: Custom cleanup before finalization (optional) + + The base class handles: + - Stream lifecycle and state transitions + - Persistence orchestration (initial state, cycle results, finalization) + - Error handling and resource cleanup + """ + + @abstractmethod + def _build_features_pipeline(self, request: UserRequest) -> FeaturesPipeline | None: + """Build the features pipeline for the strategy. - This helper logs progress and returns when either the strategy is running - or the timeout elapses. It swallows exceptions from the persistence layer - to avoid bubbling nested try/except into `stream`. + Return a `FeaturesPipeline` implementation to customize how market data + and feature vectors are produced for each decision cycle. Returning + ``None`` instructs the runtime to use the default pipeline. 
+ + Args: + request: The user request with strategy configuration + + Returns: + FeaturesPipeline instance or None for default behaviour """ - since = datetime.now() - try: - while not strategy_persistence.strategy_running(strategy_id): - if (datetime.now() - since).total_seconds() > timeout_s: - logger.error( - "Timeout waiting for strategy_id={} to be marked as running", - strategy_id, - ) - break + raise NotImplementedError - await asyncio.sleep(1) - logger.info( - "Waiting for strategy_id={} to be marked as running", strategy_id - ) - except Exception: - # Avoid raising from persistence checks; we still proceed to start the runtime. - logger.exception( - "Error while waiting for strategy {} to be marked running", strategy_id - ) + def _create_decision_composer(self, request: UserRequest) -> Composer | None: + """Build the decision composer for the strategy. - def _persist_initial_state(self, runtime, strategy_id: str) -> None: - """Persist initial portfolio snapshot and strategy summary. + Override to provide a custom composer. Return None to use default LLM composer. - This helper captures and logs any errors internally so callers don't need - additional try/except nesting. + Args: + request: The user request with strategy configuration + + Returns: + Composer instance or None for default composer """ - try: - initial_portfolio = runtime.coordinator._portfolio_service.get_view() - try: - initial_portfolio.strategy_id = strategy_id - except Exception: - pass + return None - ok = strategy_persistence.persist_portfolio_view(initial_portfolio) - if ok: - logger.info( - "Persisted initial portfolio view for strategy={}", strategy_id - ) + def _on_start(self, runtime: StrategyRuntime, request: UserRequest) -> None: + """Hook called after runtime creation, before first cycle. 
- timestamp_ms = int(runtime.coordinator._clock().timestamp() * 1000) - initial_summary = runtime.coordinator._build_summary(timestamp_ms, []) - ok = strategy_persistence.persist_strategy_summary(initial_summary) - if ok: - logger.info( - "Persisted initial strategy summary for strategy={}", strategy_id - ) - except Exception: - logger.exception( - "Failed to persist initial portfolio/summary for {}", strategy_id - ) + Use for custom initialization, caching, or metric registration. + Exceptions are logged but don't prevent runtime startup. + + Args: + runtime: The created strategy runtime + request: The user request + """ + pass - def _persist_cycle_results(self, strategy_id: str, result) -> None: - """Persist trades, portfolio view and strategy summary for a cycle. + def _on_cycle_result( + self, + result: DecisionCycleResult, + runtime: StrategyRuntime, + request: UserRequest, + ) -> None: + """Hook called after each decision cycle completes. - Errors are logged but not raised to keep the decision loop resilient. + Non-blocking; exceptions are swallowed and logged. + Use for custom metrics, logging, or side effects. + + Args: + result: The DecisionCycleResult from the cycle + runtime: The strategy runtime + request: The user request """ - try: - for trade in result.trades: - item = strategy_persistence.persist_trade_history(strategy_id, trade) - if item: - logger.info( - "Persisted trade {} for strategy={}", - getattr(trade, "trade_id", None), - strategy_id, - ) + pass - ok = strategy_persistence.persist_portfolio_view(result.portfolio_view) - if ok: - logger.info("Persisted portfolio view for strategy={}", strategy_id) + def _on_stop( + self, runtime: StrategyRuntime, request: UserRequest, reason: str + ) -> None: + """Hook called before finalization when strategy stops. 
- ok = strategy_persistence.persist_strategy_summary(result.strategy_summary) - if ok: - logger.info("Persisted strategy summary for strategy={}", strategy_id) - except Exception: - logger.exception("Error persisting cycle results for {}", strategy_id) + Use for cleanup or final reporting. + Exceptions are logged but don't prevent finalization. + + Args: + runtime: The strategy runtime + request: The user request + reason: Reason for stopping (e.g., 'normal_exit', 'cancelled', 'error') + """ + pass async def stream( self, @@ -114,6 +121,16 @@ async def stream( task_id: str, dependencies: Optional[Dict] = None, ) -> AsyncGenerator[StreamResponse, None]: + """Stream strategy execution with lifecycle management. + + Handles: + - Request parsing and validation + - Runtime creation with custom hooks + - State transitions and persistence + - Decision loop execution + - Resource cleanup and finalization + """ + # Parse and validate request try: request = UserRequest.model_validate_json(query) except ValueError as exc: @@ -122,7 +139,8 @@ async def stream( yield streaming.done() return - runtime = await create_strategy_runtime_async(request) + # Create runtime (calls _build_decision, _build_features_pipeline internally) + runtime = await self._create_runtime(request) strategy_id = runtime.strategy_id logger.info( "Created runtime for strategy_id={} conversation={} task={}", @@ -130,6 +148,11 @@ async def stream( conversation_id, task_id, ) + + # Initialize stream controller + controller = StreamController(strategy_id) + + # Emit initial RUNNING status initial_payload = StrategyStatusContent( strategy_id=strategy_id, status=StrategyStatus.RUNNING, @@ -140,28 +163,38 @@ async def stream( ) # Wait until strategy is marked as running in persistence layer - await self._wait_until_marked_running(strategy_id) + await controller.wait_running() + + # Call user hook for custom initialization + try: + self._on_start(runtime, request) + except Exception: + logger.exception("Error in 
_on_start hook for strategy {}", strategy_id) try: logger.info("Starting decision loop for strategy_id={}", strategy_id) - # Persist initial portfolio snapshot and strategy summary before entering the loop - self._persist_initial_state(runtime, strategy_id) - while True: - if not strategy_persistence.strategy_running(strategy_id): - logger.info( - "Strategy_id={} is no longer running, exiting decision loop", - strategy_id, - ) - break + # Persist initial portfolio snapshot and strategy summary + controller.persist_initial_state(runtime) + # Main decision loop + while controller.is_running(): result = await runtime.run_cycle() logger.info( "Run cycle completed for strategy={} trades_count={}", strategy_id, len(result.trades), ) - # Persist and stream cycle results (trades, portfolio, summary) - self._persist_cycle_results(strategy_id, result) + + # Persist cycle results + controller.persist_cycle_results(result) + + # Call user hook for post-cycle logic + try: + self._on_cycle_result(result, runtime, request) + except Exception: + logger.exception( + "Error in _on_cycle_result hook for strategy {}", strategy_id + ) logger.info( "Waiting for next decision cycle for strategy_id={}, interval={}seconds", @@ -170,43 +203,53 @@ async def stream( ) await asyncio.sleep(request.trading_config.decide_interval) + logger.info( + "Strategy_id={} is no longer running, exiting decision loop", + strategy_id, + ) + stop_reason = "normal_exit" + except asyncio.CancelledError: - # Ensure strategy is marked stopped on cancellation - try: - strategy_persistence.mark_strategy_stopped(strategy_id) - logger.info( - "Marked strategy {} as stopped due to cancellation", strategy_id - ) - except Exception: - logger.exception( - "Failed to mark strategy stopped for {} on cancellation", - strategy_id, - ) + stop_reason = "cancelled" + logger.info("Strategy {} cancelled", strategy_id) raise except Exception as err: # noqa: BLE001 + stop_reason = "error" logger.exception("StrategyAgent stream 
failed: {}", err) yield streaming.message_chunk(f"StrategyAgent error: {err}") finally: - # Close runtime resources (e.g., CCXT exchange) before marking stopped - try: - if hasattr(runtime, "coordinator") and hasattr( - runtime.coordinator, "close" - ): - await runtime.coordinator.close() - logger.info( - "Closed runtime coordinator resources for strategy {}", - strategy_id, - ) - except Exception: - logger.exception( - "Failed to close runtime resources for strategy {}", strategy_id - ) - # Always mark strategy as stopped when stream ends for any reason + # Call user hook before finalization try: - strategy_persistence.mark_strategy_stopped(strategy_id) - logger.info("Marked strategy {} as stopped in finalizer", strategy_id) + self._on_stop(runtime, request, stop_reason) except Exception: - logger.exception( - "Failed to mark strategy stopped for {} in finalizer", strategy_id - ) + logger.exception("Error in _on_stop hook for strategy {}", strategy_id) + + # Finalize: close resources and mark stopped + await controller.finalize(runtime, reason=stop_reason) yield streaming.done() + + async def _create_runtime(self, request: UserRequest) -> StrategyRuntime: + """Create strategy runtime with custom components. + + Calls user hooks to build custom decision composer and features pipeline. + Falls back to defaults if hooks return None. + + Args: + request: User request with strategy configuration + + Returns: + StrategyRuntime instance + """ + # Let user build custom composer (or None for default) + composer = self._create_decision_composer(request) + + # Let user build custom features pipeline (or None for default) + # The coordinator invokes this pipeline each cycle to fetch data + # and compute the feature vectors consumed by the decision step. 
+ features_pipeline = self._build_features_pipeline(request) + + # Create runtime with custom components + # The runtime factory will use defaults if composer/features are None + return await create_strategy_runtime( + request, composer=composer, features_pipeline=features_pipeline + ) diff --git a/python/valuecell/agents/strategy_agent/core.py b/python/valuecell/agents/strategy_agent/core.py index 6ed0c5454..3a7e30f1a 100644 --- a/python/valuecell/agents/strategy_agent/core.py +++ b/python/valuecell/agents/strategy_agent/core.py @@ -2,17 +2,15 @@ from abc import ABC, abstractmethod from dataclasses import dataclass -from datetime import datetime, timezone -from typing import Callable, List +from typing import List from loguru import logger from valuecell.utils.uuid import generate_uuid -from .data.interfaces import MarketDataSource from .decision.interfaces import Composer from .execution.interfaces import ExecutionGateway -from .features.interfaces import FeatureComputer +from .features.interfaces import FeaturesPipeline from .models import ( ComposeContext, FeatureVector, @@ -33,6 +31,10 @@ ) from .portfolio.interfaces import PortfolioService from .trading_history.interfaces import DigestBuilder, HistoryRecorder +from .utils import ( + fetch_free_cash_from_gateway, + get_current_timestamp_ms, +) @dataclass @@ -71,11 +73,10 @@ async def run_once(self) -> DecisionCycleResult: """Execute one decision cycle and return the result.""" raise NotImplementedError - -def _default_clock() -> datetime: - """Return current time in UTC.""" - - return datetime.now(timezone.utc) + @abstractmethod + async def close(self) -> None: + """Release any held resources.""" + raise NotImplementedError class DefaultDecisionCoordinator(DecisionCoordinator): @@ -87,82 +88,37 @@ def __init__( request: UserRequest, strategy_id: str, portfolio_service: PortfolioService, - market_data_source: MarketDataSource, - feature_computer: FeatureComputer, + features_pipeline: FeaturesPipeline, composer: 
Composer, execution_gateway: ExecutionGateway, history_recorder: HistoryRecorder, digest_builder: DigestBuilder, - prompt_provider: Callable[[UserRequest], str], - history_limit: int = 200, ) -> None: self._request = request self.strategy_id = strategy_id - self._portfolio_service = portfolio_service - self._market_data_source = market_data_source - self._feature_computer = feature_computer + self.portfolio_service = portfolio_service + self._features_pipeline = features_pipeline self._composer = composer self._execution_gateway = execution_gateway self._history_recorder = history_recorder self._digest_builder = digest_builder - self._history_limit = max(history_limit, 1) self._symbols = list(dict.fromkeys(request.trading_config.symbols)) - # prompt_provider is a required parameter (caller must supply a prompt builder) - self._prompt_provider = prompt_provider - # Use the default clock internally; clock is not a constructor parameter - self._clock = _default_clock - self._history_records: List[HistoryRecord] = [] self._realized_pnl: float = 0.0 self._unrealized_pnl: float = 0.0 self._cycle_index: int = 0 self._strategy_name = request.trading_config.strategy_name or strategy_id async def run_once(self) -> DecisionCycleResult: - timestamp_ms = int(self._clock().timestamp() * 1000) + timestamp_ms = get_current_timestamp_ms() compose_id = generate_uuid("compose") - portfolio = self._portfolio_service.get_view() + portfolio = self.portfolio_service.get_view() # LIVE mode: sync cash from exchange free balance; set buying power to cash try: - if ( - self._request.exchange_config.trading_mode == TradingMode.LIVE - and hasattr(self._execution_gateway, "fetch_balance") - ): - balance = await self._execution_gateway.fetch_balance() - free_map = {} - free_section = ( - balance.get("free") if isinstance(balance, dict) else None + if self._request.exchange_config.trading_mode == TradingMode.LIVE: + free_cash = await fetch_free_cash_from_gateway( + self._execution_gateway, 
self._symbols ) - if isinstance(free_section, dict): - free_map = { - str(k).upper(): float(v or 0.0) for k, v in free_section.items() - } - else: - # Handle nested per-currency dict shapes - iterable = balance.items() if isinstance(balance, dict) else [] - for k, v in iterable: - if isinstance(v, dict) and "free" in v: - try: - free_map[str(k).upper()] = float(v.get("free") or 0.0) - except Exception: - continue - # Derive quote currencies from symbols, fallback to common USD-stable quotes - quotes = [] - for sym in self._symbols or []: - s = str(sym).upper() - if "/" in s and len(s.split("/")) == 2: - quotes.append(s.split("/")[1]) - elif "-" in s and len(s.split("-")) == 2: - quotes.append(s.split("-")[1]) - # Deduplicate preserving order - quotes = list(dict.fromkeys(quotes)) - free_cash = 0.0 - if quotes: - for q in quotes: - free_cash += float(free_map.get(q, 0.0) or 0.0) - else: - for q in ("USDT", "USD", "USDC"): - free_cash += float(free_map.get(q, 0.0) or 0.0) portfolio.account_balance = float(free_cash) if self._request.exchange_config.market_type == MarketType.SPOT: portfolio.buying_power = max(0.0, float(portfolio.account_balance)) @@ -174,29 +130,10 @@ async def run_once(self) -> DecisionCycleResult: if self._request.exchange_config.market_type == MarketType.SPOT: portfolio.buying_power = max(0.0, float(portfolio.account_balance)) - # Use fixed 1-second interval and lookback of 3 minutes (60 * 3 seconds) - candles_1s = await self._market_data_source.get_recent_candles( - self._symbols, "1s", 60 * 3 - ) - # Compute micro (1s) features with meta preserved - micro_features = self._feature_computer.compute_features(candles=candles_1s) - - # Use fixed 1-minute interval and lookback of 4 hours (60 * 4 minutes) - candles_1m = await self._market_data_source.get_recent_candles( - self._symbols, "1m", 60 * 4 - ) - minute_features = self._feature_computer.compute_features(candles=candles_1m) - - # Compose full features list: minute-level features (structural) 
then micro-level (freshness). - features = [] - features.extend(minute_features) - features.extend(micro_features) - - # Ask the data source for an authoritative market snapshot (exchange-ticker based) - market_snapshot = await self._market_data_source.get_market_snapshot( - self._symbols - ) - digest = self._digest_builder.build(list(self._history_records)) + pipeline_result = await self._features_pipeline.build() + features = list(pipeline_result.features or []) + market_snapshot = pipeline_result.market_snapshot or {} + digest = self._digest_builder.build(self._history_recorder.get_records()) context = ComposeContext( ts=timestamp_ms, @@ -205,7 +142,6 @@ async def run_once(self) -> DecisionCycleResult: features=features, portfolio=portfolio, digest=digest, - prompt_text=self._prompt_provider(self._request), market_snapshot=market_snapshot, ) @@ -233,8 +169,8 @@ async def run_once(self) -> DecisionCycleResult: ) trades = self._create_trades(tx_results, compose_id, timestamp_ms) - self._portfolio_service.apply_trades(trades, market_snapshot) - summary = self._build_summary(timestamp_ms, trades) + self.portfolio_service.apply_trades(trades, market_snapshot) + summary = self.build_summary(timestamp_ms, trades) history_records = self._create_history_records( timestamp_ms, compose_id, features, instructions, trades, summary @@ -243,14 +179,10 @@ async def run_once(self) -> DecisionCycleResult: for record in history_records: self._history_recorder.record(record) - self._history_records.extend(history_records) - if len(self._history_records) > self._history_limit: - self._history_records = self._history_records[-self._history_limit :] - - digest = self._digest_builder.build(list(self._history_records)) + digest = self._digest_builder.build(self._history_recorder.get_records()) self._cycle_index += 1 - portfolio = self._portfolio_service.get_view() + portfolio = self.portfolio_service.get_view() return DecisionCycleResult( compose_id=compose_id, 
timestamp_ms=timestamp_ms, @@ -271,7 +203,7 @@ def _create_trades( trades: List[TradeHistoryEntry] = [] # Current portfolio view (pre-apply) used to detect closes try: - pre_view = self._portfolio_service.get_view() + pre_view = self.portfolio_service.get_view() except Exception: pre_view = None @@ -419,7 +351,7 @@ def _create_trades( if is_closing and not is_full_close: # scan history records (most recent first) to find an open trade for this symbol paired_id = None - for record in reversed(self._history_records): + for record in reversed(self._history_recorder.get_records()): if record.kind != "execution": continue trades_payload = record.payload.get("trades", []) or [] @@ -462,7 +394,7 @@ def _create_trades( trades.append(trade) return trades - def _build_summary( + def build_summary( self, timestamp_ms: int, trades: List[TradeHistoryEntry], @@ -471,7 +403,7 @@ def _build_summary( self._realized_pnl += realized_delta # Prefer authoritative unrealized PnL from the portfolio view when available. try: - view = self._portfolio_service.get_view() + view = self.portfolio_service.get_view() unrealized = float(view.total_unrealized_pnl or 0.0) # In LIVE mode, treat equity as available cash (disallow financing) try: @@ -542,7 +474,6 @@ def _create_history_records( kind="compose", reference_id=compose_id, payload={ - "prompt": self._prompt_provider(self._request), "summary": summary.model_dump(mode="json"), }, ), diff --git a/python/valuecell/agents/strategy_agent/data/market.py b/python/valuecell/agents/strategy_agent/data/market.py index 6134d9baa..7612a9b95 100644 --- a/python/valuecell/agents/strategy_agent/data/market.py +++ b/python/valuecell/agents/strategy_agent/data/market.py @@ -1,5 +1,5 @@ from collections import defaultdict -from typing import Dict, List, Optional +from typing import List, Optional from loguru import logger @@ -18,16 +18,8 @@ class SimpleMarketDataSource(MarketDataSource): generator so the runtime remains functional in tests and offline. 
""" - def __init__( - self, - base_prices: Optional[Dict[str, float]] = None, - exchange_id: Optional[str] = None, - ccxt_options: Optional[Dict] = None, - ) -> None: - self._base_prices = base_prices or {} - self._counters: Dict[str, int] = defaultdict(int) + def __init__(self, exchange_id: Optional[str] = None) -> None: self._exchange_id = exchange_id or "binance" - self._ccxt_options = ccxt_options or {} async def get_recent_candles( self, symbols: List[str], interval: str, lookback: int @@ -35,7 +27,7 @@ async def get_recent_candles( async def _fetch(symbol: str) -> List[List]: # instantiate exchange class by name (e.g., ccxtpro.kraken) exchange_cls = get_exchange_cls(self._exchange_id) - exchange = exchange_cls({"newUpdates": False, **self._ccxt_options}) + exchange = exchange_cls({"newUpdates": False}) try: # ccxt.pro uses async fetch_ohlcv data = await exchange.fetch_ohlcv( @@ -86,8 +78,8 @@ async def get_market_snapshot(self, symbols: List[str]) -> MarketSnapShotType: The method tries to use the exchange's `fetch_ticker` (and optionally `fetch_open_interest` / `fetch_funding_rate` when available) to build - a mapping symbol -> last price. On any failure for a symbol, it will - fall back to `base_prices` if provided or omit the symbol. + a mapping symbol -> last price. On any failure for a symbol, the + symbol will be omitted from the snapshot. 
Example: ``` "BTC/USDT": { @@ -165,7 +157,7 @@ async def get_market_snapshot(self, symbols: List[str]) -> MarketSnapShotType: snapshot = defaultdict(dict) exchange_cls = get_exchange_cls(self._exchange_id) - exchange = exchange_cls({"newUpdates": False, **self._ccxt_options}) + exchange = exchange_cls({"newUpdates": False}) try: for symbol in symbols: sym = normalize_symbol(symbol) diff --git a/python/valuecell/agents/strategy_agent/decision/composer.py b/python/valuecell/agents/strategy_agent/decision/composer.py index 0d489b84b..3684e3119 100644 --- a/python/valuecell/agents/strategy_agent/decision/composer.py +++ b/python/valuecell/agents/strategy_agent/decision/composer.py @@ -53,6 +53,26 @@ def __init__( self._default_slippage_bps = default_slippage_bps self._quantity_precision = quantity_precision + def _build_prompt_text(self) -> str: + """Return a resolved prompt text by fusing custom_prompt and prompt_text. + + Fusion logic: + - If custom_prompt exists, use it as base + - If prompt_text also exists, append it after custom_prompt + - If only prompt_text exists, use it + - Fallback: simple generated mention of symbols + """ + custom = self._request.trading_config.custom_prompt + prompt = self._request.trading_config.prompt_text + if custom and prompt: + return f"{custom}\n\n{prompt}" + elif custom: + return custom + elif prompt: + return prompt + symbols = ", ".join(self._request.trading_config.symbols) + return f"Compose trading instructions for symbols: {symbols}." 
+ async def compose(self, context: ComposeContext) -> List[TradeInstruction]: prompt = self._build_llm_prompt(context) try: @@ -214,7 +234,7 @@ def _build_llm_prompt(self, context: ComposeContext) -> str: payload = self._prune_none( { - "strategy_prompt": context.prompt_text, + "strategy_prompt": self._build_prompt_text(), "summary": summary, "market": market, "features": features, diff --git a/python/valuecell/agents/strategy_agent/execution/interfaces.py b/python/valuecell/agents/strategy_agent/execution/interfaces.py index 706a51f57..26dee18c5 100644 --- a/python/valuecell/agents/strategy_agent/execution/interfaces.py +++ b/python/valuecell/agents/strategy_agent/execution/interfaces.py @@ -27,3 +27,14 @@ async def execute( """ raise NotImplementedError + + @abstractmethod + async def close(self) -> None: + """Close the gateway and release any held resources. + + Implementations should cleanup network connections, clients, or other + resources they hold. This method is optional to call but should be + implemented by gateways that allocate external resources. 
+ """ + + raise NotImplementedError diff --git a/python/valuecell/agents/strategy_agent/execution/paper_trading.py b/python/valuecell/agents/strategy_agent/execution/paper_trading.py index f44769e97..c57e9f140 100644 --- a/python/valuecell/agents/strategy_agent/execution/paper_trading.py +++ b/python/valuecell/agents/strategy_agent/execution/paper_trading.py @@ -65,3 +65,7 @@ async def execute( ) return results + + async def close(self) -> None: + """No-op close for paper gateway (nothing to cleanup).""" + return None diff --git a/python/valuecell/agents/strategy_agent/features/simple.py b/python/valuecell/agents/strategy_agent/features/candle.py similarity index 97% rename from python/valuecell/agents/strategy_agent/features/simple.py rename to python/valuecell/agents/strategy_agent/features/candle.py index ce2b90833..e5c80739b 100644 --- a/python/valuecell/agents/strategy_agent/features/simple.py +++ b/python/valuecell/agents/strategy_agent/features/candle.py @@ -5,10 +5,10 @@ import pandas as pd from ..models import Candle, FeatureVector -from .interfaces import FeatureComputer +from .interfaces import CandleBasedFeatureComputer -class SimpleFeatureComputer(FeatureComputer): +class SimpleCandleFeatureComputer(CandleBasedFeatureComputer): """Computes basic momentum and volume features.""" def compute_features( diff --git a/python/valuecell/agents/strategy_agent/features/interfaces.py b/python/valuecell/agents/strategy_agent/features/interfaces.py index 1702c3d4e..2d2044380 100644 --- a/python/valuecell/agents/strategy_agent/features/interfaces.py +++ b/python/valuecell/agents/strategy_agent/features/interfaces.py @@ -1,15 +1,16 @@ from __future__ import annotations from abc import ABC, abstractmethod +from dataclasses import dataclass from typing import Any, Dict, List, Optional -from ..models import Candle, FeatureVector +from ..models import Candle, FeatureVector, MarketSnapShotType # Contracts for feature computation (module-local abstract interfaces). 
# Plain ABCs (not Pydantic) to keep implementations lightweight. -class FeatureComputer(ABC): +class CandleBasedFeatureComputer(ABC): """Computes feature vectors from raw market data (ticks/candles). Implementations may cache windows, offload CPU-heavy parts, or compose @@ -33,3 +34,25 @@ def compute_features( A list of FeatureVector items, one or more per instrument. """ raise NotImplementedError + + +@dataclass +class FeaturesPipelineResult: + """Result of running a features pipeline.""" + + features: List[FeatureVector] + market_snapshot: MarketSnapShotType + + +class FeaturesPipeline(ABC): + """Abstract pipeline that produces features and supporting market context.""" + + @abstractmethod + async def build(self) -> FeaturesPipelineResult: + """Compute feature vectors and associated market snapshot. + + Implementations should use their configured request/inputs to determine + which symbols to process; callers should not pass runtime parameters + into this call. + """ + raise NotImplementedError diff --git a/python/valuecell/agents/strategy_agent/execution/exchanges.py b/python/valuecell/agents/strategy_agent/features/multimodal.py similarity index 100% rename from python/valuecell/agents/strategy_agent/execution/exchanges.py rename to python/valuecell/agents/strategy_agent/features/multimodal.py diff --git a/python/valuecell/agents/strategy_agent/features/multimodal_analysis.py b/python/valuecell/agents/strategy_agent/features/news.py similarity index 100% rename from python/valuecell/agents/strategy_agent/features/multimodal_analysis.py rename to python/valuecell/agents/strategy_agent/features/news.py diff --git a/python/valuecell/agents/strategy_agent/features/news_analysis.py b/python/valuecell/agents/strategy_agent/features/news_analysis.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python/valuecell/agents/strategy_agent/features/pipeline.py b/python/valuecell/agents/strategy_agent/features/pipeline.py new file mode 100644 index 
000000000..7e49755a4 --- /dev/null +++ b/python/valuecell/agents/strategy_agent/features/pipeline.py @@ -0,0 +1,86 @@ +"""Feature pipeline abstractions for the strategy agent. + +This module encapsulates the data-fetch and feature-computation steps used by +strategy runtimes. Introducing a dedicated pipeline object means the decision +coordinator no longer needs direct access to the market data source or feature +computer—everything is orchestrated by the pipeline. +""" + +from __future__ import annotations + +from typing import List + +from ..data.interfaces import MarketDataSource +from ..data.market import SimpleMarketDataSource +from ..models import FeatureVector, UserRequest +from .candle import SimpleCandleFeatureComputer +from .interfaces import ( + CandleBasedFeatureComputer, + FeaturesPipeline, + FeaturesPipelineResult, +) + + +class DefaultFeaturesPipeline(FeaturesPipeline): + """Default pipeline using the simple data source and feature computer.""" + + def __init__( + self, + *, + request: UserRequest, + market_data_source: MarketDataSource, + feature_computer: CandleBasedFeatureComputer, + micro_interval: str = "1s", + micro_lookback: int = 60 * 3, + medium_interval: str = "1m", + medium_lookback: int = 60 * 4, + ) -> None: + self._request = request + self._market_data_source = market_data_source + self._feature_computer = feature_computer + self._micro_interval = micro_interval + self._micro_lookback = micro_lookback + self._medium_interval = medium_interval + self._medium_lookback = medium_lookback + self._symbols = list(dict.fromkeys(request.trading_config.symbols)) + + async def build(self) -> FeaturesPipelineResult: + """Fetch candles, compute feature vectors, and return market snapshot.""" + # Determine symbols from the configured request so caller doesn't pass them + candles_micro = await self._market_data_source.get_recent_candles( + self._symbols, self._micro_interval, self._micro_lookback + ) + micro_features = 
self._feature_computer.compute_features(candles=candles_micro) + + candles_medium = await self._market_data_source.get_recent_candles( + self._symbols, self._medium_interval, self._medium_lookback + ) + medium_features = self._feature_computer.compute_features( + candles=candles_medium + ) + + features: List[FeatureVector] = [] + features.extend(medium_features or []) + features.extend(micro_features or []) + + market_snapshot = await self._market_data_source.get_market_snapshot( + self._symbols + ) + market_snapshot = market_snapshot or {} + + return FeaturesPipelineResult( + features=features, market_snapshot=market_snapshot + ) + + @classmethod + def from_request(cls, request: UserRequest) -> DefaultFeaturesPipeline: + """Factory creating the default pipeline from a user request.""" + market_data_source = SimpleMarketDataSource( + exchange_id=request.exchange_config.exchange_id + ) + feature_computer = SimpleCandleFeatureComputer() + return cls( + request=request, + market_data_source=market_data_source, + feature_computer=feature_computer, + ) diff --git a/python/valuecell/agents/strategy_agent/features/technical_indicators.py b/python/valuecell/agents/strategy_agent/features/technical_indicators.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python/valuecell/agents/strategy_agent/models.py b/python/valuecell/agents/strategy_agent/models.py index 08468a467..19708c465 100644 --- a/python/valuecell/agents/strategy_agent/models.py +++ b/python/valuecell/agents/strategy_agent/models.py @@ -679,7 +679,6 @@ class ComposeContext(BaseModel): ) portfolio: PortfolioView digest: "TradeDigest" - prompt_text: str = Field(..., description="Strategy/style prompt text") market_snapshot: MarketSnapShotType = Field( default=None, description="Optional map symbol -> current reference price" ) diff --git a/python/valuecell/agents/strategy_agent/prompt_agent.py b/python/valuecell/agents/strategy_agent/prompt_agent.py new file mode 100644 index 
000000000..8c2cefd9e --- /dev/null +++ b/python/valuecell/agents/strategy_agent/prompt_agent.py @@ -0,0 +1,47 @@ +"""Default strategy agent implementation with standard behavior. + +This module provides a concrete implementation of StrategyAgent that uses +the default feature computation and LLM-based decision making. Users can +extend this class or StrategyAgent directly for custom strategies. +""" + +from __future__ import annotations + +from .agent import BaseStrategyAgent +from .decision.composer import LlmComposer +from .decision.interfaces import Composer +from .features.pipeline import DefaultFeaturesPipeline, FeaturesPipeline +from .models import UserRequest + + +class StrategyAgent(BaseStrategyAgent): + """Default strategy agent with standard feature computation and LLM composer. + + This implementation uses: + - SimpleFeatureComputer for feature extraction + - LlmComposer for decision making + - Default data sources and execution + + Users can subclass this to customize specific aspects while keeping + other defaults, or subclass StrategyAgent directly for full control. 
+ + Example: + # Use the default agent directly + agent = StrategyAgent() + + # Or customize just the features + class MyCustomAgent(StrategyAgent): + def _build_features_pipeline(self, request): + # Custom feature pipeline encapsulating data + features + return MyCustomPipeline(request) + """ + + def _build_features_pipeline(self, request: UserRequest) -> FeaturesPipeline | None: + """Use the default features pipeline built from the user request.""" + + return DefaultFeaturesPipeline.from_request(request) + + def _create_decision_composer(self, request: UserRequest) -> Composer | None: + """Use default LLM-based composer.""" + + return LlmComposer(request=request) diff --git a/python/valuecell/agents/strategy_agent/runtime.py b/python/valuecell/agents/strategy_agent/runtime.py index d48d8f50f..ebc871cd7 100644 --- a/python/valuecell/agents/strategy_agent/runtime.py +++ b/python/valuecell/agents/strategy_agent/runtime.py @@ -4,36 +4,35 @@ from valuecell.utils.uuid import generate_uuid from .core import DecisionCycleResult, DefaultDecisionCoordinator -from .data.market import SimpleMarketDataSource from .decision.composer import LlmComposer -from .execution.factory import create_execution_gateway, create_execution_gateway_sync +from .decision.interfaces import Composer +from .execution.factory import create_execution_gateway from .execution.interfaces import ExecutionGateway -from .features.simple import SimpleFeatureComputer +from .features.interfaces import FeaturesPipeline +from .features.pipeline import DefaultFeaturesPipeline from .models import Constraints, TradingMode, UserRequest from .portfolio.in_memory import InMemoryPortfolioService from .trading_history.digest import RollingDigestBuilder from .trading_history.recorder import InMemoryHistoryRecorder +from .utils import fetch_free_cash_from_gateway -def _simple_prompt_provider(request: UserRequest) -> str: - """Return a resolved prompt text by fusing custom_prompt and prompt_text.
+async def _create_execution_gateway(request: UserRequest) -> ExecutionGateway: + """Create execution gateway asynchronously, handling LIVE mode balance fetching.""" + execution_gateway = await create_execution_gateway(request.exchange_config) - Fusion logic: - - If custom_prompt exists, use it as base - - If prompt_text also exists, append it after custom_prompt - - If only prompt_text exists, use it - - Fallback: simple generated mention of symbols - """ - custom = request.trading_config.custom_prompt - prompt = request.trading_config.prompt_text - if custom and prompt: - return f"{prompt}\n\n{custom}" - elif custom: - return custom - elif prompt: - return prompt - symbols = ", ".join(request.trading_config.symbols) - return f"Compose trading instructions for symbols: {symbols}." + # In LIVE mode, fetch exchange balance and set initial capital from free cash + try: + if request.exchange_config.trading_mode == TradingMode.LIVE: + free_cash = await fetch_free_cash_from_gateway( + execution_gateway, request.trading_config.symbols + ) + request.trading_config.initial_capital = float(free_cash) + except Exception: + # Do not fail runtime creation if balance fetch or parsing fails + pass + + return execution_gateway @dataclass @@ -46,27 +45,54 @@ async def run_cycle(self) -> DecisionCycleResult: return await self.coordinator.run_once() -def create_strategy_runtime( +async def create_strategy_runtime( request: UserRequest, - execution_gateway: Optional[ExecutionGateway] = None, + composer: Optional[Composer] = None, + features_pipeline: Optional[FeaturesPipeline] = None, ) -> StrategyRuntime: - """Create a strategy runtime with synchronous initialization. + """Create a strategy runtime with async initialization (supports both paper and live trading). - Note: This function only supports paper trading by default. For live trading, - use create_strategy_runtime_async() instead, which properly initializes - the CCXT exchange connection. 
+ This function properly initializes CCXT exchange connections for live trading + and can also be used for paper trading. + + In LIVE mode, it fetches the exchange balance and sets the + initial capital to the available (free) cash for the strategy's + quote currencies. Opening positions will therefore draw down cash + and cannot borrow (no financing). Args: request: User request with strategy configuration - execution_gateway: Optional pre-initialized execution gateway. - If None, will be created based on request.exchange_config. + composer: Optional custom decision composer. If None, uses LlmComposer. + features_pipeline: Optional custom features pipeline. If None, uses + `DefaultFeaturesPipeline`. Returns: - StrategyRuntime instance + StrategyRuntime instance with initialized execution gateway - Raises: - RuntimeError: If live trading is requested without providing a gateway + Example: + >>> request = UserRequest( + ... exchange_config=ExchangeConfig( + ... exchange_id='binance', + ... trading_mode=TradingMode.LIVE, + ... api_key='YOUR_KEY', + ... secret_key='YOUR_SECRET', + ... market_type=MarketType.SWAP, + ... margin_mode=MarginMode.ISOLATED, + ... testnet=True, + ... ), + ... trading_config=TradingConfig( + ... symbols=['BTC-USDT', 'ETH-USDT'], + ... initial_capital=10000.0, + ... max_leverage=10.0, + ... max_positions=5, + ... ) + ... 
) + >>> runtime = await create_strategy_runtime(request) """ + # Create execution gateway asynchronously + execution_gateway = await _create_execution_gateway(request) + + # Create strategy runtime components strategy_id = generate_uuid("strategy") initial_capital = request.trading_config.initial_capital or 0.0 constraints = Constraints( @@ -81,24 +107,12 @@ def create_strategy_runtime( strategy_id=strategy_id, ) - base_prices = { - symbol: 120.0 + index * 15.0 - for index, symbol in enumerate(request.trading_config.symbols) - } - market_data_source = SimpleMarketDataSource( - base_prices=base_prices, exchange_id=request.exchange_config.exchange_id - ) - feature_computer = SimpleFeatureComputer() - composer = LlmComposer(request=request) + # Use custom composer if provided, otherwise default to LlmComposer + if composer is None: + composer = LlmComposer(request=request) - # Create execution gateway if not provided - if execution_gateway is None: - if request.exchange_config.trading_mode == TradingMode.LIVE: - raise RuntimeError( - "Live trading requires async initialization. " - "Use create_strategy_runtime_async() or provide a pre-initialized gateway." 
- ) - execution_gateway = create_execution_gateway_sync(request.exchange_config) + if features_pipeline is None: + features_pipeline = DefaultFeaturesPipeline.from_request(request) history_recorder = InMemoryHistoryRecorder() digest_builder = RollingDigestBuilder() @@ -107,13 +121,11 @@ def create_strategy_runtime( request=request, strategy_id=strategy_id, portfolio_service=portfolio_service, - market_data_source=market_data_source, - feature_computer=feature_computer, + features_pipeline=features_pipeline, composer=composer, execution_gateway=execution_gateway, history_recorder=history_recorder, digest_builder=digest_builder, - prompt_provider=_simple_prompt_provider, ) return StrategyRuntime( @@ -121,100 +133,3 @@ def create_strategy_runtime( strategy_id=strategy_id, coordinator=coordinator, ) - - -async def create_strategy_runtime_async(request: UserRequest) -> StrategyRuntime: - """Create a strategy runtime with async initialization (supports live trading). - - This function properly initializes CCXT exchange connections for live trading. - It can also be used for paper trading. - - In LIVE mode, it fetches the exchange balance and sets the - initial capital to the available (free) cash for the strategy's - quote currencies. Opening positions will therefore draw down cash - and cannot borrow (no financing). - - Args: - request: User request with strategy configuration - - Returns: - StrategyRuntime instance with initialized execution gateway - - Example: - >>> request = UserRequest( - ... exchange_config=ExchangeConfig( - ... exchange_id='binance', - ... trading_mode=TradingMode.LIVE, - ... api_key='YOUR_KEY', - ... secret_key='YOUR_SECRET', - ... market_type=MarketType.SWAP, - ... margin_mode=MarginMode.ISOLATED, - ... testnet=True, - ... ), - ... trading_config=TradingConfig( - ... symbols=['BTC-USDT', 'ETH-USDT'], - ... initial_capital=10000.0, - ... max_leverage=10.0, - ... max_positions=5, - ... ) - ... 
) - >>> runtime = await create_strategy_runtime_async(request) - """ - # Create execution gateway asynchronously - execution_gateway = await create_execution_gateway(request.exchange_config) - - # In LIVE mode, fetch exchange balance and set initial capital from free cash - try: - if request.exchange_config.trading_mode == TradingMode.LIVE and hasattr( - execution_gateway, "fetch_balance" - ): - balance = await execution_gateway.fetch_balance() - free_map = {} - # ccxt balance may be shaped as: {'free': {...}, 'used': {...}, 'total': {...}} - try: - free_section = ( - balance.get("free") if isinstance(balance, dict) else None - ) - except Exception: - free_section = None - if isinstance(free_section, dict): - free_map = { - str(k).upper(): float(v or 0.0) for k, v in free_section.items() - } - else: - # fallback: per-ccy dicts: balance['USDT'] = {'free': x, 'used': y, 'total': z} - for k, v in balance.items() if isinstance(balance, dict) else []: - if isinstance(v, dict) and "free" in v: - try: - free_map[str(k).upper()] = float(v.get("free") or 0.0) - except Exception: - continue - # collect quote currencies from configured symbols - quotes: list[str] = [] - for sym in request.trading_config.symbols or []: - s = str(sym).upper() - if "/" in s: - parts = s.split("/") - if len(parts) == 2: - quotes.append(parts[1]) - elif "-" in s: - parts = s.split("-") - if len(parts) == 2: - quotes.append(parts[1]) - quotes = list(dict.fromkeys(quotes)) # unique order-preserving - free_cash = 0.0 - if quotes: - for q in quotes: - free_cash += float(free_map.get(q, 0.0) or 0.0) - else: - # fallback to common stablecoins - for q in ("USDT", "USD", "USDC"): - free_cash += float(free_map.get(q, 0.0) or 0.0) - # Set initial capital to exchange free cash - request.trading_config.initial_capital = float(free_cash) - except Exception: - # Do not fail runtime creation if balance fetch or parsing fails - pass - - # Use the sync function with the pre-initialized gateway - return 
create_strategy_runtime(request, execution_gateway=execution_gateway) diff --git a/python/valuecell/agents/strategy_agent/trading_history/interfaces.py b/python/valuecell/agents/strategy_agent/trading_history/interfaces.py index 831ca7385..5d1139ab6 100644 --- a/python/valuecell/agents/strategy_agent/trading_history/interfaces.py +++ b/python/valuecell/agents/strategy_agent/trading_history/interfaces.py @@ -16,6 +16,11 @@ def record(self, record: HistoryRecord) -> None: """Persist a single history record.""" raise NotImplementedError + @abstractmethod + def get_records(self) -> List[HistoryRecord]: + """Get all current records.""" + raise NotImplementedError + class DigestBuilder(ABC): """Builds TradeDigest from historical records (incremental or batch).""" diff --git a/python/valuecell/agents/strategy_agent/trading_history/recorder.py b/python/valuecell/agents/strategy_agent/trading_history/recorder.py index e6fb7857b..a4f2f1ad3 100644 --- a/python/valuecell/agents/strategy_agent/trading_history/recorder.py +++ b/python/valuecell/agents/strategy_agent/trading_history/recorder.py @@ -7,8 +7,14 @@ class InMemoryHistoryRecorder(HistoryRecorder): """In-memory recorder storing history records.""" - def __init__(self) -> None: + def __init__(self, history_limit: int = 200) -> None: self.records: List[HistoryRecord] = [] + self.history_limit = history_limit def record(self, record: HistoryRecord) -> None: self.records.append(record) + if len(self.records) > self.history_limit: + self.records = self.records[-self.history_limit :] + + def get_records(self) -> List[HistoryRecord]: + return self.records diff --git a/python/valuecell/agents/strategy_agent/utils.py b/python/valuecell/agents/strategy_agent/utils.py index 2a993e314..db8201c89 100644 --- a/python/valuecell/agents/strategy_agent/utils.py +++ b/python/valuecell/agents/strategy_agent/utils.py @@ -1,4 +1,5 @@ import os +from datetime import datetime, timezone from typing import Dict, Optional import ccxt.pro as 
ccxtpro @@ -6,6 +7,68 @@ from loguru import logger +def get_current_timestamp_ms() -> int: + """Get current timestamp in milliseconds.""" + return int(datetime.now(timezone.utc).timestamp() * 1000) + + +async def fetch_free_cash_from_gateway(execution_gateway, symbols: list[str]) -> float: + """Fetch exchange balance via `execution_gateway.fetch_balance()` and + aggregate free cash for the given `symbols` (quote currencies). + + Returns aggregated free cash as float. Returns 0.0 on error or when + balance shape cannot be parsed. + """ + try: + if not hasattr(execution_gateway, "fetch_balance"): + return 0.0 + balance = await execution_gateway.fetch_balance() + except Exception: + return 0.0 + + free_map: dict[str, float] = {} + try: + free_section = balance.get("free") if isinstance(balance, dict) else None + except Exception: + free_section = None + + if isinstance(free_section, dict): + free_map = {str(k).upper(): float(v or 0.0) for k, v in free_section.items()} + else: + iterable = balance.items() if isinstance(balance, dict) else [] + for k, v in iterable: + if isinstance(v, dict) and "free" in v: + try: + free_map[str(k).upper()] = float(v.get("free") or 0.0) + except Exception: + continue + + # collect quote currencies from configured symbols + quotes: list[str] = [] + for sym in symbols or []: + s = str(sym).upper() + if "/" in s: + parts = s.split("/") + if len(parts) == 2: + quotes.append(parts[1]) + elif "-" in s: + parts = s.split("-") + if len(parts) == 2: + quotes.append(parts[1]) + quotes = list(dict.fromkeys(quotes)) # unique order-preserving + + free_cash = 0.0 + if quotes: + for q in quotes: + free_cash += float(free_map.get(q, 0.0) or 0.0) + else: + # fallback to common stablecoins + for q in ("USDT", "USD", "USDC"): + free_cash += float(free_map.get(q, 0.0) or 0.0) + + return float(free_cash) + + def extract_price_map(market_snapshot: Dict) -> Dict[str, float]: """Extract a simple symbol -> price mapping from market snapshot structure. 
From d859b88900524d053f70987bcddfb53806215494 Mon Sep 17 00:00:00 2001 From: paisley Date: Wed, 19 Nov 2025 14:31:55 +0800 Subject: [PATCH 04/15] feat: add grid strategy agent --- python/configs/agent_cards/grid_agent.json | 19 ++ .../strategy_agent/decision/grid_composer.py | 273 ++++++++++++++++++ .../agents/strategy_agent/grid_agent.py | 40 +++ 3 files changed, 332 insertions(+) create mode 100644 python/configs/agent_cards/grid_agent.json create mode 100644 python/valuecell/agents/strategy_agent/decision/grid_composer.py create mode 100644 python/valuecell/agents/strategy_agent/grid_agent.py diff --git a/python/configs/agent_cards/grid_agent.json b/python/configs/agent_cards/grid_agent.json new file mode 100644 index 000000000..286bee999 --- /dev/null +++ b/python/configs/agent_cards/grid_agent.json @@ -0,0 +1,19 @@ +{ + "name": "GridStrategyAgent", + "display_name": "Grid Strategy Agent", + "url": "http://localhost:10007/", + "description": "LLM-driven strategy composer that turns market features into normalized trade instructions. 
Includes a simple runtime for demo and testing.", + "capabilities": { + "streaming": true, + "push_notifications": true + }, + "enabled": true, + "metadata": { + "planner_passthrough": true, + "version": "0.1.0", + "author": "ValueCell Team", + "tags": ["strategy", "trading", "llm", "demo"], + "notes": "This card is a lightweight example; replace model api_key and tune parameters for production use.", + "local_agent_class": "valuecell.agents.strategy_agent.grid_agent:GridStrategyAgent" + } +} diff --git a/python/valuecell/agents/strategy_agent/decision/grid_composer.py b/python/valuecell/agents/strategy_agent/decision/grid_composer.py new file mode 100644 index 000000000..0210e210c --- /dev/null +++ b/python/valuecell/agents/strategy_agent/decision/grid_composer.py @@ -0,0 +1,273 @@ +from __future__ import annotations + +import math +from typing import List, Optional + +from loguru import logger + +from ..models import ( + ComposeContext, + InstrumentRef, + LlmDecisionAction, + LlmDecisionItem, + LlmPlanProposal, + MarketType, + TradeInstruction, + UserRequest, +) +from .composer import LlmComposer + + +class GridComposer(LlmComposer): + """Rule-based grid strategy composer. + + Goal: avoid LLM usage by applying simple mean-reversion grid rules to + produce an `LlmPlanProposal`, then reuse the parent normalization and + risk controls (`_normalize_plan`) to output executable `TradeInstruction`s. + + Key rules: + - Define grid step with `step_pct` (e.g., 0.5%). + - With positions: price falling ≥ 1 step vs average adds; rising ≥ 1 step + reduces (max `max_steps` per cycle). + - Without positions: use recent change percent (prefer 1s feature) to + trigger open; spot opens long only, perps can open both directions. + - Base size is `equity * base_fraction / price`; `_normalize_plan` later + clamps by filters and buying power. 
+ """ + + def __init__( + self, + request: UserRequest, + *, + step_pct: float = 0.005, + max_steps: int = 3, + base_fraction: float = 0.08, + default_slippage_bps: int = 25, + quantity_precision: float = 1e-9, + ) -> None: + super().__init__( + request, + default_slippage_bps=default_slippage_bps, + quantity_precision=quantity_precision, + ) + self._step_pct = float(step_pct) + self._max_steps = int(max_steps) + self._base_fraction = float(base_fraction) + + async def compose(self, context: ComposeContext) -> List[TradeInstruction]: + # Prepare buying power/constraints/price map, then generate plan and reuse parent normalization + equity, allowed_lev, constraints, _projected_gross, price_map = ( + self._init_buying_power_context(context) + ) + + items: List[LlmDecisionItem] = [] + ts = int(context.ts) + + # Pre-fetch micro change percentage from features (prefer 1s, fallback 1m) + def latest_change_pct(symbol: str) -> Optional[float]: + best: Optional[float] = None + best_rank = 999 + for fv in context.features or []: + try: + if str(getattr(fv.instrument, "symbol", "")) != symbol: + continue + interval = (fv.meta or {}).get("interval") + change = fv.values.get("change_pct") + if change is None: + continue + rank = 0 if interval == "1s" else (1 if interval == "1m" else 2) + if rank < best_rank: + best = float(change) + best_rank = rank + except Exception: + continue + return best + + symbols = list(dict.fromkeys(self._request.trading_config.symbols)) + is_spot = self._request.exchange_config.market_type == MarketType.SPOT + + for symbol in symbols: + price = float(price_map.get(symbol) or 0.0) + if price <= 0: + logger.debug("Skip {} due to missing/invalid price", symbol) + continue + + pos = context.portfolio.positions.get(symbol) + qty = float(getattr(pos, "quantity", 0.0) or 0.0) + avg_px = float(getattr(pos, "avg_price", 0.0) or 0.0) + + # Base order size: equity fraction converted to quantity; parent applies risk controls + base_qty = max(0.0, (equity * 
self._base_fraction) / price) + if base_qty <= 0: + continue + + # Compute steps from average price when holding; without average, trigger one step + def steps_from_avg(px: float, avg: float) -> int: + if avg <= 0: + return 1 + move_pct = abs(px / avg - 1.0) + k = int(math.floor(move_pct / max(self._step_pct, 1e-9))) + return max(0, min(k, self._max_steps)) + + # No position: use latest change to trigger direction (spot long-only) + if abs(qty) <= self._quantity_precision: + chg = latest_change_pct(symbol) + if chg is None: + # If no change feature available, skip conservatively + continue + if chg <= -self._step_pct: + # Short-term drop → open long + items.append( + LlmDecisionItem( + instrument=InstrumentRef( + symbol=symbol, + exchange_id=self._request.exchange_config.exchange_id, + ), + action=LlmDecisionAction.OPEN_LONG, + target_qty=base_qty, + leverage=( + 1.0 + if is_spot + else min( + float( + self._request.trading_config.max_leverage or 1.0 + ), + float( + constraints.max_leverage + or self._request.trading_config.max_leverage + or 1.0 + ), + ) + ), + confidence=min(1.0, abs(chg) / (2 * self._step_pct)), + rationale=f"Grid open-long: change_pct={chg:.4f} ≤ -step={self._step_pct:.4f}", + ) + ) + elif (not is_spot) and chg >= self._step_pct: + # Short-term rise → open short (perpetual only) + items.append( + LlmDecisionItem( + instrument=InstrumentRef( + symbol=symbol, + exchange_id=self._request.exchange_config.exchange_id, + ), + action=LlmDecisionAction.OPEN_SHORT, + target_qty=base_qty, + leverage=min( + float(self._request.trading_config.max_leverage or 1.0), + float( + constraints.max_leverage + or self._request.trading_config.max_leverage + or 1.0 + ), + ), + confidence=min(1.0, abs(chg) / (2 * self._step_pct)), + rationale=f"Grid open-short: change_pct={chg:.4f} ≥ step={self._step_pct:.4f}", + ) + ) + # Otherwise NOOP + continue + + # With position: adjust around average using grid + k = steps_from_avg(price, avg_px) + if k <= 0: + # No grid step 
triggered → NOOP + continue + + # Long: add on down, reduce on up + if qty > 0: + down = (avg_px > 0) and (price <= avg_px * (1.0 - self._step_pct)) + up = (avg_px > 0) and (price >= avg_px * (1.0 + self._step_pct)) + if down: + items.append( + LlmDecisionItem( + instrument=InstrumentRef( + symbol=symbol, + exchange_id=self._request.exchange_config.exchange_id, + ), + action=LlmDecisionAction.OPEN_LONG, + target_qty=base_qty * k, + leverage=1.0 + if is_spot + else min( + float(self._request.trading_config.max_leverage or 1.0), + float( + constraints.max_leverage + or self._request.trading_config.max_leverage + or 1.0 + ), + ), + confidence=min(1.0, k / float(self._max_steps)), + rationale=f"Grid long add: price {price:.4f} ≤ avg {avg_px:.4f} by {k} steps", + ) + ) + elif up: + items.append( + LlmDecisionItem( + instrument=InstrumentRef( + symbol=symbol, + exchange_id=self._request.exchange_config.exchange_id, + ), + action=LlmDecisionAction.CLOSE_LONG, + target_qty=min(abs(qty), base_qty * k), + leverage=1.0, + confidence=min(1.0, k / float(self._max_steps)), + rationale=f"Grid long reduce: price {price:.4f} ≥ avg {avg_px:.4f} by {k} steps", + ) + ) + continue + + # Short: add on up, cover on down + if qty < 0: + up = (avg_px > 0) and (price >= avg_px * (1.0 + self._step_pct)) + down = (avg_px > 0) and (price <= avg_px * (1.0 - self._step_pct)) + if up and (not is_spot): + items.append( + LlmDecisionItem( + instrument=InstrumentRef( + symbol=symbol, + exchange_id=self._request.exchange_config.exchange_id, + ), + action=LlmDecisionAction.OPEN_SHORT, + target_qty=base_qty * k, + leverage=min( + float(self._request.trading_config.max_leverage or 1.0), + float( + constraints.max_leverage + or self._request.trading_config.max_leverage + or 1.0 + ), + ), + confidence=min(1.0, k / float(self._max_steps)), + rationale=f"Grid short add: price {price:.4f} ≥ avg {avg_px:.4f} by {k} steps", + ) + ) + elif down: + items.append( + LlmDecisionItem( + instrument=InstrumentRef( + 
symbol=symbol, + exchange_id=self._request.exchange_config.exchange_id, + ), + action=LlmDecisionAction.CLOSE_SHORT, + target_qty=min(abs(qty), base_qty * k), + leverage=1.0, + confidence=min(1.0, k / float(self._max_steps)), + rationale=f"Grid short cover: price {price:.4f} ≤ avg {avg_px:.4f} by {k} steps", + ) + ) + continue + + if not items: + logger.debug( + "GridComposer produced NOOP plan for compose_id={}", context.compose_id + ) + return [] + + plan = LlmPlanProposal( + ts=ts, + items=items, + rationale=f"Grid step={self._step_pct:.4f}, base_fraction={self._base_fraction:.3f}", + ) + # Reuse parent normalization: quantity filters, buying power, cap_factor, reduceOnly, etc. + return self._normalize_plan(context, plan) diff --git a/python/valuecell/agents/strategy_agent/grid_agent.py b/python/valuecell/agents/strategy_agent/grid_agent.py new file mode 100644 index 000000000..6820c5e05 --- /dev/null +++ b/python/valuecell/agents/strategy_agent/grid_agent.py @@ -0,0 +1,40 @@ +"""Grid strategy agent following the same abstraction as the prompt agent. + +This agent reuses: +- Default features pipeline `DefaultFeaturesPipeline` +- Rule-based decision composer `GridComposer` + +Usage: + from valuecell.agents.strategy_agent.grid_agent import GridStrategyAgent + agent = GridStrategyAgent() + await agent.stream(request) +""" + +from __future__ import annotations + +from .agent import BaseStrategyAgent +from .decision.grid_composer import GridComposer +from .decision.interfaces import Composer +from .features.pipeline import DefaultFeaturesPipeline, FeaturesPipeline +from .models import UserRequest + + +class GridStrategyAgent(BaseStrategyAgent): + """Grid trading agent: default features + rule-based grid composer. + + - Spot: long-only grid add/reduce. + - Perpetual/derivatives: bi-directional grid; add short on up moves, + add long on down moves; reduce on reversals. 
+ """ + + def _build_features_pipeline(self, request: UserRequest) -> FeaturesPipeline | None: + return DefaultFeaturesPipeline.from_request(request) + + def _create_decision_composer(self, request: UserRequest) -> Composer | None: + # Adjust step_pct / max_steps / base_fraction as needed + return GridComposer( + request=request, + step_pct=0.005, # ~0.5% per step + max_steps=3, # up to 3 steps per cycle + base_fraction=0.08, # base order size = equity * 8% + ) From 544730d77c04a6bd1b76406aac72b8f68dd38056 Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Wed, 19 Nov 2025 14:50:17 +0800 Subject: [PATCH 05/15] refactor: replace market_snapshot with market_features in StrategyAgent components --- .../valuecell/agents/strategy_agent/README.md | 12 +- .../valuecell/agents/strategy_agent/core.py | 11 +- .../strategy_agent/decision/composer.py | 111 ++++++++++-------- .../strategy_agent/decision/system_prompt.py | 12 +- .../strategy_agent/execution/ccxt_trading.py | 10 +- .../strategy_agent/execution/interfaces.py | 6 +- .../strategy_agent/execution/paper_trading.py | 12 +- .../agents/strategy_agent/features/candle.py | 4 +- .../strategy_agent/features/interfaces.py | 7 +- .../features/market_snapshot.py | 93 +++++++++++++++ .../strategy_agent/features/pipeline.py | 22 ++-- .../valuecell/agents/strategy_agent/models.py | 3 - .../strategy_agent/portfolio/in_memory.py | 8 +- .../strategy_agent/portfolio/interfaces.py | 11 +- .../valuecell/agents/strategy_agent/utils.py | 62 +++++----- 15 files changed, 251 insertions(+), 133 deletions(-) create mode 100644 python/valuecell/agents/strategy_agent/features/market_snapshot.py diff --git a/python/valuecell/agents/strategy_agent/README.md b/python/valuecell/agents/strategy_agent/README.md index 7b2c8c58d..3602f2e0e 100644 --- a/python/valuecell/agents/strategy_agent/README.md +++ b/python/valuecell/agents/strategy_agent/README.md @@ -40,11 +40,11 @@ This document describes the design 
for the Strategy Agent: a lightweight, LLM-dr 1. DecisionCoordinator pulls `PortfolioView` (positions, cash, optional constraints) 1. DecisionCoordinator gets recent `Candle` from `MarketDataSource` 1. `FeatureComputer` produces `FeatureVector[]` -1. DecisionCoordinator assembles `ComposeContext`: features, portfolio, digest, prompt_text (string), optional market_snapshot and extra constraints +1. DecisionCoordinator assembles `ComposeContext`: features (including `features.market_snapshot`), portfolio, digest, prompt_text (string), and extra constraints 1. `Composer.compose(context)`: calls LLM with `ComposeContext` → `LlmPlanProposal`; normalizes plan (target position logic, limits, step size, min notional, cool-down, etc.); returns `TradeInstruction[]` -1. `ExecutionGateway.execute(instructions)` (no detailed order/fill handling at this stage) +1. `ExecutionGateway.execute(instructions, market_features)` (no detailed order/fill handling at this stage) 1. `HistoryRecorder.record(...)` checkpoints (including optional auditing metadata); DigestBuilder updates `TradeDigest` @@ -81,7 +81,7 @@ Defined in `models.py`: - `LlmDecisionItem { instrument, action: (buy|sell|flat|noop), target_qty, confidence?, rationale? }` - `LlmPlanProposal { ts, items: List[LlmDecisionItem], notes?, model_meta? }` - `TradeInstruction { instruction_id, compose_id, instrument, side: (buy|sell), quantity, price_mode, limit_price?, max_slippage_bps?, meta? }` - - `ComposeContext { ts, compose_id, strategy_id?, features, portfolio, digest, prompt_text, market_snapshot?, constraints? }` + - `ComposeContext { ts, compose_id, strategy_id?, features, portfolio, digest, prompt_text, constraints? 
}` - History and digest - `HistoryRecord { ts, kind, reference_id, payload }` @@ -136,7 +136,7 @@ Interfaces live in their respective modules as ABCs (not Pydantic models): - `decision/interfaces.py` - `Composer.compose(context: ComposeContext) -> List[TradeInstruction]` - `execution/interfaces.py` - - `ExecutionGateway.execute(instructions: List[TradeInstruction]) -> None` + - `ExecutionGateway.execute(instructions: List[TradeInstruction], market_features?: List[FeatureVector]) -> None` - `trading_history/interfaces.py` - `HistoryRecorder.record(record: HistoryRecord) -> None` - `DigestBuilder.build(records: List[HistoryRecord]) -> TradeDigest` @@ -203,8 +203,8 @@ A typical `run_once()` should: 1. `view = portfolio.get_view()` 2. Pull candles via `data` and compute `features = features.compute_features(candles=...)` -3. `context = ComposeContext(ts=..., features=features, portfolio=view, digest=..., prompt_text=..., market_snapshot=..., constraints=...)` +3. `context = ComposeContext(ts=..., features=features, portfolio=view, digest=..., prompt_text=..., constraints=...)` 4. `instructions = composer.compose(context)` -5. `executor.execute(instructions)` +5. `executor.execute(instructions, market_features)` 6. Record `HistoryRecord` for features, compose auditing metadata, and instructions 7. 
Update `TradeDigest` periodically or incrementally diff --git a/python/valuecell/agents/strategy_agent/core.py b/python/valuecell/agents/strategy_agent/core.py index 3a7e30f1a..ad0861ddc 100644 --- a/python/valuecell/agents/strategy_agent/core.py +++ b/python/valuecell/agents/strategy_agent/core.py @@ -132,7 +132,11 @@ async def run_once(self) -> DecisionCycleResult: pipeline_result = await self._features_pipeline.build() features = list(pipeline_result.features or []) - market_snapshot = pipeline_result.market_snapshot or {} + market_features = [ + fv + for fv in features + if (fv.meta or {}).get("group_by_key") == "market_snapshot" + ] digest = self._digest_builder.build(self._history_recorder.get_records()) context = ComposeContext( @@ -142,7 +146,6 @@ async def run_once(self) -> DecisionCycleResult: features=features, portfolio=portfolio, digest=digest, - market_snapshot=market_snapshot, ) instructions = await self._composer.compose(context) @@ -160,7 +163,7 @@ async def run_once(self) -> DecisionCycleResult: f" ExecutionGateway type: {type(self._execution_gateway).__name__}" ) tx_results = await self._execution_gateway.execute( - instructions, market_snapshot + instructions, market_features ) logger.info(f"✅ ExecutionGateway returned {len(tx_results)} results") for idx, tx in enumerate(tx_results): @@ -169,7 +172,7 @@ async def run_once(self) -> DecisionCycleResult: ) trades = self._create_trades(tx_results, compose_id, timestamp_ms) - self.portfolio_service.apply_trades(trades, market_snapshot) + self.portfolio_service.apply_trades(trades, market_features) summary = self.build_summary(timestamp_ms, trades) history_records = self._create_history_records( diff --git a/python/valuecell/agents/strategy_agent/decision/composer.py b/python/valuecell/agents/strategy_agent/decision/composer.py index 3684e3119..3ca8c4ddc 100644 --- a/python/valuecell/agents/strategy_agent/decision/composer.py +++ b/python/valuecell/agents/strategy_agent/decision/composer.py @@ -19,7 
+19,7 @@ MarketType, TradeInstruction, TradeSide, - UserRequest, + UserRequest, FeatureVector, ) from ..utils import extract_price_map, send_discord_message from .interfaces import Composer @@ -115,69 +115,78 @@ def _prune_none(obj): return [v for v in pruned if v not in (None, {}, [])] return obj - def _compact_market_snapshot(self, snapshot: Dict) -> Dict: - """Extract decision-critical fields from market snapshot. + def _extract_market_section(self, market_data: List[Dict]) -> Dict: + """Extract decision-critical metrics from market feature entries.""" - Reduces ~70% token usage while preserving key signals. - """ - compact = {} - for symbol, data in snapshot.items(): - if not isinstance(data, dict): + compact: Dict[str, Dict] = {} + for item in market_data: + symbol = (item.get("instrument") or {}).get("symbol") + if not symbol: continue - entry = {} - # Price action - if price := data.get("price"): - if isinstance(price, dict): - entry["last"] = price.get("last") or price.get("close") - entry["change_pct"] = price.get("percentage") - entry["volume_24h"] = price.get("baseVolume") - - # Open interest - if oi := data.get("open_interest"): - if isinstance(oi, dict): - entry["open_interest"] = oi.get("openInterestAmount") or oi.get( - "baseVolume" - ) + values = item.get("values") or {} + entry: Dict[str, float] = {} + + for feature_key, alias in ( + ("price.last", "last"), + ("price.close", "close"), + ("price.open", "open"), + ("price.high", "high"), + ("price.low", "low"), + ("price.bid", "bid"), + ("price.ask", "ask"), + ("price.change_pct", "change_pct"), + ("price.volume", "volume"), + ): + if feature_key in values and values[feature_key] is not None: + entry[alias] = values[feature_key] - # Funding rate - if fr := data.get("funding_rate"): - if isinstance(fr, dict): - entry["funding_rate"] = fr.get("fundingRate") - entry["mark_price"] = fr.get("markPrice") + if values.get("open_interest") is not None: + entry["open_interest"] = values["open_interest"] - if 
entry: - compact[symbol] = {k: v for k, v in entry.items() if v is not None} + if values.get("funding.rate") is not None: + entry["funding_rate"] = values["funding.rate"] + if values.get("funding.mark_price") is not None: + entry["mark_price"] = values["funding.mark_price"] + + normalized = {k: v for k, v in entry.items() if v is not None} + if normalized: + compact[symbol] = normalized return compact - def _organize_features(self, features: List) -> Dict: - """Organize features by interval and remove redundant metadata. + def _organize_features(self, features: List[FeatureVector]) -> Dict: + """Organize features by grouping metadata and trim payload noise. - Dynamically groups features by their interval (e.g., 1s, 1m, 5m, 15m). + Prefers the FeatureVector.meta group_by_key when present, otherwise + falls back to the interval tag. This allows callers to introduce + ad-hoc groupings (e.g., market snapshots) without overloading the + interval field. """ - by_interval = {} + grouped: Dict[str, List] = {} for fv in features: data = fv.model_dump(mode="json") - interval = data.get("meta", {}).get("interval", "") + meta = data.get("meta") or {} + group_key = meta.get("group_by_key") - if not interval: + if not group_key: continue - # Remove window timestamps (not useful for LLM) - if "meta" in data: - data["meta"] = { - "interval": interval, - "count": data["meta"].get("count"), - } + # Keep only concise metadata helpful for the LLM prompt. 
+ trimmed_meta = {} + if meta.get("interval"): + trimmed_meta["interval"] = meta["interval"] + if meta.get("count") is not None: + trimmed_meta["count"] = meta["count"] + if trimmed_meta: + data["meta"] = trimmed_meta + else: + data.pop("meta", None) - # Group by interval - if interval not in by_interval: - by_interval[interval] = [] - by_interval[interval].append(data) + grouped.setdefault(group_key, []).append(data) - return by_interval + return grouped def _build_summary(self, context: ComposeContext) -> Dict: """Build portfolio summary with risk metrics.""" @@ -210,8 +219,8 @@ def _build_llm_prompt(self, context: ComposeContext) -> str: # Build components summary = self._build_summary(context) - market = self._compact_market_snapshot(context.market_snapshot or {}) features = self._organize_features(context.features) + market = self._extract_market_section(features.get("market_snapshot", [])) # Portfolio positions positions = [ @@ -373,7 +382,7 @@ def _init_buying_power_context( ) # Initialize projected gross exposure - price_map = extract_price_map(context.market_snapshot or {}) + price_map = extract_price_map(context.features) if getattr(context.portfolio, "gross_exposure", None) is not None: projected_gross = float(context.portfolio.gross_exposure or 0.0) else: @@ -792,7 +801,7 @@ def _apply_quantity_filters( min_trade_qty: float, max_order_qty: Optional[float], min_notional: Optional[float], - market_snapshot: Dict[str, float], + price_map: Dict[str, float], ) -> float: qty = quantity logger.debug(f"Filtering {symbol}: initial qty={qty}") @@ -816,9 +825,9 @@ def _apply_quantity_filters( return 0.0 if min_notional is not None: - price = market_snapshot.get(symbol) + price = price_map.get(symbol) if price is None: - logger.warning(f"FILTERED: {symbol} no price in market_snapshot") + logger.warning(f"FILTERED: {symbol} no price reference available") return 0.0 notional = qty * price if notional < float(min_notional): diff --git 
a/python/valuecell/agents/strategy_agent/decision/system_prompt.py b/python/valuecell/agents/strategy_agent/decision/system_prompt.py index efbba017d..6ae7ae8ca 100644 --- a/python/valuecell/agents/strategy_agent/decision/system_prompt.py +++ b/python/valuecell/agents/strategy_agent/decision/system_prompt.py @@ -32,12 +32,14 @@ 3) Prefer fewer, higher-quality actions when signals are mixed. 4) When in doubt or edge is weak, choose noop. -MARKET SNAPSHOT -The `market_snapshot` provided in the Context is an authoritative, per-cycle reference issued by the data source. It is a mapping of symbol -> object with lightweight numeric fields (when available): +MARKET FEATURES +The Context includes `features.market_snapshot`: a compact, per-cycle bundle of references derived from the latest exchange snapshot. Each item corresponds to a tradable symbol and may include: -- `price`: a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market -- `open_interest`: open interest value (float) when available from the exchange (contracts or quote-ccy depending on exchange). Use it as a signal for liquidity and positioning interest, but treat units as exchange-specific. -- `funding_rate`: latest funding rate (decimal, e.g., 0.0001) when available. Use it to reason about carry costs for leveraged positions. +- `price.last`, `price.open`, `price.high`, `price.low`, `price.bid`, `price.ask`, `price.change_pct`, `price.volume` +- `open_interest`: liquidity / positioning interest indicator (units exchange-specific) +- `funding.rate`, `funding.mark_price`: carry cost context for perpetual swaps + +Treat these metrics as authoritative for the current decision loop. When missing, assume the datum is unavailable—do not infer. 
PERFORMANCE FEEDBACK & ADAPTIVE BEHAVIOR You will receive a Sharpe Ratio at each invocation (in Context.summary.sharpe_ratio): diff --git a/python/valuecell/agents/strategy_agent/execution/ccxt_trading.py b/python/valuecell/agents/strategy_agent/execution/ccxt_trading.py index f41a00120..f32c5a4a7 100644 --- a/python/valuecell/agents/strategy_agent/execution/ccxt_trading.py +++ b/python/valuecell/agents/strategy_agent/execution/ccxt_trading.py @@ -16,10 +16,14 @@ from loguru import logger from ..models import ( - MarketSnapShotType, + FeatureVector, + MarketType, + MarginMode, PriceMode, TradeInstruction, TradeSide, + TradeType, + TradingMode, TxResult, TxStatus, derive_side_from_action, @@ -497,13 +501,13 @@ async def _get_free_usdt_binance(self, exchange: ccxt.Exchange) -> Optional[floa async def execute( self, instructions: List[TradeInstruction], - market_snapshot: Optional[MarketSnapShotType] = None, + market_features: Optional[List[FeatureVector]] = None, ) -> List[TxResult]: """Execute trade instructions on the real exchange via CCXT. Args: instructions: List of trade instructions to execute - market_snapshot: Optional market snapshot (not used for real execution) + market_features: Optional market features (not used for real execution) Returns: List of transaction results with fill details diff --git a/python/valuecell/agents/strategy_agent/execution/interfaces.py b/python/valuecell/agents/strategy_agent/execution/interfaces.py index 26dee18c5..d503cfd86 100644 --- a/python/valuecell/agents/strategy_agent/execution/interfaces.py +++ b/python/valuecell/agents/strategy_agent/execution/interfaces.py @@ -3,7 +3,7 @@ from abc import ABC, abstractmethod from typing import List, Optional -from ..models import MarketSnapShotType, TradeInstruction, TxResult +from ..models import FeatureVector, TradeInstruction, TxResult # Contracts for execution gateways (module-local abstract interfaces). # An implementation may route to a real exchange or a paper broker. 
@@ -16,13 +16,13 @@ class ExecutionGateway(ABC): async def execute( self, instructions: List[TradeInstruction], - market_snapshot: Optional[MarketSnapShotType] = None, + market_features: Optional[List[FeatureVector]] = None, ) -> List[TxResult]: """Execute the provided instructions and return TxResult items. Notes: - Implementations may simulate fills (paper) or submit to a real exchange. - - market_snapshot is optional context for pricing simulations. + - market_features contains interval="market" FeatureVector entries for pricing. - Lifecycle (partial fills, cancels) can be represented with PARTIAL/REJECTED. """ diff --git a/python/valuecell/agents/strategy_agent/execution/paper_trading.py b/python/valuecell/agents/strategy_agent/execution/paper_trading.py index c57e9f140..54df5bd1f 100644 --- a/python/valuecell/agents/strategy_agent/execution/paper_trading.py +++ b/python/valuecell/agents/strategy_agent/execution/paper_trading.py @@ -1,12 +1,6 @@ from typing import List, Optional -from ..models import ( - MarketSnapShotType, - TradeInstruction, - TradeSide, - TxResult, - derive_side_from_action, -) +from ..models import FeatureVector, TradeInstruction, TradeSide, TxResult, derive_side_from_action from ..utils import extract_price_map from .interfaces import ExecutionGateway @@ -26,10 +20,10 @@ def __init__(self, fee_bps: float = 10.0) -> None: async def execute( self, instructions: List[TradeInstruction], - market_snapshot: Optional[MarketSnapShotType] = None, + market_features: Optional[List[FeatureVector]] = None, ) -> List[TxResult]: results: List[TxResult] = [] - price_map = extract_price_map(market_snapshot or {}) + price_map = extract_price_map(market_features or []) for inst in instructions: self.executed.append(inst) ref_price = float(price_map.get(inst.instrument.symbol, 0.0) or 0.0) diff --git a/python/valuecell/agents/strategy_agent/features/candle.py b/python/valuecell/agents/strategy_agent/features/candle.py index e5c80739b..bb55693e5 100644 --- 
a/python/valuecell/agents/strategy_agent/features/candle.py +++ b/python/valuecell/agents/strategy_agent/features/candle.py @@ -132,8 +132,10 @@ def compute_features( # Build feature meta window_start_ts = int(rows[0]["ts"]) if rows else int(last["ts"]) window_end_ts = int(last["ts"]) + interval = series[-1].interval fv_meta = { - "interval": series[-1].interval, + "group_by_key": f"interval_{interval}", + "interval": interval, "count": len(series), "window_start_ts": window_start_ts, "window_end_ts": window_end_ts, diff --git a/python/valuecell/agents/strategy_agent/features/interfaces.py b/python/valuecell/agents/strategy_agent/features/interfaces.py index 2d2044380..4c69b5449 100644 --- a/python/valuecell/agents/strategy_agent/features/interfaces.py +++ b/python/valuecell/agents/strategy_agent/features/interfaces.py @@ -4,7 +4,7 @@ from dataclasses import dataclass from typing import Any, Dict, List, Optional -from ..models import Candle, FeatureVector, MarketSnapShotType +from ..models import Candle, FeatureVector # Contracts for feature computation (module-local abstract interfaces). # Plain ABCs (not Pydantic) to keep implementations lightweight. @@ -41,15 +41,14 @@ class FeaturesPipelineResult: """Result of running a features pipeline.""" features: List[FeatureVector] - market_snapshot: MarketSnapShotType class FeaturesPipeline(ABC): - """Abstract pipeline that produces features and supporting market context.""" + """Abstract pipeline that produces feature vectors (including market features).""" @abstractmethod async def build(self) -> FeaturesPipelineResult: - """Compute feature vectors and associated market snapshot. + """Compute feature vectors and return them. 
Implementations should use their configured request/inputs to determine which symbols to process; callers should not pass runtime parameters diff --git a/python/valuecell/agents/strategy_agent/features/market_snapshot.py b/python/valuecell/agents/strategy_agent/features/market_snapshot.py new file mode 100644 index 000000000..4883c7315 --- /dev/null +++ b/python/valuecell/agents/strategy_agent/features/market_snapshot.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +from typing import Dict, List + +from ..models import FeatureVector, InstrumentRef, MarketSnapShotType +from ..utils import get_current_timestamp_ms + + +class MarketSnapshotFeatureComputer: + """Convert exchange market_snapshot structures into FeatureVector items. + + This class encapsulates the logic previously embedded in + `DefaultFeaturesPipeline._build_market_features`. Keeping it separate + makes the pipeline easier to test and replace. + """ + + def build(self, market_snapshot: MarketSnapShotType, exchange_id: str) -> List[FeatureVector]: + features: List[FeatureVector] = [] + now_ts = get_current_timestamp_ms() + + for symbol, data in (market_snapshot or {}).items(): + if not isinstance(data, dict): + continue + + price_obj = data.get("price") if isinstance(data, dict) else None + timestamp = None + values: Dict[str, float] = {} + + if isinstance(price_obj, dict): + timestamp = price_obj.get("timestamp") or price_obj.get("ts") + for key in ("last", "close", "open", "high", "low", "bid", "ask"): + val = price_obj.get(key) + if val is not None: + try: + values[f"price.{key}"] = float(val) + except (TypeError, ValueError): + continue + + change = price_obj.get("percentage") + if change is not None: + try: + values["price.change_pct"] = float(change) + except (TypeError, ValueError): + pass + + volume = price_obj.get("quoteVolume") or price_obj.get("baseVolume") + if volume is not None: + try: + values["price.volume"] = float(volume) + except (TypeError, ValueError): + pass + + if 
isinstance(data.get("open_interest"), dict): + oi = data["open_interest"] + for field in ("openInterest", "openInterestAmount", "baseVolume"): + val = oi.get(field) + if val is not None: + try: + values["open_interest"] = float(val) + except (TypeError, ValueError): + pass + break + + if isinstance(data.get("funding_rate"), dict): + fr = data["funding_rate"] + rate = fr.get("fundingRate") or fr.get("funding_rate") + if rate is not None: + try: + values["funding.rate"] = float(rate) + except (TypeError, ValueError): + pass + mark_price = fr.get("markPrice") or fr.get("mark_price") + if mark_price is not None: + try: + values["funding.mark_price"] = float(mark_price) + except (TypeError, ValueError): + pass + + if not values: + continue + + fv_ts = int(timestamp) if timestamp is not None else now_ts + feature = FeatureVector( + ts=int(fv_ts), + instrument=InstrumentRef(symbol=symbol, exchange_id=exchange_id), + values=values, + meta={ + "group_by_key": "market_snapshot", + }, + ) + features.append(feature) + + return features diff --git a/python/valuecell/agents/strategy_agent/features/pipeline.py b/python/valuecell/agents/strategy_agent/features/pipeline.py index 7e49755a4..745586330 100644 --- a/python/valuecell/agents/strategy_agent/features/pipeline.py +++ b/python/valuecell/agents/strategy_agent/features/pipeline.py @@ -19,6 +19,7 @@ FeaturesPipeline, FeaturesPipelineResult, ) +from .market_snapshot import MarketSnapshotFeatureComputer class DefaultFeaturesPipeline(FeaturesPipeline): @@ -29,7 +30,8 @@ def __init__( *, request: UserRequest, market_data_source: MarketDataSource, - feature_computer: CandleBasedFeatureComputer, + candle_feature_computer: CandleBasedFeatureComputer, + market_snapshot_computer: MarketSnapshotFeatureComputer, micro_interval: str = "1s", micro_lookback: int = 60 * 3, medium_interval: str = "1m", @@ -37,15 +39,16 @@ def __init__( ) -> None: self._request = request self._market_data_source = market_data_source - self._feature_computer = 
feature_computer + self._candle_feature_computer = candle_feature_computer self._micro_interval = micro_interval self._micro_lookback = micro_lookback self._medium_interval = medium_interval self._medium_lookback = medium_lookback self._symbols = list(dict.fromkeys(request.trading_config.symbols)) + self._market_snapshot_computer = market_snapshot_computer async def build(self) -> FeaturesPipelineResult: - """Fetch candles, compute feature vectors, and return market snapshot.""" + """Fetch candles, compute feature vectors, and append market features.""" # Determine symbols from the configured request so caller doesn't pass them candles_micro = await self._market_data_source.get_recent_candles( self._symbols, self._micro_interval, self._micro_lookback @@ -68,9 +71,12 @@ async def build(self) -> FeaturesPipelineResult: ) market_snapshot = market_snapshot or {} - return FeaturesPipelineResult( - features=features, market_snapshot=market_snapshot + market_features = self._market_snapshot_computer.build( + market_snapshot, self._request.exchange_config.exchange_id ) + features.extend(market_features) + + return FeaturesPipelineResult(features=features) @classmethod def from_request(cls, request: UserRequest) -> DefaultFeaturesPipeline: @@ -78,9 +84,11 @@ def from_request(cls, request: UserRequest) -> DefaultFeaturesPipeline: market_data_source = SimpleMarketDataSource( exchange_id=request.exchange_config.exchange_id ) - feature_computer = SimpleCandleFeatureComputer() + candle_feature_computer = SimpleCandleFeatureComputer() + market_snapshot_computer = MarketSnapshotFeatureComputer() return cls( request=request, market_data_source=market_data_source, - feature_computer=feature_computer, + feature_computer=candle_feature_computer, + market_snapshot_computer=market_snapshot_computer, ) diff --git a/python/valuecell/agents/strategy_agent/models.py b/python/valuecell/agents/strategy_agent/models.py index 19708c465..d1a4656e5 100644 --- 
a/python/valuecell/agents/strategy_agent/models.py +++ b/python/valuecell/agents/strategy_agent/models.py @@ -679,9 +679,6 @@ class ComposeContext(BaseModel): ) portfolio: PortfolioView digest: "TradeDigest" - market_snapshot: MarketSnapShotType = Field( - default=None, description="Optional map symbol -> current reference price" - ) class HistoryRecord(BaseModel): diff --git a/python/valuecell/agents/strategy_agent/portfolio/in_memory.py b/python/valuecell/agents/strategy_agent/portfolio/in_memory.py index de8fd4296..e59caa58c 100644 --- a/python/valuecell/agents/strategy_agent/portfolio/in_memory.py +++ b/python/valuecell/agents/strategy_agent/portfolio/in_memory.py @@ -3,7 +3,7 @@ from ..models import ( Constraints, - MarketSnapShotType, + FeatureVector, MarketType, PortfolioView, PositionSnapshot, @@ -70,7 +70,7 @@ def get_view(self) -> PortfolioView: return self._view def apply_trades( - self, trades: List[TradeHistoryEntry], market_snapshot: MarketSnapShotType + self, trades: List[TradeHistoryEntry], market_features: List[FeatureVector] ) -> None: """Apply trades and update portfolio positions and aggregates. 
@@ -81,8 +81,8 @@ def apply_trades( backward compatibility) - portfolio aggregates: gross_exposure, net_exposure, total_value (equity), total_unrealized_pnl, buying_power """ - # Extract price map from new market snapshot structure - price_map = extract_price_map(market_snapshot) + # Extract price map from market feature bundle + price_map = extract_price_map(market_features) for trade in trades: symbol = trade.instrument.symbol diff --git a/python/valuecell/agents/strategy_agent/portfolio/interfaces.py b/python/valuecell/agents/strategy_agent/portfolio/interfaces.py index fdb4dc1cf..49c8a39a2 100644 --- a/python/valuecell/agents/strategy_agent/portfolio/interfaces.py +++ b/python/valuecell/agents/strategy_agent/portfolio/interfaces.py @@ -3,7 +3,7 @@ from abc import ABC, abstractmethod from typing import List, Optional -from ..models import MarketSnapShotType, PortfolioView, TradeHistoryEntry +from ..models import FeatureVector, PortfolioView, TradeHistoryEntry class PortfolioService(ABC): @@ -18,14 +18,15 @@ def get_view(self) -> PortfolioView: raise NotImplementedError def apply_trades( - self, trades: List[TradeHistoryEntry], market_snapshot: MarketSnapShotType + self, trades: List[TradeHistoryEntry], market_features: List[FeatureVector] ) -> None: """Apply executed trades to the portfolio view (optional). Implementations that support state changes (paper trading, backtests) - should update their internal view accordingly. This method is optional - for read-only portfolio services, but providing it here makes the - contract explicit to callers. + should update their internal view accordingly. `market_features` + contains interval="market" vectors for price references. This method + is optional for read-only portfolio services, but providing it here + makes the contract explicit to callers. 
""" raise NotImplementedError diff --git a/python/valuecell/agents/strategy_agent/utils.py b/python/valuecell/agents/strategy_agent/utils.py index db8201c89..e86ff320a 100644 --- a/python/valuecell/agents/strategy_agent/utils.py +++ b/python/valuecell/agents/strategy_agent/utils.py @@ -1,11 +1,13 @@ import os from datetime import datetime, timezone -from typing import Dict, Optional +from typing import Dict, List, Optional import ccxt.pro as ccxtpro import httpx from loguru import logger +from .models import FeatureVector + def get_current_timestamp_ms() -> int: """Get current timestamp in milliseconds.""" @@ -69,36 +71,40 @@ async def fetch_free_cash_from_gateway(execution_gateway, symbols: list[str]) -> return float(free_cash) -def extract_price_map(market_snapshot: Dict) -> Dict[str, float]: - """Extract a simple symbol -> price mapping from market snapshot structure. - - The market snapshot structure is: - { - "BTC/USDT:USDT": { - "price": {ticker dict with "last", "close", etc.}, - "open_interest": {...}, - "funding_rate": {...} - } - } +def extract_price_map(features: List[FeatureVector]) -> Dict[str, float]: + """Extract symbol -> price map from market snapshot feature vectors.""" - Returns: - Dict[symbol, last_price] for internal use in quantity normalization. 
- """ price_map: Dict[str, float] = {} - for symbol, data in market_snapshot.items(): - if not isinstance(data, dict): + + for item in features: + if not isinstance(item, FeatureVector): continue - price_obj = data.get("price") - if isinstance(price_obj, dict): - # Prefer "last" over "close" for real-time pricing - last_price = price_obj.get("last") or price_obj.get("close") - if last_price is not None: - try: - price_map[symbol] = float(last_price) - except (ValueError, TypeError): - logger.warning( - "Failed to parse price for {}: {}", symbol, last_price - ) + + meta = item.meta or {} + group_key = meta.get("group_by_key") + if group_key != "market_snapshot": + continue + + instrument = getattr(item, "instrument", None) + symbol = getattr(instrument, "symbol", None) + if not symbol: + continue + + values = item.values or {} + price = ( + values.get("price.last") + or values.get("price.close") + or values.get("price.mark") + or values.get("funding.mark_price") + ) + if price is None: + continue + + try: + price_map[symbol] = float(price) + except (TypeError, ValueError): + logger.warning("Failed to parse feature price for {}", symbol) + return price_map From 52b38856a3930a5cbc4a5efae58ac0eea53e7839 Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Wed, 19 Nov 2025 16:07:23 +0800 Subject: [PATCH 06/15] refactor: update feature computer references in DefaultFeaturesPipeline --- python/valuecell/agents/strategy_agent/features/pipeline.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/valuecell/agents/strategy_agent/features/pipeline.py b/python/valuecell/agents/strategy_agent/features/pipeline.py index 745586330..90d25e642 100644 --- a/python/valuecell/agents/strategy_agent/features/pipeline.py +++ b/python/valuecell/agents/strategy_agent/features/pipeline.py @@ -53,12 +53,12 @@ async def build(self) -> FeaturesPipelineResult: candles_micro = await self._market_data_source.get_recent_candles( 
self._symbols, self._micro_interval, self._micro_lookback ) - micro_features = self._feature_computer.compute_features(candles=candles_micro) + micro_features = self._candle_feature_computer.compute_features(candles=candles_micro) candles_medium = await self._market_data_source.get_recent_candles( self._symbols, self._medium_interval, self._medium_lookback ) - medium_features = self._feature_computer.compute_features( + medium_features = self._candle_feature_computer.compute_features( candles=candles_medium ) @@ -89,6 +89,6 @@ def from_request(cls, request: UserRequest) -> DefaultFeaturesPipeline: return cls( request=request, market_data_source=market_data_source, - feature_computer=candle_feature_computer, + candle_feature_computer=candle_feature_computer, market_snapshot_computer=market_snapshot_computer, ) From e207b98e24f5620cf43e6f3665efa7e4974e62c4 Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Wed, 19 Nov 2025 16:23:43 +0800 Subject: [PATCH 07/15] reorganize module structure --- .../configs/agent_cards/strategy_agent.json | 2 +- .../{strategy_agent => common}/__init__.py | 0 .../trading}/README.md | 0 .../data => common/trading}/__init__.py | 0 .../trading}/_internal/__init__.py | 0 .../trading/_internal/coordinator.py} | 52 +++++++------------ .../trading/_internal}/runtime.py | 35 +++++++------ .../trading}/_internal/stream_controller.py | 9 ++-- .../agent.py => common/trading/base_agent.py} | 24 +++++---- .../trading}/constants.py | 0 .../trading/data}/__init__.py | 0 .../trading}/data/interfaces.py | 4 +- .../trading}/data/market.py | 13 +++-- .../trading}/data/news.py | 0 .../common/trading/decision/__init__.py | 6 +++ .../trading}/decision/interfaces.py | 16 ++---- .../decision/prompt_based}/__init__.py | 0 .../decision/prompt_based}/composer.py | 21 +++++--- .../decision/prompt_based}/system_prompt.py | 0 .../trading}/execution/__init__.py | 4 +- .../trading}/execution/ccxt_trading.py | 11 ++-- 
.../trading}/execution/factory.py | 8 +-- .../trading}/execution/interfaces.py | 8 ++- .../trading}/execution/paper_trading.py | 15 ++++-- .../common/trading/features/__init__.py | 6 +++ .../trading}/features/candle.py | 3 +- .../trading}/features/interfaces.py | 16 +++--- .../trading}/features/market_snapshot.py | 12 +++-- .../trading}/features/multimodal.py | 0 .../trading}/features/news.py | 0 .../trading}/features/pipeline.py | 20 ++++--- .../trading}/models.py | 32 +++++++++++- .../trading}/portfolio/__init__.py | 0 .../trading}/portfolio/in_memory.py | 9 ++-- .../trading}/portfolio/interfaces.py | 10 ++-- .../trading/trading_history/__init__.py | 12 +++++ .../trading}/trading_history/digest.py | 12 +++-- .../trading}/trading_history/interfaces.py | 6 +-- .../trading}/trading_history/recorder.py | 7 +-- .../trading}/utils.py | 2 +- .../__init__.py | 0 .../__main__.py | 2 +- .../core.py} | 19 ++++--- .../templates/aggressive.txt | 0 .../templates/default.txt | 0 .../templates/insane.txt | 0 python/valuecell/server/api/routers/models.py | 2 +- .../server/api/routers/strategy_agent.py | 2 +- python/valuecell/server/db/init_db.py | 2 +- .../server/services/strategy_persistence.py | 2 +- 50 files changed, 243 insertions(+), 161 deletions(-) rename python/valuecell/agents/{strategy_agent => common}/__init__.py (100%) rename python/valuecell/agents/{strategy_agent => common/trading}/README.md (100%) rename python/valuecell/agents/{strategy_agent/data => common/trading}/__init__.py (100%) rename python/valuecell/agents/{strategy_agent => common/trading}/_internal/__init__.py (100%) rename python/valuecell/agents/{strategy_agent/core.py => common/trading/_internal/coordinator.py} (95%) rename python/valuecell/agents/{strategy_agent => common/trading/_internal}/runtime.py (83%) rename python/valuecell/agents/{strategy_agent => common/trading}/_internal/stream_controller.py (96%) rename python/valuecell/agents/{strategy_agent/agent.py => common/trading/base_agent.py} 
(93%) rename python/valuecell/agents/{strategy_agent => common/trading}/constants.py (100%) rename python/valuecell/agents/{strategy_agent/decision => common/trading/data}/__init__.py (100%) rename python/valuecell/agents/{strategy_agent => common/trading}/data/interfaces.py (93%) rename python/valuecell/agents/{strategy_agent => common/trading}/data/market.py (96%) rename python/valuecell/agents/{strategy_agent => common/trading}/data/news.py (100%) create mode 100644 python/valuecell/agents/common/trading/decision/__init__.py rename python/valuecell/agents/{strategy_agent => common/trading}/decision/interfaces.py (75%) rename python/valuecell/agents/{strategy_agent/features => common/trading/decision/prompt_based}/__init__.py (100%) rename python/valuecell/agents/{strategy_agent/decision => common/trading/decision/prompt_based}/composer.py (99%) rename python/valuecell/agents/{strategy_agent/decision => common/trading/decision/prompt_based}/system_prompt.py (100%) rename python/valuecell/agents/{strategy_agent => common/trading}/execution/__init__.py (85%) rename python/valuecell/agents/{strategy_agent => common/trading}/execution/ccxt_trading.py (99%) rename python/valuecell/agents/{strategy_agent => common/trading}/execution/factory.py (90%) rename python/valuecell/agents/{strategy_agent => common/trading}/execution/interfaces.py (90%) rename python/valuecell/agents/{strategy_agent => common/trading}/execution/paper_trading.py (88%) create mode 100644 python/valuecell/agents/common/trading/features/__init__.py rename python/valuecell/agents/{strategy_agent => common/trading}/features/candle.py (98%) rename python/valuecell/agents/{strategy_agent => common/trading}/features/interfaces.py (87%) rename python/valuecell/agents/{strategy_agent => common/trading}/features/market_snapshot.py (91%) rename python/valuecell/agents/{strategy_agent => common/trading}/features/multimodal.py (100%) rename python/valuecell/agents/{strategy_agent => 
common/trading}/features/news.py (100%) rename python/valuecell/agents/{strategy_agent => common/trading}/features/pipeline.py (91%) rename python/valuecell/agents/{strategy_agent => common/trading}/models.py (97%) rename python/valuecell/agents/{strategy_agent => common/trading}/portfolio/__init__.py (100%) rename python/valuecell/agents/{strategy_agent => common/trading}/portfolio/in_memory.py (98%) rename python/valuecell/agents/{strategy_agent => common/trading}/portfolio/interfaces.py (88%) create mode 100644 python/valuecell/agents/common/trading/trading_history/__init__.py rename python/valuecell/agents/{strategy_agent => common/trading}/trading_history/digest.py (97%) rename python/valuecell/agents/{strategy_agent => common/trading}/trading_history/interfaces.py (85%) rename python/valuecell/agents/{strategy_agent => common/trading}/trading_history/recorder.py (76%) rename python/valuecell/agents/{strategy_agent => common/trading}/utils.py (98%) rename python/valuecell/agents/{strategy_agent/trading_history => prompt_strategy_agent}/__init__.py (100%) rename python/valuecell/agents/{strategy_agent => prompt_strategy_agent}/__main__.py (81%) rename python/valuecell/agents/{strategy_agent/prompt_agent.py => prompt_strategy_agent/core.py} (74%) rename python/valuecell/agents/{strategy_agent => prompt_strategy_agent}/templates/aggressive.txt (100%) rename python/valuecell/agents/{strategy_agent => prompt_strategy_agent}/templates/default.txt (100%) rename python/valuecell/agents/{strategy_agent => prompt_strategy_agent}/templates/insane.txt (100%) diff --git a/python/configs/agent_cards/strategy_agent.json b/python/configs/agent_cards/strategy_agent.json index 76a78d4ba..bd6c970c2 100644 --- a/python/configs/agent_cards/strategy_agent.json +++ b/python/configs/agent_cards/strategy_agent.json @@ -26,6 +26,6 @@ "author": "ValueCell Team", "tags": ["strategy", "trading", "llm", "demo"], "notes": "This card is a lightweight example; replace model api_key and tune 
parameters for production use.", - "local_agent_class": "valuecell.agents.strategy_agent.prompt_agent:StrategyAgent" + "local_agent_class": "valuecell.agents.prompt_strategy_agent.core:StrategyAgent" } } diff --git a/python/valuecell/agents/strategy_agent/__init__.py b/python/valuecell/agents/common/__init__.py similarity index 100% rename from python/valuecell/agents/strategy_agent/__init__.py rename to python/valuecell/agents/common/__init__.py diff --git a/python/valuecell/agents/strategy_agent/README.md b/python/valuecell/agents/common/trading/README.md similarity index 100% rename from python/valuecell/agents/strategy_agent/README.md rename to python/valuecell/agents/common/trading/README.md diff --git a/python/valuecell/agents/strategy_agent/data/__init__.py b/python/valuecell/agents/common/trading/__init__.py similarity index 100% rename from python/valuecell/agents/strategy_agent/data/__init__.py rename to python/valuecell/agents/common/trading/__init__.py diff --git a/python/valuecell/agents/strategy_agent/_internal/__init__.py b/python/valuecell/agents/common/trading/_internal/__init__.py similarity index 100% rename from python/valuecell/agents/strategy_agent/_internal/__init__.py rename to python/valuecell/agents/common/trading/_internal/__init__.py diff --git a/python/valuecell/agents/strategy_agent/core.py b/python/valuecell/agents/common/trading/_internal/coordinator.py similarity index 95% rename from python/valuecell/agents/strategy_agent/core.py rename to python/valuecell/agents/common/trading/_internal/coordinator.py index 33ab71191..14da8ec01 100644 --- a/python/valuecell/agents/strategy_agent/core.py +++ b/python/valuecell/agents/common/trading/_internal/coordinator.py @@ -1,25 +1,23 @@ from __future__ import annotations from abc import ABC, abstractmethod -from dataclasses import dataclass -from typing import List, Optional +from typing import List from loguru import logger from valuecell.utils.uuid import generate_uuid -from 
.decision.interfaces import Composer -from .execution.interfaces import ExecutionGateway -from .features.interfaces import FeaturesPipeline -from .models import ( +from ..decision import BaseComposer +from ..execution import BaseExecutionGateway +from ..features.interfaces import BaseFeaturesPipeline +from ..models import ( ComposeContext, + DecisionCycleResult, FeatureVector, HistoryRecord, MarketType, - PortfolioView, StrategyStatus, StrategySummary, - TradeDigest, TradeHistoryEntry, TradeInstruction, TradeSide, @@ -29,30 +27,16 @@ TxStatus, UserRequest, ) -from .portfolio.interfaces import PortfolioService -from .trading_history.interfaces import DigestBuilder, HistoryRecorder -from .utils import ( +from ..portfolio.interfaces import BasePortfolioService +from ..trading_history import ( + BaseDigestBuilder, + BaseHistoryRecorder, +) +from ..utils import ( fetch_free_cash_from_gateway, get_current_timestamp_ms, ) - -@dataclass -class DecisionCycleResult: - """Outcome of a single decision cycle.""" - - compose_id: str - timestamp_ms: int - cycle_index: int - rationale: Optional[str] - strategy_summary: StrategySummary - instructions: List[TradeInstruction] - trades: List[TradeHistoryEntry] - history_records: List[HistoryRecord] - digest: TradeDigest - portfolio_view: PortfolioView - - # Core interfaces for orchestration and portfolio service. # Plain ABCs to avoid runtime dependencies on pydantic. Concrete implementations # wire the pipeline: data -> features -> composer -> execution -> history/digest. 
@@ -89,12 +73,12 @@ def __init__( *, request: UserRequest, strategy_id: str, - portfolio_service: PortfolioService, - features_pipeline: FeaturesPipeline, - composer: Composer, - execution_gateway: ExecutionGateway, - history_recorder: HistoryRecorder, - digest_builder: DigestBuilder, + portfolio_service: BasePortfolioService, + features_pipeline: BaseFeaturesPipeline, + composer: BaseComposer, + execution_gateway: BaseExecutionGateway, + history_recorder: BaseHistoryRecorder, + digest_builder: BaseDigestBuilder, ) -> None: self._request = request self.strategy_id = strategy_id diff --git a/python/valuecell/agents/strategy_agent/runtime.py b/python/valuecell/agents/common/trading/_internal/runtime.py similarity index 83% rename from python/valuecell/agents/strategy_agent/runtime.py rename to python/valuecell/agents/common/trading/_internal/runtime.py index ebc871cd7..6c3137f07 100644 --- a/python/valuecell/agents/strategy_agent/runtime.py +++ b/python/valuecell/agents/common/trading/_internal/runtime.py @@ -3,21 +3,22 @@ from valuecell.utils.uuid import generate_uuid -from .core import DecisionCycleResult, DefaultDecisionCoordinator -from .decision.composer import LlmComposer -from .decision.interfaces import Composer -from .execution.factory import create_execution_gateway -from .execution.interfaces import ExecutionGateway -from .features.interfaces import FeaturesPipeline -from .features.pipeline import DefaultFeaturesPipeline -from .models import Constraints, TradingMode, UserRequest -from .portfolio.in_memory import InMemoryPortfolioService -from .trading_history.digest import RollingDigestBuilder -from .trading_history.recorder import InMemoryHistoryRecorder -from .utils import fetch_free_cash_from_gateway - - -async def _create_execution_gateway(request: UserRequest) -> ExecutionGateway: +from ..decision import BaseComposer, LlmComposer +from ..execution import BaseExecutionGateway +from ..execution.factory import create_execution_gateway +from ..features 
import DefaultFeaturesPipeline +from ..features.interfaces import BaseFeaturesPipeline +from ..models import Constraints, DecisionCycleResult, TradingMode, UserRequest +from ..portfolio.in_memory import InMemoryPortfolioService +from ..trading_history import ( + InMemoryHistoryRecorder, + RollingDigestBuilder, +) +from ..utils import fetch_free_cash_from_gateway +from .coordinator import DefaultDecisionCoordinator + + +async def _create_execution_gateway(request: UserRequest) -> BaseExecutionGateway: """Create execution gateway asynchronously, handling LIVE mode balance fetching.""" execution_gateway = await create_execution_gateway(request.exchange_config) @@ -47,8 +48,8 @@ async def run_cycle(self) -> DecisionCycleResult: async def create_strategy_runtime( request: UserRequest, - composer: Optional[Composer] = None, - features_pipeline: Optional[FeaturesPipeline] = None, + composer: Optional[BaseComposer] = None, + features_pipeline: Optional[BaseFeaturesPipeline] = None, ) -> StrategyRuntime: """Create a strategy runtime with async initialization (supports both paper and live trading). 
diff --git a/python/valuecell/agents/strategy_agent/_internal/stream_controller.py b/python/valuecell/agents/common/trading/_internal/stream_controller.py similarity index 96% rename from python/valuecell/agents/strategy_agent/_internal/stream_controller.py rename to python/valuecell/agents/common/trading/_internal/stream_controller.py index b3eb2c8be..ff24f267c 100644 --- a/python/valuecell/agents/strategy_agent/_internal/stream_controller.py +++ b/python/valuecell/agents/common/trading/_internal/stream_controller.py @@ -14,13 +14,14 @@ from loguru import logger +from valuecell.agents.common.trading.utils import get_current_timestamp_ms from valuecell.server.services import strategy_persistence -from ..utils import get_current_timestamp_ms - if TYPE_CHECKING: - from ..core import DecisionCycleResult - from ..runtime import StrategyRuntime + from valuecell.agents.common.trading._internal.coordinator import ( + DecisionCycleResult, + ) + from valuecell.agents.common.trading._internal.runtime import StrategyRuntime class ControllerState(str, Enum): diff --git a/python/valuecell/agents/strategy_agent/agent.py b/python/valuecell/agents/common/trading/base_agent.py similarity index 93% rename from python/valuecell/agents/strategy_agent/agent.py rename to python/valuecell/agents/common/trading/base_agent.py index de8eaf50d..84e22c42b 100644 --- a/python/valuecell/agents/strategy_agent/agent.py +++ b/python/valuecell/agents/common/trading/base_agent.py @@ -6,22 +6,24 @@ from loguru import logger -from valuecell.core.agent.responses import streaming -from valuecell.core.types import BaseAgent, StreamResponse - -from ._internal.stream_controller import StreamController -from .models import ( +from valuecell.agents.common.trading._internal.runtime import create_strategy_runtime +from valuecell.agents.common.trading._internal.stream_controller import StreamController +from valuecell.agents.common.trading.models import ( ComponentType, StrategyStatus, StrategyStatusContent, 
UserRequest, ) -from .runtime import create_strategy_runtime +from valuecell.core.agent.responses import streaming +from valuecell.core.types import BaseAgent, StreamResponse if TYPE_CHECKING: - from .decision.interfaces import Composer - from .features.interfaces import FeaturesPipeline - from .runtime import DecisionCycleResult, StrategyRuntime + from valuecell.agents.common.trading._internal.runtime import ( + DecisionCycleResult, + StrategyRuntime, + ) + from valuecell.agents.common.trading.decision import Composer + from valuecell.agents.common.trading.features.interfaces import BaseFeaturesPipeline class BaseStrategyAgent(BaseAgent, ABC): @@ -41,7 +43,9 @@ class BaseStrategyAgent(BaseAgent, ABC): """ @abstractmethod - def _build_features_pipeline(self, request: UserRequest) -> FeaturesPipeline | None: + def _build_features_pipeline( + self, request: UserRequest + ) -> BaseFeaturesPipeline | None: """Build the features pipeline for the strategy. Return a `FeaturesPipeline` implementation to customize how market data diff --git a/python/valuecell/agents/strategy_agent/constants.py b/python/valuecell/agents/common/trading/constants.py similarity index 100% rename from python/valuecell/agents/strategy_agent/constants.py rename to python/valuecell/agents/common/trading/constants.py diff --git a/python/valuecell/agents/strategy_agent/decision/__init__.py b/python/valuecell/agents/common/trading/data/__init__.py similarity index 100% rename from python/valuecell/agents/strategy_agent/decision/__init__.py rename to python/valuecell/agents/common/trading/data/__init__.py diff --git a/python/valuecell/agents/strategy_agent/data/interfaces.py b/python/valuecell/agents/common/trading/data/interfaces.py similarity index 93% rename from python/valuecell/agents/strategy_agent/data/interfaces.py rename to python/valuecell/agents/common/trading/data/interfaces.py index ee2702a7e..cbad57a47 100644 --- a/python/valuecell/agents/strategy_agent/data/interfaces.py +++ 
b/python/valuecell/agents/common/trading/data/interfaces.py @@ -3,14 +3,14 @@ from abc import ABC, abstractmethod from typing import List -from ..models import Candle, MarketSnapShotType +from valuecell.agents.common.trading.models import Candle, MarketSnapShotType # Contracts for market data sources (module-local abstract interfaces). # These are plain ABCs (not Pydantic models) so implementations can be # synchronous or asynchronous without runtime overhead. -class MarketDataSource(ABC): +class BaseMarketDataSource(ABC): """Abstract market data access used by feature computation. Implementations should fetch recent ticks or candles for the requested diff --git a/python/valuecell/agents/strategy_agent/data/market.py b/python/valuecell/agents/common/trading/data/market.py similarity index 96% rename from python/valuecell/agents/strategy_agent/data/market.py rename to python/valuecell/agents/common/trading/data/market.py index 7612a9b95..b999a7783 100644 --- a/python/valuecell/agents/strategy_agent/data/market.py +++ b/python/valuecell/agents/common/trading/data/market.py @@ -3,12 +3,17 @@ from loguru import logger -from ..models import Candle, InstrumentRef, MarketSnapShotType -from ..utils import get_exchange_cls, normalize_symbol -from .interfaces import MarketDataSource +from valuecell.agents.common.trading.models import ( + Candle, + InstrumentRef, + MarketSnapShotType, +) +from valuecell.agents.common.trading.utils import get_exchange_cls, normalize_symbol +from .interfaces import BaseMarketDataSource -class SimpleMarketDataSource(MarketDataSource): + +class SimpleMarketDataSource(BaseMarketDataSource): """Generates synthetic candle data for each symbol or fetches via ccxt.pro. 
If `exchange_id` was provided at construction time and `ccxt.pro` is diff --git a/python/valuecell/agents/strategy_agent/data/news.py b/python/valuecell/agents/common/trading/data/news.py similarity index 100% rename from python/valuecell/agents/strategy_agent/data/news.py rename to python/valuecell/agents/common/trading/data/news.py diff --git a/python/valuecell/agents/common/trading/decision/__init__.py b/python/valuecell/agents/common/trading/decision/__init__.py new file mode 100644 index 000000000..4176e7676 --- /dev/null +++ b/python/valuecell/agents/common/trading/decision/__init__.py @@ -0,0 +1,6 @@ +"""Decision making components.""" + +from .interfaces import BaseComposer +from .prompt_based.composer import LlmComposer + +__all__ = ["BaseComposer", "LlmComposer"] diff --git a/python/valuecell/agents/strategy_agent/decision/interfaces.py b/python/valuecell/agents/common/trading/decision/interfaces.py similarity index 75% rename from python/valuecell/agents/strategy_agent/decision/interfaces.py rename to python/valuecell/agents/common/trading/decision/interfaces.py index 22bd195a3..bb1752094 100644 --- a/python/valuecell/agents/strategy_agent/decision/interfaces.py +++ b/python/valuecell/agents/common/trading/decision/interfaces.py @@ -1,23 +1,17 @@ from __future__ import annotations from abc import ABC, abstractmethod -from dataclasses import dataclass -from typing import List, Optional - -from ..models import ComposeContext, TradeInstruction - - -@dataclass -class ComposeResult: - instructions: List[TradeInstruction] - rationale: Optional[str] = None +from valuecell.agents.common.trading.models import ( + ComposeContext, + ComposeResult, +) # Contracts for decision making (module-local abstract interfaces). # Composer hosts the LLM call and guardrails, producing executable instructions. -class Composer(ABC): +class BaseComposer(ABC): """LLM-driven decision composer with guardrails. 
Input: ComposeContext diff --git a/python/valuecell/agents/strategy_agent/features/__init__.py b/python/valuecell/agents/common/trading/decision/prompt_based/__init__.py similarity index 100% rename from python/valuecell/agents/strategy_agent/features/__init__.py rename to python/valuecell/agents/common/trading/decision/prompt_based/__init__.py diff --git a/python/valuecell/agents/strategy_agent/decision/composer.py b/python/valuecell/agents/common/trading/decision/prompt_based/composer.py similarity index 99% rename from python/valuecell/agents/strategy_agent/decision/composer.py rename to python/valuecell/agents/common/trading/decision/prompt_based/composer.py index 74a357876..46c842123 100644 --- a/python/valuecell/agents/strategy_agent/decision/composer.py +++ b/python/valuecell/agents/common/trading/decision/prompt_based/composer.py @@ -8,25 +8,30 @@ from agno.agent import Agent as AgnoAgent from loguru import logger -from valuecell.utils import env as env_utils -from valuecell.utils import model as model_utils - -from ..models import ( +from valuecell.agents.common.trading.models import ( ComposeContext, + ComposeResult, Constraints, + FeatureVector, LlmDecisionAction, LlmPlanProposal, MarketType, TradeInstruction, TradeSide, - UserRequest, FeatureVector, + UserRequest, +) +from valuecell.agents.common.trading.utils import ( + extract_price_map, + send_discord_message, ) -from ..utils import extract_price_map, send_discord_message -from .interfaces import Composer, ComposeResult +from valuecell.utils import env as env_utils +from valuecell.utils import model as model_utils + +from ..interfaces import BaseComposer from .system_prompt import SYSTEM_PROMPT -class LlmComposer(Composer): +class LlmComposer(BaseComposer): """LLM-driven composer that turns context into trade instructions. 
The core flow follows the README design: diff --git a/python/valuecell/agents/strategy_agent/decision/system_prompt.py b/python/valuecell/agents/common/trading/decision/prompt_based/system_prompt.py similarity index 100% rename from python/valuecell/agents/strategy_agent/decision/system_prompt.py rename to python/valuecell/agents/common/trading/decision/prompt_based/system_prompt.py diff --git a/python/valuecell/agents/strategy_agent/execution/__init__.py b/python/valuecell/agents/common/trading/execution/__init__.py similarity index 85% rename from python/valuecell/agents/strategy_agent/execution/__init__.py rename to python/valuecell/agents/common/trading/execution/__init__.py index 70d26af97..c8ffaccd4 100644 --- a/python/valuecell/agents/strategy_agent/execution/__init__.py +++ b/python/valuecell/agents/common/trading/execution/__init__.py @@ -2,11 +2,11 @@ from .ccxt_trading import CCXTExecutionGateway, create_ccxt_gateway from .factory import create_execution_gateway, create_execution_gateway_sync -from .interfaces import ExecutionGateway +from .interfaces import BaseExecutionGateway from .paper_trading import PaperExecutionGateway __all__ = [ - "ExecutionGateway", + "BaseExecutionGateway", "PaperExecutionGateway", "CCXTExecutionGateway", "create_ccxt_gateway", diff --git a/python/valuecell/agents/strategy_agent/execution/ccxt_trading.py b/python/valuecell/agents/common/trading/execution/ccxt_trading.py similarity index 99% rename from python/valuecell/agents/strategy_agent/execution/ccxt_trading.py rename to python/valuecell/agents/common/trading/execution/ccxt_trading.py index f32c5a4a7..fa97fd8d6 100644 --- a/python/valuecell/agents/strategy_agent/execution/ccxt_trading.py +++ b/python/valuecell/agents/common/trading/execution/ccxt_trading.py @@ -15,23 +15,20 @@ import ccxt.async_support as ccxt from loguru import logger -from ..models import ( +from valuecell.agents.common.trading.models import ( FeatureVector, - MarketType, - MarginMode, PriceMode, 
TradeInstruction, TradeSide, - TradeType, - TradingMode, TxResult, TxStatus, derive_side_from_action, ) -from .interfaces import ExecutionGateway +from .interfaces import BaseExecutionGateway -class CCXTExecutionGateway(ExecutionGateway): + +class CCXTExecutionGateway(BaseExecutionGateway): """Async execution gateway using CCXT unified API for real exchanges. Features: diff --git a/python/valuecell/agents/strategy_agent/execution/factory.py b/python/valuecell/agents/common/trading/execution/factory.py similarity index 90% rename from python/valuecell/agents/strategy_agent/execution/factory.py rename to python/valuecell/agents/common/trading/execution/factory.py index 06c12dbdf..04d58f8c8 100644 --- a/python/valuecell/agents/strategy_agent/execution/factory.py +++ b/python/valuecell/agents/common/trading/execution/factory.py @@ -5,14 +5,14 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from ..models import ExchangeConfig + from valuecell.agents.common.trading.models import ExchangeConfig from .ccxt_trading import CCXTExecutionGateway -from .interfaces import ExecutionGateway +from .interfaces import BaseExecutionGateway from .paper_trading import PaperExecutionGateway -async def create_execution_gateway(config: ExchangeConfig) -> ExecutionGateway: +async def create_execution_gateway(config: ExchangeConfig) -> BaseExecutionGateway: """Create an execution gateway based on exchange configuration. Args: @@ -63,7 +63,7 @@ async def create_execution_gateway(config: ExchangeConfig) -> ExecutionGateway: raise ValueError(f"Unsupported trading mode: {config.trading_mode}") -def create_execution_gateway_sync(config: ExchangeConfig) -> ExecutionGateway: +def create_execution_gateway_sync(config: ExchangeConfig) -> BaseExecutionGateway: """Synchronous version that returns paper gateway or raises for live mode. Use this when you need a gateway immediately without async initialization. 
diff --git a/python/valuecell/agents/strategy_agent/execution/interfaces.py b/python/valuecell/agents/common/trading/execution/interfaces.py similarity index 90% rename from python/valuecell/agents/strategy_agent/execution/interfaces.py rename to python/valuecell/agents/common/trading/execution/interfaces.py index d503cfd86..72021c3b7 100644 --- a/python/valuecell/agents/strategy_agent/execution/interfaces.py +++ b/python/valuecell/agents/common/trading/execution/interfaces.py @@ -3,13 +3,17 @@ from abc import ABC, abstractmethod from typing import List, Optional -from ..models import FeatureVector, TradeInstruction, TxResult +from valuecell.agents.common.trading.models import ( + FeatureVector, + TradeInstruction, + TxResult, +) # Contracts for execution gateways (module-local abstract interfaces). # An implementation may route to a real exchange or a paper broker. -class ExecutionGateway(ABC): +class BaseExecutionGateway(ABC): """Executes normalized trade instructions against an exchange/broker.""" @abstractmethod diff --git a/python/valuecell/agents/strategy_agent/execution/paper_trading.py b/python/valuecell/agents/common/trading/execution/paper_trading.py similarity index 88% rename from python/valuecell/agents/strategy_agent/execution/paper_trading.py rename to python/valuecell/agents/common/trading/execution/paper_trading.py index 54df5bd1f..a0ade3d11 100644 --- a/python/valuecell/agents/strategy_agent/execution/paper_trading.py +++ b/python/valuecell/agents/common/trading/execution/paper_trading.py @@ -1,11 +1,18 @@ from typing import List, Optional -from ..models import FeatureVector, TradeInstruction, TradeSide, TxResult, derive_side_from_action -from ..utils import extract_price_map -from .interfaces import ExecutionGateway +from valuecell.agents.common.trading.models import ( + FeatureVector, + TradeInstruction, + TradeSide, + TxResult, + derive_side_from_action, +) +from valuecell.agents.common.trading.utils import extract_price_map +from .interfaces 
import BaseExecutionGateway -class PaperExecutionGateway(ExecutionGateway): + +class PaperExecutionGateway(BaseExecutionGateway): """Async paper executor that simulates fills with slippage and fees. - Uses instruction.max_slippage_bps to compute execution price around snapshot. diff --git a/python/valuecell/agents/common/trading/features/__init__.py b/python/valuecell/agents/common/trading/features/__init__.py new file mode 100644 index 000000000..4946b3390 --- /dev/null +++ b/python/valuecell/agents/common/trading/features/__init__.py @@ -0,0 +1,6 @@ +"""Feature computation components.""" + +from .interfaces import BaseFeaturesPipeline +from .pipeline import DefaultFeaturesPipeline + +__all__ = ["DefaultFeaturesPipeline", "BaseFeaturesPipeline"] diff --git a/python/valuecell/agents/strategy_agent/features/candle.py b/python/valuecell/agents/common/trading/features/candle.py similarity index 98% rename from python/valuecell/agents/strategy_agent/features/candle.py rename to python/valuecell/agents/common/trading/features/candle.py index bb55693e5..c24417af7 100644 --- a/python/valuecell/agents/strategy_agent/features/candle.py +++ b/python/valuecell/agents/common/trading/features/candle.py @@ -4,7 +4,8 @@ import numpy as np import pandas as pd -from ..models import Candle, FeatureVector +from valuecell.agents.common.trading.models import Candle, FeatureVector + from .interfaces import CandleBasedFeatureComputer diff --git a/python/valuecell/agents/strategy_agent/features/interfaces.py b/python/valuecell/agents/common/trading/features/interfaces.py similarity index 87% rename from python/valuecell/agents/strategy_agent/features/interfaces.py rename to python/valuecell/agents/common/trading/features/interfaces.py index 4c69b5449..20569fc5b 100644 --- a/python/valuecell/agents/strategy_agent/features/interfaces.py +++ b/python/valuecell/agents/common/trading/features/interfaces.py @@ -1,10 +1,13 @@ from __future__ import annotations from abc import ABC, abstractmethod 
-from dataclasses import dataclass from typing import Any, Dict, List, Optional -from ..models import Candle, FeatureVector +from valuecell.agents.common.trading.models import ( + Candle, + FeaturesPipelineResult, + FeatureVector, +) # Contracts for feature computation (module-local abstract interfaces). # Plain ABCs (not Pydantic) to keep implementations lightweight. @@ -36,14 +39,7 @@ def compute_features( raise NotImplementedError -@dataclass -class FeaturesPipelineResult: - """Result of running a features pipeline.""" - - features: List[FeatureVector] - - -class FeaturesPipeline(ABC): +class BaseFeaturesPipeline(ABC): """Abstract pipeline that produces feature vectors (including market features).""" @abstractmethod diff --git a/python/valuecell/agents/strategy_agent/features/market_snapshot.py b/python/valuecell/agents/common/trading/features/market_snapshot.py similarity index 91% rename from python/valuecell/agents/strategy_agent/features/market_snapshot.py rename to python/valuecell/agents/common/trading/features/market_snapshot.py index 4883c7315..c59270555 100644 --- a/python/valuecell/agents/strategy_agent/features/market_snapshot.py +++ b/python/valuecell/agents/common/trading/features/market_snapshot.py @@ -2,8 +2,12 @@ from typing import Dict, List -from ..models import FeatureVector, InstrumentRef, MarketSnapShotType -from ..utils import get_current_timestamp_ms +from valuecell.agents.common.trading.models import ( + FeatureVector, + InstrumentRef, + MarketSnapShotType, +) +from valuecell.agents.common.trading.utils import get_current_timestamp_ms class MarketSnapshotFeatureComputer: @@ -14,7 +18,9 @@ class MarketSnapshotFeatureComputer: makes the pipeline easier to test and replace. 
""" - def build(self, market_snapshot: MarketSnapShotType, exchange_id: str) -> List[FeatureVector]: + def build( + self, market_snapshot: MarketSnapShotType, exchange_id: str + ) -> List[FeatureVector]: features: List[FeatureVector] = [] now_ts = get_current_timestamp_ms() diff --git a/python/valuecell/agents/strategy_agent/features/multimodal.py b/python/valuecell/agents/common/trading/features/multimodal.py similarity index 100% rename from python/valuecell/agents/strategy_agent/features/multimodal.py rename to python/valuecell/agents/common/trading/features/multimodal.py diff --git a/python/valuecell/agents/strategy_agent/features/news.py b/python/valuecell/agents/common/trading/features/news.py similarity index 100% rename from python/valuecell/agents/strategy_agent/features/news.py rename to python/valuecell/agents/common/trading/features/news.py diff --git a/python/valuecell/agents/strategy_agent/features/pipeline.py b/python/valuecell/agents/common/trading/features/pipeline.py similarity index 91% rename from python/valuecell/agents/strategy_agent/features/pipeline.py rename to python/valuecell/agents/common/trading/features/pipeline.py index 90d25e642..29af101d2 100644 --- a/python/valuecell/agents/strategy_agent/features/pipeline.py +++ b/python/valuecell/agents/common/trading/features/pipeline.py @@ -10,26 +10,30 @@ from typing import List -from ..data.interfaces import MarketDataSource +from valuecell.agents.common.trading.models import ( + FeaturesPipelineResult, + FeatureVector, + UserRequest, +) + +from ..data.interfaces import BaseMarketDataSource from ..data.market import SimpleMarketDataSource -from ..models import FeatureVector, UserRequest from .candle import SimpleCandleFeatureComputer from .interfaces import ( + BaseFeaturesPipeline, CandleBasedFeatureComputer, - FeaturesPipeline, - FeaturesPipelineResult, ) from .market_snapshot import MarketSnapshotFeatureComputer -class DefaultFeaturesPipeline(FeaturesPipeline): +class 
DefaultFeaturesPipeline(BaseFeaturesPipeline): """Default pipeline using the simple data source and feature computer.""" def __init__( self, *, request: UserRequest, - market_data_source: MarketDataSource, + market_data_source: BaseMarketDataSource, candle_feature_computer: CandleBasedFeatureComputer, market_snapshot_computer: MarketSnapshotFeatureComputer, micro_interval: str = "1s", @@ -53,7 +57,9 @@ async def build(self) -> FeaturesPipelineResult: candles_micro = await self._market_data_source.get_recent_candles( self._symbols, self._micro_interval, self._micro_lookback ) - micro_features = self._candle_feature_computer.compute_features(candles=candles_micro) + micro_features = self._candle_feature_computer.compute_features( + candles=candles_micro + ) candles_medium = await self._market_data_source.get_recent_candles( self._symbols, self._medium_interval, self._medium_lookback diff --git a/python/valuecell/agents/strategy_agent/models.py b/python/valuecell/agents/common/trading/models.py similarity index 97% rename from python/valuecell/agents/strategy_agent/models.py rename to python/valuecell/agents/common/trading/models.py index 67c790495..11e74122a 100644 --- a/python/valuecell/agents/strategy_agent/models.py +++ b/python/valuecell/agents/common/trading/models.py @@ -1,9 +1,10 @@ +from dataclasses import dataclass from enum import Enum from typing import Any, Dict, List, Optional from pydantic import BaseModel, Field, field_validator, model_validator -from .constants import ( +from valuecell.agents.common.trading.constants import ( DEFAULT_AGENT_MODEL, DEFAULT_CAP_FACTOR, DEFAULT_INITIAL_CAPITAL, @@ -810,3 +811,32 @@ class StrategyStatusContent(BaseModel): strategy_id: str status: StrategyStatus + + +class ComposeResult(BaseModel): + """Result of a compose operation.""" + + instructions: List[TradeInstruction] + rationale: Optional[str] = None + + +class FeaturesPipelineResult(BaseModel): + """Result of running a features pipeline.""" + + features: 
List[FeatureVector] + + +@dataclass +class DecisionCycleResult: + """Outcome of a single decision cycle.""" + + compose_id: str + timestamp_ms: int + cycle_index: int + rationale: Optional[str] + strategy_summary: StrategySummary + instructions: List[TradeInstruction] + trades: List[TradeHistoryEntry] + history_records: List[HistoryRecord] + digest: TradeDigest + portfolio_view: PortfolioView diff --git a/python/valuecell/agents/strategy_agent/portfolio/__init__.py b/python/valuecell/agents/common/trading/portfolio/__init__.py similarity index 100% rename from python/valuecell/agents/strategy_agent/portfolio/__init__.py rename to python/valuecell/agents/common/trading/portfolio/__init__.py diff --git a/python/valuecell/agents/strategy_agent/portfolio/in_memory.py b/python/valuecell/agents/common/trading/portfolio/in_memory.py similarity index 98% rename from python/valuecell/agents/strategy_agent/portfolio/in_memory.py rename to python/valuecell/agents/common/trading/portfolio/in_memory.py index 79a3f9b69..db6159ce2 100644 --- a/python/valuecell/agents/strategy_agent/portfolio/in_memory.py +++ b/python/valuecell/agents/common/trading/portfolio/in_memory.py @@ -1,7 +1,7 @@ from datetime import datetime, timezone from typing import List, Optional -from ..models import ( +from valuecell.agents.common.trading.models import ( Constraints, FeatureVector, MarketType, @@ -12,11 +12,12 @@ TradeType, TradingMode, ) -from ..utils import extract_price_map -from .interfaces import PortfolioService +from valuecell.agents.common.trading.utils import extract_price_map +from .interfaces import BasePortfolioService -class InMemoryPortfolioService(PortfolioService): + +class InMemoryPortfolioService(BasePortfolioService): """Tracks cash and positions in memory and computes derived metrics. 
Notes: diff --git a/python/valuecell/agents/strategy_agent/portfolio/interfaces.py b/python/valuecell/agents/common/trading/portfolio/interfaces.py similarity index 88% rename from python/valuecell/agents/strategy_agent/portfolio/interfaces.py rename to python/valuecell/agents/common/trading/portfolio/interfaces.py index 49c8a39a2..3471ef4c4 100644 --- a/python/valuecell/agents/strategy_agent/portfolio/interfaces.py +++ b/python/valuecell/agents/common/trading/portfolio/interfaces.py @@ -3,10 +3,14 @@ from abc import ABC, abstractmethod from typing import List, Optional -from ..models import FeatureVector, PortfolioView, TradeHistoryEntry +from valuecell.agents.common.trading.models import ( + FeatureVector, + PortfolioView, + TradeHistoryEntry, +) -class PortfolioService(ABC): +class BasePortfolioService(ABC): """Provides current portfolio state to decision modules. Keep this as a read-only service used by DecisionCoordinator and Composer. @@ -31,7 +35,7 @@ def apply_trades( raise NotImplementedError -class PortfolioSnapshotStore(ABC): +class BasePortfolioSnapshotStore(ABC): """Persist/load portfolio snapshots (optional for paper/backtest modes).""" @abstractmethod diff --git a/python/valuecell/agents/common/trading/trading_history/__init__.py b/python/valuecell/agents/common/trading/trading_history/__init__.py new file mode 100644 index 000000000..c005f905f --- /dev/null +++ b/python/valuecell/agents/common/trading/trading_history/__init__.py @@ -0,0 +1,12 @@ +"""Trading history recording and digest building.""" + +from .digest import RollingDigestBuilder +from .interfaces import BaseDigestBuilder, BaseHistoryRecorder +from .recorder import InMemoryHistoryRecorder + +__all__ = [ + "InMemoryHistoryRecorder", + "RollingDigestBuilder", + "BaseHistoryRecorder", + "BaseDigestBuilder", +] diff --git a/python/valuecell/agents/strategy_agent/trading_history/digest.py b/python/valuecell/agents/common/trading/trading_history/digest.py similarity index 97% rename from 
python/valuecell/agents/strategy_agent/trading_history/digest.py rename to python/valuecell/agents/common/trading/trading_history/digest.py index 525183953..4551dd9d7 100644 --- a/python/valuecell/agents/strategy_agent/trading_history/digest.py +++ b/python/valuecell/agents/common/trading/trading_history/digest.py @@ -3,8 +3,14 @@ import numpy as np -from ..models import HistoryRecord, InstrumentRef, TradeDigest, TradeDigestEntry -from .interfaces import DigestBuilder +from valuecell.agents.common.trading.models import ( + HistoryRecord, + InstrumentRef, + TradeDigest, + TradeDigestEntry, +) + +from .interfaces import BaseDigestBuilder # Risk-free rate for Sharpe Ratio calculation (annualized, 3% for this example) RISK_FREE_RATE = 0.03 @@ -13,7 +19,7 @@ SECONDS_PER_YEAR = 365 * 24 * 3600 -class RollingDigestBuilder(DigestBuilder): +class RollingDigestBuilder(BaseDigestBuilder): """Builds a lightweight digest from recent execution records.""" def __init__(self, window: int = 50) -> None: diff --git a/python/valuecell/agents/strategy_agent/trading_history/interfaces.py b/python/valuecell/agents/common/trading/trading_history/interfaces.py similarity index 85% rename from python/valuecell/agents/strategy_agent/trading_history/interfaces.py rename to python/valuecell/agents/common/trading/trading_history/interfaces.py index 5d1139ab6..8cc58adc8 100644 --- a/python/valuecell/agents/strategy_agent/trading_history/interfaces.py +++ b/python/valuecell/agents/common/trading/trading_history/interfaces.py @@ -3,12 +3,12 @@ from abc import ABC, abstractmethod from typing import List -from ..models import HistoryRecord, TradeDigest +from valuecell.agents.common.trading.models import HistoryRecord, TradeDigest # Contracts for history recording and digest building (module-local abstract interfaces). 
-class HistoryRecorder(ABC): +class BaseHistoryRecorder(ABC): """Persists important checkpoints for later analysis and digest building.""" @abstractmethod @@ -22,7 +22,7 @@ def get_records(self) -> List[HistoryRecord]: raise NotImplementedError -class DigestBuilder(ABC): +class BaseDigestBuilder(ABC): """Builds TradeDigest from historical records (incremental or batch).""" @abstractmethod diff --git a/python/valuecell/agents/strategy_agent/trading_history/recorder.py b/python/valuecell/agents/common/trading/trading_history/recorder.py similarity index 76% rename from python/valuecell/agents/strategy_agent/trading_history/recorder.py rename to python/valuecell/agents/common/trading/trading_history/recorder.py index a4f2f1ad3..2578253ea 100644 --- a/python/valuecell/agents/strategy_agent/trading_history/recorder.py +++ b/python/valuecell/agents/common/trading/trading_history/recorder.py @@ -1,10 +1,11 @@ from typing import List -from ..models import HistoryRecord -from .interfaces import HistoryRecorder +from valuecell.agents.common.trading.models import HistoryRecord +from .interfaces import BaseHistoryRecorder -class InMemoryHistoryRecorder(HistoryRecorder): + +class InMemoryHistoryRecorder(BaseHistoryRecorder): """In-memory recorder storing history records.""" def __init__(self, history_limit: int = 200) -> None: diff --git a/python/valuecell/agents/strategy_agent/utils.py b/python/valuecell/agents/common/trading/utils.py similarity index 98% rename from python/valuecell/agents/strategy_agent/utils.py rename to python/valuecell/agents/common/trading/utils.py index e86ff320a..5c7e0b314 100644 --- a/python/valuecell/agents/strategy_agent/utils.py +++ b/python/valuecell/agents/common/trading/utils.py @@ -6,7 +6,7 @@ import httpx from loguru import logger -from .models import FeatureVector +from valuecell.agents.common.trading.models import FeatureVector def get_current_timestamp_ms() -> int: diff --git 
a/python/valuecell/agents/strategy_agent/trading_history/__init__.py b/python/valuecell/agents/prompt_strategy_agent/__init__.py similarity index 100% rename from python/valuecell/agents/strategy_agent/trading_history/__init__.py rename to python/valuecell/agents/prompt_strategy_agent/__init__.py diff --git a/python/valuecell/agents/strategy_agent/__main__.py b/python/valuecell/agents/prompt_strategy_agent/__main__.py similarity index 81% rename from python/valuecell/agents/strategy_agent/__main__.py rename to python/valuecell/agents/prompt_strategy_agent/__main__.py index 6c2019369..b39097bb9 100644 --- a/python/valuecell/agents/strategy_agent/__main__.py +++ b/python/valuecell/agents/prompt_strategy_agent/__main__.py @@ -2,7 +2,7 @@ from valuecell.core.agent import create_wrapped_agent -from .prompt_agent import StrategyAgent +from .core import StrategyAgent if __name__ == "__main__": agent = create_wrapped_agent(StrategyAgent) diff --git a/python/valuecell/agents/strategy_agent/prompt_agent.py b/python/valuecell/agents/prompt_strategy_agent/core.py similarity index 74% rename from python/valuecell/agents/strategy_agent/prompt_agent.py rename to python/valuecell/agents/prompt_strategy_agent/core.py index 8c2cefd9e..6689aba00 100644 --- a/python/valuecell/agents/strategy_agent/prompt_agent.py +++ b/python/valuecell/agents/prompt_strategy_agent/core.py @@ -7,13 +7,16 @@ from __future__ import annotations -from .agent import BaseStrategyAgent -from .decision.composer import LlmComposer -from .decision.interfaces import Composer -from .features.pipeline import DefaultFeaturesPipeline, FeaturesPipeline -from .models import UserRequest +from valuecell.agents.common.trading.base_agent import BaseStrategyAgent +from valuecell.agents.common.trading.decision import BaseComposer, LlmComposer +from valuecell.agents.common.trading.features import ( + BaseFeaturesPipeline, + DefaultFeaturesPipeline, +) +from valuecell.agents.common.trading.models import UserRequest +# TODO: 
Rename to PromptBasedStrategyAgent class StrategyAgent(BaseStrategyAgent): """Default strategy agent with standard feature computation and LLM composer. @@ -36,12 +39,14 @@ def _build_features_pipeline(self, request): return MyCustomPipeline(request) """ - def _build_features_pipeline(self, request: UserRequest) -> FeaturesPipeline | None: + def _build_features_pipeline( + self, request: UserRequest + ) -> BaseFeaturesPipeline | None: """Use the default features pipeline built from the user request.""" return DefaultFeaturesPipeline.from_request(request) - def _create_decision_composer(self, request: UserRequest) -> Composer | None: + def _create_decision_composer(self, request: UserRequest) -> BaseComposer | None: """Use default LLM-based composer.""" return LlmComposer(request=request) diff --git a/python/valuecell/agents/strategy_agent/templates/aggressive.txt b/python/valuecell/agents/prompt_strategy_agent/templates/aggressive.txt similarity index 100% rename from python/valuecell/agents/strategy_agent/templates/aggressive.txt rename to python/valuecell/agents/prompt_strategy_agent/templates/aggressive.txt diff --git a/python/valuecell/agents/strategy_agent/templates/default.txt b/python/valuecell/agents/prompt_strategy_agent/templates/default.txt similarity index 100% rename from python/valuecell/agents/strategy_agent/templates/default.txt rename to python/valuecell/agents/prompt_strategy_agent/templates/default.txt diff --git a/python/valuecell/agents/strategy_agent/templates/insane.txt b/python/valuecell/agents/prompt_strategy_agent/templates/insane.txt similarity index 100% rename from python/valuecell/agents/strategy_agent/templates/insane.txt rename to python/valuecell/agents/prompt_strategy_agent/templates/insane.txt diff --git a/python/valuecell/server/api/routers/models.py b/python/valuecell/server/api/routers/models.py index 8c7c1c53e..89038a54a 100644 --- a/python/valuecell/server/api/routers/models.py +++ 
b/python/valuecell/server/api/routers/models.py @@ -25,7 +25,7 @@ # Optional fallback constants from StrategyAgent try: - from valuecell.agents.strategy_agent.constants import ( + from valuecell.agents.common.trading.constants import ( DEFAULT_AGENT_MODEL, DEFAULT_MODEL_PROVIDER, ) diff --git a/python/valuecell/server/api/routers/strategy_agent.py b/python/valuecell/server/api/routers/strategy_agent.py index 94724eb8e..27b2c5a4c 100644 --- a/python/valuecell/server/api/routers/strategy_agent.py +++ b/python/valuecell/server/api/routers/strategy_agent.py @@ -9,7 +9,7 @@ from loguru import logger from sqlalchemy.orm import Session -from valuecell.agents.strategy_agent.models import ( +from valuecell.agents.common.trading.models import ( StrategyStatus, StrategyStatusContent, UserRequest, diff --git a/python/valuecell/server/db/init_db.py b/python/valuecell/server/db/init_db.py index 86f421120..9822dec5b 100644 --- a/python/valuecell/server/db/init_db.py +++ b/python/valuecell/server/db/init_db.py @@ -420,7 +420,7 @@ def initialize_basic_data(self) -> bool: template_path = ( Path(__file__).resolve().parents[2] / "agents" - / "strategy_agent" + / "prompt_strategy_agent" / "templates" / "default.txt" ) diff --git a/python/valuecell/server/services/strategy_persistence.py b/python/valuecell/server/services/strategy_persistence.py index a33b6b85c..53d442757 100644 --- a/python/valuecell/server/services/strategy_persistence.py +++ b/python/valuecell/server/services/strategy_persistence.py @@ -3,7 +3,7 @@ from loguru import logger -from valuecell.agents.strategy_agent import models as agent_models +from valuecell.agents.common.trading import models as agent_models from valuecell.server.db.repositories.strategy_repository import ( get_strategy_repository, ) From f1897e83c1b7267ba1930270782da651ac9eb782 Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Wed, 19 Nov 2025 17:31:50 +0800 Subject: [PATCH 08/15] refactor: update README to 
reflect the Trading Agent Framework and its architecture --- .../valuecell/agents/common/trading/README.md | 850 ++++++++++++++---- 1 file changed, 655 insertions(+), 195 deletions(-) diff --git a/python/valuecell/agents/common/trading/README.md b/python/valuecell/agents/common/trading/README.md index 3602f2e0e..2fa610683 100644 --- a/python/valuecell/agents/common/trading/README.md +++ b/python/valuecell/agents/common/trading/README.md @@ -1,210 +1,670 @@ -# Strategy Agent (Design Overview) +# Trading Agent Framework -This document describes the design for the Strategy Agent: a lightweight, LLM-driven trading decision pipeline with a short, testable chain from market data to executable instructions, plus history and digest for feedback. +This document describes the common trading agent framework: a flexible, composable architecture for building LLM-driven trading strategies with clean separation of concerns and extensible components. -- Assumptions (current stage): - - Real-time data (no explicit handling of late/out-of-order data yet) - - No complex live-trading order/fill/cancel processing (kept out of scope for now) - - Decisions are generated by an LLM inside the composer; guardrails normalize the output into executable instructions. +## Architecture Overview -## Goals +The framework provides a base class (`BaseStrategyAgent`) that handles lifecycle management, streaming, and persistence, while allowing users to customize decision logic and feature computation through well-defined extension points. 
-- Keep the dependency flow one-way and simple: data → features → composer(LLM + guardrails) → execution → history/digest -- Clearly defined DTOs and interfaces so each module can be developed and tested in isolation -- Minimal surface area for configuration: the strategy prompt is a plain string (prompt_text) -- Idempotent and auditable: each composition run has a compose_id; any optional - auditing metadata (prompt hash, model name, token usage, latency, filters) - is recorded as a HistoryRecord payload (no separate report object). +**Key Principles:** + +- **Separation of Concerns**: Data fetching, feature computation, decision making, execution, and history tracking are independent modules +- **Extensibility**: Override specific components without rewriting the entire pipeline +- **Type Safety**: Pydantic models ensure data contracts across boundaries +- **Async-First**: Built on asyncio for efficient I/O and concurrent operations +- **Auditable**: Each decision cycle has a unique `compose_id` for tracing ## Module Layout -- `data/` - - `market_data.py` — Market data source (candles) abstraction(s) -- `features/` - - `technical_indicators.py`, `multimodal_analysis.py`, etc. 
— Feature computation from raw data -- `decision/` - - `composer.py` — LLM decision + normalization + guardrails (core) - - `system_prompt.py` (optional) — prompt templates, or store in config/constants -- `execution/` - - `exchanges.py`, `paper_trading.py` — Gateways to real or paper execution (only instructions input for now) -- `trading_history/` - - `recorder.py` — Persist key checkpoints - - `digest.py` — Build `TradeDigest` for historical guidance -- Root files - - `models.py` — DTOs only (interfaces live in module-level files) - - `core.py` — DecisionCoordinator (wires the full decision cycle) - - `constants.py` — Basic configuration/limits; can hold prompt_text initially +```text +common/trading/ +├── base_agent.py # BaseStrategyAgent abstract class +├── models.py # Pydantic DTOs and enums +├── constants.py # Default configuration values +├── utils.py # Shared utilities +├── _internal/ # Internal runtime implementation +│ ├── coordinator.py # DefaultDecisionCoordinator +│ ├── runtime.py # StrategyRuntime factory +│ └── stream_controller.py # Persistence and streaming +├── data/ # Market data sources +│ ├── interfaces.py # BaseMarketDataSource +│ └── market.py # SimpleMarketDataSource (CCXT) +├── features/ # Feature computation +│ ├── interfaces.py # BaseFeaturesPipeline, CandleBasedFeatureComputer +│ ├── pipeline.py # DefaultFeaturesPipeline +│ ├── candle.py # SimpleCandleFeatureComputer +│ └── market_snapshot.py # MarketSnapshotFeatureComputer +├── decision/ # Decision composers +│ ├── interfaces.py # BaseComposer +│ └── prompt_based/ +│ ├── composer.py # LlmComposer +│ └── system_prompt.py +├── execution/ # Trade execution +│ ├── interfaces.py # BaseExecutionGateway +│ ├── factory.py # Gateway factory +│ ├── paper_trading.py # PaperExecutionGateway +│ └── ccxt_trading.py # CCXTExecutionGateway (live) +├── portfolio/ # Portfolio management +│ ├── interfaces.py # BasePortfolioService +│ └── in_memory.py # InMemoryPortfolioService +└── trading_history/ # 
History and digest + ├── interfaces.py # BaseHistoryRecorder, BaseDigestBuilder + ├── recorder.py # InMemoryHistoryRecorder + └── digest.py # RollingDigestBuilder +``` + +## Data Flow (Decision Cycle) + +Each decision cycle follows this pipeline: + +1. **Portfolio State**: Coordinator fetches current `PortfolioView` (positions, cash, constraints) +2. **Data Collection**: Pipeline pulls market data (candles, tickers, funding rates, etc.) +3. **Feature Computation**: Pipeline computes `FeatureVector[]` from raw data +4. **Context Assembly**: Coordinator builds `ComposeContext` with features, portfolio, digest, and constraints +5. **Decision**: Composer (LLM + guardrails) produces normalized `TradeInstruction[]` +6. **Execution**: Gateway executes instructions and returns `TxResult[]` +7. **Portfolio Update**: Service applies trades to update positions and metrics +8. **History**: Recorder checkpoints features, instructions, trades, and summary +9. **Digest**: Builder updates `TradeDigest` for next cycle's context + +```text + ┌─────────────┐ + │ Portfolio │ + │ View │ + └──────┬──────┘ + │ + ▼ +┌─────────────┐ ┌──────────────┐ ┌─────────────┐ +│ Market │───▶│ Features │───▶│ Context │ +│ Data │ │ Pipeline │ │ Assembly │ +└─────────────┘ └──────────────┘ └──────┬──────┘ + │ + ▼ + ┌─────────────┐ + │ Composer │ + │ │ + └──────┬──────┘ + │ + ▼ + ┌─────────────┐ + │ Execution │ + │ Gateway │ + └──────┬──────┘ + │ + ▼ + ┌──────────────────────────────────────┴────────────────┐ + │ │ + ▼ ▼ +┌─────────────┐ ┌─────────────┐ +│ History │ │ Portfolio │ +│ Recorder │ │ Update │ +└──────┬──────┘ └─────────────┘ + │ + ▼ +┌─────────────┐ +│ Digest │ +│ Builder │ +└─────────────┘ +``` + +## Core Data Models + +### Configuration + +UserRequest + +- `llm_model_config: LLMModelConfig` — AI model settings +- `exchange_config: ExchangeConfig` — Trading mode, exchange, credentials +- `trading_config: TradingConfig` — Strategy parameters + +TradingConfig + +- `strategy_name?: str` — 
Display name +- `initial_capital: float` — Starting capital (USD) +- `max_leverage: float` — Maximum leverage allowed +- `max_positions: int` — Concurrent position limit +- `symbols: List[str]` — Instruments to trade (e.g., `["BTC-USDT", "ETH-USDT"]`) +- `decide_interval: int` — Seconds between cycles +- `custom_prompt?: str` — Custom strategy prompt +- `prompt_text?: str` — Additional prompt text + +### Market Data + +Candle + +- `ts: int` — Timestamp (milliseconds) +- `instrument: InstrumentRef` — Symbol reference +- `open, high, low, close, volume: float` — OHLCV data +- `interval: str` — Timeframe (e.g., "1m", "1h") + +FeatureVector + +- `ts: int` — Feature timestamp +- `instrument: InstrumentRef` — Associated symbol +- `values: Dict[str, float]` — Feature key-value pairs +- `meta?: Dict[str, Any]` — Optional metadata (interval, group_by_key, etc.) + +### Portfolio + +PositionSnapshot + +- `instrument: InstrumentRef` +- `quantity: float` — Signed quantity (+long, -short) +- `avg_price?: float` — Average entry price +- `mark_price?: float` — Current market price +- `unrealized_pnl?: float` — Unrealized profit/loss +- `leverage?: float` — Applied leverage +- `entry_ts?: int` — Entry timestamp -## Data Flow (one decision cycle) +PortfolioView -1. DecisionCoordinator pulls `PortfolioView` (positions, cash, optional constraints) -1. DecisionCoordinator gets recent `Candle` from `MarketDataSource` -1. `FeatureComputer` produces `FeatureVector[]` -1. 
DecisionCoordinator assembles `ComposeContext`: features (including `features.market_snapshot`), portfolio, digest, prompt_text (string), and extra constraints +- `strategy_id: str` +- `ts: int` — Snapshot timestamp +- `account_balance: float` — Cash balance +- `positions: Dict[str, PositionSnapshot]` — Active positions by symbol +- `total_value?: float` — Portfolio equity +- `total_unrealized_pnl?: float` — Sum of position unrealized PnL +- `buying_power?: float` — Available buying power +- `constraints?: Constraints` — Position and leverage limits -1. `Composer.compose(context)`: calls LLM with `ComposeContext` → `LlmPlanProposal`; normalizes plan (target position logic, limits, step size, min notional, cool-down, etc.); returns `TradeInstruction[]` +### Decision -1. `ExecutionGateway.execute(instructions, market_features)` (no detailed order/fill handling at this stage) -1. `HistoryRecorder.record(...)` checkpoints (including optional auditing metadata); +ComposeContext - DigestBuilder updates `TradeDigest` +- `ts: int` — Cycle timestamp +- `compose_id: str` — Unique cycle identifier +- `strategy_id: str` +- `features: List[FeatureVector]` — Computed features +- `portfolio: PortfolioView` — Current portfolio state +- `digest: TradeDigest` — Historical performance summary + +TradeInstruction + +- `instruction_id: str` — Unique instruction ID +- `compose_id: str` — Parent cycle ID +- `instrument: InstrumentRef` +- `side: TradeSide` — BUY or SELL +- `quantity: float` — Order quantity +- `leverage?: float` — Applied leverage +- `max_slippage_bps?: int` — Maximum slippage (basis points) +- `meta?: Dict` — Optional metadata + +### Execution + +TxResult + +- `instruction_id: str` +- `instrument: InstrumentRef` +- `side: TradeSide` +- `status: TxStatus` — FILLED, PARTIAL, REJECTED, ERROR +- `requested_qty: float` +- `filled_qty: float` +- `avg_exec_price?: float` +- `fee_cost?: float` +- `leverage?: float` + +### History + +TradeHistoryEntry + +- `trade_id: str` +- 
`compose_id: str` — Originating cycle +- `instruction_id: str` — Originating instruction +- `instrument: InstrumentRef` +- `side: TradeSide` +- `type: TradeType` — LONG or SHORT +- `quantity: float` +- `entry_price?, exit_price?: float` +- `entry_ts?, exit_ts?: int` +- `realized_pnl?: float` — Profit/loss on close +- `holding_ms?: int` — Position duration + +TradeDigest + +- `ts: int` +- `by_instrument: Dict[str, TradeDigestEntry]` +- `sharpe_ratio?: float` — Portfolio Sharpe ratio + +**TradeDigestEntry** (per-symbol stats) + +- `trade_count: int` +- `realized_pnl: float` +- `win_rate?: float` +- `avg_holding_ms?: int` +- `last_trade_ts?: int` + +--- + +## Integration Guide + +### Quick Start: Using the Default Agent + +The simplest way to create a trading agent is to use `prompt_strategy_agent`, which provides default implementations for all components: + +```python +# python/valuecell/agents/prompt_strategy_agent/__main__.py +import asyncio +from valuecell.core.agent import create_wrapped_agent +from .core import StrategyAgent + +if __name__ == "__main__": + agent = create_wrapped_agent(StrategyAgent) + asyncio.run(agent.serve()) +``` + +**StrategyAgent** (in `core.py`) extends `BaseStrategyAgent` and uses: + +- `DefaultFeaturesPipeline`: Fetches candles and computes technical indicators +- `LlmComposer`: LLM-based decision making with guardrails + +To run: + +```bash +cd python/valuecell/agents/prompt_strategy_agent +python -m valuecell.agents.prompt_strategy_agent +``` + +### Custom Agent: Override Specific Components + +Create a custom agent by subclassing `BaseStrategyAgent` and overriding extension points: + +#### Example 1: Custom Feature Pipeline + +```python +from valuecell.agents.common.trading.base_agent import BaseStrategyAgent +from valuecell.agents.common.trading.features import BaseFeaturesPipeline +from valuecell.agents.common.trading.models import ( + FeaturesPipelineResult, + FeatureVector, + UserRequest, +) + +class 
MyFeaturesPipeline(BaseFeaturesPipeline): + """Custom pipeline with specialized indicators.""" + + def __init__(self, request: UserRequest): + self.request = request + self.symbols = request.trading_config.symbols + # Initialize custom data sources, indicators, etc. + + async def build(self) -> FeaturesPipelineResult: + features = [] + # Fetch data and compute custom features + # ... your logic here ... + return FeaturesPipelineResult(features=features) + + +class MyCustomAgent(BaseStrategyAgent): + """Agent with custom feature computation.""" + + def _build_features_pipeline( + self, request: UserRequest + ) -> BaseFeaturesPipeline | None: + return MyFeaturesPipeline(request) + + def _create_decision_composer(self, request: UserRequest): + # Use default LLM composer + return None +``` + +#### Example 2: Custom Decision Composer + +```python +from valuecell.agents.common.trading.decision import BaseComposer +from valuecell.agents.common.trading.models import ( + ComposeContext, + ComposeResult, + TradeInstruction, +) + +class RuleBasedComposer(BaseComposer): + """Simple rule-based decision maker (no LLM).""" + + def __init__(self, request: UserRequest): + self.request = request + + async def compose(self, context: ComposeContext) -> ComposeResult: + instructions = [] + # Implement your trading rules + # Example: Buy when RSI < 30, sell when RSI > 70 + for fv in context.features: + rsi = fv.values.get("rsi") + if rsi and rsi < 30: + # Create buy instruction + pass + elif rsi and rsi > 70: + # Create sell instruction + pass + + return ComposeResult( + instructions=instructions, + rationale="Rule-based signals" + ) + + +class RuleBasedAgent(BaseStrategyAgent): + """Agent using rule-based decisions.""" + + def _build_features_pipeline(self, request: UserRequest): + # Use default pipeline + return None + + def _create_decision_composer(self, request: UserRequest): + return RuleBasedComposer(request) +``` + +#### Example 3: Lifecycle Hooks + +```python +class 
MonitoredAgent(BaseStrategyAgent): + """Agent with custom monitoring and logging.""" + + def _on_start(self, runtime, request): + """Called once after runtime creation.""" + self.cycle_count = 0 + print(f"Strategy {runtime.strategy_id} starting...") + + def _on_cycle_result(self, result, runtime, request): + """Called after each cycle completes.""" + self.cycle_count += 1 + print(f"Cycle {self.cycle_count}: " + f"{len(result.trades)} trades, " + f"PnL: {result.strategy_summary.realized_pnl}") + + # Send metrics to external monitoring + # ... custom logic ... + + def _on_stop(self, runtime, request, reason): + """Called before finalization.""" + print(f"Strategy stopping: {reason}") + print(f"Total cycles: {self.cycle_count}") + + def _build_features_pipeline(self, request): + return None # Use defaults + + def _create_decision_composer(self, request): + return None # Use defaults +``` -ASCII overview: +### Creating a Complete Custom Agent Module + +**Directory Structure:** ```text -Data → Features → Composer(LLM+Guardrails) → Execution → History → Digest - ↑ ↓ ↑ - PortfolioView ----------------------------- | - prompt_text ----------------------------------------→ -``` - -## DTOs (Pydantic models) - -Defined in `models.py`: - -- Identification and raw data - - `InstrumentRef { symbol, exchange_id?, quote_ccy? }` - - `Candle { ts, instrument, open, high, low, close, volume, interval }` - -- User request / configuration - - `UserRequest { model_config: ModelConfig, exchange_config: ExchangeConfig, trading_config: TradingConfig }` - - `ModelConfig { provider, model_id, api_key }` - - `ExchangeConfig { exchange_id?, trading_mode, api_key?, secret_key? }` - - `TradingConfig { strategy_name?, initial_capital?, max_leverage?, max_positions?, symbols, decide_interval?, template_id?, custom_prompt? }` - -- Features and portfolio - - `FeatureVector { ts, instrument, values: Dict[str, float], meta? 
}` - - `PositionSnapshot { instrument, quantity, avg_price?, mark_price?, unrealized_pnl?, notional?, leverage?, entry_ts?, pnl_pct?, trade_type? }` - - `PortfolioView { strategy_id?, ts, cash, positions: Dict[symbol, PositionSnapshot], gross_exposure?, net_exposure?, constraints?, total_value?, total_unrealized_pnl?, available_cash? }` - -- LLM decision and normalization - - `LlmDecisionItem { instrument, action: (buy|sell|flat|noop), target_qty, confidence?, rationale? }` - - `LlmPlanProposal { ts, items: List[LlmDecisionItem], notes?, model_meta? }` - - `TradeInstruction { instruction_id, compose_id, instrument, side: (buy|sell), quantity, price_mode, limit_price?, max_slippage_bps?, meta? }` - - `ComposeContext { ts, compose_id, strategy_id?, features, portfolio, digest, prompt_text, constraints? }` - -- History and digest - - `HistoryRecord { ts, kind, reference_id, payload }` - - `TradeDigestEntry { instrument, trade_count, realized_pnl, win_rate?, avg_holding_ms?, last_trade_ts?, avg_entry_price?, max_drawdown?, recent_performance_score? }` - - `TradeDigest { ts, by_instrument: Dict[symbol, TradeDigestEntry] }` - -- UI/summary and series (optional; for leaderboard and charts) - - `TradingMode = (live|virtual)` - - `StrategyStatus = (running|paused|stopped|error)` - - `StrategySummary { strategy_id?, name?, model_provider?, model_id?, exchange_id?, mode?, status?, pnl_abs?, pnl_pct?, last_updated_ts? }` - - `StrategySummary { strategy_id?, name?, model_provider?, model_id?, exchange_id?, mode?, status?, realized_pnl?, unrealized_pnl?, pnl_pct?, last_updated_ts? }` - - `MetricPoint { ts, value }` - - `PortfolioValueSeries { strategy_id?, points: List[MetricPoint] }` - -`TradeHistoryEntry { trade_id?, compose_id?, instruction_id?, strategy_id?, trade_ts?, entry_ts?, exit_ts?, instrument, side, type, quantity, entry_price?, exit_price?, realized_pnl?, realized_pnl_pct?, holding_ms?, leverage?, note? }` - -Notes: - -- Only `target_qty` is used (no `delta_qty`). 
Composer computes `order_qty = target_qty − current_qty` and turns it into a `TradeInstruction` (side + quantity). -- Initial versions can set `price_mode = "market"` for simplicity. -Action semantics: - -- `flat`: target position is zero (may emit close-out instructions) -- `noop`: target equals current (delta == 0), emit no instruction - -Additional notes: - -- `mark_price` in `PositionSnapshot` allows consistent P&L visualization without coupling to feed-specific last trade logic. -- The UI-oriented DTOs (`StrategySummary`, `PortfolioValueSeries`, etc.) are additive and do not affect the core compose/execute pipeline. - -## ID and correlation model - -- `strategy_id`: identity of a running strategy; used by UI aggregation (`StrategySummary`, `PortfolioValueSeries`). -- `compose_id`: unique id generated per decision cycle by the coordinator. It is carried in `ComposeContext` and copied into each `TradeInstruction` for correlation. `HistoryRecord.reference_id` uses this id. -- `instruction_id`: deterministic id for idempotency, recommended format: `${compose_id}:${instrument.symbol}` (or include an ordinal if multiple instructions per instrument). -- `trade_id`: execution-layer id for a closed trade. `TradeHistoryEntry` can store `compose_id` and `instruction_id` optionally to link back to the decision that initiated it. 
- -## Abstract Interfaces (contracts) - -Interfaces live in their respective modules as ABCs (not Pydantic models): - -- `data/interfaces.py` - - `MarketDataSource.get_recent_candles(symbols, interval, lookback) -> List[Candle]` -- `features/interfaces.py` - - `FeatureComputer.compute_features(candles?: List[Candle]) -> List[FeatureVector]` -- `core.py` - - `DecisionCoordinator.run_once() -> None` -- `portfolio/interfaces.py` - - `PortfolioService.get_view() -> PortfolioView` - - `PortfolioSnapshotStore.load_latest() -> Optional[PortfolioView]` - - `PortfolioSnapshotStore.save(view: PortfolioView) -> None` -- `decision/interfaces.py` - - `Composer.compose(context: ComposeContext) -> List[TradeInstruction]` -- `execution/interfaces.py` - - `ExecutionGateway.execute(instructions: List[TradeInstruction], market_features?: List[FeatureVector]) -> None` -- `trading_history/interfaces.py` - - `HistoryRecorder.record(record: HistoryRecord) -> None` - - `DigestBuilder.build(records: List[HistoryRecord]) -> TradeDigest` - -## Guardrails (composer) - -- Position targeting: compute `order_qty` from `target_qty` vs current position -- Rounding: step size, minimum order quantity/nominal -- Limits: per-instrument cap, net exposure cap, optional shorting allowance -- Cool-down/recent performance: use `TradeDigest` to suppress or downweight -- Confidence threshold and invalid field filtering -- Audit: record optional metadata (prompt hash, model name, token usage, latency, rejection reasons) as a `HistoryRecord` payload at the "compose" checkpoint -- Fallback: if LLM output is invalid/empty, optionally use a simple deterministic rule from features or return no-op - -## History and Digest (clarified) - -We record a few compact checkpoints using `HistoryRecord { ts, kind, reference_id, payload }`: - -- kind = "features": - - reference_id: compose_id - - payload: a small summary (e.g., per-symbol feature keys and last values, or a hash) -- kind = "compose": - - reference_id: 
compose_id - - payload: optional auditing metadata (e.g., prompt_hash, model_name, token_usage, latency_ms, reasons filtered) -- kind = "instructions": - - reference_id: compose_id - - payload: the normalized `TradeInstruction[]` as a compact list or summary (symbol, side, qty) -- kind = "execution" (optional at this stage): - - reference_id: compose_id - - payload: ack/status if available from the gateway - -DigestBuilder consumes these records (recent N bars or N decisions) to build `TradeDigest`: - -- Per-instrument aggregates in `TradeDigestEntry`: - - trade_count, realized_pnl, win_rate, avg_holding_ms, last_trade_ts, - avg_entry_price, max_drawdown, recent_performance_score -- Update cadence: periodically (e.g., every M decisions or T minutes) or incrementally per instruction/execution -- Usage in composer: cool-down (skip recent losers), down-weight bad performers, - enforce simple risk heuristics (e.g., cap net additions if recent_performance_score < threshold) - -This keeps recording simple and purpose-driven for composer feedback without inventing a separate report object. - -## Runtime Modes - -- Paper trading: default mode (via `execution/paper_trading.py`) -- Live and backtest: future extensions; the same interfaces remain stable - -## Extensibility - -- Add new features by extending `FeatureComputer` -- Plug different LLM providers/parsers within `Composer` -- Add more execution backends by implementing `ExecutionGateway` -- Evolve digests: additional stats inside `TradeDigestEntry` without breaking composer - -## Out of Scope (current stage) - -- Order lifecycle (partial fills, cancels, rejections) -- Late/out-of-order data handling -- Complex portfolio accounting beyond `PortfolioView` - -## Minimal DecisionCoordinator Contract - -A typical `run_once()` should: - -1. `view = portfolio.get_view()` -2. Pull candles via `data` and compute `features = features.compute_features(candles=...)` -3. 
`context = ComposeContext(ts=..., features=features, portfolio=view, digest=..., prompt_text=..., constraints=...)` -4. `instructions = composer.compose(context)` -5. `executor.execute(instructions, market_features)` -6. Record `HistoryRecord` for features, compose auditing metadata, and instructions -7. Update `TradeDigest` periodically or incrementally +python/valuecell/agents/my_agent/ +├── __init__.py +├── __main__.py # Entry point +├── core.py # Agent implementation +├── features.py # Custom features (optional) +├── composer.py # Custom composer (optional) +└── templates/ + └── strategy.txt # Strategy prompt template +``` + +**`__main__.py`:** + +```python +import asyncio +from valuecell.core.agent import create_wrapped_agent +from .core import MyAgent + +if __name__ == "__main__": + agent = create_wrapped_agent(MyAgent) + asyncio.run(agent.serve()) +``` + +**`core.py`:** + +```python +from valuecell.agents.common.trading.base_agent import BaseStrategyAgent +from valuecell.agents.common.trading.models import UserRequest +from .features import MyFeaturesPipeline # if custom +from .composer import MyComposer # if custom + +class MyAgent(BaseStrategyAgent): + def _build_features_pipeline(self, request: UserRequest): + # Return custom pipeline or None for default + return MyFeaturesPipeline(request) + + def _create_decision_composer(self, request: UserRequest): + # Return custom composer or None for default + return MyComposer(request) +``` + +**Run your agent:** + +```bash +cd python/valuecell/agents/my_agent +python -m valuecell.agents.my_agent +``` + +### Live Trading Setup + +For live trading with real exchanges: + +**Set trading mode to LIVE:** + +```json +{ + "exchange_config": { + "trading_mode": "live", + "exchange_id": "binance", + "api_key": "YOUR_API_KEY", + "secret_key": "YOUR_SECRET_KEY", + "testnet": true // Use testnet first! 
+ } +} +``` + +**The runtime automatically:** + +- Fetches real account balance +- Sets `initial_capital` to available cash +- Uses `CCXTExecutionGateway` for order submission + +**Always test on testnet first** before using real funds + +### Testing Strategies + +#### Paper Trading (Default) + +```json +{ + "exchange_config": { + "trading_mode": "virtual", + "fee_bps": 10.0 // 0.1% simulated fees + } +} +``` + +Paper trading uses `PaperExecutionGateway` which: + +- Simulates order fills at market price ± slippage +- Applies configurable fees +- No real exchange connection needed + +#### Backtesting + +Create a custom `BaseMarketDataSource` that replays historical data: + +```python +from valuecell.agents.common.trading.data import BaseMarketDataSource +from valuecell.agents.common.trading.models import Candle + +class BacktestDataSource(BaseMarketDataSource): + def __init__(self, historical_data): + self.data = historical_data + self.current_index = 0 + + async def get_recent_candles(self, symbols, interval, lookback): + # Return historical candles for current timestamp + candles = self.data[self.current_index] + self.current_index += 1 + return candles + + async def get_market_snapshot(self, symbols): + # Return snapshot from historical data + return {} +``` + +Then use it in your custom pipeline. + +### Extension Points Summary + +| Component | Method | Purpose | +|-----------|--------|---------| +| **Features** | `_build_features_pipeline()` | Define how market data is fetched and processed | +| **Decision** | `_create_decision_composer()` | Customize trading logic (LLM, rules, ML) | +| **Lifecycle** | `_on_start()` | Initialize resources after runtime creation | +| **Lifecycle** | `_on_cycle_result()` | Monitor/log each cycle result | +| **Lifecycle** | `_on_stop()` | Cleanup before finalization | + +### Best Practices + +1. **Start with defaults**: Use `prompt_strategy_agent` as a template +2. **Override incrementally**: Only customize what you need +3. 
**Type safety**: Use Pydantic models for all data contracts +4. **Async operations**: Mark I/O operations as `async` +5. **Error handling**: Hooks swallow exceptions to prevent crashes +6. **Testnet first**: Always test live trading on testnet +7. **Monitor carefully**: Use lifecycle hooks for observability + +### Common Patterns + +#### Adding Custom Indicators + +```python +class CustomFeaturesPipeline(BaseFeaturesPipeline): + async def build(self): + # Fetch candles + candles = await self.market_data.get_recent_candles(...) + + # Compute standard indicators + rsi = compute_rsi(candles) + macd = compute_macd(candles) + + # Add custom indicators + my_signal = compute_custom_indicator(candles) + + features = [] + for symbol in symbols: + features.append(FeatureVector( + ts=timestamp, + instrument=InstrumentRef(symbol=symbol), + values={ + "rsi": rsi[symbol], + "macd": macd[symbol], + "custom_signal": my_signal[symbol], + } + )) + + return FeaturesPipelineResult(features=features) +``` + +#### Combining LLM with Rules + +```python +class HybridComposer(BaseComposer): + def __init__(self, request): + self.llm_composer = LlmComposer(request) + self.request = request + + async def compose(self, context): + # Get LLM suggestions + llm_result = await self.llm_composer.compose(context) + + # Apply additional rule filters + filtered_instructions = [] + for inst in llm_result.instructions: + if self._passes_risk_check(inst, context): + filtered_instructions.append(inst) + + return ComposeResult( + instructions=filtered_instructions, + rationale=f"LLM + risk filters: {llm_result.rationale}" + ) + + def _passes_risk_check(self, inst, context): + # Custom risk rules + return True +``` + +### Debugging + +Enable detailed logging: + +```python +import logging +from loguru import logger + +logger.add("strategy_{time}.log", level="DEBUG") +``` + +The framework logs: + +- Cycle start/end with compose_id +- Instruction count and details +- Execution results and fills +- Portfolio 
updates +- Error traces with context + +--- + +## Advanced Topics + +### Custom Execution Gateway + +Implement `BaseExecutionGateway` to integrate custom execution logic: + +```python +from valuecell.agents.common.trading.execution import BaseExecutionGateway +from valuecell.agents.common.trading.models import ( + TradeInstruction, + TxResult, + TxStatus, +) + +class MyExecutionGateway(BaseExecutionGateway): + async def execute(self, instructions, market_features): + results = [] + for inst in instructions: + # Your execution logic + result = TxResult( + instruction_id=inst.instruction_id, + instrument=inst.instrument, + side=inst.side, + status=TxStatus.FILLED, + requested_qty=inst.quantity, + filled_qty=inst.quantity, + avg_exec_price=100.0, # from your system + ) + results.append(result) + return results +``` + +### Custom Portfolio Service + +Override portfolio management for complex accounting: + +```python +from valuecell.agents.common.trading.portfolio import BasePortfolioService + +class CustomPortfolioService(BasePortfolioService): + def get_view(self): + # Return custom PortfolioView + pass + + def apply_trades(self, trades, market_features): + # Update internal state + pass +``` + +### Persistence Integration + +Use `StreamController` to persist strategy state to your database. The framework already handles: + +- Initial portfolio snapshot +- Cycle results (compose cycles, instructions, execution details) +- Final cleanup and status updates + +See `_internal/stream_controller.py` for persistence logic. 
From 53cb6c02618463968327e99949ca2124358f9547 Mon Sep 17 00:00:00 2001 From: paisley Date: Thu, 20 Nov 2025 10:07:31 +0800 Subject: [PATCH 09/15] fix conflict --- .../decision/grid_composer/__init__.py | 0 .../decision/grid_composer}/grid_composer.py | 13 ++++++------ .../valuecell/agents/grid_agent/grid_agent.py | 21 ++++++++++++------- 3 files changed, 20 insertions(+), 14 deletions(-) create mode 100644 python/valuecell/agents/common/trading/decision/grid_composer/__init__.py rename python/valuecell/agents/{grid_agent => common/trading/decision/grid_composer}/grid_composer.py (96%) diff --git a/python/valuecell/agents/common/trading/decision/grid_composer/__init__.py b/python/valuecell/agents/common/trading/decision/grid_composer/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/valuecell/agents/grid_agent/grid_composer.py b/python/valuecell/agents/common/trading/decision/grid_composer/grid_composer.py similarity index 96% rename from python/valuecell/agents/grid_agent/grid_composer.py rename to python/valuecell/agents/common/trading/decision/grid_composer/grid_composer.py index 0210e210c..8a8d712a8 100644 --- a/python/valuecell/agents/grid_agent/grid_composer.py +++ b/python/valuecell/agents/common/trading/decision/grid_composer/grid_composer.py @@ -5,17 +5,17 @@ from loguru import logger -from ..models import ( +from ...models import ( ComposeContext, + ComposeResult, InstrumentRef, LlmDecisionAction, LlmDecisionItem, LlmPlanProposal, MarketType, - TradeInstruction, UserRequest, ) -from .composer import LlmComposer +from ..prompt_based.composer import LlmComposer class GridComposer(LlmComposer): @@ -54,7 +54,7 @@ def __init__( self._max_steps = int(max_steps) self._base_fraction = float(base_fraction) - async def compose(self, context: ComposeContext) -> List[TradeInstruction]: + async def compose(self, context: ComposeContext) -> ComposeResult: # Prepare buying power/constraints/price map, then generate plan and reuse parent 
normalization equity, allowed_lev, constraints, _projected_gross, price_map = ( self._init_buying_power_context(context) @@ -262,7 +262,7 @@ def steps_from_avg(px: float, avg: float) -> int: logger.debug( "GridComposer produced NOOP plan for compose_id={}", context.compose_id ) - return [] + return ComposeResult(instructions=[], rationale="Grid NOOP") plan = LlmPlanProposal( ts=ts, @@ -270,4 +270,5 @@ def steps_from_avg(px: float, avg: float) -> int: rationale=f"Grid step={self._step_pct:.4f}, base_fraction={self._base_fraction:.3f}", ) # Reuse parent normalization: quantity filters, buying power, cap_factor, reduceOnly, etc. - return self._normalize_plan(context, plan) + normalized = self._normalize_plan(context, plan) + return ComposeResult(instructions=normalized, rationale=plan.rationale) diff --git a/python/valuecell/agents/grid_agent/grid_agent.py b/python/valuecell/agents/grid_agent/grid_agent.py index 6820c5e05..929fda8d8 100644 --- a/python/valuecell/agents/grid_agent/grid_agent.py +++ b/python/valuecell/agents/grid_agent/grid_agent.py @@ -5,18 +5,21 @@ - Rule-based decision composer `GridComposer` Usage: - from valuecell.agents.strategy_agent.grid_agent import GridStrategyAgent + from valuecell.agents.grid_agent.grid_agent import GridStrategyAgent agent = GridStrategyAgent() await agent.stream(request) """ from __future__ import annotations -from .agent import BaseStrategyAgent -from .decision.grid_composer import GridComposer -from .decision.interfaces import Composer -from .features.pipeline import DefaultFeaturesPipeline, FeaturesPipeline -from .models import UserRequest +from valuecell.agents.common.trading.base_agent import BaseStrategyAgent +from valuecell.agents.common.trading.decision.grid_composer.grid_composer import ( + GridComposer, +) +from valuecell.agents.common.trading.decision.interfaces import BaseComposer +from valuecell.agents.common.trading.features.interfaces import BaseFeaturesPipeline +from 
valuecell.agents.common.trading.features.pipeline import DefaultFeaturesPipeline +from valuecell.agents.common.trading.models import UserRequest class GridStrategyAgent(BaseStrategyAgent): @@ -27,10 +30,12 @@ class GridStrategyAgent(BaseStrategyAgent): add long on down moves; reduce on reversals. """ - def _build_features_pipeline(self, request: UserRequest) -> FeaturesPipeline | None: + def _build_features_pipeline( + self, request: UserRequest + ) -> BaseFeaturesPipeline | None: return DefaultFeaturesPipeline.from_request(request) - def _create_decision_composer(self, request: UserRequest) -> Composer | None: + def _create_decision_composer(self, request: UserRequest) -> BaseComposer | None: # Adjust step_pct / max_steps / base_fraction as needed return GridComposer( request=request, From 89912fa0dcc5a747619748c66f9e361483e2a3e4 Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:16:21 +0800 Subject: [PATCH 10/15] refactor: update local_agent_class path in grid_agent.json and remove unused trading history components --- python/configs/agent_cards/grid_agent.json | 2 +- .../trading/trading_history/__init__.py | 12 ----------- .../trading/trading_history/recorder.py | 21 ------------------- 3 files changed, 1 insertion(+), 34 deletions(-) delete mode 100644 python/valuecell/agents/common/trading/trading_history/__init__.py delete mode 100644 python/valuecell/agents/common/trading/trading_history/recorder.py diff --git a/python/configs/agent_cards/grid_agent.json b/python/configs/agent_cards/grid_agent.json index 286bee999..107a24f6c 100644 --- a/python/configs/agent_cards/grid_agent.json +++ b/python/configs/agent_cards/grid_agent.json @@ -14,6 +14,6 @@ "author": "ValueCell Team", "tags": ["strategy", "trading", "llm", "demo"], "notes": "This card is a lightweight example; replace model api_key and tune parameters for production use.", - "local_agent_class": 
"valuecell.agents.strategy_agent.grid_agent:GridStrategyAgent" + "local_agent_class": "valuecell.agents.grid_agent.grid_agent:GridStrategyAgent" } } diff --git a/python/valuecell/agents/common/trading/trading_history/__init__.py b/python/valuecell/agents/common/trading/trading_history/__init__.py deleted file mode 100644 index c005f905f..000000000 --- a/python/valuecell/agents/common/trading/trading_history/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Trading history recording and digest building.""" - -from .digest import RollingDigestBuilder -from .interfaces import BaseDigestBuilder, BaseHistoryRecorder -from .recorder import InMemoryHistoryRecorder - -__all__ = [ - "InMemoryHistoryRecorder", - "RollingDigestBuilder", - "BaseHistoryRecorder", - "BaseDigestBuilder", -] diff --git a/python/valuecell/agents/common/trading/trading_history/recorder.py b/python/valuecell/agents/common/trading/trading_history/recorder.py deleted file mode 100644 index 2578253ea..000000000 --- a/python/valuecell/agents/common/trading/trading_history/recorder.py +++ /dev/null @@ -1,21 +0,0 @@ -from typing import List - -from valuecell.agents.common.trading.models import HistoryRecord - -from .interfaces import BaseHistoryRecorder - - -class InMemoryHistoryRecorder(BaseHistoryRecorder): - """In-memory recorder storing history records.""" - - def __init__(self, history_limit: int = 200) -> None: - self.records: List[HistoryRecord] = [] - self.history_limit = history_limit - - def record(self, record: HistoryRecord) -> None: - self.records.append(record) - if len(self.records) > self.history_limit: - self.records = self.records[-self.history_limit :] - - def get_records(self) -> List[HistoryRecord]: - return self.records From 84f90319356063896ab35f9c0309a3f4364e6c80 Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:17:33 +0800 Subject: [PATCH 11/15] refactor: update import statement for constants in models.py --- 
python/valuecell/agents/common/trading/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/valuecell/agents/common/trading/models.py b/python/valuecell/agents/common/trading/models.py index c2c05de6d..2a58de02d 100644 --- a/python/valuecell/agents/common/trading/models.py +++ b/python/valuecell/agents/common/trading/models.py @@ -4,7 +4,7 @@ from pydantic import BaseModel, Field, field_validator, model_validator -from valuecell.agents.common.trading.constants import ( +from .constants import ( DEFAULT_AGENT_MODEL, DEFAULT_CAP_FACTOR, DEFAULT_INITIAL_CAPITAL, From 884cd1ba49f8702c77dce82517a820f731e14d3f Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:41:32 +0800 Subject: [PATCH 12/15] refactor trading decision models and utilities --- .../decision/grid_composer/grid_composer.py | 40 +- .../common/trading/decision/interfaces.py | 534 ++++++++++++++- .../trading/decision/prompt_based/composer.py | 646 +----------------- .../valuecell/agents/common/trading/models.py | 46 +- .../valuecell/agents/common/trading/utils.py | 75 ++ 5 files changed, 664 insertions(+), 677 deletions(-) diff --git a/python/valuecell/agents/common/trading/decision/grid_composer/grid_composer.py b/python/valuecell/agents/common/trading/decision/grid_composer/grid_composer.py index 8a8d712a8..eaab8e8d0 100644 --- a/python/valuecell/agents/common/trading/decision/grid_composer/grid_composer.py +++ b/python/valuecell/agents/common/trading/decision/grid_composer/grid_composer.py @@ -9,20 +9,20 @@ ComposeContext, ComposeResult, InstrumentRef, - LlmDecisionAction, - LlmDecisionItem, - LlmPlanProposal, MarketType, + TradeDecisionAction, + TradeDecisionItem, + TradePlanProposal, UserRequest, ) -from ..prompt_based.composer import LlmComposer +from ..interfaces import BaseComposer -class GridComposer(LlmComposer): +class GridComposer(BaseComposer): """Rule-based grid strategy composer. 
Goal: avoid LLM usage by applying simple mean-reversion grid rules to - produce an `LlmPlanProposal`, then reuse the parent normalization and + produce an `TradePlanProposal`, then reuse the parent normalization and risk controls (`_normalize_plan`) to output executable `TradeInstruction`s. Key rules: @@ -60,7 +60,7 @@ async def compose(self, context: ComposeContext) -> ComposeResult: self._init_buying_power_context(context) ) - items: List[LlmDecisionItem] = [] + items: List[TradeDecisionItem] = [] ts = int(context.ts) # Pre-fetch micro change percentage from features (prefer 1s, fallback 1m) @@ -118,12 +118,12 @@ def steps_from_avg(px: float, avg: float) -> int: if chg <= -self._step_pct: # Short-term drop → open long items.append( - LlmDecisionItem( + TradeDecisionItem( instrument=InstrumentRef( symbol=symbol, exchange_id=self._request.exchange_config.exchange_id, ), - action=LlmDecisionAction.OPEN_LONG, + action=TradeDecisionAction.OPEN_LONG, target_qty=base_qty, leverage=( 1.0 @@ -146,12 +146,12 @@ def steps_from_avg(px: float, avg: float) -> int: elif (not is_spot) and chg >= self._step_pct: # Short-term rise → open short (perpetual only) items.append( - LlmDecisionItem( + TradeDecisionItem( instrument=InstrumentRef( symbol=symbol, exchange_id=self._request.exchange_config.exchange_id, ), - action=LlmDecisionAction.OPEN_SHORT, + action=TradeDecisionAction.OPEN_SHORT, target_qty=base_qty, leverage=min( float(self._request.trading_config.max_leverage or 1.0), @@ -180,12 +180,12 @@ def steps_from_avg(px: float, avg: float) -> int: up = (avg_px > 0) and (price >= avg_px * (1.0 + self._step_pct)) if down: items.append( - LlmDecisionItem( + TradeDecisionItem( instrument=InstrumentRef( symbol=symbol, exchange_id=self._request.exchange_config.exchange_id, ), - action=LlmDecisionAction.OPEN_LONG, + action=TradeDecisionAction.OPEN_LONG, target_qty=base_qty * k, leverage=1.0 if is_spot @@ -203,12 +203,12 @@ def steps_from_avg(px: float, avg: float) -> int: ) elif up: 
items.append( - LlmDecisionItem( + TradeDecisionItem( instrument=InstrumentRef( symbol=symbol, exchange_id=self._request.exchange_config.exchange_id, ), - action=LlmDecisionAction.CLOSE_LONG, + action=TradeDecisionAction.CLOSE_LONG, target_qty=min(abs(qty), base_qty * k), leverage=1.0, confidence=min(1.0, k / float(self._max_steps)), @@ -223,12 +223,12 @@ def steps_from_avg(px: float, avg: float) -> int: down = (avg_px > 0) and (price <= avg_px * (1.0 - self._step_pct)) if up and (not is_spot): items.append( - LlmDecisionItem( + TradeDecisionItem( instrument=InstrumentRef( symbol=symbol, exchange_id=self._request.exchange_config.exchange_id, ), - action=LlmDecisionAction.OPEN_SHORT, + action=TradeDecisionAction.OPEN_SHORT, target_qty=base_qty * k, leverage=min( float(self._request.trading_config.max_leverage or 1.0), @@ -244,12 +244,12 @@ def steps_from_avg(px: float, avg: float) -> int: ) elif down: items.append( - LlmDecisionItem( + TradeDecisionItem( instrument=InstrumentRef( symbol=symbol, exchange_id=self._request.exchange_config.exchange_id, ), - action=LlmDecisionAction.CLOSE_SHORT, + action=TradeDecisionAction.CLOSE_SHORT, target_qty=min(abs(qty), base_qty * k), leverage=1.0, confidence=min(1.0, k / float(self._max_steps)), @@ -264,7 +264,7 @@ def steps_from_avg(px: float, avg: float) -> int: ) return ComposeResult(instructions=[], rationale="Grid NOOP") - plan = LlmPlanProposal( + plan = TradePlanProposal( ts=ts, items=items, rationale=f"Grid step={self._step_pct:.4f}, base_fraction={self._base_fraction:.3f}", diff --git a/python/valuecell/agents/common/trading/decision/interfaces.py b/python/valuecell/agents/common/trading/decision/interfaces.py index bb1752094..0fc787aee 100644 --- a/python/valuecell/agents/common/trading/decision/interfaces.py +++ b/python/valuecell/agents/common/trading/decision/interfaces.py @@ -1,11 +1,22 @@ from __future__ import annotations +import math from abc import ABC, abstractmethod +from typing import Dict, List, Optional 
-from valuecell.agents.common.trading.models import ( +from loguru import logger + +from ..models import ( ComposeContext, ComposeResult, + Constraints, + TradeDecisionAction, + TradePlanProposal, + MarketType, + TradeInstruction, + TradeSide, ) +from ..utils import extract_price_map # Contracts for decision making (module-local abstract interfaces). # Composer hosts the LLM call and guardrails, producing executable instructions. @@ -27,3 +38,524 @@ async def compose(self, context: ComposeContext) -> ComposeResult: a validated ComposeResult containing instructions and optional rationale. """ raise NotImplementedError + + def _init_buying_power_context( + self, + context: ComposeContext, + ) -> tuple: + """Initialize buying power tracking context. + + Returns: + (equity, allowed_lev, constraints, projected_gross, price_map) + """ + constraints = context.portfolio.constraints or Constraints( + max_positions=self._request.trading_config.max_positions, + max_leverage=self._request.trading_config.max_leverage, + ) + + # Compute equity based on market type: + if self._request.exchange_config.market_type == MarketType.SPOT: + # Spot: use available account_balance as equity + equity = float(context.portfolio.account_balance or 0.0) + else: + # Derivatives: use portfolio equity (account_balance + net exposure), or total_value if provided + if getattr(context.portfolio, "total_value", None) is not None: + equity = float(context.portfolio.total_value or 0.0) + else: + account_balance = float(context.portfolio.account_balance or 0.0) + net = float(context.portfolio.net_exposure or 0.0) + equity = account_balance + net + + # Market-type leverage policy: SPOT -> 1.0; Derivatives -> constraints + if self._request.exchange_config.market_type == MarketType.SPOT: + allowed_lev = 1.0 + else: + allowed_lev = ( + float(constraints.max_leverage) + if constraints.max_leverage is not None + else 1.0 + ) + + # Initialize projected gross exposure + price_map = 
extract_price_map(context.features) + if getattr(context.portfolio, "gross_exposure", None) is not None: + projected_gross = float(context.portfolio.gross_exposure or 0.0) + else: + projected_gross = 0.0 + for sym, snap in context.portfolio.positions.items(): + px = float( + price_map.get(sym) or getattr(snap, "mark_price", 0.0) or 0.0 + ) + projected_gross += abs(float(snap.quantity)) * px + + return equity, allowed_lev, constraints, projected_gross, price_map + + def _normalize_quantity( + self, + symbol: str, + quantity: float, + side: TradeSide, + current_qty: float, + constraints: Constraints, + equity: float, + allowed_lev: float, + projected_gross: float, + price_map: Dict[str, float], + ) -> tuple: + """Normalize quantity through all guardrails: filters, caps, and buying power. + + Returns: + (final_qty, consumed_buying_power_delta) + """ + qty = quantity + + # Step 1: per-order filters (step size, min notional, max order qty) + logger.debug(f"_normalize_quantity Step 1: {symbol} qty={qty} before filters") + qty = self._apply_quantity_filters( + symbol, + qty, + float(constraints.quantity_step or 0.0), + float(constraints.min_trade_qty or 0.0), + constraints.max_order_qty, + constraints.min_notional, + price_map, + ) + logger.debug(f"_normalize_quantity Step 1: {symbol} qty={qty} after filters") + + if qty <= self._quantity_precision: + logger.warning( + f"Post-filter quantity for {symbol} is {qty} <= precision {self._quantity_precision} -> returning 0" + ) + return 0.0, 0.0 + + # Step 2: notional/leverage cap (Phase 1 rules) + price = price_map.get(symbol) + if price is not None and price > 0: + # cap_factor controls how aggressively we allow position sizing by notional. + # Make it configurable via trading_config.cap_factor (strategy parameter). 
+ cap_factor = float(self._request.trading_config.cap_factor or 1.5) + if constraints.quantity_step and constraints.quantity_step > 0: + cap_factor = max(cap_factor, 1.5) + + allowed_lev_cap = ( + allowed_lev if math.isfinite(allowed_lev) else float("inf") + ) + max_abs_by_factor = (cap_factor * equity) / float(price) + max_abs_by_lev = (allowed_lev_cap * equity) / float(price) + max_abs_final = min(max_abs_by_factor, max_abs_by_lev) + + desired_final = current_qty + (qty if side is TradeSide.BUY else -qty) + if math.isfinite(max_abs_final) and abs(desired_final) > max_abs_final: + target_abs = max_abs_final + new_qty = max(0.0, target_abs - abs(current_qty)) + if new_qty < qty: + logger.debug( + "Capping {} qty due to notional/leverage (price={}, cap_factor={}, old_qty={}, new_qty={})", + symbol, + price, + cap_factor, + qty, + new_qty, + ) + qty = new_qty + + if qty <= self._quantity_precision: + logger.debug( + "Post-cap quantity for {} is {} <= precision {} -> skipping", + symbol, + qty, + self._quantity_precision, + ) + return 0.0, 0.0 + + # Step 3: buying power clamp + px = price_map.get(symbol) + if px is None or px <= 0: + # Without a valid price, we cannot safely assess notional or buying power. + # Allow only de-risking (reductions/closures); block new/exposure-increasing trades. 
+ is_reduction = (side is TradeSide.BUY and current_qty < 0) or ( + side is TradeSide.SELL and current_qty > 0 + ) + if is_reduction: + # Clamp to the current absolute position to avoid overshooting zero + final_qty = min(qty, abs(current_qty)) + logger.warning( + "Missing price for {} — allowing reduce-only trade: final_qty={} (current_qty={})", + symbol, + final_qty, + current_qty, + ) + else: + logger.warning( + "Missing price for {} — blocking exposure-increasing trade (side={}, qty={})", + symbol, + side, + qty, + ) + return 0.0, 0.0 + else: + if self._request.exchange_config.market_type == MarketType.SPOT: + # Spot: cash-only buying power + avail_bp = max(0.0, equity) + else: + # Derivatives: margin-based buying power + avail_bp = max(0.0, equity * allowed_lev - projected_gross) + # When buying power is exhausted, we should still allow reductions/closures. + # Set additional purchasable units to 0 but proceed with piecewise logic + # so that de-risking trades are not blocked. + a = abs(current_qty) + # Conservative buffer for expected slippage: assume execution price may move + # against us by `self._default_slippage_bps`. Use a higher effective price + # when computing how many units fit into available buying power so that + # planned increases don't accidentally exceed real-world costs. 
+ slip_bps = float(self._default_slippage_bps or 0.0) + slip = slip_bps / 10000.0 + effective_px = float(px) * (1.0 + slip) + ap_units = (avail_bp / effective_px) if avail_bp > 0 else 0.0 + + # Piecewise: additional gross consumption must fit into available BP + if side is TradeSide.BUY: + if current_qty >= 0: + q_allowed = ap_units + else: + if qty <= 2 * a: + q_allowed = qty + else: + q_allowed = 2 * a + ap_units + else: # SELL + if current_qty <= 0: + q_allowed = ap_units + else: + if qty <= 2 * a: + q_allowed = qty + else: + q_allowed = 2 * a + ap_units + + final_qty = max(0.0, min(qty, q_allowed)) + + if final_qty <= self._quantity_precision: + logger.debug( + "Post-buying-power quantity for {} is {} <= precision {} -> skipping", + symbol, + final_qty, + self._quantity_precision, + ) + return 0.0, 0.0 + + # Compute consumed buying power delta + abs_before = abs(current_qty) + abs_after = abs( + current_qty + (final_qty if side is TradeSide.BUY else -final_qty) + ) + delta_abs = abs_after - abs_before + # Use effective price (with slippage) for consumed buying power to stay conservative + # If px was missing, we would have returned earlier for exposure-increasing trades; + # for reduction-only trades, treat consumed buying power as 0. 
+ if px is None or px <= 0: + consumed_bp_delta = 0.0 + else: + # Recompute effective price consistently with the clamp + slip_bps = float(self._default_slippage_bps or 0.0) + slip = slip_bps / 10000.0 + effective_px = float(px) * (1.0 + slip) + consumed_bp_delta = (delta_abs * effective_px) if delta_abs > 0 else 0.0 + + return final_qty, consumed_bp_delta + + def _normalize_plan( + self, + context: ComposeContext, + plan: TradePlanProposal, + ) -> List[TradeInstruction]: + instructions: List[TradeInstruction] = [] + + # --- prepare state --- + projected_positions: Dict[str, float] = { + symbol: snapshot.quantity + for symbol, snapshot in context.portfolio.positions.items() + } + + def _count_active(pos_map: Dict[str, float]) -> int: + return sum(1 for q in pos_map.values() if abs(q) > self._quantity_precision) + + active_positions = _count_active(projected_positions) + + # Initialize buying power context + equity, allowed_lev, constraints, projected_gross, price_map = ( + self._init_buying_power_context(context) + ) + + max_positions = constraints.max_positions + max_position_qty = constraints.max_position_qty + + # --- process each planned item --- + for idx, item in enumerate(plan.items): + symbol = item.instrument.symbol + current_qty = projected_positions.get(symbol, 0.0) + + # determine the intended target quantity (clamped by max_position_qty) + target_qty = self._resolve_target_quantity( + item, current_qty, max_position_qty + ) + # SPOT long-only: do not allow negative target quantities + if ( + self._request.exchange_config.market_type == MarketType.SPOT + and target_qty < 0 + ): + target_qty = 0.0 + # Enforce: single-lot per symbol and no direct flip. If target flips side, + # split into two sub-steps: first flat to 0, then open to target side. 
+ sub_targets: List[float] = [] + if current_qty * target_qty < 0: + sub_targets = [0.0, float(target_qty)] + else: + sub_targets = [float(target_qty)] + + local_current = float(current_qty) + for sub_i, sub_target in enumerate(sub_targets): + delta = sub_target - local_current + + if abs(delta) <= self._quantity_precision: + continue + + is_new_position = ( + abs(local_current) <= self._quantity_precision + and abs(sub_target) > self._quantity_precision + ) + if ( + is_new_position + and max_positions is not None + and active_positions >= int(max_positions) + ): + logger.warning( + "Skipping symbol {} due to max_positions constraint (active={} max={})", + symbol, + active_positions, + max_positions, + ) + continue + + side = TradeSide.BUY if delta > 0 else TradeSide.SELL + # requested leverage (default 1.0), clamped to constraints + requested_lev = ( + float(item.leverage) + if getattr(item, "leverage", None) is not None + else 1.0 + ) + allowed_lev_item = ( + float(constraints.max_leverage) + if constraints.max_leverage is not None + else requested_lev + ) + if self._request.exchange_config.market_type == MarketType.SPOT: + # Spot: long-only, no leverage + final_leverage = 1.0 + else: + final_leverage = max(1.0, min(requested_lev, allowed_lev_item)) + quantity = abs(delta) + + # Normalize quantity through all guardrails + logger.debug(f"Before normalize: {symbol} quantity={quantity}") + quantity, consumed_bp = self._normalize_quantity( + symbol, + quantity, + side, + local_current, + constraints, + equity, + allowed_lev, + projected_gross, + price_map, + ) + logger.debug( + f"After normalize: {symbol} quantity={quantity}, consumed_bp={consumed_bp}" + ) + + if quantity <= self._quantity_precision: + logger.warning( + f"SKIPPED: {symbol} quantity={quantity} <= precision={self._quantity_precision} after normalization" + ) + continue + + # Update projected positions for subsequent guardrails + signed_delta = quantity if side is TradeSide.BUY else -quantity + 
projected_positions[symbol] = local_current + signed_delta + projected_gross += consumed_bp + + # active positions accounting + if is_new_position: + active_positions += 1 + if abs(projected_positions[symbol]) <= self._quantity_precision: + active_positions = max(active_positions - 1, 0) + + # Use a stable per-item sub-index to keep instruction ids unique + instr = self._create_instruction( + context, + idx * 10 + sub_i, + item, + symbol, + side, + quantity, + final_leverage, + local_current, + sub_target, + ) + instructions.append(instr) + + # advance local_current for the next sub-step + local_current = projected_positions[symbol] + + return instructions + + def _create_instruction( + self, + context: ComposeContext, + idx: int, + item, + symbol: str, + side: TradeSide, + quantity: float, + final_leverage: float, + current_qty: float, + target_qty: float, + ) -> TradeInstruction: + """Create a normalized TradeInstruction with metadata.""" + final_target = current_qty + (quantity if side is TradeSide.BUY else -quantity) + meta = { + "requested_target_qty": target_qty, + "current_qty": current_qty, + "final_target_qty": final_target, + "action": item.action.value, + } + if item.confidence is not None: + meta["confidence"] = item.confidence + if item.rationale: + meta["rationale"] = item.rationale + + # For derivatives/perpetual markets, mark reduceOnly when instruction reduces absolute exposure to avoid accidental reverse opens + try: + if self._request.exchange_config.market_type != MarketType.SPOT: + if abs(final_target) < abs(current_qty): + meta["reduceOnly"] = True + # Bybit uses a different param key + if ( + self._request.exchange_config.exchange_id or "" + ).lower() == "bybit": + meta["reduce_only"] = True + except Exception: + # Ignore any exception; do not block instruction creation + pass + + instruction = TradeInstruction( + instruction_id=f"{context.compose_id}:{symbol}:{idx}", + compose_id=context.compose_id, + instrument=item.instrument, + 
action=item.action, + side=side, + quantity=quantity, + leverage=final_leverage, + max_slippage_bps=self._default_slippage_bps, + meta=meta, + ) + logger.debug( + "Created TradeInstruction {} for {} side={} qty={} lev={}", + instruction.instruction_id, + symbol, + instruction.side, + instruction.quantity, + final_leverage, + ) + return instruction + + def _resolve_target_quantity( + self, + item, + current_qty: float, + max_position_qty: Optional[float], + ) -> float: + # NOOP: keep current position + if item.action == TradeDecisionAction.NOOP: + return current_qty + + # Interpret target_qty as operation magnitude (not final position), normalized to positive + mag = abs(float(item.target_qty)) + target = current_qty + + # Compute target position per open/close long/short action + if item.action == TradeDecisionAction.OPEN_LONG: + base = current_qty if current_qty > 0 else 0.0 + target = base + mag + elif item.action == TradeDecisionAction.OPEN_SHORT: + base = current_qty if current_qty < 0 else 0.0 + target = base - mag + elif item.action == TradeDecisionAction.CLOSE_LONG: + if current_qty > 0: + target = max(current_qty - mag, 0.0) + else: + # No long position, keep unchanged + target = current_qty + elif item.action == TradeDecisionAction.CLOSE_SHORT: + if current_qty < 0: + target = min(current_qty + mag, 0.0) + else: + # No short position, keep unchanged + target = current_qty + else: + # Fallback: treat unknown action as NOOP + target = current_qty + + # Clamp by max_position_qty (symmetric) + if max_position_qty is not None: + max_abs = abs(float(max_position_qty)) + target = max(-max_abs, min(max_abs, target)) + + return target + + def _apply_quantity_filters( + self, + symbol: str, + quantity: float, + quantity_step: float, + min_trade_qty: float, + max_order_qty: Optional[float], + min_notional: Optional[float], + price_map: Dict[str, float], + ) -> float: + qty = quantity + logger.debug(f"Filtering {symbol}: initial qty={qty}") + + if max_order_qty is not 
None: + qty = min(qty, float(max_order_qty)) + logger.debug(f"After max_order_qty filter: qty={qty}") + + if quantity_step > 0: + qty = math.floor(qty / quantity_step) * quantity_step + logger.debug(f"After quantity_step filter: qty={qty}") + + if qty <= 0: + logger.warning(f"FILTERED: {symbol} qty={qty} <= 0") + return 0.0 + + if qty < min_trade_qty: + logger.warning( + f"FILTERED: {symbol} qty={qty} < min_trade_qty={min_trade_qty}" + ) + return 0.0 + + if min_notional is not None: + price = price_map.get(symbol) + if price is None: + logger.warning(f"FILTERED: {symbol} no price reference available") + return 0.0 + notional = qty * price + if notional < float(min_notional): + logger.warning( + f"FILTERED: {symbol} notional={notional:.4f} < min_notional={min_notional}" + ) + return 0.0 + logger.debug( + f"Passed min_notional check: notional={notional:.4f} >= {min_notional}" + ) + + logger.debug(f"Final qty for {symbol}: {qty}") + return qty diff --git a/python/valuecell/agents/common/trading/decision/prompt_based/composer.py b/python/valuecell/agents/common/trading/decision/prompt_based/composer.py index 3cf0ea76d..a7698c6a5 100644 --- a/python/valuecell/agents/common/trading/decision/prompt_based/composer.py +++ b/python/valuecell/agents/common/trading/decision/prompt_based/composer.py @@ -1,9 +1,8 @@ from __future__ import annotations import json -import math from datetime import datetime, timezone -from typing import Dict, List, Optional +from typing import Dict from agno.agent import Agent as AgnoAgent from loguru import logger @@ -11,21 +10,17 @@ from valuecell.utils import env as env_utils from valuecell.utils import model as model_utils -from ...constants import FEATURE_GROUP_BY_KEY from ...models import ( ComposeContext, ComposeResult, - Constraints, - FeatureVector, - LlmDecisionAction, - LlmPlanProposal, - MarketType, - TradeInstruction, - TradeSide, + TradeDecisionAction, + TradePlanProposal, UserRequest, ) from ...utils import ( - extract_price_map, + 
extract_market_section, + group_features, + prune_none, send_discord_message, ) from ..interfaces import BaseComposer @@ -104,93 +99,6 @@ async def compose(self, context: ComposeContext) -> ComposeResult: return ComposeResult(instructions=normalized, rationale=plan.rationale) # ------------------------------------------------------------------ - # Prompt + LLM helpers - - @staticmethod - def _prune_none(obj): - """Recursively remove None, empty dict, and empty list values.""" - if isinstance(obj, dict): - pruned = { - k: LlmComposer._prune_none(v) for k, v in obj.items() if v is not None - } - return {k: v for k, v in pruned.items() if v not in (None, {}, [])} - if isinstance(obj, list): - pruned = [LlmComposer._prune_none(v) for v in obj] - return [v for v in pruned if v not in (None, {}, [])] - return obj - - def _extract_market_section(self, market_data: List[Dict]) -> Dict: - """Extract decision-critical metrics from market feature entries.""" - - compact: Dict[str, Dict] = {} - for item in market_data: - symbol = (item.get("instrument") or {}).get("symbol") - if not symbol: - continue - - values = item.get("values") or {} - entry: Dict[str, float] = {} - - for feature_key, alias in ( - ("price.last", "last"), - ("price.close", "close"), - ("price.open", "open"), - ("price.high", "high"), - ("price.low", "low"), - ("price.bid", "bid"), - ("price.ask", "ask"), - ("price.change_pct", "change_pct"), - ("price.volume", "volume"), - ): - if feature_key in values and values[feature_key] is not None: - entry[alias] = values[feature_key] - - if values.get("open_interest") is not None: - entry["open_interest"] = values["open_interest"] - - if values.get("funding.rate") is not None: - entry["funding_rate"] = values["funding.rate"] - if values.get("funding.mark_price") is not None: - entry["mark_price"] = values["funding.mark_price"] - - normalized = {k: v for k, v in entry.items() if v is not None} - if normalized: - compact[symbol] = normalized - - return compact - - 
def _organize_features(self, features: List[FeatureVector]) -> Dict: - """Organize features by grouping metadata and trim payload noise. - - Prefers the FeatureVector.meta group_by_key when present, otherwise - falls back to the interval tag. This allows callers to introduce - ad-hoc groupings (e.g., market snapshots) without overloading the - interval field. - """ - grouped: Dict[str, List] = {} - - for fv in features: - data = fv.model_dump(mode="json") - meta = data.get("meta") or {} - group_key = meta.get(FEATURE_GROUP_BY_KEY) - - if not group_key: - continue - - # Keep only concise metadata helpful for the LLM prompt. - trimmed_meta = {} - if meta.get("interval"): - trimmed_meta["interval"] = meta["interval"] - if meta.get("count") is not None: - trimmed_meta["count"] = meta["count"] - if trimmed_meta: - data["meta"] = trimmed_meta - else: - data.pop("meta", None) - - grouped.setdefault(group_key, []).append(data) - - return grouped def _build_summary(self, context: ComposeContext) -> Dict: """Build portfolio summary with risk metrics.""" @@ -223,8 +131,8 @@ def _build_llm_prompt(self, context: ComposeContext) -> str: # Build components summary = self._build_summary(context) - features = self._organize_features(context.features) - market = self._extract_market_section(features.get("market_snapshot", [])) + features = group_features(context.features) + market = extract_market_section(features.get("market_snapshot", [])) # Portfolio positions positions = [ @@ -245,7 +153,7 @@ def _build_llm_prompt(self, context: ComposeContext) -> str: else {} ) - payload = self._prune_none( + payload = prune_none( { "strategy_prompt": self._build_prompt_text(), "summary": summary, @@ -266,7 +174,7 @@ def _build_llm_prompt(self, context: ComposeContext) -> str: return f"{instructions}\n\nContext:\n{json.dumps(payload, ensure_ascii=False)}" - async def _call_llm(self, prompt: str) -> LlmPlanProposal: + async def _call_llm(self, prompt: str) -> TradePlanProposal: """Invoke an LLM 
asynchronously and parse the response into LlmPlanProposal. This implementation follows the parser_agent pattern: it creates a model @@ -286,7 +194,7 @@ async def _call_llm(self, prompt: str) -> LlmPlanProposal: # Wrap model in an Agent (consistent with parser_agent usage) agent = AgnoAgent( model=model, - output_schema=LlmPlanProposal, + output_schema=TradePlanProposal, markdown=False, instructions=[SYSTEM_PROMPT], use_json_mode=model_utils.model_should_use_json_mode(model), @@ -297,17 +205,17 @@ async def _call_llm(self, prompt: str) -> LlmPlanProposal: content = getattr(response, "content", None) or response logger.debug("Received LLM response {}", content) # If the agent already returned a validated model, return it directly - if isinstance(content, LlmPlanProposal): + if isinstance(content, TradePlanProposal): return content logger.error("LLM output failed validation: {}", content) - return LlmPlanProposal( + return TradePlanProposal( ts=int(datetime.now(timezone.utc).timestamp() * 1000), items=[], rationale="LLM output failed validation", ) - async def _send_plan_to_discord(self, plan: LlmPlanProposal) -> None: + async def _send_plan_to_discord(self, plan: TradePlanProposal) -> None: """Send plan rationale to Discord when there are actionable items. Behavior: @@ -317,7 +225,7 @@ async def _send_plan_to_discord(self, plan: LlmPlanProposal) -> None: - Reads webhook from `STRATEGY_AGENT_DISCORD_WEBHOOK_URL` (handled by `send_discord_message`). Does nothing if no actionable items exist. 
""" - actionable = [it for it in plan.items if it.action != LlmDecisionAction.NOOP] + actionable = [it for it in plan.items if it.action != TradeDecisionAction.NOOP] if not actionable: return @@ -354,527 +262,3 @@ async def _send_plan_to_discord(self, plan: LlmPlanProposal) -> None: ) except Exception as exc: logger.warning("Error sending plan to Discord, err={}", exc) - - # ------------------------------------------------------------------ - # Normalization / guardrails helpers - - def _init_buying_power_context( - self, - context: ComposeContext, - ) -> tuple: - """Initialize buying power tracking context. - - Returns: - (equity, allowed_lev, constraints, projected_gross, price_map) - """ - constraints = context.portfolio.constraints or Constraints( - max_positions=self._request.trading_config.max_positions, - max_leverage=self._request.trading_config.max_leverage, - ) - - # Compute equity based on market type: - if self._request.exchange_config.market_type == MarketType.SPOT: - # Spot: use available account_balance as equity - equity = float(context.portfolio.account_balance or 0.0) - else: - # Derivatives: use portfolio equity (account_balance + net exposure), or total_value if provided - if getattr(context.portfolio, "total_value", None) is not None: - equity = float(context.portfolio.total_value or 0.0) - else: - account_balance = float(context.portfolio.account_balance or 0.0) - net = float(context.portfolio.net_exposure or 0.0) - equity = account_balance + net - - # Market-type leverage policy: SPOT -> 1.0; Derivatives -> constraints - if self._request.exchange_config.market_type == MarketType.SPOT: - allowed_lev = 1.0 - else: - allowed_lev = ( - float(constraints.max_leverage) - if constraints.max_leverage is not None - else 1.0 - ) - - # Initialize projected gross exposure - price_map = extract_price_map(context.features) - if getattr(context.portfolio, "gross_exposure", None) is not None: - projected_gross = float(context.portfolio.gross_exposure or 0.0) 
- else: - projected_gross = 0.0 - for sym, snap in context.portfolio.positions.items(): - px = float( - price_map.get(sym) or getattr(snap, "mark_price", 0.0) or 0.0 - ) - projected_gross += abs(float(snap.quantity)) * px - - return equity, allowed_lev, constraints, projected_gross, price_map - - def _normalize_quantity( - self, - symbol: str, - quantity: float, - side: TradeSide, - current_qty: float, - constraints: Constraints, - equity: float, - allowed_lev: float, - projected_gross: float, - price_map: Dict[str, float], - ) -> tuple: - """Normalize quantity through all guardrails: filters, caps, and buying power. - - Returns: - (final_qty, consumed_buying_power_delta) - """ - qty = quantity - - # Step 1: per-order filters (step size, min notional, max order qty) - logger.debug(f"_normalize_quantity Step 1: {symbol} qty={qty} before filters") - qty = self._apply_quantity_filters( - symbol, - qty, - float(constraints.quantity_step or 0.0), - float(constraints.min_trade_qty or 0.0), - constraints.max_order_qty, - constraints.min_notional, - price_map, - ) - logger.debug(f"_normalize_quantity Step 1: {symbol} qty={qty} after filters") - - if qty <= self._quantity_precision: - logger.warning( - f"Post-filter quantity for {symbol} is {qty} <= precision {self._quantity_precision} -> returning 0" - ) - return 0.0, 0.0 - - # Step 2: notional/leverage cap (Phase 1 rules) - price = price_map.get(symbol) - if price is not None and price > 0: - # cap_factor controls how aggressively we allow position sizing by notional. - # Make it configurable via trading_config.cap_factor (strategy parameter). 
- cap_factor = float(self._request.trading_config.cap_factor or 1.5) - if constraints.quantity_step and constraints.quantity_step > 0: - cap_factor = max(cap_factor, 1.5) - - allowed_lev_cap = ( - allowed_lev if math.isfinite(allowed_lev) else float("inf") - ) - max_abs_by_factor = (cap_factor * equity) / float(price) - max_abs_by_lev = (allowed_lev_cap * equity) / float(price) - max_abs_final = min(max_abs_by_factor, max_abs_by_lev) - - desired_final = current_qty + (qty if side is TradeSide.BUY else -qty) - if math.isfinite(max_abs_final) and abs(desired_final) > max_abs_final: - target_abs = max_abs_final - new_qty = max(0.0, target_abs - abs(current_qty)) - if new_qty < qty: - logger.debug( - "Capping {} qty due to notional/leverage (price={}, cap_factor={}, old_qty={}, new_qty={})", - symbol, - price, - cap_factor, - qty, - new_qty, - ) - qty = new_qty - - if qty <= self._quantity_precision: - logger.debug( - "Post-cap quantity for {} is {} <= precision {} -> skipping", - symbol, - qty, - self._quantity_precision, - ) - return 0.0, 0.0 - - # Step 3: buying power clamp - px = price_map.get(symbol) - if px is None or px <= 0: - # Without a valid price, we cannot safely assess notional or buying power. - # Allow only de-risking (reductions/closures); block new/exposure-increasing trades. 
- is_reduction = (side is TradeSide.BUY and current_qty < 0) or ( - side is TradeSide.SELL and current_qty > 0 - ) - if is_reduction: - # Clamp to the current absolute position to avoid overshooting zero - final_qty = min(qty, abs(current_qty)) - logger.warning( - "Missing price for {} — allowing reduce-only trade: final_qty={} (current_qty={})", - symbol, - final_qty, - current_qty, - ) - else: - logger.warning( - "Missing price for {} — blocking exposure-increasing trade (side={}, qty={})", - symbol, - side, - qty, - ) - return 0.0, 0.0 - else: - if self._request.exchange_config.market_type == MarketType.SPOT: - # Spot: cash-only buying power - avail_bp = max(0.0, equity) - else: - # Derivatives: margin-based buying power - avail_bp = max(0.0, equity * allowed_lev - projected_gross) - # When buying power is exhausted, we should still allow reductions/closures. - # Set additional purchasable units to 0 but proceed with piecewise logic - # so that de-risking trades are not blocked. - a = abs(current_qty) - # Conservative buffer for expected slippage: assume execution price may move - # against us by `self._default_slippage_bps`. Use a higher effective price - # when computing how many units fit into available buying power so that - # planned increases don't accidentally exceed real-world costs. 
- slip_bps = float(self._default_slippage_bps or 0.0) - slip = slip_bps / 10000.0 - effective_px = float(px) * (1.0 + slip) - ap_units = (avail_bp / effective_px) if avail_bp > 0 else 0.0 - - # Piecewise: additional gross consumption must fit into available BP - if side is TradeSide.BUY: - if current_qty >= 0: - q_allowed = ap_units - else: - if qty <= 2 * a: - q_allowed = qty - else: - q_allowed = 2 * a + ap_units - else: # SELL - if current_qty <= 0: - q_allowed = ap_units - else: - if qty <= 2 * a: - q_allowed = qty - else: - q_allowed = 2 * a + ap_units - - final_qty = max(0.0, min(qty, q_allowed)) - - if final_qty <= self._quantity_precision: - logger.debug( - "Post-buying-power quantity for {} is {} <= precision {} -> skipping", - symbol, - final_qty, - self._quantity_precision, - ) - return 0.0, 0.0 - - # Compute consumed buying power delta - abs_before = abs(current_qty) - abs_after = abs( - current_qty + (final_qty if side is TradeSide.BUY else -final_qty) - ) - delta_abs = abs_after - abs_before - # Use effective price (with slippage) for consumed buying power to stay conservative - # If px was missing, we would have returned earlier for exposure-increasing trades; - # for reduction-only trades, treat consumed buying power as 0. 
- if px is None or px <= 0: - consumed_bp_delta = 0.0 - else: - # Recompute effective price consistently with the clamp - slip_bps = float(self._default_slippage_bps or 0.0) - slip = slip_bps / 10000.0 - effective_px = float(px) * (1.0 + slip) - consumed_bp_delta = (delta_abs * effective_px) if delta_abs > 0 else 0.0 - - return final_qty, consumed_bp_delta - - def _normalize_plan( - self, - context: ComposeContext, - plan: LlmPlanProposal, - ) -> List[TradeInstruction]: - instructions: List[TradeInstruction] = [] - - # --- prepare state --- - projected_positions: Dict[str, float] = { - symbol: snapshot.quantity - for symbol, snapshot in context.portfolio.positions.items() - } - - def _count_active(pos_map: Dict[str, float]) -> int: - return sum(1 for q in pos_map.values() if abs(q) > self._quantity_precision) - - active_positions = _count_active(projected_positions) - - # Initialize buying power context - equity, allowed_lev, constraints, projected_gross, price_map = ( - self._init_buying_power_context(context) - ) - - max_positions = constraints.max_positions - max_position_qty = constraints.max_position_qty - - # --- process each planned item --- - for idx, item in enumerate(plan.items): - symbol = item.instrument.symbol - current_qty = projected_positions.get(symbol, 0.0) - - # determine the intended target quantity (clamped by max_position_qty) - target_qty = self._resolve_target_quantity( - item, current_qty, max_position_qty - ) - # SPOT long-only: do not allow negative target quantities - if ( - self._request.exchange_config.market_type == MarketType.SPOT - and target_qty < 0 - ): - target_qty = 0.0 - # Enforce: single-lot per symbol and no direct flip. If target flips side, - # split into two sub-steps: first flat to 0, then open to target side. 
- sub_targets: List[float] = [] - if current_qty * target_qty < 0: - sub_targets = [0.0, float(target_qty)] - else: - sub_targets = [float(target_qty)] - - local_current = float(current_qty) - for sub_i, sub_target in enumerate(sub_targets): - delta = sub_target - local_current - - if abs(delta) <= self._quantity_precision: - continue - - is_new_position = ( - abs(local_current) <= self._quantity_precision - and abs(sub_target) > self._quantity_precision - ) - if ( - is_new_position - and max_positions is not None - and active_positions >= int(max_positions) - ): - logger.warning( - "Skipping symbol {} due to max_positions constraint (active={} max={})", - symbol, - active_positions, - max_positions, - ) - continue - - side = TradeSide.BUY if delta > 0 else TradeSide.SELL - # requested leverage (default 1.0), clamped to constraints - requested_lev = ( - float(item.leverage) - if getattr(item, "leverage", None) is not None - else 1.0 - ) - allowed_lev_item = ( - float(constraints.max_leverage) - if constraints.max_leverage is not None - else requested_lev - ) - if self._request.exchange_config.market_type == MarketType.SPOT: - # Spot: long-only, no leverage - final_leverage = 1.0 - else: - final_leverage = max(1.0, min(requested_lev, allowed_lev_item)) - quantity = abs(delta) - - # Normalize quantity through all guardrails - logger.debug(f"Before normalize: {symbol} quantity={quantity}") - quantity, consumed_bp = self._normalize_quantity( - symbol, - quantity, - side, - local_current, - constraints, - equity, - allowed_lev, - projected_gross, - price_map, - ) - logger.debug( - f"After normalize: {symbol} quantity={quantity}, consumed_bp={consumed_bp}" - ) - - if quantity <= self._quantity_precision: - logger.warning( - f"SKIPPED: {symbol} quantity={quantity} <= precision={self._quantity_precision} after normalization" - ) - continue - - # Update projected positions for subsequent guardrails - signed_delta = quantity if side is TradeSide.BUY else -quantity - 
projected_positions[symbol] = local_current + signed_delta - projected_gross += consumed_bp - - # active positions accounting - if is_new_position: - active_positions += 1 - if abs(projected_positions[symbol]) <= self._quantity_precision: - active_positions = max(active_positions - 1, 0) - - # Use a stable per-item sub-index to keep instruction ids unique - instr = self._create_instruction( - context, - idx * 10 + sub_i, - item, - symbol, - side, - quantity, - final_leverage, - local_current, - sub_target, - ) - instructions.append(instr) - - # advance local_current for the next sub-step - local_current = projected_positions[symbol] - - return instructions - - def _create_instruction( - self, - context: ComposeContext, - idx: int, - item, - symbol: str, - side: TradeSide, - quantity: float, - final_leverage: float, - current_qty: float, - target_qty: float, - ) -> TradeInstruction: - """Create a normalized TradeInstruction with metadata.""" - final_target = current_qty + (quantity if side is TradeSide.BUY else -quantity) - meta = { - "requested_target_qty": target_qty, - "current_qty": current_qty, - "final_target_qty": final_target, - "action": item.action.value, - } - if item.confidence is not None: - meta["confidence"] = item.confidence - if item.rationale: - meta["rationale"] = item.rationale - - # For derivatives/perpetual markets, mark reduceOnly when instruction reduces absolute exposure to avoid accidental reverse opens - try: - if self._request.exchange_config.market_type != MarketType.SPOT: - if abs(final_target) < abs(current_qty): - meta["reduceOnly"] = True - # Bybit uses a different param key - if ( - self._request.exchange_config.exchange_id or "" - ).lower() == "bybit": - meta["reduce_only"] = True - except Exception: - # Ignore any exception; do not block instruction creation - pass - - instruction = TradeInstruction( - instruction_id=f"{context.compose_id}:{symbol}:{idx}", - compose_id=context.compose_id, - instrument=item.instrument, - 
action=item.action, - side=side, - quantity=quantity, - leverage=final_leverage, - max_slippage_bps=self._default_slippage_bps, - meta=meta, - ) - logger.debug( - "Created TradeInstruction {} for {} side={} qty={} lev={}", - instruction.instruction_id, - symbol, - instruction.side, - instruction.quantity, - final_leverage, - ) - return instruction - - def _resolve_target_quantity( - self, - item, - current_qty: float, - max_position_qty: Optional[float], - ) -> float: - # NOOP: keep current position - if item.action == LlmDecisionAction.NOOP: - return current_qty - - # Interpret target_qty as operation magnitude (not final position), normalized to positive - mag = abs(float(item.target_qty)) - target = current_qty - - # Compute target position per open/close long/short action - if item.action == LlmDecisionAction.OPEN_LONG: - base = current_qty if current_qty > 0 else 0.0 - target = base + mag - elif item.action == LlmDecisionAction.OPEN_SHORT: - base = current_qty if current_qty < 0 else 0.0 - target = base - mag - elif item.action == LlmDecisionAction.CLOSE_LONG: - if current_qty > 0: - target = max(current_qty - mag, 0.0) - else: - # No long position, keep unchanged - target = current_qty - elif item.action == LlmDecisionAction.CLOSE_SHORT: - if current_qty < 0: - target = min(current_qty + mag, 0.0) - else: - # No short position, keep unchanged - target = current_qty - else: - # Fallback: treat unknown action as NOOP - target = current_qty - - # Clamp by max_position_qty (symmetric) - if max_position_qty is not None: - max_abs = abs(float(max_position_qty)) - target = max(-max_abs, min(max_abs, target)) - - return target - - def _apply_quantity_filters( - self, - symbol: str, - quantity: float, - quantity_step: float, - min_trade_qty: float, - max_order_qty: Optional[float], - min_notional: Optional[float], - price_map: Dict[str, float], - ) -> float: - qty = quantity - logger.debug(f"Filtering {symbol}: initial qty={qty}") - - if max_order_qty is not None: - 
qty = min(qty, float(max_order_qty)) - logger.debug(f"After max_order_qty filter: qty={qty}") - - if quantity_step > 0: - qty = math.floor(qty / quantity_step) * quantity_step - logger.debug(f"After quantity_step filter: qty={qty}") - - if qty <= 0: - logger.warning(f"FILTERED: {symbol} qty={qty} <= 0") - return 0.0 - - if qty < min_trade_qty: - logger.warning( - f"FILTERED: {symbol} qty={qty} < min_trade_qty={min_trade_qty}" - ) - return 0.0 - - if min_notional is not None: - price = price_map.get(symbol) - if price is None: - logger.warning(f"FILTERED: {symbol} no price reference available") - return 0.0 - notional = qty * price - if notional < float(min_notional): - logger.warning( - f"FILTERED: {symbol} notional={notional:.4f} < min_notional={min_notional}" - ) - return 0.0 - logger.debug( - f"Passed min_notional check: notional={notional:.4f} >= {min_notional}" - ) - - logger.debug(f"Final qty for {symbol}: {qty}") - return qty diff --git a/python/valuecell/agents/common/trading/models.py b/python/valuecell/agents/common/trading/models.py index 2a58de02d..b0e66c3c3 100644 --- a/python/valuecell/agents/common/trading/models.py +++ b/python/valuecell/agents/common/trading/models.py @@ -32,13 +32,13 @@ class TradeType(str, Enum): class TradeSide(str, Enum): """Low-level execution side (exchange primitive). - This remains distinct from `LlmDecisionAction` which encodes *intent* at a + This remains distinct from `TradeDecisionAction` which encodes *intent* at a position semantic level (open_long/close_short/etc). 
TradeSide is kept for: - direct mapping to exchange APIs that require BUY/SELL - conveying slippage/fee direction in execution records Removal consideration: if the pipeline fully normalizes around - LlmDecisionAction -> (final target delta), we can derive side on the fly: + TradeDecisionAction -> (final target delta), we can derive side on the fly: OPEN_LONG, CLOSE_SHORT -> BUY OPEN_SHORT, CLOSE_LONG -> SELL For now we keep it explicit to avoid recomputation and ease auditing. @@ -291,13 +291,6 @@ def _infer_market_type(cls, data): return values -# ========================= -# Minimal DTOs for Strategy Agent (LLM-driven composer, no StrategyHint) -# These DTOs define the data contract across modules following the -# simplified pipeline: data -> features -> composer(LLM+rules) -> execution -> history/digest. -# ========================= - - class InstrumentRef(BaseModel): """Identifies a tradable instrument. @@ -482,8 +475,8 @@ class PortfolioView(BaseModel): ) -class LlmDecisionAction(str, Enum): - """Position-oriented high-level actions produced by the LLM plan. +class TradeDecisionAction(str, Enum): + """Position-oriented high-level actions produced by the plan. Semantics: - OPEN_LONG: open/increase long; if currently short, flatten then open long @@ -501,7 +494,7 @@ class LlmDecisionAction(str, Enum): def derive_side_from_action( - action: Optional[LlmDecisionAction], + action: Optional[TradeDecisionAction], ) -> Optional["TradeSide"]: """Derive execution side (BUY/SELL) from a high-level action. 
@@ -509,16 +502,16 @@ def derive_side_from_action( """ if action is None: return None - if action in (LlmDecisionAction.OPEN_LONG, LlmDecisionAction.CLOSE_SHORT): + if action in (TradeDecisionAction.OPEN_LONG, TradeDecisionAction.CLOSE_SHORT): return TradeSide.BUY - if action in (LlmDecisionAction.OPEN_SHORT, LlmDecisionAction.CLOSE_LONG): + if action in (TradeDecisionAction.OPEN_SHORT, TradeDecisionAction.CLOSE_LONG): return TradeSide.SELL # NOOP or future adjust/cancel actions: no executable side return None -class LlmDecisionItem(BaseModel): - """LLM plan item. Interprets target_qty as operation size (magnitude). +class TradeDecisionItem(BaseModel): + """Trade plan item. Interprets target_qty as operation size (magnitude). Unlike the previous "final target position" semantics, target_qty here is the size to operate (same unit as position quantity). The composer @@ -526,7 +519,7 @@ class LlmDecisionItem(BaseModel): """ instrument: InstrumentRef - action: LlmDecisionAction + action: TradeDecisionAction target_qty: float = Field( ..., description="Operation size for this action (units), e.g., open/close long/short", @@ -543,11 +536,11 @@ class LlmDecisionItem(BaseModel): ) -class LlmPlanProposal(BaseModel): - """Structured LLM output before rule normalization.""" +class TradePlanProposal(BaseModel): + """Structured output before rule normalization.""" ts: int - items: List[LlmDecisionItem] = Field(default_factory=list) + items: List[TradeDecisionItem] = Field(default_factory=list) rationale: Optional[str] = Field( default=None, description="Optional natural language rationale" ) @@ -573,7 +566,7 @@ class TradeInstruction(BaseModel): ..., description="Decision cycle id to correlate instructions and history" ) instrument: InstrumentRef - action: Optional[LlmDecisionAction] = Field( + action: Optional[TradeDecisionAction] = Field( default=None, description="High-level intent action for dispatch ('open_long'|'open_short'|'close_long'|'close_short'|'noop')", ) @@ 
-607,12 +600,15 @@ def _validate_action_side_alignment(self): if act is None: return self try: - if act == LlmDecisionAction.NOOP: + if act == TradeDecisionAction.NOOP: # Composer should not emit NOOP instructions; tolerate in lenient mode return self - if act in (LlmDecisionAction.OPEN_LONG, LlmDecisionAction.CLOSE_SHORT): + if act in (TradeDecisionAction.OPEN_LONG, TradeDecisionAction.CLOSE_SHORT): expected = TradeSide.BUY - elif act in (LlmDecisionAction.OPEN_SHORT, LlmDecisionAction.CLOSE_LONG): + elif act in ( + TradeDecisionAction.OPEN_SHORT, + TradeDecisionAction.CLOSE_LONG, + ): expected = TradeSide.SELL else: return self @@ -684,7 +680,7 @@ class PortfolioValueSeries(BaseModel): class ComposeContext(BaseModel): - """Context assembled for the LLM-driven composer.""" + """Context assembled for the composer.""" ts: int compose_id: str = Field( diff --git a/python/valuecell/agents/common/trading/utils.py b/python/valuecell/agents/common/trading/utils.py index 07aa02293..82e56b4f2 100644 --- a/python/valuecell/agents/common/trading/utils.py +++ b/python/valuecell/agents/common/trading/utils.py @@ -242,3 +242,78 @@ async def send_discord_message( if raise_for_status: resp.raise_for_status() return resp.text + + +def prune_none(obj): + """Recursively remove None, empty dict, and empty list values.""" + if isinstance(obj, dict): + pruned = {k: prune_none(v) for k, v in obj.items() if v is not None} + return {k: v for k, v in pruned.items() if v not in (None, {}, [])} + if isinstance(obj, list): + pruned = [prune_none(v) for v in obj] + return [v for v in pruned if v not in (None, {}, [])] + return obj + + +def extract_market_section(market_data: List[Dict]) -> Dict: + """Extract decision-critical metrics from market feature entries.""" + + compact: Dict[str, Dict] = {} + for item in market_data: + symbol = (item.get("instrument") or {}).get("symbol") + if not symbol: + continue + + values = item.get("values") or {} + entry: Dict[str, float] = {} + + for 
feature_key, alias in ( + ("price.last", "last"), + ("price.close", "close"), + ("price.open", "open"), + ("price.high", "high"), + ("price.low", "low"), + ("price.bid", "bid"), + ("price.ask", "ask"), + ("price.change_pct", "change_pct"), + ("price.volume", "volume"), + ): + if feature_key in values and values[feature_key] is not None: + entry[alias] = values[feature_key] + + if values.get("open_interest") is not None: + entry["open_interest"] = values["open_interest"] + + if values.get("funding.rate") is not None: + entry["funding_rate"] = values["funding.rate"] + if values.get("funding.mark_price") is not None: + entry["mark_price"] = values["funding.mark_price"] + + normalized = {k: v for k, v in entry.items() if v is not None} + if normalized: + compact[symbol] = normalized + + return compact + + +def group_features(features: List[FeatureVector]) -> Dict: + """Organize features by grouping metadata and trim payload noise. + + Prefers the FeatureVector.meta group_by_key when present, otherwise + falls back to the interval tag. This allows callers to introduce + ad-hoc groupings (e.g., market snapshots) without overloading the + interval field. 
+ """ + grouped: Dict[str, List] = {} + + for fv in features: + data = fv.model_dump(mode="json") + meta = data.get("meta") or {} + group_key = meta.get(FEATURE_GROUP_BY_KEY) + + if not group_key: + continue + + grouped.setdefault(group_key, []).append(data) + + return grouped From 57d5306c445bb013288804d2b91d7d48fe2a3052 Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:53:58 +0800 Subject: [PATCH 13/15] refactor: add skills field to grid agent configuration --- python/configs/agent_cards/grid_agent.json | 1 + 1 file changed, 1 insertion(+) diff --git a/python/configs/agent_cards/grid_agent.json b/python/configs/agent_cards/grid_agent.json index 107a24f6c..df255ab93 100644 --- a/python/configs/agent_cards/grid_agent.json +++ b/python/configs/agent_cards/grid_agent.json @@ -7,6 +7,7 @@ "streaming": true, "push_notifications": true }, + "skills": [], "enabled": true, "metadata": { "planner_passthrough": true, From 23c9c7a3f768096c4fe364e0e027cb86d4b0739f Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:54:21 +0800 Subject: [PATCH 14/15] refactor: implement constructor in BaseComposer class --- python/valuecell/agents/common/trading/decision/interfaces.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/valuecell/agents/common/trading/decision/interfaces.py b/python/valuecell/agents/common/trading/decision/interfaces.py index 0fc787aee..5ba2868bd 100644 --- a/python/valuecell/agents/common/trading/decision/interfaces.py +++ b/python/valuecell/agents/common/trading/decision/interfaces.py @@ -29,6 +29,9 @@ class BaseComposer(ABC): Output: TradeInstruction list """ + def __init__(self, *args, **kwargs) -> None: + pass + @abstractmethod async def compose(self, context: ComposeContext) -> ComposeResult: """Produce normalized trade instructions given the current context. 
From 08787cb47d229df692fa36dfed34988e98a20d04 Mon Sep 17 00:00:00 2001 From: Zhaofeng Zhang <24791380+vcfgv@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:54:21 +0800 Subject: [PATCH 15/15] refactor: implement constructor in BaseComposer class --- .../agents/common/trading/decision/interfaces.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/python/valuecell/agents/common/trading/decision/interfaces.py b/python/valuecell/agents/common/trading/decision/interfaces.py index 0fc787aee..024e427d2 100644 --- a/python/valuecell/agents/common/trading/decision/interfaces.py +++ b/python/valuecell/agents/common/trading/decision/interfaces.py @@ -10,11 +10,12 @@ ComposeContext, ComposeResult, Constraints, - TradeDecisionAction, - TradePlanProposal, MarketType, + TradeDecisionAction, TradeInstruction, + TradePlanProposal, TradeSide, + UserRequest, ) from ..utils import extract_price_map @@ -29,6 +30,17 @@ class BaseComposer(ABC): Output: TradeInstruction list """ + def __init__( + self, + request: UserRequest, + *, + default_slippage_bps: int = 25, + quantity_precision: float = 1e-9, + ) -> None: + self._request = request + self._default_slippage_bps = default_slippage_bps + self._quantity_precision = quantity_precision + @abstractmethod async def compose(self, context: ComposeContext) -> ComposeResult: """Produce normalized trade instructions given the current context.