14 changes: 14 additions & 0 deletions python/third_party/README.md
@@ -0,0 +1,14 @@
# Deploy Third-Party Agents

## ai-hedge-fund

```bash
# ... install
cd ./ai-hedge-fund
echo "uv: $(which uv)"
echo "python: $(which python)"

uv run -m adapter --env-file ${path_to_dotenv}
# or simply
bash launch_adapter.sh
```
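The adapter reads its API keys from the dotenv file passed via `--env-file`. A minimal sketch of what that file might contain, assuming OpenRouter as the model provider and Financial Datasets as the data source (`FINANCIAL_DATASETS_API_KEY` appears in the agent code in this PR; the OpenRouter variable name is an assumption):

```bash
# Hypothetical ${path_to_dotenv} contents -- variable names are illustrative assumptions
OPENROUTER_API_KEY=sk-or-...          # model access via OpenRouter
FINANCIAL_DATASETS_API_KEY=fd-...     # market/fundamental data lookups
```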
73 changes: 67 additions & 6 deletions python/third_party/ai-hedge-fund/adapter/__main__.py
@@ -1,28 +1,32 @@
import asyncio
import json
import logging
from datetime import datetime
from typing import List

from agno.agent import Agent
from agno.models.openrouter import OpenRouter
from dateutil.relativedelta import relativedelta
from langchain_core.messages import HumanMessage
from pydantic import BaseModel, Field, field_validator
from valuecell.core.agent.decorator import create_wrapped_agent
from valuecell.core.agent.types import BaseAgent

from src.main import run_hedge_fund
from src.main import create_workflow
from src.utils.analysts import ANALYST_ORDER
from src.utils.progress import progress

allowed_analysts = set(
key for display_name, key in sorted(ANALYST_ORDER, key=lambda x: x[1])
)
allowed_tickers = {"AAPL", "GOOGL", "MSFT", "NVDA", "TSLA"}

logger = logging.getLogger(__name__)


class HedgeFundRequest(BaseModel):
tickers: List[str] = Field(
...,
description=f"List of stock tickers to analyze. Must be from: {allowed_tickers}",
description=f"List of stock tickers to analyze. Must be from: {allowed_tickers}. Otherwise, empty.",
)
selected_analysts: List[str] = Field(
default=[],
@@ -32,6 +36,8 @@ class HedgeFundRequest(BaseModel):
@field_validator("tickers")
@classmethod
def validate_tickers(cls, v):
if not v:
raise ValueError("No valid tickers are recognized.")
invalid_tickers = set(v) - allowed_tickers
if invalid_tickers:
raise ValueError(
@@ -61,10 +67,13 @@ def __init__(self):
)

async def stream(self, query, session_id, task_id):
logger.info(f"Parsing query: {query}. Task ID: {task_id}, Session ID: {session_id}")
run_response = self.agno_agent.run(
f"Parse the following hedge fund analysis request and extract the parameters: {query}"
)
hedge_fund_request = run_response.content
if not isinstance(hedge_fund_request, HedgeFundRequest):
raise ValueError(f"Unable to parse query: {query}")

end_date = datetime.now().strftime("%Y-%m-%d")
end_date_obj = datetime.strptime(end_date, "%Y-%m-%d")
@@ -94,22 +103,74 @@ async def stream(self, query, session_id, task_id):
},
}

result = run_hedge_fund(
logger.info(
f"Start analyzing. Task ID: {task_id}, Session ID: {session_id}"
)
for stream_type, chunk in run_hedge_fund_stream(
tickers=hedge_fund_request.tickers,
start_date=start_date,
end_date=end_date,
portfolio=portfolio,
model_name="openai/gpt-4o-mini",
model_provider="OpenRouter",
selected_analysts=hedge_fund_request.selected_analysts,
)
):
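# keep only the "custom" writer strings; "messages"-mode chunks arrive as (message, metadata) tuples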
if not isinstance(chunk, str):
continue
yield {
"content": chunk,
"is_task_complete": False,
}

yield {
"content": json.dumps(result),
"content": "",
"is_task_complete": True,
}


def run_hedge_fund_stream(
tickers: list[str],
start_date: str,
end_date: str,
portfolio: dict,
selected_analysts: list[str],
show_reasoning: bool = False,
model_name: str = "gpt-4.1",
model_provider: str = "OpenAI",
):
# Start progress tracking
progress.start()

try:
# Create a new workflow if analysts are customized
workflow = create_workflow(selected_analysts)
_agent = workflow.compile()

inputs = {
"messages": [
HumanMessage(
content="Make trading decisions based on the provided data.",
)
],
"data": {
"tickers": tickers,
"portfolio": portfolio,
"start_date": start_date,
"end_date": end_date,
"analyst_signals": {},
},
"metadata": {
"show_reasoning": show_reasoning,
"model_name": model_name,
"model_provider": model_provider,
},
}
yield from _agent.stream(inputs, stream_mode=["custom", "messages"])
finally:
# Stop progress tracking
progress.stop()


if __name__ == "__main__":
agent = create_wrapped_agent(AIHedgeFundAgent)
asyncio.run(agent.serve())
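For reviewers unfamiliar with the streaming pattern the adapter now relies on: `get_stream_writer()` lets each analyst node push free-form progress text onto LangGraph's `"custom"` stream, and `compiled_graph.stream(..., stream_mode=["custom", "messages"])` yields `(mode, payload)` pairs, which is what `stream()` above unpacks and filters. A minimal, self-contained sketch of that mechanic (the toy graph and node below are illustrative only, not part of this PR; it assumes langgraph 0.3.x, which is presumably why the pin is bumped in pyproject.toml):

```python
# Minimal sketch of the custom-stream pattern used by run_hedge_fund_stream.
from typing_extensions import TypedDict

from langgraph.config import get_stream_writer
from langgraph.graph import END, START, StateGraph


class State(TypedDict):
    ticker: str


def analyze(state: State) -> State:
    writer = get_stream_writer()                 # callable that pushes onto the "custom" stream
    writer(f"Analyzing {state['ticker']}...\n")  # surfaces immediately to the consumer below
    return state


builder = StateGraph(State)
builder.add_node("analyze", analyze)
builder.add_edge(START, "analyze")
builder.add_edge("analyze", END)
graph = builder.compile()

# stream_mode as a list yields (mode, payload) pairs -- the same shape the adapter unpacks
for mode, chunk in graph.stream({"ticker": "AAPL"}, stream_mode=["custom", "messages"]):
    if mode == "custom" and isinstance(chunk, str):
        print(chunk, end="")
```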
2 changes: 1 addition & 1 deletion python/third_party/ai-hedge-fund/pyproject.toml
@@ -12,7 +12,7 @@ dependencies = [
"langchain-openai>=0.3.5,<0.4",
"langchain-deepseek>=0.1.2,<0.2",
"langchain-ollama==0.3.6",
"langgraph==0.2.56",
"langgraph==0.3.34",
"pandas>=2.1.0,<3",
"numpy>=1.24.0,<2",
"python-dotenv==1.0.0",
14 changes: 14 additions & 0 deletions python/third_party/ai-hedge-fund/src/agents/aswath_damodaran.py
@@ -7,6 +7,7 @@
from src.graph.state import AgentState, show_agent_reasoning
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import HumanMessage
from langgraph.config import get_stream_writer

from src.tools.api import (
get_financial_metrics,
@@ -40,13 +41,16 @@ def aswath_damodaran_agent(state: AgentState, agent_id: str = "aswath_damodaran_

analysis_data: dict[str, dict] = {}
damodaran_signals: dict[str, dict] = {}
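# get_stream_writer() returns a callable that pushes text chunks onto LangGraph's "custom" stream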
writer = get_stream_writer()

for ticker in tickers:
# ─── Fetch core data ────────────────────────────────────────────────────
progress.update_status(agent_id, ticker, "Fetching financial metrics")
writer(f"Fetching financial metrics for {ticker}...\n")
metrics = get_financial_metrics(ticker, end_date, period="ttm", limit=5, api_key=api_key)

progress.update_status(agent_id, ticker, "Fetching financial line items")
writer(f"Fetching financial line items for {ticker}...\n")
line_items = search_line_items(
ticker,
[
@@ -64,19 +68,24 @@
)

progress.update_status(agent_id, ticker, "Getting market cap")
writer(f"Getting market cap for {ticker}...\n")
market_cap = get_market_cap(ticker, end_date, api_key=api_key)

# ─── Analyses ───────────────────────────────────────────────────────────
progress.update_status(agent_id, ticker, "Analyzing growth and reinvestment")
writer(f"Analyzing growth and reinvestment for {ticker}...\n")
growth_analysis = analyze_growth_and_reinvestment(metrics, line_items)

progress.update_status(agent_id, ticker, "Analyzing risk profile")
writer(f"Analyzing risk profile for {ticker}...\n")
risk_analysis = analyze_risk_profile(metrics, line_items)

progress.update_status(agent_id, ticker, "Calculating intrinsic value (DCF)")
writer(f"Calculating intrinsic value (DCF) for {ticker}...\n")
intrinsic_val_analysis = calculate_intrinsic_value_dcf(metrics, line_items, risk_analysis)

progress.update_status(agent_id, ticker, "Assessing relative valuation")
writer(f"Assessing relative valuation for {ticker}...\n")
relative_val_analysis = analyze_relative_valuation(metrics)

# ─── Score & margin of safety ──────────────────────────────────────────
@@ -114,6 +123,7 @@ def aswath_damodaran_agent(state: AgentState, agent_id: str = "aswath_damodaran_

# ─── LLM: craft Damodaran-style narrative ──────────────────────────────
progress.update_status(agent_id, ticker, "Generating Damodaran analysis")
writer(f"Generating Damodaran analysis for {ticker}...\n")
damodaran_output = generate_damodaran_output(
ticker=ticker,
analysis_data=analysis_data,
@@ -124,6 +134,10 @@ def aswath_damodaran_agent(state: AgentState, agent_id: str = "aswath_damodaran_
damodaran_signals[ticker] = damodaran_output.model_dump()

progress.update_status(agent_id, ticker, "Done", analysis=damodaran_output.reasoning)
writer(
f"Analysis output: {damodaran_output.signal} with confidence {damodaran_output.confidence:.1%}\n"
f"{damodaran_output.reasoning}\n\n"
)

# ─── Push message back to graph state ──────────────────────────────────────
message = HumanMessage(content=json.dumps(damodaran_signals), name=agent_id)
13 changes: 13 additions & 0 deletions python/third_party/ai-hedge-fund/src/agents/ben_graham.py
@@ -2,6 +2,7 @@
from src.tools.api import get_financial_metrics, get_market_cap, search_line_items
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import HumanMessage
from langgraph.config import get_stream_writer
from pydantic import BaseModel
import json
from typing_extensions import Literal
@@ -32,25 +33,32 @@ def ben_graham_agent(state: AgentState, agent_id: str = "ben_graham_agent"):

analysis_data = {}
graham_analysis = {}
writer = get_stream_writer()

for ticker in tickers:
progress.update_status(agent_id, ticker, "Fetching financial metrics")
writer(f"Fetching financial metrics for {ticker}...\n")
metrics = get_financial_metrics(ticker, end_date, period="annual", limit=10, api_key=api_key)

progress.update_status(agent_id, ticker, "Gathering financial line items")
writer(f"Gathering financial line items for {ticker}...\n")
financial_line_items = search_line_items(ticker, ["earnings_per_share", "revenue", "net_income", "book_value_per_share", "total_assets", "total_liabilities", "current_assets", "current_liabilities", "dividends_and_other_cash_distributions", "outstanding_shares"], end_date, period="annual", limit=10, api_key=api_key)

progress.update_status(agent_id, ticker, "Getting market cap")
writer(f"Getting market cap for {ticker}...\n")
market_cap = get_market_cap(ticker, end_date, api_key=api_key)

# Perform sub-analyses
progress.update_status(agent_id, ticker, "Analyzing earnings stability")
writer(f"Analyzing earnings stability for {ticker}...\n")
earnings_analysis = analyze_earnings_stability(metrics, financial_line_items)

progress.update_status(agent_id, ticker, "Analyzing financial strength")
writer(f"Analyzing financial strength for {ticker}...\n")
strength_analysis = analyze_financial_strength(financial_line_items)

progress.update_status(agent_id, ticker, "Analyzing Graham valuation")
writer(f"Analyzing Graham valuation for {ticker}...\n")
valuation_analysis = analyze_valuation_graham(financial_line_items, market_cap)

# Aggregate scoring
@@ -68,6 +76,7 @@ def ben_graham_agent(state: AgentState, agent_id: str = "ben_graham_agent"):
analysis_data[ticker] = {"signal": signal, "score": total_score, "max_score": max_possible_score, "earnings_analysis": earnings_analysis, "strength_analysis": strength_analysis, "valuation_analysis": valuation_analysis}

progress.update_status(agent_id, ticker, "Generating Ben Graham analysis")
writer(f"Generating Ben Graham analysis for {ticker}...\n")
graham_output = generate_graham_output(
ticker=ticker,
analysis_data=analysis_data,
@@ -78,6 +87,10 @@ def ben_graham_agent(state: AgentState, agent_id: str = "ben_graham_agent"):
graham_analysis[ticker] = {"signal": graham_output.signal, "confidence": graham_output.confidence, "reasoning": graham_output.reasoning}

progress.update_status(agent_id, ticker, "Done", analysis=graham_output.reasoning)
writer(
f"Analysis output: {graham_output.signal} with confidence {graham_output.confidence:.1%}\n"
f"{graham_output.reasoning}\n\n"
)

# Wrap results in a single message for the chain
message = HumanMessage(content=json.dumps(graham_analysis), name=agent_id)
9 changes: 9 additions & 0 deletions python/third_party/ai-hedge-fund/src/agents/bill_ackman.py
@@ -2,6 +2,7 @@
from src.tools.api import get_financial_metrics, get_market_cap, search_line_items
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import HumanMessage
from langgraph.config import get_stream_writer
from pydantic import BaseModel
import json
from typing_extensions import Literal
@@ -28,12 +29,15 @@ def bill_ackman_agent(state: AgentState, agent_id: str = "bill_ackman_agent"):
api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY")
analysis_data = {}
ackman_analysis = {}
writer = get_stream_writer()

for ticker in tickers:
progress.update_status(agent_id, ticker, "Fetching financial metrics")
writer(f"Fetching financial metrics for {ticker}...\n")
metrics = get_financial_metrics(ticker, end_date, period="annual", limit=5, api_key=api_key)

progress.update_status(agent_id, ticker, "Gathering financial line items")
writer(f"Gathering financial line items for {ticker}...\n")
# Request multiple periods of data (annual or TTM) for a more robust long-term view.
financial_line_items = search_line_items(
ticker,
@@ -56,18 +60,23 @@ def bill_ackman_agent(state: AgentState, agent_id: str = "bill_ackman_agent"):
)

progress.update_status(agent_id, ticker, "Getting market cap")
writer(f"Getting market cap for {ticker}...\n")
market_cap = get_market_cap(ticker, end_date, api_key=api_key)

progress.update_status(agent_id, ticker, "Analyzing business quality")
writer(f"Analyzing business quality for {ticker}...\n")
quality_analysis = analyze_business_quality(metrics, financial_line_items)

progress.update_status(agent_id, ticker, "Analyzing balance sheet and capital structure")
writer(f"Analyzing balance sheet and capital structure for {ticker}...\n")
balance_sheet_analysis = analyze_financial_discipline(metrics, financial_line_items)

progress.update_status(agent_id, ticker, "Analyzing activism potential")
writer(f"Analyzing activism potential for {ticker}...\n")
activism_analysis = analyze_activism_potential(financial_line_items)

progress.update_status(agent_id, ticker, "Calculating intrinsic value & margin of safety")
writer(f"Calculating intrinsic value & margin of safety for {ticker}...\n")
valuation_analysis = analyze_valuation(financial_line_items, market_cap)

# Combine partial scores or signals