diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml
new file mode 100644
index 0000000..90c2c18
--- /dev/null
+++ b/.github/workflows/sync.yml
@@ -0,0 +1,49 @@
+name: Upstream Sync
+
+on:
+ schedule:
+ - cron: "0 */6 * * *"
+ workflow_dispatch:
+
+permissions:
+ contents: write
+ actions: write
+
+jobs:
+ sync_latest_from_upstream:
+ name: Sync latest commits from upstream repo
+ runs-on: ubuntu-latest
+ if: ${{ github.event.repository.fork }}
+
+ steps:
+ # Step 1: run a standard checkout action
+ - name: Checkout target repo
+ uses: actions/checkout@v4
+
+ # Step 2: run the sync action
+ - name: Sync upstream changes
+ id: sync
+ uses: aormsby/Fork-Sync-With-Upstream-action@v3.4.1
+ with:
+ upstream_sync_repo: HuaYaoAI/FinGenius
+ upstream_sync_branch: main
+ target_sync_branch: main
+ target_repo_token: ${{ secrets.GITHUB_TOKEN }}
+        upstream_pull_args: '--allow-unrelated-histories'
+ git_log_format_option: '--oneline'
+ git_push_args: '--no-verify'
+ git_commit_args: '--no-verify'
+
+ - name: Sync check
+ if: failure()
+ run: |
+ echo "[Error] Due to a change in the workflow file of the upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork."
+ exit 1
+
+ - name: Delete workflow runs
+        uses: Mattraks/delete-workflow-runs@v2
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ repository: ${{ github.repository }}
+ retain_days: 0
+          keep_minimum_runs: 2
diff --git a/.gitignore b/.gitignore
index fd5afd8..846b9f2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -197,3 +197,6 @@ cython_debug/
# node
node_modules
+
+report/
+logs/
diff --git a/README.md b/README.md
index 7d9337b..6a858ab 100644
--- a/README.md
+++ b/README.md
@@ -69,7 +69,7 @@
欢迎对AI金融极致热爱的你
-携手完善FinGenius,共同探索金融智能分析的技术前沿边界!🌟
+携手完善[FinGenius](https://fingenius.cn),共同探索金融智能分析的技术前沿边界!🌟
## 安装指南
diff --git a/config/config.example.toml b/config/config.example.toml
deleted file mode 100644
index e0307f9..0000000
--- a/config/config.example.toml
+++ /dev/null
@@ -1,53 +0,0 @@
-# Global LLM configuration
-# 模型越好,生成的效果越好,建议使用最好的模型。
-[llm]
-api_type = "openai" # 添加API类型,使用OpenAI兼容的API
-model = "claude-3-7-sonnet-20250219" # The LLM model to use, better use tool supported model
-base_url = "https://api.anthropic.com/v1/" # API endpoint URL
-api_key = "YOUR_API_KEY" # Your API key
-max_tokens = 8192 # Maximum number of tokens in the response
-temperature = 0.0 # Controls randomness
-
-# [llm] #AZURE OPENAI:
-# api_type= 'azure'
-# model = "YOUR_MODEL_NAME" #"gpt-4o-mini"
-# base_url = "{YOUR_AZURE_ENDPOINT.rstrip('/')}/openai/deployments/{AZURE_DEPOLYMENT_ID}"
-# api_key = "AZURE API KEY"
-# max_tokens = 8096
-# temperature = 0.0
-# api_version="AZURE API VERSION" #"2024-08-01-preview"
-
-
-# [llm]
-# api_type = "ollama"
-# model = "你的模型名称" # 例如: "llama3.2", "qwen2.5", "deepseek-coder"
-# base_url = "http://10.24.163.221:8080/v1" # 你的Ollama服务地址
-# api_key = "ollama" # 可以是任意值,Ollama会忽略但OpenAI SDK需要
-# max_tokens = 4096
-# temperature = 0.0
-
-# Optional configuration for specific LLM models
-# [llm.vision]
-# model = "claude-3-7-sonnet-20250219" # The vision model to use
-# base_url = "https://api.anthropic.com/v1/" # API endpoint URL for vision model
-# api_key = "YOUR_API_KEY" # Your API key for vision model
-# max_tokens = 8192 # Maximum number of tokens in the response
-# temperature = 0.0 # Controls randomness for vision model
-
-# [llm.vision] #OLLAMA VISION:
-# api_type = 'ollama'
-# model = "llama3.2-vision"
-# base_url = "http://localhost:11434/v1"
-# api_key = "ollama"
-# max_tokens = 4096
-# temperature = 0.0
-
-# Optional configuration, Search settings.
-[search]
-# Search engine for agent to use. Default is "Google", can be set to "Baidu" or "DuckDuckGo" or "Bing"
-# 对于国内用户,建议使用以下搜索引擎优先级:
-# Baidu(百度)- 国内访问最稳定
-# Bing(必应)- 国际化且国内可用
-# Google - 作为备选(需要良好的国际网络)
-# DuckDuckGo - 作为备选(需要良好的国际网络)
-engine = "Bing"
diff --git a/src/agent/sentiment.py b/src/agent/sentiment.py
index 6aed8c4..57e1c6a 100644
--- a/src/agent/sentiment.py
+++ b/src/agent/sentiment.py
@@ -68,43 +68,52 @@ async def analyze(self, stock_code: str, **kwargs) -> Dict:
# 1. 强制执行新闻搜索
try:
- news_result = await self.tool_call("web_search", {
- "query": f"{stock_code} 股票 最新消息 舆情",
- "max_results": 10
- })
- if news_result and news_result.success:
- analysis_tasks.append(("news_search", news_result.data))
+ news_result = await self.available_tools.execute(
+ name="web_search",
+ tool_input={
+ "query": f"{stock_code} 股票 最新消息 舆情",
+ "max_results": 10
+ }
+ )
+ if news_result and not news_result.error:
+ analysis_tasks.append(("news_search", news_result.output))
logger.info(f"新闻搜索成功: {stock_code}")
else:
- logger.warning(f"新闻搜索失败: {stock_code}")
+ logger.warning(f"新闻搜索失败: {stock_code}, {news_result.error if news_result else 'Unknown error'}")
except Exception as e:
logger.error(f"新闻搜索异常: {stock_code}, {str(e)}")
# 2. 强制执行社交媒体分析
try:
- social_result = await self.tool_call("web_search", {
- "query": f"{stock_code} 股吧 讨论 情绪",
- "max_results": 5
- })
- if social_result and social_result.success:
- analysis_tasks.append(("social_media", social_result.data))
+ social_result = await self.available_tools.execute(
+ name="web_search",
+ tool_input={
+ "query": f"{stock_code} 股吧 讨论 情绪",
+ "max_results": 5
+ }
+ )
+ if social_result and not social_result.error:
+ analysis_tasks.append(("social_media", social_result.output))
logger.info(f"社交媒体分析成功: {stock_code}")
else:
- logger.warning(f"社交媒体分析失败: {stock_code}")
+ logger.warning(f"社交媒体分析失败: {stock_code}, {social_result.error if social_result else 'Unknown error'}")
except Exception as e:
logger.error(f"社交媒体分析异常: {stock_code}, {str(e)}")
# 3. 强制执行舆情分析工具
try:
- sentiment_result = await self.tool_call("sentiment_analysis", {
- "stock_code": stock_code,
- "analysis_type": "comprehensive"
- })
- if sentiment_result and sentiment_result.success:
- analysis_tasks.append(("sentiment_analysis", sentiment_result.data))
+ sentiment_result = await self.available_tools.execute(
+ name="sentiment_analysis",
+ tool_input={
+ "stock_code": stock_code,
+ "analysis_type": "comprehensive"
+ }
+ )
+ if sentiment_result and not sentiment_result.error:
+ analysis_tasks.append(("sentiment_analysis", sentiment_result.output))
logger.info(f"舆情分析工具成功: {stock_code}")
else:
- logger.warning(f"舆情分析工具失败: {stock_code}")
+ logger.warning(f"舆情分析工具失败: {stock_code}, {sentiment_result.error if sentiment_result else 'Unknown error'}")
except Exception as e:
logger.error(f"舆情分析工具异常: {stock_code}, {str(e)}")
diff --git a/src/mcp/big_deal_analysis_server.py b/src/mcp/big_deal_analysis_server.py
deleted file mode 100644
index 6c2948e..0000000
--- a/src/mcp/big_deal_analysis_server.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from src.mcp.server import MCPServer
-from src.tool import Terminate
-from src.tool.big_deal_analysis import BigDealAnalysisTool
-
-
-class BigDealAnalysisServer(MCPServer):
- def __init__(self, name: str = "BigDealAnalysisServer"):
- super().__init__(name)
-
- def _initialize_standard_tools(self) -> None:
- self.tools.update(
- {
- "big_deal_analysis_tool": BigDealAnalysisTool(),
- "terminate": Terminate(),
- }
- )
diff --git a/src/mcp/chip_analysis_server.py b/src/mcp/chip_analysis_server.py
deleted file mode 100644
index 5607911..0000000
--- a/src/mcp/chip_analysis_server.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from src.mcp.server import MCPServer
-from src.tool import Terminate
-from src.tool.chip_analysis import ChipAnalysisTool
-
-
-class ChipAnalysisServer(MCPServer):
- def __init__(self, name: str = "ChipAnalysisServer"):
- super().__init__(name)
-
- def _initialize_standard_tools(self) -> None:
- self.tools.update(
- {
- "chip_analysis_tool": ChipAnalysisTool(),
- "terminate": Terminate(),
- }
- )
diff --git a/src/mcp/risk_control_server.py b/src/mcp/risk_control_server.py
index 5d0f63c..bf01db5 100644
--- a/src/mcp/risk_control_server.py
+++ b/src/mcp/risk_control_server.py
@@ -1,16 +1,18 @@
from src.mcp.server import MCPServer
from src.tool import Terminate
-from src.tool.risk_control import RiskControlTool
+from src.tool.sentiment import SentimentTool
+from src.tool.web_search import WebSearch
-class RiskControlServer(MCPServer):
- def __init__(self, name: str = "RiskControlServer"):
+class SentimentServer(MCPServer):
+ def __init__(self, name: str = "SentimentServer"):
super().__init__(name)
def _initialize_standard_tools(self) -> None:
self.tools.update(
{
- "risk_control_tool": RiskControlTool(),
+ "sentiment_tool": SentimentTool(),
+ "web_search": WebSearch(),
"terminate": Terminate(),
}
)
diff --git a/src/prompt/create_html.py b/src/prompt/create_html.py
index ad9c362..040721e 100644
--- a/src/prompt/create_html.py
+++ b/src/prompt/create_html.py
@@ -488,33 +488,119 @@