Feat: Add uv Python environment, DSPy chatbot, Ollama, and an easy Flask frontend #355
Changes from all commits
```diff
@@ -0,0 +1,16 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Ignored default folder with query files
+/queries/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+
+*.env
+
+# Ignore Python cache files
+__pycache__/
+*.py[cod]
```
Some generated files are not rendered by default.
```diff
@@ -0,0 +1 @@
+3.13
```
```diff
@@ -0,0 +1,3 @@
+{
+    "python-envs.pythonProjects": []
+}
```
```diff
@@ -0,0 +1,13 @@
+[project]
+name = "atlas-world"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.13"
+dependencies = [
+    "dspy>=3.1.0",
+    "ollama>=0.6.1",
+    "flask>=3.0.0",
+    "dotenv>=0.9.9",
+    "flask-cors>=6.0.2",
+]
```
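For context, `uv` reads the `.python-version` and `pyproject.toml` shown above, so the environment would typically be created with `uv sync` and the server launched through `uv run`; the exact run command is not part of the diff shown here.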
@@ -0,0 +1,98 @@

```python
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS
import os
import dspy
from script.llm_config import configure_lm
from script.llm_module.investigator.module import InvestigatorModule
from script.llm_module.opposition.module import OppositionModule

# Ensure Flask can find the templates folder
base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
template_dir = os.path.join(base_dir, 'templates')
app = Flask(__name__, template_folder=template_dir)
CORS(app)

# Initialize DSPy with the user-chosen LM
print("Starting the LLM configuration wizard...")
configure_lm()
investigator = InvestigatorModule()
opposer = OppositionModule()

# Conversation state is managed with the official dspy.History
chat_history = dspy.History(messages=[])

port = int(os.getenv("PORT") or 5000)


@app.route('/')
def home():
    return render_template('index.html')


@app.route('/investigate', methods=['POST'])
def investigate_endpoint():
    try:
        data = request.json
        user_content = data.get('userContent')
        system_prompt = data.get('systemPrompt') or ""

        readme_path = os.path.join(base_dir, 'README.md')
        if os.path.exists(readme_path):
            with open(readme_path, 'r', encoding='utf-8') as f:
                readme_content = f.read()
            system_prompt += "\n--- Background information ---\n" + readme_content

        if not user_content:
            user_content = "Please start the investigation"

        # Run the investigator module
        result = investigator(system_prompt=system_prompt, user_query=user_content, history=chat_history)

        # Append the turn to the conversation history
        chat_history.messages.append({"user_query": user_content, "response": result.response})

        return jsonify({'response': result.response})

    except Exception as e:
        return jsonify({'error': str(e)}), 500


@app.route('/oppose', methods=['POST'])
def oppose_endpoint():
    try:
        data = request.json
        system_prompt = data.get('systemPrompt') or ""

        readme_path = os.path.join(base_dir, 'README.md')
        if os.path.exists(readme_path):
            with open(readme_path, 'r', encoding='utf-8') as f:
                readme_content = f.read()
            system_prompt += "\n--- Background information ---\n" + readme_content

        # The opposition module raises challenges based on the current conversation history
        result = opposer(system_prompt=system_prompt, history=chat_history)

        # The opposer's reply is also appended to the history so the
        # investigator can address the objections on the next turn
        chat_history.messages.append({"user_query": "[oversight objection request]", "response": result})

        return jsonify({'response': result})

# ... (remaining lines of the file, including the app.run(...) entry point,
# are collapsed in the diff view)
```
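For reference, a hypothetical client call against the `/investigate` endpoint; the `requests` dependency and the localhost URL are assumptions, while the JSON keys (`userContent`, `systemPrompt`) match what `investigate_endpoint` reads:

```python
import requests

# Keys mirror investigate_endpoint's request.json parsing
resp = requests.post(
    "http://localhost:5000/investigate",
    json={"userContent": "Summarize the README", "systemPrompt": ""},
)
print(resp.json().get("response"))
```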
Comment on lines +73 to +75 — review suggestion:

```diff
-        chat_history.messages.append({"user_query": "[oversight objection request]", "response": result})
-        return jsonify({'response': result})
+        chat_history.messages.append({"user_query": "[oversight objection request]", "response": result.response})
+        return jsonify({'response': result.response})
```
Copilot AI commented on Jan 7, 2026:

Running Flask with both `debug=True` and `use_reloader=True` in production can expose sensitive information through debug tracebacks, and it causes `configure_lm()` to be called twice because the reloader spawns a second process. While a code comment acknowledges this issue, setting `use_reloader=True` contradicts it. Consider setting `use_reloader=False`, or use environment-based configuration to disable debug mode in production.

```diff
-    app.run(debug=True, use_reloader=True, host='0.0.0.0', port=port)
+    app.run(debug=True, use_reloader=False, host='0.0.0.0', port=port)
```
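A minimal sketch of the environment-based configuration the comment points to, assuming a hypothetical `FLASK_DEBUG` variable (not part of this PR):

```python
import os
from flask import Flask

app = Flask(__name__)
port = int(os.getenv("PORT") or 5000)

# Debug mode is opt-in via the environment; the reloader stays off so that
# one-time startup work such as configure_lm() runs exactly once.
debug = os.getenv("FLASK_DEBUG", "0") == "1"

if __name__ == "__main__":
    app.run(debug=debug, use_reloader=False, host="0.0.0.0", port=port)
```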
@@ -0,0 +1,91 @@

```python
import dspy
import os

CONFIGED_LM = None


def configure_lm():
    from dotenv import load_dotenv
    load_dotenv()
    global CONFIGED_LM
    if CONFIGED_LM is not None:
        return CONFIGED_LM
    print("\n--- 🌐 Choose an LLM model source ---")
    print("1) Local model (Ollama/HF)")
    print("2) OpenAI API")
    print("3) Google Gemini API")
    print("4) Anthropic Claude API")

    choice = input("Enter a number (1/2/3/4): ").strip()

    model_name = None
    api_key = None

    if choice == "1":
        # Suggest a few common local models as references
        env_model = os.getenv("OLLAMA_MODEL")
        if env_model:
            print(f"\nDetected default model: {env_model}")
            model_name = input(f"Enter a local model name (press Enter to use {env_model}): ").strip()
            if not model_name:
                model_name = env_model
        else:
            print("\nExample local models: llama3, gemma3:1b, mistral, phi3")
            model_name = input("Enter a local model name: ").strip()

        # Make sure local models carry a provider prefix (required by LiteLLM)
        if "/" not in model_name:
            model_name = f"ollama/{model_name}"
            print(f"Auto-corrected to LiteLLM format: {model_name}")

        lm = dspy.LM(model_name)

    elif choice == "2":
        print("\nOpenAI model selection:")
        print("1) openai/gpt-5.2\n2) openai/gpt-4o\n3) openai/gpt-4o-mini\n4) openai/o4-mini\n5) openai/o3-mini")
        idx = input("Choose a model (1-5): ").strip()
        # Mapping covers all five menu entries
        mapping = {
            "1": "openai/gpt-5.2",
            "2": "openai/gpt-4o",
            "3": "openai/gpt-4o-mini",
            "4": "openai/o4-mini",
            "5": "openai/o3-mini",
        }
        model_name = mapping.get(idx, "openai/gpt-5.2")
        api_key = input("Enter an OpenAI API key (or leave blank to use OPENAI_API_KEY): ").strip() or os.getenv("OPENAI_API_KEY")
        lm = dspy.LM(model_name, api_key=api_key)

    elif choice == "3":
        print("\nGoogle Gemini model selection:\n1) gemini-2.5-flash\n2) gemini-2.5-pro\n3) gemini-3-flash-preview\n4) gemini-3-pro-preview")
        idx = input("Choose a model (1-4): ").strip()
        mapping = {
            "1": "gemini/gemini-2.5-flash",
            "2": "gemini/gemini-2.5-pro",
            "3": "gemini/gemini-3-flash-preview",
            "4": "gemini/gemini-3-pro-preview"
        }
        # Default carries the gemini/ prefix like the mapped entries
        model_name = mapping.get(idx, "gemini/gemini-2.5-pro")
        api_key = input("Enter a Gemini API key (or leave blank to use GEMINI_API_KEY): ").strip() or os.getenv("GEMINI_API_KEY")
        lm = dspy.LM(model_name, api_key=api_key)

    elif choice == "4":
        print("\nAnthropic Claude model selection:")
        print("1) claude-opus-4.5-20251101\n2) claude-sonnet-4.5\n3) claude-haiku-4.5")
        idx = input("Choose a model (1-3): ").strip()
        # LiteLLM routes Claude models under the anthropic/ provider prefix
        mapping = {
            "1": "anthropic/claude-opus-4.5-20251101",
            "2": "anthropic/claude-sonnet-4.5",
            "3": "anthropic/claude-haiku-4.5"
        }
        model_name = mapping.get(idx, "anthropic/claude-opus-4.5-20251101")
        api_key = input("Enter a Claude API key (or leave blank to use ANTHROPIC_API_KEY): ").strip() or os.getenv("ANTHROPIC_API_KEY")
        lm = dspy.LM(model_name, api_key=api_key)

    else:
        print("Invalid choice; defaulting to openai/gpt-5.2")
        model_name = "openai/gpt-5.2"
        api_key = os.getenv("OPENAI_API_KEY")
        lm = dspy.LM(model_name, api_key=api_key, cache=False)

# ... (remaining lines of the file are collapsed in the diff view)
```
Review suggestion — drop the `cache=False` flag:

```diff
-    lm = dspy.LM(model_name, api_key=api_key, cache=False)
+    lm = dspy.LM(model_name, api_key=api_key)
```
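Because `configure_lm()` blocks on `input()`, it cannot start under a non-interactive process manager. A minimal non-interactive sketch, assuming hypothetical `DSPY_MODEL` and `DSPY_API_KEY` environment variables:

```python
import os
import dspy

# LiteLLM-style provider prefix, e.g. ollama/llama3 or openai/gpt-4o-mini
model_name = os.getenv("DSPY_MODEL", "ollama/llama3")
lm = dspy.LM(model_name, api_key=os.getenv("DSPY_API_KEY"))
dspy.configure(lm=lm)
```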
@@ -0,0 +1,17 @@

```python
import dspy
from .signature import InvestigatorSignature


class InvestigatorModule(dspy.Module):
    """
    DSPy module that implements the investigator logic.
    """
    def __init__(self):
        super().__init__()
        self.investigate = dspy.ChainOfThought(InvestigatorSignature)

    def forward(self, system_prompt, user_query, history):
        return self.investigate(
            system_prompt=system_prompt,
            user_query=user_query,
            history=history
        )
```
Comment on lines +13 to +17 — review suggestion:

```diff
-        return self.investigate(
-            system_prompt=system_prompt,
-            user_query=user_query,
-            history=history
-        )
+        result = self.investigate(
+            system_prompt=system_prompt,
+            user_query=user_query,
+            history=history
+        )
+        return result.response
```
@@ -0,0 +1,12 @@

```python
import dspy


class InvestigatorSignature(dspy.Signature):
    """
    You are the "Investigator" of Atlas-World.
    Your job is to analyze the user's question in depth, combining background
    information and the conversation history to perform logical reasoning,
    fact-checking, or scenario analysis, and to deliver a detailed,
    insightful investigation report or response.
    """
    system_prompt = dspy.InputField(desc="the current documents and the user-supplied system prompt")
    user_query = dspy.InputField(desc="the user's current question or subject of investigation")
    history: dspy.History = dspy.InputField(desc="the conversation history of the investigation so far")
    response = dspy.OutputField(desc="the generated analysis, insight, or response")
```
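For illustration, a minimal sketch of driving this signature directly with `dspy.ChainOfThought`, as the module shown earlier does; the `ollama/llama3` model name is a placeholder:

```python
import dspy

dspy.configure(lm=dspy.LM("ollama/llama3"))  # any LiteLLM-routable model

investigate = dspy.ChainOfThought(InvestigatorSignature)
history = dspy.History(messages=[])
pred = investigate(
    system_prompt="You review pull requests.",
    user_query="What does this PR change?",
    history=history,
)
print(pred.response)  # ChainOfThought predictions also expose pred.reasoning
```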
@@ -0,0 +1,15 @@

```python
import dspy
from .signature import OppositionSignature


class OppositionModule(dspy.Module):
    """
    DSPy module that provides counter-arguments and oversight logic.
    """
    def __init__(self):
        super().__init__()
        self.predictor = dspy.ChainOfThought(OppositionSignature)

    def forward(self, system_prompt, history):
        # history should be a dspy.History object
        result = self.predictor(system_prompt=system_prompt, history=history)
        return result.response
```
Review comment:

The CORS configuration uses `CORS(app)` without any restrictions, which allows requests from any origin. This can be a security risk in production environments. Consider restricting CORS to specific origins, or at minimum document that the open configuration is intentional for development only.
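A minimal sketch of a restricted setup using `flask-cors`'s `origins` parameter; the frontend URL is a placeholder:

```python
from flask import Flask
from flask_cors import CORS

app = Flask(__name__)
# Allow only a known frontend origin instead of every origin
CORS(app, origins=["http://localhost:8080"])
```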