Feat: Add uv Python environment, DSPy chatbot, Ollama, and a simple Flask frontend #359
base: main
Changes from all commits
@@ -0,0 +1,16 @@

```
# Default ignored files
/shelf/
/workspace.xml
# Ignored default folder with query files
/queries/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
# Editor-based HTTP Client requests
/httpRequests/

*.env

# Ignore Python cache files
__pycache__/
*.py[cod]
```
@@ -0,0 +1 @@

```
3.13
```
@@ -0,0 +1,3 @@

```json
{
    "python-envs.pythonProjects": []
}
```
@@ -0,0 +1,13 @@

```toml
[project]
name = "atlas-world"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.13"
dependencies = [
    "dspy>=3.1.0",
    "ollama>=0.6.1",
    "flask>=3.0.0",
    "dotenv>=0.9.9",
    "flask-cors>=6.0.2",
]
```
@@ -0,0 +1,98 @@

```python
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS
import os
import dspy
from script.llm_config import configure_lm
from script.llm_module.investigator.module import InvestigatorModule
from script.llm_module.opposition.module import OppositionModule

# Ensure Flask can find the templates folder
base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
template_dir = os.path.join(base_dir, 'templates')
app = Flask(__name__, template_folder=template_dir)
CORS(app)

# Initialize DSPy with user-chosen LM
print("正在啟動 LLM 配置引導...")  # "Starting the LLM configuration wizard..."
configure_lm()
investigator = InvestigatorModule()
opposer = OppositionModule()

# Manage chat history with the official dspy.History
chat_history = dspy.History(messages=[])

# ... (the route handlers are not shown in this view) ...

port = int(os.getenv("PORT") or 5000)
```
Suggested change:

```diff
-port = int(os.getenv("PORT") or 5000)
+port_env = os.getenv("PORT")
+try:
+    port = int(port_env)
+    if not (1 <= port <= 65535):
+        raise ValueError("Port out of valid range")
+except (TypeError, ValueError):
+    port = 5000
```
Copilot AI · Jan 7, 2026

Security concern: the README.md file is automatically included in every request without any size check or sanitization. If the README becomes very large, this could cause performance issues or memory problems. Consider adding size limits or a lazy-loading mechanism.
Copilot AI · Jan 7, 2026

Missing error handling: if the README.md file cannot be read due to encoding issues or file-system errors, the exception will not be caught with useful context. The try/except at the endpoint level may not indicate which file operation failed.

Suggested change (the prompt header "背景資訊" means "background information"):

```diff
-with open(readme_path, 'r', encoding='utf-8') as f:
-    readme_content = f.read()
-system_prompt += "\n--- 背景資訊 ---\n" + readme_content
+try:
+    with open(readme_path, 'r', encoding='utf-8') as f:
+        readme_content = f.read()
+except (OSError, UnicodeError) as readme_error:
+    # Provide clearer context about failures related to README.md
+    raise RuntimeError(f"Failed to read README.md at {readme_path}: {readme_error}") from readme_error
+else:
+    system_prompt += "\n--- 背景資訊 ---\n" + readme_content
```
Copilot AI · Jan 7, 2026

Missing input validation: the endpoints do not validate that `user_content` is actually provided before use. There is a fallback for empty strings, but nothing guards against missing keys or None values in the JSON payload, so malformed client data can cause errors.
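One way to harden an endpoint is shown below. This is a sketch under assumptions: the route name `/investigate` and the payload key `user_content` are illustrative, not taken from the PR's actual routes.

```python
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/investigate', methods=['POST'])  # route name assumed for illustration
def investigate():
    # silent=True makes get_json return None on a malformed or non-JSON body
    data = request.get_json(silent=True) or {}
    user_content = data.get('user_content')
    if not isinstance(user_content, str) or not user_content.strip():
        return jsonify({'error': "'user_content' must be a non-empty string"}), 400
    return jsonify({'response': f"received: {user_content.strip()}"})
```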
Copilot AI · Jan 7, 2026

There is an inconsistency in how the investigator and opposer modules are called. The investigator returns result.response (line 50), which is then stored in the history, while the opposer returns result directly (line 70). This suggests the opposer's forward method returns a string rather than an object with a .response attribute, and the inconsistency could lead to data-structure mismatches in the chat history.

Suggested change (the marker "監察反對請求" means "oversight/opposition request"):

```diff
-chat_history.messages.append({"user_query": "[監察反對請求]", "response": result})
-return jsonify({'response': result})
+chat_history.messages.append({"user_query": "[監察反對請求]", "response": result.response})
+return jsonify({'response': result.response})
```
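Until the two modules agree on a return type, the endpoint could also normalize defensively. A sketch; the helper name is hypothetical:

```python
def extract_response(result):
    """Return the response text whether a module yields a raw string or a Prediction."""
    return result if isinstance(result, str) else result.response
```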
@@ -0,0 +1,91 @@

```python
import dspy
import os


CONFIGED_LM = None


def configure_lm():
    from dotenv import load_dotenv
    load_dotenv()
    global CONFIGED_LM
    if CONFIGED_LM is not None:
        return CONFIGED_LM
    print("\n--- 🌐 選擇要用的 LLM 模型來源 ---")  # "Choose the LLM model source to use"
    print("1) 本地模型 (Ollama/HF)")  # "1) Local model (Ollama/HF)"
    print("2) OpenAI API")
    print("3) Google Gemini API")
    print("4) Anthropic Claude API")

    choice = input("請輸入數字 (1/2/3/4): ").strip()  # "Enter a number (1/2/3/4)"

    model_name = None
    api_key = None

    if choice == "1":
        # A few common local models, for reference
        env_model = os.getenv("OLLAMA_MODEL")
        if env_model:
            print(f"\n偵測到預設模型: {env_model}")  # "Detected default model"
            model_name = input(f"輸入本地模型名稱 (直接按 Enter 使用 {env_model}): ").strip()  # "Enter a local model name (press Enter to use the default)"
            if not model_name:
                model_name = env_model
        else:
            print("\n可用本地模型例子: llama3, gemma3:1b, mistral, phi3")  # "Examples of available local models"
            model_name = input("輸入本地模型名稱: ").strip()  # "Enter a local model name"

        # Ensure the local model name carries a provider prefix (required by LiteLLM)
        if "/" not in model_name:
            model_name = f"ollama/{model_name}"
            print(f"自動修正為 LiteLM 格式: {model_name}")  # "Auto-corrected to LiteLLM format"

        lm = dspy.LM(model_name, cache=False)

    elif choice == "2":
        print("\nOpenAI 模型選擇:")  # "OpenAI model selection"
        print("1) openai/gpt-5.2\n2) openai/gpt-4o\n3) openai/gpt-4o-mini\n4) openai/o4-mini\n5) openai/o3-mini")
        idx = input("選擇模型 (1-5): ").strip()  # "Choose a model (1-5)"
        mapping = {
            "1": "openai/gpt-5.2",
            "2": "openai/gpt-4o",
            "3": "openai/gpt-4o-mini",
            "4": "openai/o4-mini",
            "5": "openai/o3-mini",
        }
        # ... (remainder of the function not shown in this view) ...
```
Copilot AI · Jan 7, 2026

Security issue: API keys are prompted for via terminal input, which may be logged in command history or visible on screen. Consider using the getpass module for secure password/API-key input, preventing exposure in terminal history and over-the-shoulder viewing.
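A getpass-based prompt of the kind suggested could look like this. A sketch only; the environment-variable name is an assumption:

```python
import getpass
import os

# Prefer the environment; otherwise fall back to a no-echo prompt so the
# key never appears on screen or in the terminal scrollback.
api_key = os.getenv("OPENAI_API_KEY") or getpass.getpass("Enter your OpenAI API key: ")
```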
Copilot AI · Jan 7, 2026

The default model fallback uses "openai/gpt-5.2", which does not exist. If a user provides an invalid choice, the application will fail when it tries to use this non-existent model. Use a real, existing model as the fallback, such as "openai/gpt-4o" or "openai/gpt-4o-mini".
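One way to apply the fix, dropping the non-existent entry and defaulting via dict.get; a sketch, not the PR's code:

```python
idx = input("Choose a model (1-4): ").strip()
mapping = {
    "1": "openai/gpt-4o",
    "2": "openai/gpt-4o-mini",
    "3": "openai/o4-mini",
    "4": "openai/o3-mini",
}
# dict.get falls back to a real model instead of the non-existent "openai/gpt-5.2"
model_name = mapping.get(idx, "openai/gpt-4o-mini")
```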
@@ -0,0 +1,17 @@

```python
import dspy
from .signature import InvestigatorSignature


class InvestigatorModule(dspy.Module):
    """
    DSPy module implementing the Investigator logic.
    """
    def __init__(self):
        super().__init__()
        self.investigate = dspy.ChainOfThought(InvestigatorSignature)

    def forward(self, system_prompt, user_query, history):
        return self.investigate(
            system_prompt=system_prompt,
            user_query=user_query,
            history=history
        )
```
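For orientation, a minimal invocation of the module might look like the following. This is a sketch: the model name and prompt strings are placeholders, and it assumes InvestigatorSignature declares a `response` output field, as app.py's use of `result.response` implies.

```python
import dspy

# Example model name; any LM chosen via configure_lm() would also work.
dspy.configure(lm=dspy.LM("ollama/llama3", cache=False))

investigator = InvestigatorModule()
prediction = investigator(
    system_prompt="You are the Atlas-World Investigator.",
    user_query="Summarize what this repository does.",
    history=dspy.History(messages=[]),
)
print(prediction.response)  # the signature's output field
```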
@@ -0,0 +1,12 @@

```python
import dspy


class InvestigatorSignature(dspy.Signature):
    """
    妳是 Atlas-World 的「調查者」(Investigator)。
    妳的職責是深入分析使用者的問題,結合背景資訊與對話歷史,
    ...
```

(The docstring reads, roughly: "You are the 'Investigator' of Atlas-World. Your duty is to analyze the user's question in depth, combining the background information and the conversation history, ..."; the rest of the signature is not shown.)
Comment on lines +5 to +6

Suggested change, replacing the feminine second-person pronoun 妳 with the standard 你:

```diff
-妳是 Atlas-World 的「調查者」(Investigator)。
-妳的職責是深入分析使用者的問題,結合背景資訊與對話歷史,
+你是 Atlas-World 的「調查者」(Investigator)。
+你的職責是深入分析使用者的問題,結合背景資訊與對話歷史,
```
@@ -0,0 +1,15 @@

```python
import dspy
from .signature import OppositionSignature


class OppositionModule(dspy.Module):
    """
    DSPy module providing counter-thinking and oversight logic.
    """
    def __init__(self):
        super().__init__()
        self.predictor = dspy.ChainOfThought(OppositionSignature)

    def forward(self, system_prompt, history):
        # history should be a dspy.History object
        result = self.predictor(system_prompt=system_prompt, history=history)
        return result.response
```
Suggested change:

```diff
-        return result.response
+        return result
```

This is the counterpart to the consistency comment on app.py above: returning the whole prediction lets the endpoint read `result.response` uniformly for both modules.
The package name "dotenv" in the dependencies is incorrect. The correct package name is "python-dotenv". The current specification will fail during installation, as the "dotenv" package (version 0.9.9) is outdated and is not the commonly used dotenv library.