From 64c2fa4cfb7b327d35f8ea63f39bc6287c4c6d98 Mon Sep 17 00:00:00 2001 From: Stuart Alexander Date: Thu, 18 Dec 2025 14:04:34 +0000 Subject: [PATCH 01/25] Clarify project title as a fork of Chronicle Updated project title to indicate it's a fork. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 34027891..a93dcd55 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Chronicle +# Chronicle (fork from https://github.com/chronicler-ai/chronicle) Self-hostable AI system that captures audio/video data from OMI devices and other sources to generate memories, action items, and contextual insights about your conversations and daily interactions. From 66dc505129010ea23112f34cd51d3dec1a392c36 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Thu, 18 Dec 2025 13:15:46 +0000 Subject: [PATCH 02/25] Fixed reading .env file and get keys from backend instead of seperate env.test --- tests/setup/test_env.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/tests/setup/test_env.py b/tests/setup/test_env.py index 929e83e2..7e3ca983 100644 --- a/tests/setup/test_env.py +++ b/tests/setup/test_env.py @@ -1,25 +1,26 @@ # Test Environment Configuration import os from pathlib import Path +from dotenv import load_dotenv # Load .env file from backends/advanced directory if it exists # This allows tests to work when run from VSCode or command line -def load_env_file(): - """Load environment variables from .env file if it exists.""" - # Look for .env in backends/advanced directory - env_file = Path(__file__).parent.parent.parent / "backends" / "advanced" / ".env" - if env_file.exists(): - with open(env_file) as f: - for line in f: - line = line.strip() - if line and not line.startswith('#') and '=' in line: - key, value = line.split('=', 1) - # Only set if not already in environment (CI takes precedence) - if key not in os.environ: - os.environ[key] = value +# def load_env_file(): 
+# """Load environment variables from .env file if it exists.""" +# # Look for .env in backends/advanced directory +# env_file = Path(__file__).parent.parent.parent / "backends" / "advanced" / ".env" +# if env_file.exists(): +# with open(env_file) as f: +# for line in f: +# line = line.strip() +# if line and not line.startswith('#') and '=' in line: +# key, value = line.split('=', 1) +# # Only set if not already in environment (CI takes precedence) +# if key not in os.environ: +# os.environ[key] = value # Load .env file (CI environment variables take precedence) -load_env_file() +# load_env_file() # Load .env from backends/advanced directory to get COMPOSE_PROJECT_NAME backend_env_path = Path(__file__).resolve().parents[2] / "backends" / "advanced" / ".env" From b0fb46cc04ef41235879b37c5390dbf28d347c06 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Thu, 11 Dec 2025 21:23:19 +0000 Subject: [PATCH 03/25] created settings page # Conflicts: # backends/advanced/webui/src/pages/Settings.tsx # backends/advanced/webui/src/services/api.ts --- .../src/advanced_omi_backend/app_factory.py | 10 + .../routers/api_router.py | 2 + .../routers/modules/__init__.py | 3 + .../routers/modules/settings_routes.py | 418 ++++++++ .../advanced_omi_backend/settings_manager.py | 422 ++++++++ .../advanced_omi_backend/settings_models.py | 252 +++++ .../advanced/webui/src/pages/Settings.tsx | 952 ++++++++++++++++++ backends/advanced/webui/src/services/api.ts | 48 + 8 files changed, 2107 insertions(+) create mode 100644 backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py create mode 100644 backends/advanced/src/advanced_omi_backend/settings_manager.py create mode 100644 backends/advanced/src/advanced_omi_backend/settings_models.py create mode 100644 backends/advanced/webui/src/pages/Settings.tsx diff --git a/backends/advanced/src/advanced_omi_backend/app_factory.py b/backends/advanced/src/advanced_omi_backend/app_factory.py index 7ccda184..1eba0df6 100644 --- 
a/backends/advanced/src/advanced_omi_backend/app_factory.py +++ b/backends/advanced/src/advanced_omi_backend/app_factory.py @@ -66,6 +66,16 @@ async def lifespan(app: FastAPI): application_logger.error(f"Failed to initialize Beanie: {e}") raise + # Initialize settings manager + try: + from advanced_omi_backend.settings_manager import init_settings_manager + settings_mgr = init_settings_manager(config.db) + await settings_mgr.initialize() + application_logger.info("โœ… Settings manager initialized and loaded from environment/database") + except Exception as e: + application_logger.error(f"Failed to initialize settings manager: {e}") + # Don't raise - use fallback to environment variables if settings manager fails + # Create admin user if needed try: await create_admin_user_if_needed() diff --git a/backends/advanced/src/advanced_omi_backend/routers/api_router.py b/backends/advanced/src/advanced_omi_backend/routers/api_router.py index 528713c0..e6abfe48 100644 --- a/backends/advanced/src/advanced_omi_backend/routers/api_router.py +++ b/backends/advanced/src/advanced_omi_backend/routers/api_router.py @@ -16,6 +16,7 @@ conversation_router, memory_router, queue_router, + settings_router, system_router, user_router, ) @@ -34,6 +35,7 @@ router.include_router(client_router) router.include_router(conversation_router) router.include_router(memory_router) +router.include_router(settings_router) router.include_router(system_router) router.include_router(queue_router) router.include_router(health_router) # Also include under /api for frontend compatibility diff --git a/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py b/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py index a5669b06..2cda0884 100644 --- a/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py +++ b/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py @@ -12,6 +12,7 @@ - audio_routes: Audio file uploads and processing - health_routes: 
Health check endpoints - websocket_routes: WebSocket connection handling +- settings_routes: Application settings management """ from .audio_routes import router as audio_router @@ -21,6 +22,7 @@ from .health_routes import router as health_router from .memory_routes import router as memory_router from .queue_routes import router as queue_router +from .settings_routes import router as settings_router from .system_routes import router as system_router from .user_routes import router as user_router from .websocket_routes import router as websocket_router @@ -33,6 +35,7 @@ "health_router", "memory_router", "queue_router", + "settings_router", "system_router", "user_router", "websocket_router", diff --git a/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py b/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py new file mode 100644 index 00000000..c490230b --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py @@ -0,0 +1,418 @@ +""" +Application settings management routes. + +Provides endpoints for reading and updating dynamic application settings. +Settings changes take effect within the cache TTL (default: 5 seconds). 
+""" + +import logging + +from fastapi import APIRouter, Depends, HTTPException + +from advanced_omi_backend.auth import current_active_user, current_superuser +from advanced_omi_backend.settings_manager import get_settings_manager, SettingsManager +from advanced_omi_backend.settings_models import ( + AllSettings, + AudioProcessingSettings, + ConversationSettings, + DiarizationSettings, + LLMSettings, + MiscSettings, + NetworkSettings, + ProviderSettings, + SpeechDetectionSettings, +) +from advanced_omi_backend.users import User + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/settings", tags=["settings"]) + + +# All Settings (Combined) + + +@router.get("", response_model=AllSettings) +async def get_all_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Get all application settings. + + Available to all authenticated users for read access. + """ + return await settings_mgr.get_all_settings() + + +@router.put("", response_model=AllSettings) +async def update_all_settings( + settings: AllSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update all application settings at once. + + Admin only. Changes take effect within the cache TTL. 
+ """ + await settings_mgr.update_all_settings(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_all_settings() + + +# Speech Detection Settings + + +@router.get("/speech-detection", response_model=SpeechDetectionSettings) +async def get_speech_detection_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get speech detection settings.""" + return await settings_mgr.get_speech_detection() + + +@router.put("/speech-detection", response_model=SpeechDetectionSettings) +async def update_speech_detection_settings( + settings: SpeechDetectionSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update speech detection settings. Admin only. + + These settings control when audio sessions are converted to conversations. + """ + await settings_mgr.update_speech_detection(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_speech_detection() + + +# Conversation Settings + + +@router.get("/conversation", response_model=ConversationSettings) +async def get_conversation_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get conversation management settings.""" + return await settings_mgr.get_conversation() + + +@router.put("/conversation", response_model=ConversationSettings) +async def update_conversation_settings( + settings: ConversationSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update conversation management settings. Admin only. + + Controls conversation timeouts, transcription buffering, and speaker enrollment. 
+ """ + await settings_mgr.update_conversation(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_conversation() + + +# Audio Processing Settings + + +@router.get("/audio-processing", response_model=AudioProcessingSettings) +async def get_audio_processing_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get audio processing settings.""" + return await settings_mgr.get_audio_processing() + + +@router.put("/audio-processing", response_model=AudioProcessingSettings) +async def update_audio_processing_settings( + settings: AudioProcessingSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update audio processing settings. Admin only. + + Controls audio cropping, silence removal, and segment duration. + """ + await settings_mgr.update_audio_processing(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_audio_processing() + + +# Diarization Settings + + +@router.get("/diarization", response_model=DiarizationSettings) +async def get_diarization_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get speaker diarization settings.""" + return await settings_mgr.get_diarization() + + +@router.put("/diarization", response_model=DiarizationSettings) +async def update_diarization_settings( + settings: DiarizationSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update speaker diarization settings. Admin only. + + Controls how speakers are identified and segments are separated. 
+ """ + await settings_mgr.update_diarization(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_diarization() + + +# LLM Settings + + +@router.get("/llm", response_model=LLMSettings) +async def get_llm_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get LLM provider and model settings.""" + return await settings_mgr.get_llm() + + +@router.put("/llm", response_model=LLMSettings) +async def update_llm_settings( + settings: LLMSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update LLM settings. Admin only. + + Controls which LLM provider and models to use for processing and chat. + """ + await settings_mgr.update_llm(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_llm() + + +# Provider Settings + + +@router.get("/providers", response_model=ProviderSettings) +async def get_provider_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get service provider settings.""" + return await settings_mgr.get_providers() + + +@router.put("/providers", response_model=ProviderSettings) +async def update_provider_settings( + settings: ProviderSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update service provider settings. Admin only. + + Controls which memory and transcription providers to use. 
+ """ + await settings_mgr.update_providers(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_providers() + + +# Network Settings + + +@router.get("/network", response_model=NetworkSettings) +async def get_network_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get network and public access settings.""" + return await settings_mgr.get_network() + + +@router.put("/network", response_model=NetworkSettings) +async def update_network_settings( + settings: NetworkSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update network settings. Admin only. + + Controls public endpoints, CORS, and network access configuration. + """ + await settings_mgr.update_network(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_network() + + +# Miscellaneous Settings + + +@router.get("/misc", response_model=MiscSettings) +async def get_misc_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get miscellaneous settings.""" + return await settings_mgr.get_misc() + + +@router.put("/misc", response_model=MiscSettings) +async def update_misc_settings( + settings: MiscSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update miscellaneous settings. Admin only. + + Controls debug options and telemetry. + """ + await settings_mgr.update_misc(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_misc() + + +# Cache Management + + +@router.post("/cache/invalidate") +async def invalidate_settings_cache( + category: str = None, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Invalidate settings cache. 
Admin only. + + Forces settings to reload from database on next access. + If category is provided, only invalidates that category. + """ + settings_mgr.invalidate_cache(category) + return { + "status": "success", + "message": f"Cache invalidated for {category if category else 'all settings'}", + } + + +# Infrastructure Status + + +@router.get("/infrastructure/status") +async def get_infrastructure_status( + current_user: User = Depends(current_active_user), +): + """ + Get infrastructure service connection status. + + Returns URLs and connection status for MongoDB, Redis, Qdrant, Neo4j. + """ + import os + from advanced_omi_backend.app_config import get_app_config + + config = get_app_config() + + status = { + "mongodb": { + "url": config.mongodb_uri, + "database": config.mongodb_database, + "connected": False, + }, + "redis": { + "url": config.redis_url, + "connected": False, + }, + "qdrant": { + "url": f"http://{config.qdrant_base_url}:{config.qdrant_port}", + "connected": False, + }, + "neo4j": { + "host": os.getenv("NEO4J_HOST", "neo4j-mem0"), + "user": os.getenv("NEO4J_USER", "neo4j"), + "connected": False, + }, + } + + # Check MongoDB + try: + await config.mongo_client.admin.command('ping') + status["mongodb"]["connected"] = True + except Exception as e: + logger.debug(f"MongoDB connection check failed: {e}") + + # Check Redis + try: + from advanced_omi_backend.controllers.queue_controller import redis_conn + redis_conn.ping() + status["redis"]["connected"] = True + except Exception as e: + logger.debug(f"Redis connection check failed: {e}") + + # Check Qdrant + try: + import httpx + async with httpx.AsyncClient() as client: + response = await client.get(f"{status['qdrant']['url']}/", timeout=2.0) + status["qdrant"]["connected"] = response.status_code == 200 + except Exception as e: + logger.debug(f"Qdrant connection check failed: {e}") + + # Neo4j check (optional service) + # We don't check Neo4j connection as it's optional and may not be configured + + 
return status + + +@router.get("/api-keys/status") +async def get_api_keys_status( + current_user: User = Depends(current_superuser), +): + """ + Get API keys configuration status. Admin only. + + Returns which API keys are configured (but not the actual keys). + """ + import os + + keys_status = { + "openai": { + "name": "OpenAI API Key", + "configured": bool(os.getenv("OPENAI_API_KEY")), + "env_var": "OPENAI_API_KEY", + }, + "deepgram": { + "name": "Deepgram API Key", + "configured": bool(os.getenv("DEEPGRAM_API_KEY")), + "env_var": "DEEPGRAM_API_KEY", + }, + "mistral": { + "name": "Mistral API Key", + "configured": bool(os.getenv("MISTRAL_API_KEY")), + "env_var": "MISTRAL_API_KEY", + }, + "hf_token": { + "name": "HuggingFace Token", + "configured": bool(os.getenv("HF_TOKEN")), + "env_var": "HF_TOKEN", + }, + "langfuse_public": { + "name": "Langfuse Public Key", + "configured": bool(os.getenv("LANGFUSE_PUBLIC_KEY")), + "env_var": "LANGFUSE_PUBLIC_KEY", + }, + "langfuse_secret": { + "name": "Langfuse Secret Key", + "configured": bool(os.getenv("LANGFUSE_SECRET_KEY")), + "env_var": "LANGFUSE_SECRET_KEY", + }, + "ngrok": { + "name": "Ngrok Auth Token", + "configured": bool(os.getenv("NGROK_AUTHTOKEN")), + "env_var": "NGROK_AUTHTOKEN", + }, + } + + return keys_status diff --git a/backends/advanced/src/advanced_omi_backend/settings_manager.py b/backends/advanced/src/advanced_omi_backend/settings_manager.py new file mode 100644 index 00000000..601ce0c3 --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/settings_manager.py @@ -0,0 +1,422 @@ +""" +Dynamic settings manager with MongoDB storage and caching. + +Settings are loaded from environment variables on first initialization, +then stored in MongoDB. Subsequent loads use MongoDB as the source of truth. +Changes take effect within the cache TTL (default: 5 seconds). 
+""" + +import logging +import os +import time +from typing import Dict, Any, Optional, TypeVar, Type + +from motor.motor_asyncio import AsyncIOMotorDatabase + +from advanced_omi_backend.settings_models import ( + AllSettings, + AudioProcessingSettings, + ConversationSettings, + DiarizationSettings, + LLMSettings, + MiscSettings, + NetworkSettings, + ProviderSettings, + SpeechDetectionSettings, + TranscriptionProvider, +) + +logger = logging.getLogger(__name__) + +T = TypeVar('T') + + +class SettingsManager: + """ + Manages dynamic application settings with MongoDB storage and caching. + + Settings are stored in the 'application_settings' collection with documents: + { + "_id": "speech_detection", # Setting category + "values": {...}, # Pydantic model dict + "updated_at": datetime, + "updated_by": "user_id or 'system'" + } + """ + + def __init__(self, db: AsyncIOMotorDatabase, cache_ttl: int = 5): + """ + Initialize settings manager. + + Args: + db: MongoDB database instance + cache_ttl: Cache TTL in seconds (default: 5) + """ + self.db = db + self.settings_col = db["application_settings"] + self.cache_ttl = cache_ttl + + # Cache storage + self._cache: Dict[str, Any] = {} + self._cache_time: Dict[str, float] = {} + + # Initialization flag + self._initialized = False + + async def initialize(self): + """ + Initialize settings from environment variables if not already in MongoDB. + + This is called once on application startup to migrate existing env vars + to the database. 
+ """ + if self._initialized: + return + + logger.info("Initializing settings manager...") + + # Check if settings already exist in DB + count = await self.settings_col.count_documents({}) + + if count == 0: + # First time setup - load from env vars + logger.info("No settings found in database, initializing from environment variables") + await self._initialize_from_env() + else: + logger.info(f"Found {count} setting categories in database") + + self._initialized = True + + async def _initialize_from_env(self): + """Initialize all settings from environment variables.""" + + # Speech detection + speech_detection = SpeechDetectionSettings( + min_words=int(os.getenv("SPEECH_DETECTION_MIN_WORDS", "5")), + min_confidence=float(os.getenv("SPEECH_DETECTION_MIN_CONFIDENCE", "0.5")), + min_duration=float(os.getenv("SPEECH_DETECTION_MIN_DURATION", "10.0")), + ) + await self._save_to_db("speech_detection", speech_detection.dict(), "system") + + # Conversation settings + conversation = ConversationSettings( + transcription_buffer_seconds=float(os.getenv("TRANSCRIPTION_BUFFER_SECONDS", "120")), + speech_inactivity_threshold=float(os.getenv("SPEECH_INACTIVITY_THRESHOLD_SECONDS", "60")), + new_conversation_timeout_minutes=float(os.getenv("NEW_CONVERSATION_TIMEOUT_MINUTES", "1.5")), + record_only_enrolled_speakers=os.getenv("RECORD_ONLY_ENROLLED_SPEAKERS", "true").lower() == "true", + ) + await self._save_to_db("conversation", conversation.dict(), "system") + + # Audio processing + audio_processing = AudioProcessingSettings( + audio_cropping_enabled=os.getenv("AUDIO_CROPPING_ENABLED", "true").lower() == "true", + min_speech_segment_duration=float(os.getenv("MIN_SPEECH_SEGMENT_DURATION", "1.0")), + cropping_context_padding=float(os.getenv("CROPPING_CONTEXT_PADDING", "0.1")), + ) + await self._save_to_db("audio_processing", audio_processing.dict(), "system") + + # Diarization (load from existing config or defaults) + from advanced_omi_backend.config import _diarization_settings + if 
_diarization_settings: + diarization = DiarizationSettings(**_diarization_settings) + else: + diarization = DiarizationSettings() + await self._save_to_db("diarization", diarization.dict(), "system") + + # LLM settings + llm = LLMSettings( + llm_provider=os.getenv("LLM_PROVIDER", "openai"), + openai_model=os.getenv("OPENAI_MODEL", "gpt-4o-mini"), + chat_llm_model=os.getenv("CHAT_LLM_MODEL"), + chat_temperature=float(os.getenv("CHAT_TEMPERATURE", "0.7")), + ollama_model=os.getenv("OLLAMA_MODEL", "llama3.1:latest"), + ollama_embedder_model=os.getenv("OLLAMA_EMBEDDER_MODEL", "nomic-embed-text:latest"), + ) + await self._save_to_db("llm", llm.dict(), "system") + + # Provider settings + transcription_provider = os.getenv("TRANSCRIPTION_PROVIDER", "auto") + # Map empty string to "auto" + if not transcription_provider: + transcription_provider = "auto" + + providers = ProviderSettings( + memory_provider=os.getenv("MEMORY_PROVIDER", "chronicle"), + transcription_provider=transcription_provider, + ) + await self._save_to_db("providers", providers.dict(), "system") + + # Network settings + network = NetworkSettings( + host_ip=os.getenv("HOST_IP", "localhost"), + backend_public_port=int(os.getenv("BACKEND_PUBLIC_PORT", "8000")), + webui_port=int(os.getenv("WEBUI_PORT", "5173")), + cors_origins=os.getenv("CORS_ORIGINS", "http://localhost:5173,http://localhost:3000"), + ) + await self._save_to_db("network", network.dict(), "system") + + # Misc settings + misc = MiscSettings( + debug_dir=os.getenv("DEBUG_DIR", "./data/debug_dir"), + langfuse_enable_telemetry=os.getenv("LANGFUSE_ENABLE_TELEMETRY", "false").lower() == "true", + ) + await self._save_to_db("misc", misc.dict(), "system") + + logger.info("โœ… Initialized all settings from environment variables") + + async def _get_from_cache_or_db( + self, + key: str, + model_class: Type[T], + ) -> T: + """ + Get settings from cache or database. 
+ + Args: + key: Settings category key + model_class: Pydantic model class + + Returns: + Instance of model_class with current settings + """ + # Check cache freshness + if key in self._cache: + age = time.time() - self._cache_time.get(key, 0) + if age < self.cache_ttl: + return self._cache[key] + + # Load from DB + doc = await self.settings_col.find_one({"_id": key}) + + if doc and "values" in doc: + settings = model_class(**doc["values"]) + else: + # Use defaults if not found + logger.warning(f"Settings '{key}' not found in database, using defaults") + settings = model_class() + + # Update cache + self._cache[key] = settings + self._cache_time[key] = time.time() + + return settings + + async def _save_to_db(self, key: str, values: dict, updated_by: str = "user"): + """ + Save settings to database. + + Args: + key: Settings category key + values: Settings values as dict + updated_by: User ID or 'system' + """ + from datetime import datetime + + await self.settings_col.update_one( + {"_id": key}, + { + "$set": { + "values": values, + "updated_at": datetime.utcnow(), + "updated_by": updated_by, + } + }, + upsert=True, + ) + + async def _update_settings( + self, + key: str, + settings: T, + updated_by: str = "user", + ): + """ + Update settings in database and cache. 
+ + Args: + key: Settings category key + settings: Pydantic model instance + updated_by: User ID or 'system' + """ + # Save to DB + await self._save_to_db(key, settings.dict(), updated_by) + + # Update cache immediately + self._cache[key] = settings + self._cache_time[key] = time.time() + + logger.info(f"Updated settings '{key}' (by: {updated_by})") + + # Speech Detection Settings + + async def get_speech_detection(self) -> SpeechDetectionSettings: + """Get speech detection settings.""" + return await self._get_from_cache_or_db("speech_detection", SpeechDetectionSettings) + + async def update_speech_detection( + self, + settings: SpeechDetectionSettings, + updated_by: str = "user", + ): + """Update speech detection settings.""" + await self._update_settings("speech_detection", settings, updated_by) + + # Conversation Settings + + async def get_conversation(self) -> ConversationSettings: + """Get conversation management settings.""" + return await self._get_from_cache_or_db("conversation", ConversationSettings) + + async def update_conversation( + self, + settings: ConversationSettings, + updated_by: str = "user", + ): + """Update conversation management settings.""" + await self._update_settings("conversation", settings, updated_by) + + # Audio Processing Settings + + async def get_audio_processing(self) -> AudioProcessingSettings: + """Get audio processing settings.""" + return await self._get_from_cache_or_db("audio_processing", AudioProcessingSettings) + + async def update_audio_processing( + self, + settings: AudioProcessingSettings, + updated_by: str = "user", + ): + """Update audio processing settings.""" + await self._update_settings("audio_processing", settings, updated_by) + + # Diarization Settings + + async def get_diarization(self) -> DiarizationSettings: + """Get diarization settings.""" + return await self._get_from_cache_or_db("diarization", DiarizationSettings) + + async def update_diarization( + self, + settings: DiarizationSettings, + updated_by: 
str = "user", + ): + """Update diarization settings.""" + await self._update_settings("diarization", settings, updated_by) + + # LLM Settings + + async def get_llm(self) -> LLMSettings: + """Get LLM settings.""" + return await self._get_from_cache_or_db("llm", LLMSettings) + + async def update_llm( + self, + settings: LLMSettings, + updated_by: str = "user", + ): + """Update LLM settings.""" + await self._update_settings("llm", settings, updated_by) + + # Provider Settings + + async def get_providers(self) -> ProviderSettings: + """Get provider settings.""" + return await self._get_from_cache_or_db("providers", ProviderSettings) + + async def update_providers( + self, + settings: ProviderSettings, + updated_by: str = "user", + ): + """Update provider settings.""" + await self._update_settings("providers", settings, updated_by) + + # Network Settings + + async def get_network(self) -> NetworkSettings: + """Get network settings.""" + return await self._get_from_cache_or_db("network", NetworkSettings) + + async def update_network( + self, + settings: NetworkSettings, + updated_by: str = "user", + ): + """Update network settings.""" + await self._update_settings("network", settings, updated_by) + + # Misc Settings + + async def get_misc(self) -> MiscSettings: + """Get miscellaneous settings.""" + return await self._get_from_cache_or_db("misc", MiscSettings) + + async def update_misc( + self, + settings: MiscSettings, + updated_by: str = "user", + ): + """Update miscellaneous settings.""" + await self._update_settings("misc", settings, updated_by) + + # Combined Settings + + async def get_all_settings(self) -> AllSettings: + """Get all settings combined.""" + return AllSettings( + speech_detection=await self.get_speech_detection(), + conversation=await self.get_conversation(), + audio_processing=await self.get_audio_processing(), + diarization=await self.get_diarization(), + llm=await self.get_llm(), + providers=await self.get_providers(), + network=await 
self.get_network(), + misc=await self.get_misc(), + ) + + async def update_all_settings( + self, + settings: AllSettings, + updated_by: str = "user", + ): + """Update all settings at once.""" + await self.update_speech_detection(settings.speech_detection, updated_by) + await self.update_conversation(settings.conversation, updated_by) + await self.update_audio_processing(settings.audio_processing, updated_by) + await self.update_diarization(settings.diarization, updated_by) + await self.update_llm(settings.llm, updated_by) + await self.update_providers(settings.providers, updated_by) + await self.update_network(settings.network, updated_by) + await self.update_misc(settings.misc, updated_by) + + def invalidate_cache(self, key: Optional[str] = None): + """ + Force settings to reload from database on next access. + + Args: + key: Specific settings category to invalidate, or None for all + """ + if key: + self._cache_time[key] = 0 + logger.info(f"Invalidated cache for '{key}'") + else: + self._cache_time.clear() + logger.info("Invalidated all settings cache") + + +# Global settings manager instance (initialized in main.py) +_settings_manager: Optional[SettingsManager] = None + + +def init_settings_manager(db: AsyncIOMotorDatabase): + """Initialize the global settings manager.""" + global _settings_manager + _settings_manager = SettingsManager(db) + return _settings_manager + + +def get_settings_manager() -> SettingsManager: + """Get the global settings manager instance.""" + if _settings_manager is None: + raise RuntimeError("Settings manager not initialized. Call init_settings_manager() first.") + return _settings_manager diff --git a/backends/advanced/src/advanced_omi_backend/settings_models.py b/backends/advanced/src/advanced_omi_backend/settings_models.py new file mode 100644 index 00000000..42da80f8 --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/settings_models.py @@ -0,0 +1,252 @@ +""" +Pydantic models for dynamic application settings. 
These settings can be changed by users through the UI and take effect
without requiring a server restart (within the cache TTL).
"""

from enum import Enum
from typing import Optional

from pydantic import BaseModel, Field, validator


class LLMProvider(str, Enum):
    """Supported LLM providers."""
    OPENAI = "openai"
    OLLAMA = "ollama"


class MemoryProvider(str, Enum):
    """Supported memory providers."""
    CHRONICLE = "chronicle"
    OPENMEMORY_MCP = "openmemory_mcp"
    MYCELIA = "mycelia"


class TranscriptionProvider(str, Enum):
    """Supported transcription providers."""
    DEEPGRAM = "deepgram"
    MISTRAL = "mistral"
    PARAKEET = "parakeet"
    # AUTO defers provider choice to the backend's auto-selection logic.
    AUTO = "auto"


class DiarizationSource(str, Enum):
    """Supported diarization sources."""
    PYANNOTE = "pyannote"
    DEEPGRAM = "deepgram"


class SpeechDetectionSettings(BaseModel):
    """Speech detection settings for conversation creation."""

    min_words: int = Field(
        default=5,
        ge=1,
        le=100,
        description="Minimum words required to create a conversation"
    )
    min_confidence: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description="Minimum word confidence threshold (0.0-1.0)"
    )
    min_duration: float = Field(
        default=10.0,
        ge=0.0,
        description="Minimum speech duration in seconds"
    )


class ConversationSettings(BaseModel):
    """Conversation management settings."""

    transcription_buffer_seconds: float = Field(
        default=120.0,
        ge=10.0,
        le=600.0,
        description="Trigger transcription every N seconds"
    )
    speech_inactivity_threshold: float = Field(
        default=60.0,
        ge=10.0,
        le=600.0,
        description="Close conversation after N seconds of no speech"
    )
    new_conversation_timeout_minutes: float = Field(
        default=1.5,
        ge=0.1,
        le=60.0,
        description="Timeout for creating new conversations (minutes)"
    )
    record_only_enrolled_speakers: bool = Field(
        default=True,
        description="Only create conversations when enrolled speakers are detected"
    )


class AudioProcessingSettings(BaseModel):
    """Audio processing settings."""

    audio_cropping_enabled: bool = Field(
        default=True,
        description="Enable automatic silence removal from audio"
    )
    min_speech_segment_duration: float = Field(
        default=1.0,
        ge=0.1,
        le=10.0,
        description="Minimum speech segment duration in seconds"
    )
    cropping_context_padding: float = Field(
        default=0.1,
        ge=0.0,
        le=1.0,
        description="Context padding around speech segments"
    )


class DiarizationSettings(BaseModel):
    """Speaker diarization settings."""

    diarization_source: DiarizationSource = Field(
        default=DiarizationSource.PYANNOTE,
        description="Diarization service to use"
    )
    similarity_threshold: float = Field(
        default=0.15,
        ge=0.0,
        le=1.0,
        description="Speaker similarity threshold"
    )
    min_duration: float = Field(
        default=0.5,
        ge=0.0,
        description="Minimum segment duration"
    )
    collar: float = Field(
        default=2.0,
        ge=0.0,
        description="Collar for segment merging (seconds)"
    )
    min_duration_off: float = Field(
        default=1.5,
        ge=0.0,
        description="Minimum silence duration between segments"
    )
    min_speakers: int = Field(
        default=2,
        ge=1,
        le=10,
        description="Minimum number of speakers"
    )
    max_speakers: int = Field(
        default=6,
        ge=1,
        le=20,
        description="Maximum number of speakers"
    )

    # NOTE(review): `validator` is the Pydantic v1 API (deprecated in v2,
    # where this would be `field_validator`) — confirm the project's pydantic
    # version before migrating.
    @validator('max_speakers')
    def validate_max_speakers(cls, v, values):
        """Ensure max_speakers >= min_speakers."""
        # `values` only contains fields that already validated, hence the
        # membership check before comparing.
        if 'min_speakers' in values and v < values['min_speakers']:
            raise ValueError('max_speakers must be >= min_speakers')
        return v


class LLMSettings(BaseModel):
    """LLM provider and model settings."""

    llm_provider: LLMProvider = Field(
        default=LLMProvider.OPENAI,
        description="LLM provider to use"
    )
    openai_model: str = Field(
        default="gpt-4o-mini",
        description="OpenAI model for general tasks"
    )
    chat_llm_model: Optional[str] = Field(
        default=None,
        description="Model for chat (defaults to openai_model if not set)"
    )
    chat_temperature: float = Field(
        default=0.7,
        ge=0.0,
        le=2.0,
        description="Temperature for chat responses"
    )
    ollama_model: Optional[str] = Field(
        default="llama3.1:latest",
        description="Ollama model name"
    )
    ollama_embedder_model: Optional[str] = Field(
        default="nomic-embed-text:latest",
        description="Ollama embedder model name"
    )


class ProviderSettings(BaseModel):
    """Service provider selection settings."""

    memory_provider: MemoryProvider = Field(
        default=MemoryProvider.CHRONICLE,
        description="Memory provider to use"
    )
    transcription_provider: TranscriptionProvider = Field(
        default=TranscriptionProvider.AUTO,
        description="Transcription provider (auto-selects if 'auto')"
    )


class NetworkSettings(BaseModel):
    """Network and public access settings."""

    host_ip: str = Field(
        default="localhost",
        description="Public IP/hostname for browser access"
    )
    backend_public_port: int = Field(
        default=8000,
        ge=1,
        le=65535,
        description="Backend API public port"
    )
    webui_port: int = Field(
        default=5173,
        ge=1,
        le=65535,
        description="WebUI port"
    )
    cors_origins: str = Field(
        default="http://localhost:5173,http://localhost:3000,http://127.0.0.1:5173,http://127.0.0.1:3000",
        description="Comma-separated list of CORS origins"
    )


class MiscSettings(BaseModel):
    """Miscellaneous settings."""

    debug_dir: str = Field(
        default="./data/debug_dir",
        description="Directory for debug files"
    )
    langfuse_enable_telemetry: bool = Field(
        default=False,
        description="Enable Langfuse telemetry"
    )


class AllSettings(BaseModel):
    """Combined model for all application settings."""

    speech_detection: SpeechDetectionSettings = Field(default_factory=SpeechDetectionSettings)
    conversation: ConversationSettings = Field(default_factory=ConversationSettings)
    audio_processing: AudioProcessingSettings = Field(default_factory=AudioProcessingSettings)
diarization: DiarizationSettings = Field(default_factory=DiarizationSettings) + llm: LLMSettings = Field(default_factory=LLMSettings) + providers: ProviderSettings = Field(default_factory=ProviderSettings) + network: NetworkSettings = Field(default_factory=NetworkSettings) + misc: MiscSettings = Field(default_factory=MiscSettings) diff --git a/backends/advanced/webui/src/pages/Settings.tsx b/backends/advanced/webui/src/pages/Settings.tsx new file mode 100644 index 00000000..880f2b4f --- /dev/null +++ b/backends/advanced/webui/src/pages/Settings.tsx @@ -0,0 +1,952 @@ +import { useState, useEffect } from 'react' +import { + Settings as SettingsIcon, + Key, + Copy, + Trash2, + RefreshCw, + CheckCircle, + AlertCircle, + Save, + Server, + MessageSquare, + Mic, + Database, + Settings2, + Shield, +} from 'lucide-react' +import { useAuth } from '../contexts/AuthContext' +import { settingsApi } from '../services/api' + +type Tab = 'core-infra' | 'api-keys' | 'mcp-key' | 'memory' | 'speech' | 'conversations' | 'other' + +interface Message { + type: 'success' | 'error' + text: string +} + +export default function Settings() { + const { user } = useAuth() + const [activeTab, setActiveTab] = useState('core-infra') + + // MCP Key state + const [apiKey, setApiKey] = useState(null) + const [apiKeyCreatedAt, setApiKeyCreatedAt] = useState(null) + const [loading, setLoading] = useState(false) + const [copied, setCopied] = useState(false) + + // Infrastructure status state + const [infraStatus, setInfraStatus] = useState(null) + const [infraLoading, setInfraLoading] = useState(false) + + // API Keys status state + const [apiKeysStatus, setApiKeysStatus] = useState(null) + const [apiKeysLoading, setApiKeysLoading] = useState(false) + + // Application settings state + const [appSettings, setAppSettings] = useState(null) + const [appSettingsLoading, setAppSettingsLoading] = useState(false) + + const [message, setMessage] = useState(null) + + useEffect(() => { + loadApiKeyInfo() + }, 
[user]) + + useEffect(() => { + if (activeTab === 'core-infra' && !infraStatus) { + loadInfrastructureStatus() + } else if (activeTab === 'api-keys' && !apiKeysStatus) { + loadApiKeysStatus() + } else if (['memory', 'speech', 'conversations', 'other'].includes(activeTab) && !appSettings) { + loadApplicationSettings() + } + }, [activeTab]) + + const loadApiKeyInfo = () => { + if (user?.api_key) { + setApiKey(user.api_key) + setApiKeyCreatedAt(user.api_key_created_at || null) + } + } + + const loadInfrastructureStatus = async () => { + try { + setInfraLoading(true) + const response = await settingsApi.getInfrastructureStatus() + setInfraStatus(response.data) + } catch (error: any) { + console.error('Failed to load infrastructure status:', error) + showMessage('error', 'Failed to load infrastructure status') + } finally { + setInfraLoading(false) + } + } + + const loadApiKeysStatus = async () => { + try { + setApiKeysLoading(true) + const response = await settingsApi.getApiKeysStatus() + setApiKeysStatus(response.data) + } catch (error: any) { + console.error('Failed to load API keys status:', error) + showMessage('error', 'Failed to load API keys status') + } finally { + setApiKeysLoading(false) + } + } + + const loadApplicationSettings = async () => { + try { + setAppSettingsLoading(true) + const response = await settingsApi.getAllSettings() + setAppSettings(response.data) + } catch (error: any) { + console.error('Failed to load application settings:', error) + showMessage('error', 'Failed to load application settings') + } finally { + setAppSettingsLoading(false) + } + } + + const generateApiKey = async () => { + try { + setLoading(true) + setMessage(null) + + const response = await settingsApi.generateApiKey() + + setApiKey(response.data.api_key) + setApiKeyCreatedAt(response.data.created_at) + showMessage('success', 'MCP API key generated successfully!') + } catch (error: any) { + console.error('Failed to generate MCP API key:', error) + showMessage('error', 
error.response?.data?.detail || 'Failed to generate MCP API key') + } finally { + setLoading(false) + } + } + + const revokeApiKey = async () => { + if ( + !confirm( + 'Are you sure you want to revoke your MCP API key? This will break any existing MCP client integrations.' + ) + ) { + return + } + + try { + setLoading(true) + setMessage(null) + + await settingsApi.revokeApiKey() + + setApiKey(null) + setApiKeyCreatedAt(null) + showMessage('success', 'MCP API key revoked successfully') + } catch (error: any) { + console.error('Failed to revoke MCP API key:', error) + showMessage('error', error.response?.data?.detail || 'Failed to revoke MCP API key') + } finally { + setLoading(false) + } + } + + const copyToClipboard = async () => { + if (!apiKey) return + + try { + await navigator.clipboard.writeText(apiKey) + setCopied(true) + setTimeout(() => setCopied(false), 2000) + } catch (error) { + console.error('Failed to copy:', error) + } + } + + const showMessage = (type: 'success' | 'error', text: string) => { + setMessage({ type, text }) + setTimeout(() => setMessage(null), 3000) + } + + const formatDate = (dateString: string) => { + return new Date(dateString).toLocaleString() + } + + const updateCategorySettings = async (category: string, categorySettings: any) => { + try { + setAppSettingsLoading(true) + setMessage(null) + + const updateMethods: Record Promise> = { + speech_detection: settingsApi.updateSpeechDetection, + conversation: settingsApi.updateConversation, + audio_processing: settingsApi.updateAudioProcessing, + diarization: settingsApi.updateDiarization, + llm: settingsApi.updateLLM, + providers: settingsApi.updateProviders, + network: settingsApi.updateNetwork, + misc: settingsApi.updateMisc, + } + + const updateMethod = updateMethods[category] + if (!updateMethod) { + throw new Error(`Unknown category: ${category}`) + } + + await updateMethod(categorySettings) + await loadApplicationSettings() + + showMessage('success', `Settings updated 
successfully!`) + } catch (error: any) { + console.error(`Failed to update ${category} settings:`, error) + showMessage( + 'error', + error.response?.data?.detail || `Failed to update ${category} settings` + ) + } finally { + setAppSettingsLoading(false) + } + } + + const renderSettingsField = ( + category: string, + key: string, + value: any, + label: string, + description?: string, + type: 'number' | 'boolean' | 'text' | 'select' = 'text', + options?: { value: string; label: string }[] + ) => { + const fieldId = `${category}_${key}` + + const handleChange = (newValue: any) => { + setAppSettings((prev: any) => ({ + ...prev, + [category]: { + ...prev[category], + [key]: newValue, + }, + })) + } + + if (type === 'boolean') { + return ( +
+ handleChange(e.target.checked)} + className="mt-1 h-4 w-4 rounded border-gray-300 dark:border-gray-600 text-blue-600 focus:ring-blue-500" + /> +
+ + {description && ( +

{description}

+ )} +
+
+ ) + } + + if (type === 'select') { + return ( +
+ + {description &&

{description}

} + +
+ ) + } + + return ( +
+ + {description &&

{description}

} + handleChange(type === 'number' ? parseFloat(e.target.value) : e.target.value)} + step={type === 'number' ? 'any' : undefined} + className="block w-full rounded-md border-gray-300 dark:border-gray-600 bg-white dark:bg-gray-700 text-gray-900 dark:text-gray-100 shadow-sm focus:border-blue-500 focus:ring-blue-500 sm:text-sm" + /> +
+ ) + } + + const tabs = [ + { id: 'core-infra' as Tab, label: 'Core Infra', icon: Server, adminOnly: false }, + { id: 'api-keys' as Tab, label: 'API Keys', icon: Shield, adminOnly: true }, + { id: 'mcp-key' as Tab, label: 'MCP Key', icon: Key, adminOnly: false }, + { id: 'memory' as Tab, label: 'Memory', icon: Database, adminOnly: true }, + { id: 'speech' as Tab, label: 'Speech', icon: Mic, adminOnly: true }, + { id: 'conversations' as Tab, label: 'Conversations', icon: MessageSquare, adminOnly: true }, + { id: 'other' as Tab, label: 'Other', icon: Settings2, adminOnly: true }, + ] + + return ( +
+ {/* Header */} +
+ +

Settings

+
+ + {/* Message Display */} + {message && ( +
+
+ {message.type === 'success' ? ( + + ) : ( + + )} +

+ {message.text} +

+
+
+ )} + + {/* Tabs */} +
+ +
+ + {/* Tab Content */} +
+ {/* Core Infrastructure */} + {activeTab === 'core-infra' && ( +
+
+

+ Core Infrastructure +

+ +
+ + {infraLoading && !infraStatus ? ( +
+ +

Loading infrastructure status...

+
+ ) : infraStatus ? ( +
+ {Object.entries(infraStatus).map(([service, info]: [string, any]) => ( +
+
+

+ {service} +

+ + {info.connected ? 'Connected' : 'Disconnected'} + +
+
+ {info.url && ( +

+ URL:{' '} + + {info.url} + +

+ )} + {info.host && ( +

+ Host: {info.host} +

+ )} + {info.database && ( +

+ Database: {info.database} +

+ )} + {info.user && ( +

+ User: {info.user} +

+ )} +
+
+ ))} +
+ ) : ( +
+ +

+ Failed to load infrastructure status +

+ +
+ )} +
+ )} + + {/* API Keys */} + {activeTab === 'api-keys' && ( +
+

+ External Service API Keys +

+ +
+

+ Note: API keys are configured via environment variables and require + a server restart to change. This page shows which keys are currently configured. +

+
+ + {apiKeysLoading ? ( +
+ +

Loading API keys status...

+
+ ) : apiKeysStatus ? ( +
+ {Object.entries(apiKeysStatus).map(([key, info]: [string, any]) => ( +
+
+

{info.name}

+

+ + {info.env_var} + +

+
+ + {info.configured ? 'Configured' : 'Not Set'} + +
+ ))} +
+ ) : ( +
+ +

Failed to load API keys status

+
+ )} +
+ )} + + {/* MCP Key */} + {activeTab === 'mcp-key' && ( +
+

+ MCP API Key +

+ +

+ Generate an API key for Model Context Protocol (MCP) clients like Claude Desktop, + Cursor, or Windsurf to access your conversations. +

+ + {apiKey ? ( +
+
+
+ + Current MCP API Key + + {apiKeyCreatedAt && ( + + Created: {formatDate(apiKeyCreatedAt)} + + )} +
+ +
+ + {apiKey} + + +
+ +
+

+ MCP Server URL:{' '} + + http://your-server:8000/mcp/conversations/sse + +
+ Authorization: Bearer {apiKey} +

+
+
+ +
+ + + +
+
+ ) : ( +
+ +

No MCP API key generated yet

+ +
+ )} +
+ )} + + {/* Memory Settings */} + {activeTab === 'memory' && appSettings && ( +
+

+ Memory Settings +

+ +
+ {renderSettingsField( + 'providers', + 'memory_provider', + appSettings.providers.memory_provider, + 'Memory Provider', + 'Choose where memories are stored and processed', + 'select', + [ + { value: 'chronicle', label: 'Chronicle (Default)' }, + { value: 'openmemory_mcp', label: 'OpenMemory MCP' }, + { value: 'mycelia', label: 'Mycelia' }, + ] + )} +
+ +
+ +
+
+ )} + + {/* Speech Settings */} + {activeTab === 'speech' && appSettings && ( +
+

+ Speech & Audio Settings +

+ +
+
+

+ Speech Detection +

+
+ {renderSettingsField( + 'speech_detection', + 'min_words', + appSettings.speech_detection.min_words, + 'Minimum Words', + 'Minimum words required to create a conversation', + 'number' + )} + {renderSettingsField( + 'speech_detection', + 'min_confidence', + appSettings.speech_detection.min_confidence, + 'Minimum Confidence', + 'Word confidence threshold (0.0-1.0)', + 'number' + )} + {renderSettingsField( + 'speech_detection', + 'min_duration', + appSettings.speech_detection.min_duration, + 'Minimum Duration (seconds)', + 'Minimum speech duration in seconds', + 'number' + )} +
+
+ +
+

+ Audio Processing +

+
+ {renderSettingsField( + 'audio_processing', + 'audio_cropping_enabled', + appSettings.audio_processing.audio_cropping_enabled, + 'Enable Audio Cropping', + 'Automatically remove silence from audio', + 'boolean' + )} + {renderSettingsField( + 'audio_processing', + 'min_speech_segment_duration', + appSettings.audio_processing.min_speech_segment_duration, + 'Min Speech Segment Duration', + 'Minimum duration for speech segments (seconds)', + 'number' + )} + {renderSettingsField( + 'audio_processing', + 'cropping_context_padding', + appSettings.audio_processing.cropping_context_padding, + 'Context Padding', + 'Padding around speech segments (0.0-1.0)', + 'number' + )} +
+
+ +
+

+ Transcription Provider +

+ {renderSettingsField( + 'providers', + 'transcription_provider', + appSettings.providers.transcription_provider, + 'Transcription Service', + 'Choose which service to use for speech-to-text', + 'select', + [ + { value: 'auto', label: 'Auto-detect' }, + { value: 'deepgram', label: 'Deepgram' }, + { value: 'mistral', label: 'Mistral' }, + { value: 'parakeet', label: 'Parakeet (Local)' }, + ] + )} +
+
+ +
+ +
+
+ )} + + {/* Conversations Settings */} + {activeTab === 'conversations' && appSettings && ( +
+

+ Conversation Settings +

+ +
+ {renderSettingsField( + 'conversation', + 'transcription_buffer_seconds', + appSettings.conversation.transcription_buffer_seconds, + 'Transcription Buffer (seconds)', + 'Trigger transcription every N seconds', + 'number' + )} + {renderSettingsField( + 'conversation', + 'speech_inactivity_threshold', + appSettings.conversation.speech_inactivity_threshold, + 'Speech Inactivity Threshold (seconds)', + 'Close conversation after N seconds of silence', + 'number' + )} + {renderSettingsField( + 'conversation', + 'new_conversation_timeout_minutes', + appSettings.conversation.new_conversation_timeout_minutes, + 'New Conversation Timeout (minutes)', + 'Timeout for creating new conversations', + 'number' + )} + {renderSettingsField( + 'conversation', + 'record_only_enrolled_speakers', + appSettings.conversation.record_only_enrolled_speakers, + 'Record Only Enrolled Speakers', + 'Only create conversations when enrolled speakers are detected', + 'boolean' + )} +
+ +
+ +
+
+ )} + + {/* Other Settings */} + {activeTab === 'other' && appSettings && ( +
+

+ Other Settings +

+ +
+
+

+ Speaker Diarization +

+
+ {renderSettingsField( + 'diarization', + 'diarization_source', + appSettings.diarization.diarization_source, + 'Diarization Source', + 'Service to use for speaker identification', + 'select', + [ + { value: 'pyannote', label: 'PyAnnote' }, + { value: 'deepgram', label: 'Deepgram' }, + ] + )} + {renderSettingsField( + 'diarization', + 'min_speakers', + appSettings.diarization.min_speakers, + 'Minimum Speakers', + 'Minimum number of speakers to detect', + 'number' + )} + {renderSettingsField( + 'diarization', + 'max_speakers', + appSettings.diarization.max_speakers, + 'Maximum Speakers', + 'Maximum number of speakers to detect', + 'number' + )} +
+
+ +
+

+ LLM Configuration +

+
+ {renderSettingsField( + 'llm', + 'llm_provider', + appSettings.llm.llm_provider, + 'LLM Provider', + 'Language model provider for memory extraction', + 'select', + [ + { value: 'openai', label: 'OpenAI' }, + { value: 'ollama', label: 'Ollama' }, + ] + )} + {renderSettingsField( + 'llm', + 'openai_model', + appSettings.llm.openai_model, + 'OpenAI Model', + 'Model to use for OpenAI requests', + 'text' + )} + {renderSettingsField( + 'llm', + 'chat_temperature', + appSettings.llm.chat_temperature, + 'Chat Temperature', + 'Temperature for chat responses (0.0-2.0)', + 'number' + )} +
+
+ +
+

+ Network & System +

+
+ {renderSettingsField( + 'network', + 'host_ip', + appSettings.network.host_ip, + 'Host IP', + 'Public IP or hostname for browser access', + 'text' + )} + {renderSettingsField( + 'network', + 'backend_public_port', + appSettings.network.backend_public_port, + 'Backend Port', + 'Public port for backend API', + 'number' + )} + {renderSettingsField( + 'misc', + 'langfuse_enable_telemetry', + appSettings.misc.langfuse_enable_telemetry, + 'Enable Langfuse Telemetry', + 'Enable telemetry for Langfuse', + 'boolean' + )} +
+
+
+ +
+ +
+
+ )} + + {/* Loading state for settings tabs */} + {['memory', 'speech', 'conversations', 'other'].includes(activeTab) && + appSettingsLoading && + !appSettings && ( +
+ +

Loading settings...

+
+ )} +
+
+ ) +} diff --git a/backends/advanced/webui/src/services/api.ts b/backends/advanced/webui/src/services/api.ts index 0d988a9d..0c246d53 100644 --- a/backends/advanced/webui/src/services/api.ts +++ b/backends/advanced/webui/src/services/api.ts @@ -268,4 +268,52 @@ export const speakerApi = { // Check speaker service status (admin only) getSpeakerServiceStatus: () => api.get('/api/speaker-service-status'), +} + +export const settingsApi = { + // Generate new API key for current user + generateApiKey: () => api.post('/api/users/me/api-key'), + + // Revoke current user's API key + revokeApiKey: () => api.delete('/api/users/me/api-key'), + + // Application settings (requires admin) + getAllSettings: () => api.get('/api/settings'), + updateAllSettings: (settings: any) => api.put('/api/settings', settings), + + // Individual setting categories + getSpeechDetection: () => api.get('/api/settings/speech-detection'), + updateSpeechDetection: (settings: any) => api.put('/api/settings/speech-detection', settings), + + getConversation: () => api.get('/api/settings/conversation'), + updateConversation: (settings: any) => api.put('/api/settings/conversation', settings), + + getAudioProcessing: () => api.get('/api/settings/audio-processing'), + updateAudioProcessing: (settings: any) => api.put('/api/settings/audio-processing', settings), + + getDiarization: () => api.get('/api/settings/diarization'), + updateDiarization: (settings: any) => api.put('/api/settings/diarization', settings), + + getLLM: () => api.get('/api/settings/llm'), + updateLLM: (settings: any) => api.put('/api/settings/llm', settings), + + getProviders: () => api.get('/api/settings/providers'), + updateProviders: (settings: any) => api.put('/api/settings/providers', settings), + + getNetwork: () => api.get('/api/settings/network'), + updateNetwork: (settings: any) => api.put('/api/settings/network', settings), + + getMisc: () => api.get('/api/settings/misc'), + updateMisc: (settings: any) => 
api.put('/api/settings/misc', settings), + + // Cache management + invalidateCache: (category?: string) => api.post('/api/settings/cache/invalidate', null, { + params: category ? { category } : {} + }), + + // Infrastructure status + getInfrastructureStatus: () => api.get('/api/settings/infrastructure/status'), + + // API keys status + getApiKeysStatus: () => api.get('/api/settings/api-keys/status'), } \ No newline at end of file From 325ad2f1a6db8d34f845b48fde59071dccd8bc08 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Thu, 11 Dec 2025 23:30:24 +0000 Subject: [PATCH 04/25] reorganised settings page --- .../routers/modules/settings_routes.py | 160 ++- .../advanced_omi_backend/settings_manager.py | 63 + .../advanced_omi_backend/settings_models.py | 68 ++ .../utils/api_keys_manager.py | 168 +++ .../advanced/webui/src/pages/Settings.tsx | 1088 +++++++++++++---- backends/advanced/webui/src/services/api.ts | 14 + 6 files changed, 1340 insertions(+), 221 deletions(-) create mode 100644 backends/advanced/src/advanced_omi_backend/utils/api_keys_manager.py diff --git a/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py b/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py index c490230b..3da73c4d 100644 --- a/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py +++ b/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py @@ -13,9 +13,11 @@ from advanced_omi_backend.settings_manager import get_settings_manager, SettingsManager from advanced_omi_backend.settings_models import ( AllSettings, + ApiKeysSettings, AudioProcessingSettings, ConversationSettings, DiarizationSettings, + InfrastructureSettings, LLMSettings, MiscSettings, NetworkSettings, @@ -249,6 +251,33 @@ async def update_network_settings( return await settings_mgr.get_network() +# Infrastructure Settings + + +@router.get("/infrastructure", response_model=InfrastructureSettings) +async def 
get_infrastructure_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get infrastructure settings.""" + return await settings_mgr.get_infrastructure() + + +@router.put("/infrastructure", response_model=InfrastructureSettings) +async def update_infrastructure_settings( + settings: InfrastructureSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update infrastructure settings. Admin only. + + Controls MongoDB, Redis, Qdrant, and Neo4j connection settings. + """ + await settings_mgr.update_infrastructure(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_infrastructure() + + # Miscellaneous Settings @@ -276,6 +305,120 @@ async def update_misc_settings( return await settings_mgr.get_misc() +# API Keys Settings + + +@router.get("/api-keys", response_model=ApiKeysSettings) +async def get_api_keys_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get API keys settings.""" + return await settings_mgr.get_api_keys() + + +@router.put("/api-keys", response_model=ApiKeysSettings) +async def update_api_keys_settings( + settings: ApiKeysSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update API keys settings. Admin only. + + Controls external service API keys. + """ + await settings_mgr.update_api_keys(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_api_keys() + + +@router.get("/api-keys/load-from-file", response_model=ApiKeysSettings) +async def load_api_keys_from_file( + file_path: str = ".env.api-keys", + current_user: User = Depends(current_superuser), +): + """ + Load API keys from a file. Admin only. 
+ + Args: + file_path: Path to the API keys file (default: .env.api-keys) + + Returns: + API keys loaded from the file + """ + from advanced_omi_backend.utils.api_keys_manager import read_api_keys_from_file + + try: + keys_dict = read_api_keys_from_file(file_path) + return ApiKeysSettings(**keys_dict) + except Exception as e: + logger.error(f"Error loading API keys from file {file_path}: {e}") + raise HTTPException( + status_code=500, + detail=f"Failed to load API keys from {file_path}: {str(e)}" + ) + + +@router.post("/api-keys/save") +async def save_api_keys( + settings: ApiKeysSettings, + save_to_file: bool = True, + save_to_database: bool = True, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Save API keys to file and/or database. Admin only. + + Args: + settings: API keys to save + save_to_file: Save to .env.api-keys file (default: True) + save_to_database: Save to MongoDB (default: True) + """ + from advanced_omi_backend.utils.api_keys_manager import write_api_keys_to_file + + results = {"file": False, "database": False, "errors": []} + + # Save to file + if save_to_file: + try: + keys_dict = { + "openai_api_key": settings.openai_api_key, + "deepgram_api_key": settings.deepgram_api_key, + "mistral_api_key": settings.mistral_api_key, + "hf_token": settings.hf_token, + "langfuse_public_key": settings.langfuse_public_key, + "langfuse_secret_key": settings.langfuse_secret_key, + "ngrok_authtoken": settings.ngrok_authtoken, + } + success = write_api_keys_to_file(keys_dict, ".env.api-keys") + results["file"] = success + if not success: + results["errors"].append("Failed to write to .env.api-keys file") + except Exception as e: + logger.error(f"Error writing API keys to file: {e}") + results["errors"].append(f"File write error: {str(e)}") + + # Save to database + if save_to_database: + try: + await settings_mgr.update_api_keys(settings, updated_by=str(current_user.id)) + 
results["database"] = True + except Exception as e: + logger.error(f"Error saving API keys to database: {e}") + results["errors"].append(f"Database save error: {str(e)}") + + return { + "success": results["file"] or results["database"], + "saved_to": { + "file": results["file"], + "database": results["database"], + }, + "errors": results["errors"], + "settings": await settings_mgr.get_api_keys(), + } + + # Cache Management @@ -304,34 +447,37 @@ async def invalidate_settings_cache( @router.get("/infrastructure/status") async def get_infrastructure_status( current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), ): """ Get infrastructure service connection status. Returns URLs and connection status for MongoDB, Redis, Qdrant, Neo4j. + Uses editable settings from database. """ - import os from advanced_omi_backend.app_config import get_app_config + # Get infrastructure settings from database + infra_settings = await settings_mgr.get_infrastructure() config = get_app_config() status = { "mongodb": { - "url": config.mongodb_uri, - "database": config.mongodb_database, + "url": infra_settings.mongodb_uri, + "database": infra_settings.mongodb_database, "connected": False, }, "redis": { - "url": config.redis_url, + "url": infra_settings.redis_url, "connected": False, }, "qdrant": { - "url": f"http://{config.qdrant_base_url}:{config.qdrant_port}", + "url": f"http://{infra_settings.qdrant_base_url}:{infra_settings.qdrant_port}", "connected": False, }, "neo4j": { - "host": os.getenv("NEO4J_HOST", "neo4j-mem0"), - "user": os.getenv("NEO4J_USER", "neo4j"), + "host": infra_settings.neo4j_host, + "user": infra_settings.neo4j_user, "connected": False, }, } diff --git a/backends/advanced/src/advanced_omi_backend/settings_manager.py b/backends/advanced/src/advanced_omi_backend/settings_manager.py index 601ce0c3..67873334 100644 --- a/backends/advanced/src/advanced_omi_backend/settings_manager.py +++ 
b/backends/advanced/src/advanced_omi_backend/settings_manager.py @@ -15,9 +15,11 @@ from advanced_omi_backend.settings_models import ( AllSettings, + ApiKeysSettings, AudioProcessingSettings, ConversationSettings, DiarizationSettings, + InfrastructureSettings, LLMSettings, MiscSettings, NetworkSettings, @@ -155,6 +157,20 @@ async def _initialize_from_env(self): ) await self._save_to_db("network", network.dict(), "system") + # Infrastructure settings + from advanced_omi_backend.app_config import get_app_config + config = get_app_config() + infrastructure = InfrastructureSettings( + mongodb_uri=config.mongodb_uri, + mongodb_database=config.mongodb_database, + redis_url=config.redis_url, + qdrant_base_url=config.qdrant_base_url, + qdrant_port=config.qdrant_port, + neo4j_host=os.getenv("NEO4J_HOST", "neo4j-mem0"), + neo4j_user=os.getenv("NEO4J_USER", "neo4j"), + ) + await self._save_to_db("infrastructure", infrastructure.dict(), "system") + # Misc settings misc = MiscSettings( debug_dir=os.getenv("DEBUG_DIR", "./data/debug_dir"), @@ -162,6 +178,21 @@ async def _initialize_from_env(self): ) await self._save_to_db("misc", misc.dict(), "system") + # API Keys settings - read from .env.api-keys file first, fallback to env vars + from advanced_omi_backend.utils.api_keys_manager import read_api_keys_from_file + + file_keys = read_api_keys_from_file(".env.api-keys") + api_keys = ApiKeysSettings( + openai_api_key=file_keys.get("openai_api_key") or os.getenv("OPENAI_API_KEY"), + deepgram_api_key=file_keys.get("deepgram_api_key") or os.getenv("DEEPGRAM_API_KEY"), + mistral_api_key=file_keys.get("mistral_api_key") or os.getenv("MISTRAL_API_KEY"), + hf_token=file_keys.get("hf_token") or os.getenv("HF_TOKEN"), + langfuse_public_key=file_keys.get("langfuse_public_key") or os.getenv("LANGFUSE_PUBLIC_KEY"), + langfuse_secret_key=file_keys.get("langfuse_secret_key") or os.getenv("LANGFUSE_SECRET_KEY"), + ngrok_authtoken=file_keys.get("ngrok_authtoken") or os.getenv("NGROK_AUTHTOKEN"), + 
) + await self._save_to_db("api_keys", api_keys.dict(), "system") + logger.info("โœ… Initialized all settings from environment variables") async def _get_from_cache_or_db( @@ -345,6 +376,20 @@ async def update_network( """Update network settings.""" await self._update_settings("network", settings, updated_by) + # Infrastructure Settings + + async def get_infrastructure(self) -> InfrastructureSettings: + """Get infrastructure settings.""" + return await self._get_from_cache_or_db("infrastructure", InfrastructureSettings) + + async def update_infrastructure( + self, + settings: InfrastructureSettings, + updated_by: str = "user", + ): + """Update infrastructure settings.""" + await self._update_settings("infrastructure", settings, updated_by) + # Misc Settings async def get_misc(self) -> MiscSettings: @@ -359,6 +404,20 @@ async def update_misc( """Update miscellaneous settings.""" await self._update_settings("misc", settings, updated_by) + # API Keys Settings + + async def get_api_keys(self) -> ApiKeysSettings: + """Get API keys settings.""" + return await self._get_from_cache_or_db("api_keys", ApiKeysSettings) + + async def update_api_keys( + self, + settings: ApiKeysSettings, + updated_by: str = "user", + ): + """Update API keys settings.""" + await self._update_settings("api_keys", settings, updated_by) + # Combined Settings async def get_all_settings(self) -> AllSettings: @@ -371,7 +430,9 @@ async def get_all_settings(self) -> AllSettings: llm=await self.get_llm(), providers=await self.get_providers(), network=await self.get_network(), + infrastructure=await self.get_infrastructure(), misc=await self.get_misc(), + api_keys=await self.get_api_keys(), ) async def update_all_settings( @@ -387,7 +448,9 @@ async def update_all_settings( await self.update_llm(settings.llm, updated_by) await self.update_providers(settings.providers, updated_by) await self.update_network(settings.network, updated_by) + await self.update_infrastructure(settings.infrastructure, updated_by) 
await self.update_misc(settings.misc, updated_by) + await self.update_api_keys(settings.api_keys, updated_by) def invalidate_cache(self, key: Optional[str] = None): """ diff --git a/backends/advanced/src/advanced_omi_backend/settings_models.py b/backends/advanced/src/advanced_omi_backend/settings_models.py index 42da80f8..68742f1e 100644 --- a/backends/advanced/src/advanced_omi_backend/settings_models.py +++ b/backends/advanced/src/advanced_omi_backend/settings_models.py @@ -226,6 +226,39 @@ class NetworkSettings(BaseModel): ) +class InfrastructureSettings(BaseModel): + """Core infrastructure service settings.""" + + mongodb_uri: str = Field( + default="mongodb://mongo:27017", + description="MongoDB connection URI" + ) + mongodb_database: str = Field( + default="friend-lite", + description="MongoDB database name" + ) + redis_url: str = Field( + default="redis://localhost:6379/0", + description="Redis connection URL" + ) + qdrant_base_url: str = Field( + default="qdrant", + description="Qdrant base URL/hostname" + ) + qdrant_port: str = Field( + default="6333", + description="Qdrant port" + ) + neo4j_host: str = Field( + default="neo4j-mem0", + description="Neo4j host" + ) + neo4j_user: str = Field( + default="neo4j", + description="Neo4j username" + ) + + class MiscSettings(BaseModel): """Miscellaneous settings.""" @@ -239,6 +272,39 @@ class MiscSettings(BaseModel): ) +class ApiKeysSettings(BaseModel): + """External service API keys.""" + + openai_api_key: Optional[str] = Field( + default=None, + description="OpenAI API Key" + ) + deepgram_api_key: Optional[str] = Field( + default=None, + description="Deepgram API Key" + ) + mistral_api_key: Optional[str] = Field( + default=None, + description="Mistral API Key" + ) + hf_token: Optional[str] = Field( + default=None, + description="HuggingFace Token" + ) + langfuse_public_key: Optional[str] = Field( + default=None, + description="Langfuse Public Key" + ) + langfuse_secret_key: Optional[str] = Field( + default=None, + 
description="Langfuse Secret Key" + ) + ngrok_authtoken: Optional[str] = Field( + default=None, + description="Ngrok Auth Token" + ) + + class AllSettings(BaseModel): """Combined model for all application settings.""" @@ -249,4 +315,6 @@ class AllSettings(BaseModel): llm: LLMSettings = Field(default_factory=LLMSettings) providers: ProviderSettings = Field(default_factory=ProviderSettings) network: NetworkSettings = Field(default_factory=NetworkSettings) + infrastructure: InfrastructureSettings = Field(default_factory=InfrastructureSettings) misc: MiscSettings = Field(default_factory=MiscSettings) + api_keys: ApiKeysSettings = Field(default_factory=ApiKeysSettings) diff --git a/backends/advanced/src/advanced_omi_backend/utils/api_keys_manager.py b/backends/advanced/src/advanced_omi_backend/utils/api_keys_manager.py new file mode 100644 index 00000000..1eca417d --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/utils/api_keys_manager.py @@ -0,0 +1,168 @@ +""" +API Keys Manager - Handle reading/writing API keys from file and database. +""" + +import logging +import os +from pathlib import Path +from typing import Dict, Optional + +logger = logging.getLogger(__name__) + + +def mask_api_key(key: Optional[str]) -> Optional[str]: + """ + Mask an API key for display purposes. + + Shows first 7 chars and last 4 chars, masks the middle. + Example: sk-1234567890abcdef -> sk-1234***cdef + """ + if not key or len(key) < 12: + return None + + return f"{key[:7]}****{key[-4:]}" + + +def read_api_keys_from_file(file_path: str = ".env.api-keys") -> Dict[str, Optional[str]]: + """ + Read API keys from .env.api-keys file. 
+ + Returns: + Dictionary of API key values (not masked) + """ + keys = { + "openai_api_key": None, + "deepgram_api_key": None, + "mistral_api_key": None, + "hf_token": None, + "langfuse_public_key": None, + "langfuse_secret_key": None, + "ngrok_authtoken": None, + } + + # Check if file exists + if not os.path.exists(file_path): + logger.warning(f"API keys file not found: {file_path}") + return keys + + try: + with open(file_path, 'r') as f: + for line in f: + line = line.strip() + # Skip comments and empty lines + if not line or line.startswith('#'): + continue + + # Parse key=value + if '=' in line: + key, value = line.split('=', 1) + key = key.strip() + value = value.strip() + + # Map env var names to our field names + if key == "OPENAI_API_KEY" and value: + keys["openai_api_key"] = value + elif key == "DEEPGRAM_API_KEY" and value: + keys["deepgram_api_key"] = value + elif key == "MISTRAL_API_KEY" and value: + keys["mistral_api_key"] = value + elif key == "HF_TOKEN" and value: + keys["hf_token"] = value + elif key == "LANGFUSE_PUBLIC_KEY" and value: + keys["langfuse_public_key"] = value + elif key == "LANGFUSE_SECRET_KEY" and value: + keys["langfuse_secret_key"] = value + elif key == "NGROK_AUTHTOKEN" and value: + keys["ngrok_authtoken"] = value + + logger.info(f"Loaded API keys from {file_path}") + return keys + + except Exception as e: + logger.error(f"Error reading API keys file: {e}") + return keys + + +def write_api_keys_to_file(keys: Dict[str, Optional[str]], file_path: str = ".env.api-keys") -> bool: + """ + Write API keys to .env.api-keys file. 
+ + Args: + keys: Dictionary of API key values + file_path: Path to the .env.api-keys file + + Returns: + True if successful, False otherwise + """ + try: + # Read template for structure/comments + template_path = f"{file_path}.template" + template_lines = [] + + if os.path.exists(template_path): + with open(template_path, 'r') as f: + template_lines = f.readlines() + + # Build output content + output_lines = [] + + if template_lines: + # Use template structure + for line in template_lines: + stripped = line.strip() + + # Keep comments and empty lines + if not stripped or stripped.startswith('#'): + output_lines.append(line) + continue + + # Parse key=value from template + if '=' in stripped: + key_name = stripped.split('=', 1)[0].strip() + + # Replace with actual values if provided + if key_name == "OPENAI_API_KEY": + value = keys.get("openai_api_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "DEEPGRAM_API_KEY": + value = keys.get("deepgram_api_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "MISTRAL_API_KEY": + value = keys.get("mistral_api_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "HF_TOKEN": + value = keys.get("hf_token", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "LANGFUSE_PUBLIC_KEY": + value = keys.get("langfuse_public_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "LANGFUSE_SECRET_KEY": + value = keys.get("langfuse_secret_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "NGROK_AUTHTOKEN": + value = keys.get("ngrok_authtoken", "") + output_lines.append(f"{key_name}={value}\n") + else: + # Keep other keys from template unchanged + output_lines.append(line) + else: + # No template - create simple format + output_lines.append("# API Keys\n\n") + output_lines.append(f"OPENAI_API_KEY={keys.get('openai_api_key', '')}\n") + output_lines.append(f"DEEPGRAM_API_KEY={keys.get('deepgram_api_key', 
'')}\n") + output_lines.append(f"MISTRAL_API_KEY={keys.get('mistral_api_key', '')}\n") + output_lines.append(f"HF_TOKEN={keys.get('hf_token', '')}\n") + output_lines.append(f"LANGFUSE_PUBLIC_KEY={keys.get('langfuse_public_key', '')}\n") + output_lines.append(f"LANGFUSE_SECRET_KEY={keys.get('langfuse_secret_key', '')}\n") + output_lines.append(f"NGROK_AUTHTOKEN={keys.get('ngrok_authtoken', '')}\n") + + # Write to file + with open(file_path, 'w') as f: + f.writelines(output_lines) + + logger.info(f"Wrote API keys to {file_path}") + return True + + except Exception as e: + logger.error(f"Error writing API keys file: {e}") + return False diff --git a/backends/advanced/webui/src/pages/Settings.tsx b/backends/advanced/webui/src/pages/Settings.tsx index 880f2b4f..bd3556d9 100644 --- a/backends/advanced/webui/src/pages/Settings.tsx +++ b/backends/advanced/webui/src/pages/Settings.tsx @@ -12,13 +12,15 @@ import { MessageSquare, Mic, Database, - Settings2, Shield, + Brain, + Eye, + EyeOff, } from 'lucide-react' import { useAuth } from '../contexts/AuthContext' import { settingsApi } from '../services/api' -type Tab = 'core-infra' | 'api-keys' | 'mcp-key' | 'memory' | 'speech' | 'conversations' | 'other' +type Tab = 'core-infra' | 'api-keys' | 'mcp-key' | 'memory' | 'llm' | 'speech' | 'conversations' interface Message { type: 'success' | 'error' @@ -39,9 +41,34 @@ export default function Settings() { const [infraStatus, setInfraStatus] = useState(null) const [infraLoading, setInfraLoading] = useState(false) - // API Keys status state - const [apiKeysStatus, setApiKeysStatus] = useState(null) - const [apiKeysLoading, setApiKeysLoading] = useState(false) + // Infrastructure settings state + const [infraSettings, setInfraSettings] = useState(null) + const [infraSettingsOriginal, setInfraSettingsOriginal] = useState(null) + const [infraSettingsLoading, setInfraSettingsLoading] = useState(false) + const [infraSettingsSaving, setInfraSettingsSaving] = useState(false) + + // API 
Keys settings state + const [apiKeysSettings, setApiKeysSettings] = useState(null) + const [apiKeysSettingsOriginal, setApiKeysSettingsOriginal] = useState(null) + const [apiKeysSettingsLoading, setApiKeysSettingsLoading] = useState(false) + const [apiKeysSettingsSaving, setApiKeysSettingsSaving] = useState(false) + + // API Keys visibility state + const [showApiKeys, setShowApiKeys] = useState>({ + openai_api_key: false, + deepgram_api_key: false, + mistral_api_key: false, + hf_token: false, + langfuse_public_key: false, + langfuse_secret_key: false, + ngrok_authtoken: false, + }) + + // API Keys save options + const [saveToFile, setSaveToFile] = useState(true) + const [saveToDatabase, setSaveToDatabase] = useState(true) + const [apiKeysFilePath, setApiKeysFilePath] = useState('.env.api-keys') + const [loadingFromFile, setLoadingFromFile] = useState(false) // Application settings state const [appSettings, setAppSettings] = useState(null) @@ -54,11 +81,13 @@ export default function Settings() { }, [user]) useEffect(() => { - if (activeTab === 'core-infra' && !infraStatus) { - loadInfrastructureStatus() - } else if (activeTab === 'api-keys' && !apiKeysStatus) { - loadApiKeysStatus() - } else if (['memory', 'speech', 'conversations', 'other'].includes(activeTab) && !appSettings) { + if (activeTab === 'core-infra') { + if (!infraStatus) loadInfrastructureStatus() + if (!infraSettings) loadInfrastructureSettings() + if (!appSettings) loadApplicationSettings() // Load for network & misc settings + } else if (activeTab === 'api-keys') { + if (!apiKeysSettings) loadApiKeysSettings() + } else if (['memory', 'llm', 'speech', 'conversations'].includes(activeTab) && !appSettings) { loadApplicationSettings() } }, [activeTab]) @@ -83,16 +112,101 @@ export default function Settings() { } } - const loadApiKeysStatus = async () => { + const loadInfrastructureSettings = async () => { + try { + setInfraSettingsLoading(true) + const response = await settingsApi.getInfrastructure() + 
setInfraSettings(response.data) + setInfraSettingsOriginal(response.data) + } catch (error: any) { + console.error('Failed to load infrastructure settings:', error) + showMessage('error', 'Failed to load infrastructure settings') + } finally { + setInfraSettingsLoading(false) + } + } + + const saveInfrastructureSettings = async () => { + try { + setInfraSettingsSaving(true) + await settingsApi.updateInfrastructure(infraSettings) + setInfraSettingsOriginal(infraSettings) + showMessage('success', 'Infrastructure settings saved successfully') + // Reload status to reflect new settings + loadInfrastructureStatus() + } catch (error: any) { + console.error('Failed to save infrastructure settings:', error) + showMessage('error', error.response?.data?.detail || 'Failed to save infrastructure settings') + } finally { + setInfraSettingsSaving(false) + } + } + + const resetInfrastructureSettings = () => { + setInfraSettings({ ...infraSettingsOriginal }) + } + + const loadApiKeysSettings = async () => { + try { + setApiKeysSettingsLoading(true) + const response = await settingsApi.getApiKeys() + setApiKeysSettings(response.data) + setApiKeysSettingsOriginal(response.data) + } catch (error: any) { + console.error('Failed to load API keys settings:', error) + showMessage('error', 'Failed to load API keys settings') + } finally { + setApiKeysSettingsLoading(false) + } + } + + const saveApiKeysSettings = async () => { try { - setApiKeysLoading(true) - const response = await settingsApi.getApiKeysStatus() - setApiKeysStatus(response.data) + setApiKeysSettingsSaving(true) + const response = await settingsApi.saveApiKeys(apiKeysSettings, saveToFile, saveToDatabase) + + if (response.data.success) { + setApiKeysSettingsOriginal(apiKeysSettings) + const savedTo: string[] = [] + if (response.data.saved_to.file) savedTo.push('file') + if (response.data.saved_to.database) savedTo.push('database') + showMessage('success', `API keys saved to ${savedTo.join(' and ')}`) + } else { + 
showMessage('error', response.data.errors.join(', ') || 'Failed to save API keys') + } } catch (error: any) { - console.error('Failed to load API keys status:', error) - showMessage('error', 'Failed to load API keys status') + console.error('Failed to save API keys:', error) + showMessage('error', error.response?.data?.detail || 'Failed to save API keys') } finally { - setApiKeysLoading(false) + setApiKeysSettingsSaving(false) + } + } + + const resetApiKeysSettings = () => { + setApiKeysSettings({ ...apiKeysSettingsOriginal }) + } + + // Toggle API key visibility + const toggleApiKeyVisibility = (keyName: string) => { + setShowApiKeys(prev => ({ ...prev, [keyName]: !prev[keyName] })) + } + + // Load API keys from file + const loadApiKeysFromFile = async () => { + try { + setLoadingFromFile(true) + const response = await settingsApi.loadApiKeysFromFile(apiKeysFilePath) + + if (response.data) { + setApiKeysSettings(response.data) + setApiKeysSettingsOriginal(response.data) + showMessage('success', `API keys loaded from ${apiKeysFilePath}`) + } + } catch (error: any) { + console.error('Failed to load API keys from file:', error) + showMessage('error', error.response?.data?.detail || 'Failed to load API keys from file') + } finally { + setLoadingFromFile(false) } } @@ -302,9 +416,9 @@ export default function Settings() { { id: 'api-keys' as Tab, label: 'API Keys', icon: Shield, adminOnly: true }, { id: 'mcp-key' as Tab, label: 'MCP Key', icon: Key, adminOnly: false }, { id: 'memory' as Tab, label: 'Memory', icon: Database, adminOnly: true }, + { id: 'llm' as Tab, label: 'LLM', icon: Brain, adminOnly: true }, { id: 'speech' as Tab, label: 'Speech', icon: Mic, adminOnly: true }, { id: 'conversations' as Tab, label: 'Conversations', icon: MessageSquare, adminOnly: true }, - { id: 'other' as Tab, label: 'Other', icon: Settings2, adminOnly: true }, ] return ( @@ -378,77 +492,335 @@ export default function Settings() {

Core Infrastructure

- +
+ +
- {infraLoading && !infraStatus ? ( + {infraSettingsLoading && !infraSettings ? (
-

Loading infrastructure status...

+

Loading infrastructure settings...

- ) : infraStatus ? ( -
- {Object.entries(infraStatus).map(([service, info]: [string, any]) => ( -
-
-

- {service} -

+ ) : infraSettings ? ( +
+ {/* MongoDB */} +
+
+

MongoDB

+ {infraStatus?.mongodb && ( - {info.connected ? 'Connected' : 'Disconnected'} + {infraStatus.mongodb.connected ? 'Connected' : 'Disconnected'} + )} +
+
+
+ + setInfraSettings({ ...infraSettings, mongodb_uri: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="mongodb://mongo:27017" + />
-
- {info.url && ( -

- URL:{' '} - - {info.url} - -

- )} - {info.host && ( -

- Host: {info.host} -

- )} - {info.database && ( -

- Database: {info.database} +

+ + setInfraSettings({ ...infraSettings, mongodb_database: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="friend-lite" + /> +
+
+
+ + {/* Redis */} +
+
+

Redis

+ {infraStatus?.redis && ( + + {infraStatus.redis.connected ? 'Connected' : 'Disconnected'} + + )} +
+
+ + setInfraSettings({ ...infraSettings, redis_url: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="redis://localhost:6379/0" + /> +
+
+ + {/* Qdrant */} +
+
+

Qdrant

+ {infraStatus?.qdrant && ( + + {infraStatus.qdrant.connected ? 'Connected' : 'Disconnected'} + + )} +
+
+
+ + setInfraSettings({ ...infraSettings, qdrant_base_url: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="qdrant" + /> +
+
+ + setInfraSettings({ ...infraSettings, qdrant_port: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="6333" + /> +
+
+
+ + {/* Neo4j */} +
+
+

Neo4j

+ {infraStatus?.neo4j && ( + + {infraStatus.neo4j.connected ? 'Connected' : 'Disconnected'} + + )} +
+
+
+ + setInfraSettings({ ...infraSettings, neo4j_host: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="neo4j-mem0" + /> +
+
+ + setInfraSettings({ ...infraSettings, neo4j_user: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="neo4j" + /> +
+
+
+ + {/* Network Settings */} + {appSettings && ( +
+

Network & Public Access

+
+
+ + setAppSettings({ + ...appSettings, + network: { ...appSettings.network, host_ip: e.target.value } + })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="localhost" + /> +

+ Public IP or hostname for browser access

- )} - {info.user && ( -

- User: {info.user} +

+
+
+ + setAppSettings({ + ...appSettings, + network: { ...appSettings.network, backend_public_port: parseInt(e.target.value) } + })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="8000" + /> +
+
+ + setAppSettings({ + ...appSettings, + network: { ...appSettings.network, webui_port: parseInt(e.target.value) } + })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="5173" + /> +
+
+
+
+ )} + + {/* System Settings */} + {appSettings && ( +
+

System

+
+
+ + setAppSettings({ + ...appSettings, + misc: { ...appSettings.misc, debug_dir: e.target.value } + })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="./data/debug_dir" + /> +

+ Directory for debug files

- )} +
+
+ setAppSettings({ + ...appSettings, + misc: { ...appSettings.misc, langfuse_enable_telemetry: e.target.checked } + })} + className="h-4 w-4 text-blue-600 focus:ring-blue-500 border-gray-300 rounded" + /> + +
- ))} + )} + + {/* Save and Reset buttons */} +
+ + +
) : (

- Failed to load infrastructure status + Failed to load infrastructure settings

+
+

+ Load API keys from a file on the server. Default: .env.api-keys in project root. +

+
+ + {apiKeysSettingsLoading && !apiKeysSettings ? (
-

Loading API keys status...

+

Loading API keys...

- ) : apiKeysStatus ? ( -
- {Object.entries(apiKeysStatus).map(([key, info]: [string, any]) => ( -
+ ) : apiKeysSettings ? ( +
+ {/* OpenAI */} +
+ +
+ setApiKeysSettings({ ...apiKeysSettings, openai_api_key: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="sk-..." + /> + +
+

+ For GPT models and embeddings +

+
+ + {/* Deepgram */} +
+ +
+ setApiKeysSettings({ ...apiKeysSettings, deepgram_api_key: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="Enter Deepgram API key" + /> + +
+

+ For speech-to-text transcription +

+
+ + {/* Mistral */} +
+ +
+ setApiKeysSettings({ ...apiKeysSettings, mistral_api_key: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="Enter Mistral API key" + /> + +
+

+ For Mistral/Voxtral transcription +

+
+ + {/* HuggingFace */} +
+ +
+ setApiKeysSettings({ ...apiKeysSettings, hf_token: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="hf_..." + /> + +
+

+ For accessing HuggingFace models +

+
+ + {/* Langfuse */} +
+

+ Langfuse (Observability) +

+
+
+ +
+ setApiKeysSettings({ ...apiKeysSettings, langfuse_public_key: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="pk-lf-..." + /> + +
+
-

{info.name}

-

- - {info.env_var} - -

+ +
+ setApiKeysSettings({ ...apiKeysSettings, langfuse_secret_key: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="sk-lf-..." + /> + +
- +
+ + {/* Ngrok */} +
+ +
+ setApiKeysSettings({ ...apiKeysSettings, ngrok_authtoken: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="Enter Ngrok auth token" + /> +
- ))} +

+ For public URL tunneling +

+
+ + {/* Save Options */} +
+

+ Save Options +

+
+ + +
+

+ You can save to file only, database only, or both for redundancy. +

+
+ + {/* Save and Reset buttons */} +
+ + +
) : (
-

Failed to load API keys status

+

+ Failed to load API keys +

+
)}
@@ -644,16 +1285,40 @@ export default function Settings() { {/* Speech Settings */} {activeTab === 'speech' && appSettings && (
-

+

Speech & Audio Settings

-
-

+ {/* Transcription */} +
+

+ Transcription +

+
+ {renderSettingsField( + 'providers', + 'transcription_provider', + appSettings.providers.transcription_provider, + 'Transcription Service', + 'Choose which service to use for speech-to-text', + 'select', + [ + { value: 'auto', label: 'Auto-detect' }, + { value: 'deepgram', label: 'Deepgram' }, + { value: 'mistral', label: 'Mistral' }, + { value: 'parakeet', label: 'Parakeet (Local)' }, + ] + )} +
+
+ + {/* Speech Detection */} +
+

Speech Detection

-
+
{renderSettingsField( 'speech_detection', 'min_words', @@ -681,11 +1346,49 @@ export default function Settings() {
-
-

+ {/* Diarization */} +
+

+ Speaker Diarization +

+
+ {renderSettingsField( + 'diarization', + 'diarization_source', + appSettings.diarization.diarization_source, + 'Diarization Source', + 'Service to use for speaker identification', + 'select', + [ + { value: 'pyannote', label: 'PyAnnote' }, + { value: 'deepgram', label: 'Deepgram' }, + ] + )} + {renderSettingsField( + 'diarization', + 'min_speakers', + appSettings.diarization.min_speakers, + 'Minimum Speakers', + 'Minimum number of speakers to detect', + 'number' + )} + {renderSettingsField( + 'diarization', + 'max_speakers', + appSettings.diarization.max_speakers, + 'Maximum Speakers', + 'Maximum number of speakers to detect', + 'number' + )} +
+
+ + {/* Audio Processing */} +
+

Audio Processing

-
+
{renderSettingsField( 'audio_processing', 'audio_cropping_enabled', @@ -712,34 +1415,15 @@ export default function Settings() { )}
- -
-

- Transcription Provider -

- {renderSettingsField( - 'providers', - 'transcription_provider', - appSettings.providers.transcription_provider, - 'Transcription Service', - 'Choose which service to use for speech-to-text', - 'select', - [ - { value: 'auto', label: 'Auto-detect' }, - { value: 'deepgram', label: 'Deepgram' }, - { value: 'mistral', label: 'Mistral' }, - { value: 'parakeet', label: 'Parakeet (Local)' }, - ] - )} -
-
+
)} - {/* Other Settings */} - {activeTab === 'other' && appSettings && ( + {/* LLM Settings */} + {activeTab === 'llm' && appSettings && (

- Other Settings + LLM Configuration

+ {/* Provider Selection */}
-

- Speaker Diarization -

-
- {renderSettingsField( - 'diarization', - 'diarization_source', - appSettings.diarization.diarization_source, - 'Diarization Source', - 'Service to use for speaker identification', - 'select', - [ - { value: 'pyannote', label: 'PyAnnote' }, - { value: 'deepgram', label: 'Deepgram' }, - ] - )} - {renderSettingsField( - 'diarization', - 'min_speakers', - appSettings.diarization.min_speakers, - 'Minimum Speakers', - 'Minimum number of speakers to detect', - 'number' - )} - {renderSettingsField( - 'diarization', - 'max_speakers', - appSettings.diarization.max_speakers, - 'Maximum Speakers', - 'Maximum number of speakers to detect', - 'number' - )} -
-
- -
-

- LLM Configuration +

+ Provider

{renderSettingsField( @@ -860,85 +1509,96 @@ export default function Settings() { 'llm_provider', appSettings.llm.llm_provider, 'LLM Provider', - 'Language model provider for memory extraction', + 'Language model provider for memory extraction and chat', 'select', [ { value: 'openai', label: 'OpenAI' }, { value: 'ollama', label: 'Ollama' }, ] )} - {renderSettingsField( - 'llm', - 'openai_model', - appSettings.llm.openai_model, - 'OpenAI Model', - 'Model to use for OpenAI requests', - 'text' - )} - {renderSettingsField( - 'llm', - 'chat_temperature', - appSettings.llm.chat_temperature, - 'Chat Temperature', - 'Temperature for chat responses (0.0-2.0)', - 'number' - )}
-
-

- Network & System -

-
- {renderSettingsField( - 'network', - 'host_ip', - appSettings.network.host_ip, - 'Host IP', - 'Public IP or hostname for browser access', - 'text' - )} - {renderSettingsField( - 'network', - 'backend_public_port', - appSettings.network.backend_public_port, - 'Backend Port', - 'Public port for backend API', - 'number' - )} - {renderSettingsField( - 'misc', - 'langfuse_enable_telemetry', - appSettings.misc.langfuse_enable_telemetry, - 'Enable Langfuse Telemetry', - 'Enable telemetry for Langfuse', - 'boolean' - )} + {/* OpenAI Settings */} + {appSettings.llm.llm_provider === 'openai' && ( +
+

+ OpenAI Settings +

+
+ {renderSettingsField( + 'llm', + 'openai_model', + appSettings.llm.openai_model, + 'OpenAI Model', + 'Model to use for general tasks', + 'text' + )} + {renderSettingsField( + 'llm', + 'chat_llm_model', + appSettings.llm.chat_llm_model || '', + 'Chat Model (Optional)', + 'Specific model for chat (defaults to OpenAI model if not set)', + 'text' + )} + {renderSettingsField( + 'llm', + 'chat_temperature', + appSettings.llm.chat_temperature, + 'Chat Temperature', + 'Temperature for chat responses (0.0-2.0)', + 'number' + )} +
-
+ )} + + {/* Ollama Settings */} + {appSettings.llm.llm_provider === 'ollama' && ( +
+

+ Ollama Settings +

+
+ {renderSettingsField( + 'llm', + 'ollama_model', + appSettings.llm.ollama_model || '', + 'Ollama Model', + 'Model name for Ollama', + 'text' + )} + {renderSettingsField( + 'llm', + 'ollama_embedder_model', + appSettings.llm.ollama_embedder_model || '', + 'Ollama Embedder Model', + 'Embedder model name for Ollama', + 'text' + )} +
+
+ )}
)} {/* Loading state for settings tabs */} - {['memory', 'speech', 'conversations', 'other'].includes(activeTab) && + {['memory', 'llm', 'speech', 'conversations'].includes(activeTab) && appSettingsLoading && !appSettings && (
diff --git a/backends/advanced/webui/src/services/api.ts b/backends/advanced/webui/src/services/api.ts index 0c246d53..4ee895db 100644 --- a/backends/advanced/webui/src/services/api.ts +++ b/backends/advanced/webui/src/services/api.ts @@ -303,9 +303,23 @@ export const settingsApi = { getNetwork: () => api.get('/api/settings/network'), updateNetwork: (settings: any) => api.put('/api/settings/network', settings), + getInfrastructure: () => api.get('/api/settings/infrastructure'), + updateInfrastructure: (settings: any) => api.put('/api/settings/infrastructure', settings), + getMisc: () => api.get('/api/settings/misc'), updateMisc: (settings: any) => api.put('/api/settings/misc', settings), + getApiKeys: () => api.get('/api/settings/api-keys'), + updateApiKeys: (settings: any) => api.put('/api/settings/api-keys', settings), + saveApiKeys: (settings: any, saveToFile: boolean = true, saveToDatabase: boolean = true) => + api.post('/api/settings/api-keys/save', settings, { + params: { save_to_file: saveToFile, save_to_database: saveToDatabase } + }), + loadApiKeysFromFile: (filePath: string = '.env.api-keys') => + api.get('/api/settings/api-keys/load-from-file', { + params: { file_path: filePath } + }), + // Cache management invalidateCache: (category?: string) => api.post('/api/settings/cache/invalidate', null, { params: category ? 
{ category } : {} From d8b360da8af9ec30d3e040e0cb092f9a044639d6 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Fri, 5 Dec 2025 00:48:43 +0000 Subject: [PATCH 05/25] Changed name to chronicle # Conflicts: # README-K8S.md # backends/advanced/src/advanced_omi_backend/auth.py # backends/advanced/src/advanced_omi_backend/services/mycelia_sync.py # backends/advanced/webui/package-lock.json # backends/advanced/webui/package.json # quickstart.md # tests/infrastructure/infra_tests.robot # tests/integration/websocket_streaming_tests.robot # Conflicts: # README.md # app/app.json # backends/advanced/src/advanced_omi_backend/app_config.py # backends/advanced/src/advanced_omi_backend/auth.py # backends/advanced/src/advanced_omi_backend/controllers/system_controller.py # backends/advanced/src/advanced_omi_backend/database.py # backends/advanced/src/advanced_omi_backend/models/job.py # backends/advanced/src/advanced_omi_backend/routers/modules/health_routes.py # backends/advanced/src/advanced_omi_backend/services/memory/config.py # backends/advanced/src/advanced_omi_backend/services/memory/service_factory.py # backends/advanced/src/advanced_omi_backend/services/mycelia_sync.py # backends/advanced/webui/src/pages/System.tsx # tests/infrastructure/infra_tests.robot # tests/resources/transcript_verification.robot # tests/setup/test_env.py --- app/app.json | 10 +++++----- app/app/components/DeviceDetails.tsx | 2 +- app/app/components/DeviceListItem.tsx | 2 +- app/app/hooks/useAudioListener.ts | 2 +- app/app/hooks/useDeviceConnection.ts | 2 +- app/app/hooks/useDeviceScanning.ts | 2 +- app/app/index.tsx | 4 ++-- app/package.json | 4 ++-- backends/advanced/webui/package-lock.json | 10 +--------- backends/advanced/webui/package.json | 2 +- 10 files changed, 16 insertions(+), 24 deletions(-) diff --git a/app/app.json b/app/app.json index 66fbb8c2..c2446e12 100644 --- a/app/app.json +++ b/app/app.json @@ -1,7 +1,7 @@ { "expo": { - "name": "friend-lite-app", - "slug": "friend-lite-app", 
+ "name": "chronicle-app", + "slug": "chronicle-app", "version": "1.0.0", "orientation": "portrait", "icon": "./assets/icon.png", @@ -17,9 +17,9 @@ ], "ios": { "supportsTablet": true, - "bundleIdentifier": "com.cupbearer5517.friendlite", + "bundleIdentifier": "com.cupbearer5517.chronicle", "infoPlist": { - "NSMicrophoneUsageDescription": "Friend Lite needs access to your microphone to stream audio to the backend for processing." + "NSMicrophoneUsageDescription": "Chronicle needs access to your microphone to stream audio to the backend for processing." } }, "android": { @@ -27,7 +27,7 @@ "foregroundImage": "./assets/adaptive-icon.png", "backgroundColor": "#ffffff" }, - "package": "com.cupbearer5517.friendlite", + "package": "com.cupbearer5517.chronicle", "permissions": [ "android.permission.BLUETOOTH", "android.permission.BLUETOOTH_ADMIN", diff --git a/app/app/components/DeviceDetails.tsx b/app/app/components/DeviceDetails.tsx index ebf204c3..3bd22b4a 100644 --- a/app/app/components/DeviceDetails.tsx +++ b/app/app/components/DeviceDetails.tsx @@ -1,6 +1,6 @@ import React from 'react'; import { View, Text, TouchableOpacity, StyleSheet, TextInput } from 'react-native'; -import { BleAudioCodec } from 'friend-lite-react-native'; +import { BleAudioCodec } from 'chronicle-react-native'; interface DeviceDetailsProps { // Device Info diff --git a/app/app/components/DeviceListItem.tsx b/app/app/components/DeviceListItem.tsx index a8083035..3da559de 100644 --- a/app/app/components/DeviceListItem.tsx +++ b/app/app/components/DeviceListItem.tsx @@ -1,6 +1,6 @@ import React from 'react'; import { View, Text, TouchableOpacity, StyleSheet } from 'react-native'; -import { OmiDevice } from 'friend-lite-react-native'; +import { OmiDevice } from 'chronicle-react-native'; interface DeviceListItemProps { device: OmiDevice; diff --git a/app/app/hooks/useAudioListener.ts b/app/app/hooks/useAudioListener.ts index 391ed125..1dcf225e 100644 --- a/app/app/hooks/useAudioListener.ts +++ 
b/app/app/hooks/useAudioListener.ts @@ -1,6 +1,6 @@ import { useState, useRef, useCallback, useEffect } from 'react'; import { Alert } from 'react-native'; -import { OmiConnection } from 'friend-lite-react-native'; +import { OmiConnection } from 'chronicle-react-native'; import { Subscription, ConnectionPriority } from 'react-native-ble-plx'; // OmiConnection might use this type for subscriptions interface UseAudioListener { diff --git a/app/app/hooks/useDeviceConnection.ts b/app/app/hooks/useDeviceConnection.ts index e729169e..964e4d4e 100644 --- a/app/app/hooks/useDeviceConnection.ts +++ b/app/app/hooks/useDeviceConnection.ts @@ -1,6 +1,6 @@ import { useState, useCallback } from 'react'; import { Alert } from 'react-native'; -import { OmiConnection, BleAudioCodec, OmiDevice } from 'friend-lite-react-native'; +import { OmiConnection, BleAudioCodec, OmiDevice } from 'chronicle-react-native'; interface UseDeviceConnection { connectedDevice: OmiDevice | null; diff --git a/app/app/hooks/useDeviceScanning.ts b/app/app/hooks/useDeviceScanning.ts index d7780266..f4c16ff3 100644 --- a/app/app/hooks/useDeviceScanning.ts +++ b/app/app/hooks/useDeviceScanning.ts @@ -1,6 +1,6 @@ import { useState, useEffect, useCallback, useRef } from 'react'; import { BleManager, State as BluetoothState } from 'react-native-ble-plx'; -import { OmiConnection, OmiDevice } from 'friend-lite-react-native'; // Assuming this is the correct import for Omi types +import { OmiConnection, OmiDevice } from 'chronicle-react-native'; // Assuming this is the correct import for Omi types interface UseDeviceScanning { devices: OmiDevice[]; diff --git a/app/app/index.tsx b/app/app/index.tsx index 8bb1234a..2b20cb7b 100644 --- a/app/app/index.tsx +++ b/app/app/index.tsx @@ -1,6 +1,6 @@ import React, { useRef, useCallback, useEffect, useState } from 'react'; import { StyleSheet, Text, View, SafeAreaView, ScrollView, Platform, FlatList, ActivityIndicator, Alert, Switch, Button, TouchableOpacity, 
KeyboardAvoidingView } from 'react-native'; -import { OmiConnection } from 'friend-lite-react-native'; // OmiDevice also comes from here +import { OmiConnection } from 'chronicle-react-native'; // OmiDevice also comes from here import { State as BluetoothState } from 'react-native-ble-plx'; // Import State from ble-plx // Hooks @@ -521,7 +521,7 @@ export default function App() { contentContainerStyle={styles.content} keyboardShouldPersistTaps="handled" > - Friend Lite + Chronicle {/* Backend Connection - moved to top */} Date: Thu, 11 Dec 2025 14:18:28 +0000 Subject: [PATCH 06/25] changed mobile app package to friend-lite for the moment --- app/app.json | 10 +++++----- app/app/components/DeviceDetails.tsx | 2 +- app/app/components/DeviceListItem.tsx | 2 +- app/app/hooks/useAudioListener.ts | 2 +- app/app/hooks/useDeviceConnection.ts | 2 +- app/app/hooks/useDeviceScanning.ts | 2 +- app/app/index.tsx | 4 ++-- app/package.json | 4 ++-- 8 files changed, 14 insertions(+), 14 deletions(-) diff --git a/app/app.json b/app/app.json index c2446e12..66fbb8c2 100644 --- a/app/app.json +++ b/app/app.json @@ -1,7 +1,7 @@ { "expo": { - "name": "chronicle-app", - "slug": "chronicle-app", + "name": "friend-lite-app", + "slug": "friend-lite-app", "version": "1.0.0", "orientation": "portrait", "icon": "./assets/icon.png", @@ -17,9 +17,9 @@ ], "ios": { "supportsTablet": true, - "bundleIdentifier": "com.cupbearer5517.chronicle", + "bundleIdentifier": "com.cupbearer5517.friendlite", "infoPlist": { - "NSMicrophoneUsageDescription": "Chronicle needs access to your microphone to stream audio to the backend for processing." + "NSMicrophoneUsageDescription": "Friend Lite needs access to your microphone to stream audio to the backend for processing." 
} }, "android": { @@ -27,7 +27,7 @@ "foregroundImage": "./assets/adaptive-icon.png", "backgroundColor": "#ffffff" }, - "package": "com.cupbearer5517.chronicle", + "package": "com.cupbearer5517.friendlite", "permissions": [ "android.permission.BLUETOOTH", "android.permission.BLUETOOTH_ADMIN", diff --git a/app/app/components/DeviceDetails.tsx b/app/app/components/DeviceDetails.tsx index 3bd22b4a..ebf204c3 100644 --- a/app/app/components/DeviceDetails.tsx +++ b/app/app/components/DeviceDetails.tsx @@ -1,6 +1,6 @@ import React from 'react'; import { View, Text, TouchableOpacity, StyleSheet, TextInput } from 'react-native'; -import { BleAudioCodec } from 'chronicle-react-native'; +import { BleAudioCodec } from 'friend-lite-react-native'; interface DeviceDetailsProps { // Device Info diff --git a/app/app/components/DeviceListItem.tsx b/app/app/components/DeviceListItem.tsx index 3da559de..a8083035 100644 --- a/app/app/components/DeviceListItem.tsx +++ b/app/app/components/DeviceListItem.tsx @@ -1,6 +1,6 @@ import React from 'react'; import { View, Text, TouchableOpacity, StyleSheet } from 'react-native'; -import { OmiDevice } from 'chronicle-react-native'; +import { OmiDevice } from 'friend-lite-react-native'; interface DeviceListItemProps { device: OmiDevice; diff --git a/app/app/hooks/useAudioListener.ts b/app/app/hooks/useAudioListener.ts index 1dcf225e..391ed125 100644 --- a/app/app/hooks/useAudioListener.ts +++ b/app/app/hooks/useAudioListener.ts @@ -1,6 +1,6 @@ import { useState, useRef, useCallback, useEffect } from 'react'; import { Alert } from 'react-native'; -import { OmiConnection } from 'chronicle-react-native'; +import { OmiConnection } from 'friend-lite-react-native'; import { Subscription, ConnectionPriority } from 'react-native-ble-plx'; // OmiConnection might use this type for subscriptions interface UseAudioListener { diff --git a/app/app/hooks/useDeviceConnection.ts b/app/app/hooks/useDeviceConnection.ts index 964e4d4e..e729169e 100644 --- 
a/app/app/hooks/useDeviceConnection.ts +++ b/app/app/hooks/useDeviceConnection.ts @@ -1,6 +1,6 @@ import { useState, useCallback } from 'react'; import { Alert } from 'react-native'; -import { OmiConnection, BleAudioCodec, OmiDevice } from 'chronicle-react-native'; +import { OmiConnection, BleAudioCodec, OmiDevice } from 'friend-lite-react-native'; interface UseDeviceConnection { connectedDevice: OmiDevice | null; diff --git a/app/app/hooks/useDeviceScanning.ts b/app/app/hooks/useDeviceScanning.ts index f4c16ff3..d7780266 100644 --- a/app/app/hooks/useDeviceScanning.ts +++ b/app/app/hooks/useDeviceScanning.ts @@ -1,6 +1,6 @@ import { useState, useEffect, useCallback, useRef } from 'react'; import { BleManager, State as BluetoothState } from 'react-native-ble-plx'; -import { OmiConnection, OmiDevice } from 'chronicle-react-native'; // Assuming this is the correct import for Omi types +import { OmiConnection, OmiDevice } from 'friend-lite-react-native'; // Assuming this is the correct import for Omi types interface UseDeviceScanning { devices: OmiDevice[]; diff --git a/app/app/index.tsx b/app/app/index.tsx index 2b20cb7b..8bb1234a 100644 --- a/app/app/index.tsx +++ b/app/app/index.tsx @@ -1,6 +1,6 @@ import React, { useRef, useCallback, useEffect, useState } from 'react'; import { StyleSheet, Text, View, SafeAreaView, ScrollView, Platform, FlatList, ActivityIndicator, Alert, Switch, Button, TouchableOpacity, KeyboardAvoidingView } from 'react-native'; -import { OmiConnection } from 'chronicle-react-native'; // OmiDevice also comes from here +import { OmiConnection } from 'friend-lite-react-native'; // OmiDevice also comes from here import { State as BluetoothState } from 'react-native-ble-plx'; // Import State from ble-plx // Hooks @@ -521,7 +521,7 @@ export default function App() { contentContainerStyle={styles.content} keyboardShouldPersistTaps="handled" > - Chronicle + Friend Lite {/* Backend Connection - moved to top */} Date: Thu, 11 Dec 2025 15:13:37 +0000 
Subject: [PATCH 07/25] rabbit aI fixes --- backends/advanced/webui/package-lock.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/backends/advanced/webui/package-lock.json b/backends/advanced/webui/package-lock.json index 5b800a48..1090d0bb 100644 --- a/backends/advanced/webui/package-lock.json +++ b/backends/advanced/webui/package-lock.json @@ -20,6 +20,7 @@ }, "devDependencies": { "@types/d3": "^7.4.3", + "@types/frappe-gantt": "^0.9.0", "@types/react": "^18.2.43", "@types/react-dom": "^18.2.17", "@types/react-vertical-timeline-component": "^3.3.6", @@ -1990,6 +1991,13 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/frappe-gantt": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/@types/frappe-gantt/-/frappe-gantt-0.9.0.tgz", + "integrity": "sha512-n00ElvRvJ1/+HkJwt57yjnTtAM7FcH/pEV9LbRCy3+hR39TY6l0mQuy4o909uxvw97aCNhQjNh8J8xACKJ2G3w==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/geojson": { "version": "7946.0.16", "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.16.tgz", From 98c3498ac8209be60a3ebc0febb19ffb21eda593 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Mon, 17 Nov 2025 18:19:11 +0000 Subject: [PATCH 08/25] Added mcp server and auth/apikey --- .../src/advanced_omi_backend/app_factory.py | 5 + .../src/advanced_omi_backend/models/user.py | 5 + .../routers/modules/user_routes.py | 45 +- .../services/mcp_server.py | 532 ++++++++++++++++++ backends/advanced/webui/src/App.tsx | 6 + .../webui/src/components/layout/Layout.tsx | 3 +- .../webui/src/contexts/AuthContext.tsx | 2 + backends/advanced/webui/src/services/api.ts | 10 +- 8 files changed, 600 insertions(+), 8 deletions(-) create mode 100644 backends/advanced/src/advanced_omi_backend/services/mcp_server.py diff --git a/backends/advanced/src/advanced_omi_backend/app_factory.py b/backends/advanced/src/advanced_omi_backend/app_factory.py index 1eba0df6..fdde55de 100644 --- 
a/backends/advanced/src/advanced_omi_backend/app_factory.py +++ b/backends/advanced/src/advanced_omi_backend/app_factory.py @@ -37,6 +37,7 @@ from advanced_omi_backend.routers.modules.websocket_routes import router as websocket_router from advanced_omi_backend.services.audio_service import get_audio_stream_service from advanced_omi_backend.task_manager import init_task_manager, get_task_manager +from advanced_omi_backend.services.mcp_server import setup_mcp_server logger = logging.getLogger(__name__) application_logger = logging.getLogger("audio_processing") @@ -215,6 +216,10 @@ def create_app() -> FastAPI: tags=["users"], ) + # Setup MCP server for conversation access + setup_mcp_server(app) + logger.info("MCP server configured for conversation access") + # Mount static files LAST (mounts are catch-all patterns) CHUNK_DIR = Path("/app/audio_chunks") app.mount("/audio", StaticFiles(directory=CHUNK_DIR), name="audio") diff --git a/backends/advanced/src/advanced_omi_backend/models/user.py b/backends/advanced/src/advanced_omi_backend/models/user.py index b0ced195..7998c5b3 100644 --- a/backends/advanced/src/advanced_omi_backend/models/user.py +++ b/backends/advanced/src/advanced_omi_backend/models/user.py @@ -25,6 +25,8 @@ class UserRead(BaseUser[PydanticObjectId]): display_name: Optional[str] = None registered_clients: dict[str, dict] = Field(default_factory=dict) primary_speakers: list[dict] = Field(default_factory=list) + api_key: Optional[str] = None + api_key_created_at: Optional[datetime] = None class UserUpdate(BaseUserUpdate): @@ -62,6 +64,9 @@ class User(BeanieBaseUser, Document): registered_clients: dict[str, dict] = Field(default_factory=dict) # Speaker processing filter configuration primary_speakers: list[dict] = Field(default_factory=list) + # API key for MCP access + api_key: Optional[str] = None + api_key_created_at: Optional[datetime] = None class Settings: name = "users" # Collection name in MongoDB - standardized from "fastapi_users" diff --git 
a/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py b/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py index 12ed5c63..233ddd68 100644 --- a/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py +++ b/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py @@ -5,10 +5,12 @@ """ import logging +import secrets +from datetime import UTC, datetime -from fastapi import APIRouter, Depends +from fastapi import APIRouter, Depends, HTTPException -from advanced_omi_backend.auth import current_superuser +from advanced_omi_backend.auth import current_active_user, current_superuser from advanced_omi_backend.controllers import user_controller from advanced_omi_backend.users import User, UserCreate, UserUpdate @@ -44,3 +46,42 @@ async def delete_user( ): """Delete a user and optionally their associated data. Admin only.""" return await user_controller.delete_user(user_id, delete_conversations, delete_memories) + + +@router.post("/me/api-key") +async def generate_api_key(current_user: User = Depends(current_active_user)): + """Generate a new API key for the current user.""" + try: + # Generate a secure random API key (32 bytes = 64 hex characters) + new_api_key = secrets.token_urlsafe(32) + + # Update user with new API key + current_user.api_key = new_api_key + current_user.api_key_created_at = datetime.now(UTC) + await current_user.save() + + logger.info(f"Generated new API key for user {current_user.id}") + + return { + "api_key": new_api_key, + "created_at": current_user.api_key_created_at.isoformat() + } + except Exception as e: + logger.error(f"Failed to generate API key for user {current_user.id}: {e}") + raise HTTPException(status_code=500, detail="Failed to generate API key") + + +@router.delete("/me/api-key") +async def revoke_api_key(current_user: User = Depends(current_active_user)): + """Revoke the current user's API key.""" + try: + current_user.api_key = None + 
current_user.api_key_created_at = None + await current_user.save() + + logger.info(f"Revoked API key for user {current_user.id}") + + return {"status": "success", "message": "API key revoked"} + except Exception as e: + logger.error(f"Failed to revoke API key for user {current_user.id}: {e}") + raise HTTPException(status_code=500, detail="Failed to revoke API key") diff --git a/backends/advanced/src/advanced_omi_backend/services/mcp_server.py b/backends/advanced/src/advanced_omi_backend/services/mcp_server.py new file mode 100644 index 00000000..27288599 --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/services/mcp_server.py @@ -0,0 +1,532 @@ +""" +MCP Server for Friend-Lite conversations. + +This module implements an MCP (Model Context Protocol) server that provides +conversation access tools for LLMs to retrieve conversation data, transcripts, +and audio files. + +Key features: +- List conversations with filtering and pagination +- Get detailed conversation data including transcripts and segments +- Access conversation audio files as resources +- User-scoped access with proper authentication +""" + +import base64 +import contextvars +import json +import logging +from pathlib import Path +from typing import Optional, List + +from fastapi import FastAPI, Request +from fastapi.routing import APIRouter +from mcp.server.fastmcp import FastMCP +from mcp.server.sse import SseServerTransport + +from advanced_omi_backend.config import CHUNK_DIR +from advanced_omi_backend.models.conversation import Conversation +from advanced_omi_backend.models.user import User + +logger = logging.getLogger(__name__) + +# Initialize MCP +mcp = FastMCP("friend-lite-conversations") + +# Context variables for user_id +user_id_var: contextvars.ContextVar[str] = contextvars.ContextVar("user_id") + +# Create a router for MCP endpoints +mcp_router = APIRouter(prefix="/mcp") + +# Initialize SSE transport +sse = SseServerTransport("/mcp/messages/") + + +async def 
resolve_user_identifier(identifier: str) -> Optional[str]: + """ + Resolve a user identifier (email or user_id) to a user_id. + + Args: + identifier: Either an email address or a MongoDB ObjectId string + + Returns: + User ID string if found, None otherwise + """ + try: + # First try to find by email (case-insensitive) + user = await User.find_one(User.email == identifier.lower()) + if user: + logger.info(f"Resolved email '{identifier}' to user_id: {user.id}") + return str(user.id) + + # If not found by email, assume it's already a user_id + # Verify it exists + from bson import ObjectId + try: + user = await User.find_one(User.id == ObjectId(identifier)) + if user: + logger.info(f"Verified user_id: {identifier}") + return str(user.id) + except: + pass + + logger.warning(f"Could not resolve user identifier: {identifier}") + return None + except Exception as e: + logger.error(f"Error resolving user identifier '{identifier}': {e}") + return None + + +@mcp.tool(description="List all conversations. Returns conversation_id, title, summary, created_at, client_id, segment_count, memory_count, and has_audio. Supports date filtering and pagination.") +async def list_conversations( + limit: int = 20, + offset: int = 0, + order_by: str = "created_at_desc", + start_date: Optional[str] = None, + end_date: Optional[str] = None +) -> str: + """ + List conversations with optional date filtering. 
+ + Args: + limit: Maximum number of conversations to return (default: 20, max: 100) + offset: Number of conversations to skip for pagination (default: 0) + order_by: Sort order - "created_at_desc" (newest first) or "created_at_asc" (oldest first) + start_date: Optional ISO 8601 date string (e.g., "2025-01-01T00:00:00Z") - filter conversations after this date + end_date: Optional ISO 8601 date string (e.g., "2025-12-31T23:59:59Z") - filter conversations before this date + + Returns: + JSON string with list of conversations and pagination info + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Validate and limit parameters + limit = min(max(1, limit), 100) # Clamp between 1 and 100 + offset = max(0, offset) + + # Build base query + # If uid is "all", return all conversations (temporary for development) + # In the future, this will filter by speaker identity + if uid == "all": + query = Conversation.find_all() + else: + query = Conversation.find(Conversation.user_id == uid) + + # Apply date filtering if provided + from datetime import datetime + + if start_date: + try: + start_dt = datetime.fromisoformat(start_date.replace('Z', '+00:00')) + query = query.find(Conversation.start_datetime >= start_dt) + except ValueError as e: + logger.warning(f"Invalid start_date format: {start_date}, error: {e}") + return json.dumps({"error": f"Invalid start_date format: {start_date}. Use ISO 8601 format."}, indent=2) + + if end_date: + try: + end_dt = datetime.fromisoformat(end_date.replace('Z', '+00:00')) + query = query.find(Conversation.start_datetime <= end_dt) + except ValueError as e: + logger.warning(f"Invalid end_date format: {end_date}, error: {e}") + return json.dumps({"error": f"Invalid end_date format: {end_date}. 
Use ISO 8601 format."}, indent=2) + + # Get total count with same filters + total_count = await query.count() + + # Apply sorting + if order_by == "created_at_asc": + query = query.sort(Conversation.start_datetime) + else: # Default to newest first + query = query.sort(-Conversation.start_datetime) + + # Apply pagination + conversations = await query.skip(offset).limit(limit).to_list() + + # Format conversations for response + formatted_convs = [] + for conv in conversations: + + formatted_convs.append({ + "conversation_id": conv.conversation_id, + "title": conv.title, + "summary": conv.summary, + "start_datetime": conv.start_datetime.isoformat(), + "end_datetime": conv.end_datetime.isoformat() if conv.end_datetime else None, + "segment_count": len(conv.segments), + "memory_count": conv.memory_count, + "client_id": conv.client_id, + }) + + + result = { + "conversations": formatted_convs, + "pagination": { + "total": total_count, + "limit": limit, + "offset": offset, + "returned": len(formatted_convs), + "has_more": (offset + len(formatted_convs)) < total_count + } + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.exception(f"Error listing conversations: {e}") + return json.dumps({"error": f"Failed to list conversations: {str(e)}"}, indent=2) + + +@mcp.tool(description="Get detailed information about a specific conversation including full transcript, speaker segments, memories, and version history. Use the conversation_id from list_conversations.") +async def get_conversation(conversation_id: str) -> str: + """ + Get detailed conversation data. 
+ + Args: + conversation_id: The unique conversation identifier + + Returns: + JSON string with complete conversation details + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Format conversation data with explicit fields + conv_data = { + # Core identifiers + "conversation_id": conversation.conversation_id, + "audio_uuid": conversation.audio_uuid, + "user_id": conversation.user_id, + "client_id": conversation.client_id, + + # Metadata + "start_datetime": conversation.start_datetime.isoformat(), + "end_datetime": conversation.end_datetime.isoformat() if conversation.end_datetime else None, + "title": conversation.title, + "summary": conversation.summary, + # "detailed_summary": conversation.detailed_summary, + + # Transcript data + "transcript": conversation.transcript, + + # Memory data + "memory_count": conversation.memory_count, + + # Audio paths + "has_audio": bool(conversation.audio_path), + "has_cropped_audio": bool(conversation.cropped_audio_path), + + # Version information + "active_transcript_version": conversation.active_transcript_version, + "active_memory_version": conversation.active_memory_version, + "transcript_versions_count": len(conversation.transcript_versions), + "memory_versions_count": len(conversation.memory_versions) + } + + return json.dumps(conv_data, indent=2) + + except Exception as e: + logger.exception(f"Error getting conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get conversation: 
{str(e)}"}, indent=2) + + +@mcp.tool(description="Get speaker segments from a conversation. Returns detailed timing and speaker information for each segment of the transcript.") +async def get_segments_from_conversation(conversation_id: str) -> str: + """ + Get speaker segments from a conversation. + + Args: + conversation_id: The unique conversation identifier + + Returns: + JSON string with speaker segments including timing and text + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Format segments + segments_data = { + "conversation_id": conversation_id, + "segment_count": len(conversation.segments), + "segments": [ + { + "start": seg.start, + "end": seg.end, + "duration": seg.end - seg.start, + "text": seg.text, + "speaker": seg.speaker, + "confidence": seg.confidence + } for seg in conversation.segments + ] + } + + return json.dumps(segments_data, indent=2) + + except Exception as e: + logger.exception(f"Error getting segments for conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get segments: {str(e)}"}, indent=2) + + +@mcp.resource(uri="conversation://{conversation_id}/audio", name="Conversation Audio", description="Get the audio file for a conversation") +async def get_conversation_audio(conversation_id: str) -> str: + """ + Get audio file for a conversation. 
+ + Args: + conversation_id: The unique conversation identifier + + Returns: + Base64-encoded audio data with metadata + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Default to regular audio (not cropped) + audio_type = "audio" + + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Get the appropriate audio path + if audio_type == "cropped_audio": + audio_path = conversation.cropped_audio_path + if not audio_path: + return json.dumps({"error": "No cropped audio available for this conversation"}, indent=2) + else: # Default to regular audio + audio_path = conversation.audio_path + if not audio_path: + return json.dumps({"error": "No audio file available for this conversation"}, indent=2) + + # Resolve full path + full_path = CHUNK_DIR / audio_path + + if not full_path.exists(): + return json.dumps({"error": f"Audio file not found at path: {audio_path}"}, indent=2) + + # Read and encode audio file + with open(full_path, "rb") as f: + audio_data = f.read() + + audio_base64 = base64.b64encode(audio_data).decode('utf-8') + + result = { + "conversation_id": conversation_id, + "audio_type": audio_type, + "file_path": str(audio_path), + "file_size_bytes": len(audio_data), + "mime_type": "audio/wav", # Friend-Lite stores audio as WAV + "audio_base64": audio_base64 + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.exception(f"Error getting audio for conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get audio: {str(e)}"}, indent=2) + + 
+@mcp.resource(uri="conversation://{conversation_id}/cropped_audio", name="Conversation Cropped Audio", description="Get the cropped (speech-only) audio file for a conversation") +async def get_conversation_cropped_audio(conversation_id: str) -> str: + """ + Get cropped audio file for a conversation. + + Args: + conversation_id: The unique conversation identifier + + Returns: + Base64-encoded cropped audio data with metadata + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Get cropped audio path + audio_path = conversation.cropped_audio_path + if not audio_path: + return json.dumps({"error": "No cropped audio available for this conversation"}, indent=2) + + # Resolve full path + full_path = CHUNK_DIR / audio_path + + if not full_path.exists(): + return json.dumps({"error": f"Audio file not found at path: {audio_path}"}, indent=2) + + # Read and encode audio file + with open(full_path, "rb") as f: + audio_data = f.read() + + audio_base64 = base64.b64encode(audio_data).decode('utf-8') + + result = { + "conversation_id": conversation_id, + "audio_type": "cropped_audio", + "file_path": str(audio_path), + "file_size_bytes": len(audio_data), + "mime_type": "audio/wav", + "audio_base64": audio_base64 + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.exception(f"Error getting cropped audio for conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get cropped audio: {str(e)}"}, indent=2) + + 
+@mcp_router.get("/conversations/sse") +async def handle_sse(request: Request): + """ + Handle SSE connections with Bearer token authentication. + + The access token should be provided in the Authorization header: + Authorization: Bearer + + Note: For development, this bypasses user authentication and returns all conversations. + In the future, this will validate speaker identity from conversations. + """ + from fastapi.responses import JSONResponse + + # Extract access token from Authorization header + auth_header = request.headers.get("authorization") + if not auth_header: + logger.error("No Authorization header provided") + return JSONResponse( + status_code=401, + content={"error": "Authorization header required. Use: Authorization: Bearer "} + ) + + # Parse Bearer token + parts = auth_header.split() + if len(parts) != 2 or parts[0].lower() != "bearer": + logger.error(f"Invalid Authorization header format: {auth_header}") + return JSONResponse( + status_code=401, + content={"error": "Invalid Authorization header. 
Use format: Authorization: Bearer "} + ) + + access_token = parts[1] + if not access_token: + logger.error("Empty access token") + return JSONResponse( + status_code=401, + content={"error": "Access token cannot be empty"} + ) + + # For now, use "all" as the user_id to bypass filtering + # This will be replaced with speaker-based permissions later + logger.info(f"MCP connection established with access token: {access_token[:min(8, len(access_token))]}...") + user_token = user_id_var.set("all") + + try: + # Handle SSE connection + async with sse.connect_sse( + request.scope, + request.receive, + request._send, + ) as (read_stream, write_stream): + await mcp._mcp_server.run( + read_stream, + write_stream, + mcp._mcp_server.create_initialization_options(), + ) + finally: + # Clean up context variables + user_id_var.reset(user_token) + + +@mcp_router.post("/messages/") +async def handle_get_message(request: Request): + return await handle_post_message(request) + + +@mcp_router.post("/conversations/sse/{user_id}/messages/") +async def handle_post_message_with_user(request: Request): + return await handle_post_message(request) + + +async def handle_post_message(request: Request): + """Handle POST messages for SSE""" + try: + body = await request.body() + + # Create a simple receive function that returns the body + async def receive(): + return {"type": "http.request", "body": body, "more_body": False} + + # Create a simple send function that does nothing + async def send(message): + return {} + + # Call handle_post_message with the correct arguments + await sse.handle_post_message(request.scope, receive, send) + + # Return a success response + return {"status": "ok"} + finally: + pass + + +def setup_mcp_server(app: FastAPI): + """Setup MCP server with the FastAPI application""" + mcp._mcp_server.name = "friend-lite-conversations" + + # Include MCP router in the FastAPI app + app.include_router(mcp_router) + + logger.info("Friend-Lite MCP server initialized with 
conversation tools") diff --git a/backends/advanced/webui/src/App.tsx b/backends/advanced/webui/src/App.tsx index fca59623..4c9add41 100644 --- a/backends/advanced/webui/src/App.tsx +++ b/backends/advanced/webui/src/App.tsx @@ -13,6 +13,7 @@ import System from './pages/System' import Upload from './pages/Upload' import Queue from './pages/Queue' import LiveRecord from './pages/LiveRecord' +import Settings from './pages/Settings' import ProtectedRoute from './components/auth/ProtectedRoute' import { ErrorBoundary, PageErrorBoundary } from './components/ErrorBoundary' @@ -89,6 +90,11 @@ function App() { } /> + + + + } /> diff --git a/backends/advanced/webui/src/components/layout/Layout.tsx b/backends/advanced/webui/src/components/layout/Layout.tsx index 5995f823..83a161ab 100644 --- a/backends/advanced/webui/src/components/layout/Layout.tsx +++ b/backends/advanced/webui/src/components/layout/Layout.tsx @@ -15,10 +15,11 @@ export default function Layout() { { path: '/memories', label: 'Memories', icon: Brain }, { path: '/timeline', label: 'Timeline', icon: Calendar }, { path: '/users', label: 'User Management', icon: Users }, + { path: '/settings', label: 'Settings', icon: Settings }, ...(isAdmin ? 
[ { path: '/upload', label: 'Upload Audio', icon: Upload }, { path: '/queue', label: 'Queue Management', icon: Layers }, - { path: '/system', label: 'System State', icon: Settings }, + { path: '/system', label: 'System State', icon: Shield }, ] : []), ] diff --git a/backends/advanced/webui/src/contexts/AuthContext.tsx b/backends/advanced/webui/src/contexts/AuthContext.tsx index 7745e871..97a5b42c 100644 --- a/backends/advanced/webui/src/contexts/AuthContext.tsx +++ b/backends/advanced/webui/src/contexts/AuthContext.tsx @@ -7,6 +7,8 @@ interface User { name: string email: string is_superuser: boolean + api_key?: string + api_key_created_at?: string } interface AuthContextType { diff --git a/backends/advanced/webui/src/services/api.ts b/backends/advanced/webui/src/services/api.ts index 4ee895db..323bd369 100644 --- a/backends/advanced/webui/src/services/api.ts +++ b/backends/advanced/webui/src/services/api.ts @@ -258,14 +258,14 @@ export const chatApi = { export const speakerApi = { // Get current user's speaker configuration getSpeakerConfiguration: () => api.get('/api/speaker-configuration'), - + // Update current user's speaker configuration - updateSpeakerConfiguration: (primarySpeakers: Array<{speaker_id: string, name: string, user_id: number}>) => + updateSpeakerConfiguration: (primarySpeakers: Array<{speaker_id: string, name: string, user_id: number}>) => api.post('/api/speaker-configuration', primarySpeakers), - - // Get enrolled speakers from speaker recognition service + + // Get enrolled speakers from speaker recognition service getEnrolledSpeakers: () => api.get('/api/enrolled-speakers'), - + // Check speaker service status (admin only) getSpeakerServiceStatus: () => api.get('/api/speaker-service-status'), } From 5b04ea42be11507eca4109ec0fe9ec51103b1694 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Thu, 18 Dec 2025 19:08:32 +0000 Subject: [PATCH 09/25] fix test env --- backends/advanced/docker-compose.yml | 3 ++- 
tests/infrastructure/infra_tests.robot | 3 ++- tests/setup/test_env.py | 18 ------------------ 3 files changed, 4 insertions(+), 20 deletions(-) diff --git a/backends/advanced/docker-compose.yml b/backends/advanced/docker-compose.yml index 8d4bc42f..5f84d1d3 100644 --- a/backends/advanced/docker-compose.yml +++ b/backends/advanced/docker-compose.yml @@ -224,9 +224,10 @@ services: # Shared network for cross-project communication networks: - default: + chronicle-network: name: chronicle-network + volumes: ollama_data: driver: local diff --git a/tests/infrastructure/infra_tests.robot b/tests/infrastructure/infra_tests.robot index 48b1a057..bc4dd286 100644 --- a/tests/infrastructure/infra_tests.robot +++ b/tests/infrastructure/infra_tests.robot @@ -163,7 +163,8 @@ Worker Registration Loss Detection Test END # Cleanup: Always restart workers after this test to ensure subsequent tests work - [Teardown] Run Keywords + [Teardown] + ... Run Keywords ... Log To Console \n๐Ÿงน Cleanup: Restarting workers for subsequent tests ... 
AND Restart Workers Container diff --git a/tests/setup/test_env.py b/tests/setup/test_env.py index 7e3ca983..8af9777d 100644 --- a/tests/setup/test_env.py +++ b/tests/setup/test_env.py @@ -3,24 +3,6 @@ from pathlib import Path from dotenv import load_dotenv -# Load .env file from backends/advanced directory if it exists -# This allows tests to work when run from VSCode or command line -# def load_env_file(): -# """Load environment variables from .env file if it exists.""" -# # Look for .env in backends/advanced directory -# env_file = Path(__file__).parent.parent.parent / "backends" / "advanced" / ".env" -# if env_file.exists(): -# with open(env_file) as f: -# for line in f: -# line = line.strip() -# if line and not line.startswith('#') and '=' in line: -# key, value = line.split('=', 1) -# # Only set if not already in environment (CI takes precedence) -# if key not in os.environ: -# os.environ[key] = value - -# Load .env file (CI environment variables take precedence) -# load_env_file() # Load .env from backends/advanced directory to get COMPOSE_PROJECT_NAME backend_env_path = Path(__file__).resolve().parents[2] / "backends" / "advanced" / ".env" From 26cd5b74e05391071cb6832c893a9fa428ece221 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Sat, 20 Dec 2025 08:22:05 +0000 Subject: [PATCH 10/25] added record button to header --- .../components/header/HeaderRecordButton.tsx | 124 ++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 backends/advanced/webui/src/components/header/HeaderRecordButton.tsx diff --git a/backends/advanced/webui/src/components/header/HeaderRecordButton.tsx b/backends/advanced/webui/src/components/header/HeaderRecordButton.tsx new file mode 100644 index 00000000..491f2f95 --- /dev/null +++ b/backends/advanced/webui/src/components/header/HeaderRecordButton.tsx @@ -0,0 +1,124 @@ +import { useEffect, useRef } from 'react' +import { Mic, Square } from 'lucide-react' +import { useSimpleAudioRecording } from 
'../../hooks/useSimpleAudioRecording' + +export default function HeaderRecordButton() { + const recording = useSimpleAudioRecording() + const canvasRef = useRef(null) + const animationRef = useRef() + + // Waveform visualization + useEffect(() => { + if (!recording.isRecording || !recording.analyser || !canvasRef.current) { + // Clear animation when not recording + if (animationRef.current) { + cancelAnimationFrame(animationRef.current) + } + // Clear canvas + if (canvasRef.current) { + const canvas = canvasRef.current + const ctx = canvas.getContext('2d') + if (ctx) { + ctx.clearRect(0, 0, canvas.width, canvas.height) + } + } + return + } + + const canvas = canvasRef.current + const ctx = canvas.getContext('2d') + if (!ctx) return + + const analyser = recording.analyser + analyser.fftSize = 32 // Smaller for compact visualization + const bufferLength = analyser.frequencyBinCount + const dataArray = new Uint8Array(bufferLength) + + const draw = () => { + animationRef.current = requestAnimationFrame(draw) + + analyser.getByteFrequencyData(dataArray) + + // Clear canvas + ctx.clearRect(0, 0, canvas.width, canvas.height) + + const barWidth = canvas.width / bufferLength + let x = 0 + + for (let i = 0; i < bufferLength; i++) { + const barHeight = (dataArray[i] / 255) * canvas.height * 0.8 + + // Gradient color based on intensity + const intensity = dataArray[i] / 255 + const r = Math.floor(59 + intensity * 40) + const g = Math.floor(130 + intensity * 70) + const b = Math.floor(246 - intensity * 50) + + ctx.fillStyle = `rgb(${r}, ${g}, ${b})` + ctx.fillRect(x, canvas.height - barHeight, barWidth - 1, barHeight) + + x += barWidth + } + } + + draw() + + return () => { + if (animationRef.current) { + cancelAnimationFrame(animationRef.current) + } + } + }, [recording.isRecording, recording.analyser]) + + const handleClick = async () => { + if (recording.isRecording) { + recording.stopRecording() + } else { + await recording.startRecording() + } + } + + return ( + + ) +} From 
083002d39dc52b265673ef0bc52840f8f79001bc Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Sat, 20 Dec 2025 08:22:53 +0000 Subject: [PATCH 11/25] made dark mode default --- backends/advanced/webui/src/contexts/ThemeContext.tsx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/backends/advanced/webui/src/contexts/ThemeContext.tsx b/backends/advanced/webui/src/contexts/ThemeContext.tsx index 9f63b503..4c76725f 100644 --- a/backends/advanced/webui/src/contexts/ThemeContext.tsx +++ b/backends/advanced/webui/src/contexts/ThemeContext.tsx @@ -10,7 +10,8 @@ const ThemeContext = createContext(undefined) export function ThemeProvider({ children }: { children: ReactNode }) { const [isDark, setIsDark] = useState(() => { const saved = localStorage.getItem('theme') - return saved ? saved === 'dark' : window.matchMedia('(prefers-color-scheme: dark)').matches + // Default to dark mode if no preference is saved + return saved ? saved === 'dark' : true }) useEffect(() => { From bd0fe83c52067f1e7964ce9ce9de29e5035e53ad Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Sat, 20 Dec 2025 08:23:33 +0000 Subject: [PATCH 12/25] Various design and UI polish --- .../webui/src/components/layout/Layout.tsx | 248 ++++++++++++++---- backends/advanced/webui/src/index.css | 164 +++++++++++- backends/advanced/webui/src/pages/Chat.tsx | 144 +++++----- .../webui/src/pages/Conversations.tsx | 107 ++++++-- .../webui/src/pages/ConversationsRouter.tsx | 34 +-- .../webui/src/pages/ConversationsTimeline.tsx | 39 ++- .../advanced/webui/src/pages/LiveRecord.tsx | 33 ++- .../advanced/webui/src/pages/LoginPage.tsx | 137 ++++++---- backends/advanced/webui/src/pages/Queue.tsx | 184 ++++++------- backends/advanced/webui/tailwind.config.js | 195 +++++++++++++- 10 files changed, 953 insertions(+), 332 deletions(-) diff --git a/backends/advanced/webui/src/components/layout/Layout.tsx b/backends/advanced/webui/src/components/layout/Layout.tsx index 83a161ab..ab0d7dc2 100644 --- 
a/backends/advanced/webui/src/components/layout/Layout.tsx +++ b/backends/advanced/webui/src/components/layout/Layout.tsx @@ -1,12 +1,28 @@ import { Link, useLocation, Outlet } from 'react-router-dom' -import { Music, MessageSquare, MessageCircle, Brain, Users, Upload, Settings, LogOut, Sun, Moon, Shield, Radio, Layers, Calendar } from 'lucide-react' +import { useState, useRef, useEffect } from 'react' +import { MessageSquare, MessageCircle, Brain, Users, Upload, Settings, LogOut, Sun, Moon, Shield, Radio, Layers, Calendar, Search, Bell, User, ChevronDown } from 'lucide-react' import { useAuth } from '../../contexts/AuthContext' import { useTheme } from '../../contexts/ThemeContext' +import HeaderRecordButton from '../header/HeaderRecordButton' export default function Layout() { const location = useLocation() const { user, logout, isAdmin } = useAuth() const { isDark, toggleTheme } = useTheme() + const [userMenuOpen, setUserMenuOpen] = useState(false) + const [searchQuery, setSearchQuery] = useState('') + const userMenuRef = useRef(null) + + // Close dropdown when clicking outside + useEffect(() => { + function handleClickOutside(event: MouseEvent) { + if (userMenuRef.current && !userMenuRef.current.contains(event.target as Node)) { + setUserMenuOpen(false) + } + } + document.addEventListener('mousedown', handleClickOutside) + return () => document.removeEventListener('mousedown', handleClickOutside) + }, []) const navigationItems = [ { path: '/live-record', label: 'Live Record', icon: Radio }, @@ -24,75 +40,202 @@ export default function Layout() { ] return ( -
+
{/* Header */} -
-
+
+
-
- -

- Chronicle Dashboard -

+ {/* Logo & Brand */} +
+
+ +
+
+

+ Chronicle +

+

AI Memory System

+
-
+ + {/* Search Bar */} +
+
+ + setSearchQuery(e.target.value)} + className="w-full pl-10 pr-4 py-2 bg-neutral-100 dark:bg-neutral-700/50 border border-transparent rounded-lg text-sm text-neutral-900 dark:text-neutral-100 placeholder-neutral-500 focus:outline-none focus:ring-2 focus:ring-primary-500 focus:border-transparent transition-all" + /> +
+
+ + {/* Header Actions */} +
+ {/* Record Button */} + + + {/* Divider */} +
+ + {/* Search Icon (Mobile) */} + + + {/* Notifications */} + + + {/* Theme Toggle */} - - {/* User info */} -
-
- {isAdmin && } - {user?.name || user?.email} -
+ + {/* User Menu */} +
+ + + {/* Dropdown Menu */} + {userMenuOpen && ( +
+ {/* User Info */} +
+
+
+ +
+
+

+ {user?.name || 'User'} +

+

+ {user?.email} +

+
+
+ {isAdmin && ( + Admin + )} +
+ + {/* Menu Items */} +
+ setUserMenuOpen(false)} + > + + Settings + +
+ + {/* Logout */} +
+ +
+
+ )}
- -
-
-
+ {/* Main Container */} +
+
{/* Sidebar Navigation */}
{/* Main Content */} -
-
+
+
@@ -100,10 +243,13 @@ export default function Layout() {
{/* Footer */} -
+
-
- ๐ŸŽต Chronicle Dashboard v1.0 | AI-powered personal audio system +
+ + Chronicle v1.0 + โ€ข + AI-powered personal audio system
diff --git a/backends/advanced/webui/src/index.css b/backends/advanced/webui/src/index.css index 96d55fdc..b1cac532 100644 --- a/backends/advanced/webui/src/index.css +++ b/backends/advanced/webui/src/index.css @@ -1,9 +1,171 @@ +@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap'); + @tailwind base; @tailwind components; @tailwind utilities; @layer base { + /* Base Styles */ + html { + font-family: 'Inter', system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + text-rendering: optimizeLegibility; + } + + body { + @apply bg-neutral-50 dark:bg-neutral-900 text-neutral-900 dark:text-neutral-100; + } + + /* Improve focus states for accessibility */ + *:focus-visible { + @apply outline-none ring-2 ring-primary-500 ring-offset-2 ring-offset-neutral-0 dark:ring-offset-neutral-950; + } + + /* Smooth scrolling */ html { - font-family: system-ui, sans-serif; + scroll-behavior: smooth; + } + + /* Custom scrollbar */ + ::-webkit-scrollbar { + width: 12px; + height: 12px; + } + + ::-webkit-scrollbar-track { + @apply bg-neutral-100 dark:bg-neutral-800; + } + + ::-webkit-scrollbar-thumb { + @apply bg-neutral-300 dark:bg-neutral-600 rounded-lg hover:bg-neutral-400 dark:hover:bg-neutral-500; + } +} + +@layer components { + /* Button Components */ + .btn { + @apply inline-flex items-center justify-center px-4 py-2 rounded-lg font-medium transition-all duration-200 focus:outline-none focus-visible:ring-2 focus-visible:ring-offset-2 disabled:opacity-50 disabled:cursor-not-allowed; + } + + .btn-primary { + @apply btn bg-primary-600 text-white hover:bg-primary-700 active:bg-primary-800 focus-visible:ring-primary-500 shadow-sm; + } + + .btn-secondary { + @apply btn bg-neutral-200 dark:bg-neutral-700 text-neutral-900 dark:text-neutral-100 hover:bg-neutral-300 dark:hover:bg-neutral-600 active:bg-neutral-400 dark:active:bg-neutral-500 
focus-visible:ring-neutral-500; + } + + .btn-ghost { + @apply btn bg-transparent hover:bg-neutral-100 dark:hover:bg-neutral-800 text-neutral-700 dark:text-neutral-300 focus-visible:ring-neutral-500; + } + + .btn-destructive { + @apply btn bg-error-600 text-white hover:bg-error-700 active:bg-error-800 focus-visible:ring-error-500 shadow-sm; + } + + /* Card Components */ + .card { + @apply bg-white dark:bg-neutral-800 rounded-lg border border-neutral-200 dark:border-neutral-700 shadow-sm transition-shadow duration-200; + } + + .card-hover { + @apply card hover:shadow-md; + } + + /* Input Components */ + .input { + @apply w-full px-3 py-2 bg-white dark:bg-neutral-800 border border-neutral-300 dark:border-neutral-600 rounded-lg text-neutral-900 dark:text-neutral-100 placeholder-neutral-400 dark:placeholder-neutral-500 transition-colors duration-200 focus:outline-none focus:ring-2 focus:ring-primary-500 focus:border-transparent disabled:bg-neutral-100 dark:disabled:bg-neutral-900 disabled:cursor-not-allowed; + } + + /* Badge Components */ + .badge { + @apply inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium; + } + + .badge-primary { + @apply badge bg-primary-100 dark:bg-primary-900/30 text-primary-800 dark:text-primary-200; + } + + .badge-success { + @apply badge bg-success-100 dark:bg-success-900/30 text-success-800 dark:text-success-200; + } + + .badge-error { + @apply badge bg-error-100 dark:bg-error-900/30 text-error-800 dark:text-error-200; + } + + .badge-warning { + @apply badge bg-warning-100 dark:bg-warning-900/30 text-warning-800 dark:text-warning-200; + } + + .badge-neutral { + @apply badge bg-neutral-100 dark:bg-neutral-800 text-neutral-800 dark:text-neutral-200; + } +} + +@layer utilities { + /* Animation utilities */ + .animate-fade-in { + animation: fadeIn 0.3s ease-in-out; + } + + .animate-slide-up { + animation: slideUp 0.3s ease-out; + } + + .animate-slide-down { + animation: slideDown 0.3s ease-out; + } + + @keyframes fadeIn { + 
from { + opacity: 0; + } + to { + opacity: 1; + } + } + + @keyframes slideUp { + from { + transform: translateY(10px); + opacity: 0; + } + to { + transform: translateY(0); + opacity: 1; + } + } + + @keyframes slideDown { + from { + transform: translateY(-10px); + opacity: 0; + } + to { + transform: translateY(0); + opacity: 1; + } + } + + @keyframes slideInLeft { + from { + transform: translateX(-20px); + opacity: 0; + } + to { + transform: translateX(0); + opacity: 1; + } + } + + .animate-slide-in-left { + animation: slideInLeft 0.3s ease-out; + } + + /* Text utilities */ + .text-balance { + text-wrap: balance; } } \ No newline at end of file diff --git a/backends/advanced/webui/src/pages/Chat.tsx b/backends/advanced/webui/src/pages/Chat.tsx index f696518a..d716699b 100644 --- a/backends/advanced/webui/src/pages/Chat.tsx +++ b/backends/advanced/webui/src/pages/Chat.tsx @@ -287,42 +287,51 @@ export default function Chat() { } return ( -
- {/* Sidebar */} -
- {/* Header */} -
-
-
- -

Chat

-
- +
+ {/* Header */} +
+
+
+ +
+
+

+ Chat +

+

+ Chat with your AI assistant using your memories +

+ +
- {/* Sessions List */} -
- {isLoading ? ( -
Loading sessions...
- ) : sessions.length === 0 ? ( -
- No chat sessions yet. -
- -
- ) : ( + {/* Chat Container */} +
+ {/* Sidebar */} +
+ {/* Sessions List */} +
+ {isLoading ? ( +
Loading sessions...
+ ) : sessions.length === 0 ? ( +
+ No chat sessions yet. +
+ +
+ ) : (
{sessions.map((session) => (
{/* Main Chat Area */} -
+
{currentSession ? ( <> {/* Chat Header */} -
+

{currentSession.title} @@ -479,7 +488,7 @@ export default function Chat() {

{/* Input Area */} -
+
{error && (
{error} @@ -519,7 +528,7 @@ export default function Chat() { ) : ( /* No Session Selected */ -
+

@@ -539,38 +548,39 @@ export default function Chat() { )}

- {/* Memory Panel (if enabled and has context) */} - {showMemoryPanel && memoryContext && memoryContext.memory_count > 0 && ( -
-
-

- - Memory Context -

- -
-
-

Using {memoryContext.memory_count} relevant memories to enhance this conversation.

-
- {memoryContext.memory_ids.slice(0, 3).map((id) => ( -
- Memory ID: {id} -
- ))} - {memoryContext.memory_ids.length > 3 && ( -
- +{memoryContext.memory_ids.length - 3} more memories -
- )} + {/* Memory Panel (if enabled and has context) */} + {showMemoryPanel && memoryContext && memoryContext.memory_count > 0 && ( +
+
+

+ + Memory Context +

+ +
+
+

Using {memoryContext.memory_count} relevant memories to enhance this conversation.

+
+ {memoryContext.memory_ids.slice(0, 3).map((id) => ( +
+ Memory ID: {id} +
+ ))} + {memoryContext.memory_ids.length > 3 && ( +
+ +{memoryContext.memory_ids.length - 3} more memories +
+ )} +
-
- )} + )} +
) } \ No newline at end of file diff --git a/backends/advanced/webui/src/pages/Conversations.tsx b/backends/advanced/webui/src/pages/Conversations.tsx index d4b76ed3..cee0fccf 100644 --- a/backends/advanced/webui/src/pages/Conversations.tsx +++ b/backends/advanced/webui/src/pages/Conversations.tsx @@ -49,7 +49,12 @@ const SPEAKER_COLOR_PALETTE = [ 'text-cyan-600 dark:text-cyan-400', ]; -export default function Conversations() { +interface ConversationsProps { + activeTab?: 'classic' | 'timeline' + setActiveTab?: (tab: 'classic' | 'timeline') => void +} + +export default function Conversations({ activeTab = 'classic', setActiveTab }: ConversationsProps = {}) { const [conversations, setConversations] = useState([]) const [loading, setLoading] = useState(true) const [error, setError] = useState(null) @@ -385,20 +390,28 @@ export default function Conversations() { if (loading) { return ( -
-
- Loading conversations... +
+
+

Loading conversations...

) } if (error) { return ( -
-
{error}
+
+
+ +
+

+ Unable to load conversations +

+

+ {error} +

@@ -409,26 +422,33 @@ export default function Conversations() { return (
{/* Header */} -
-
- -

- Latest Conversations -

+
+
+
+ +
+
+

+ Conversations +

+

+ View and manage your audio conversations +

+
-
-
+ {/* Tab Navigation */} + {setActiveTab && ( +
+ +
+ )} + {/* Conversations List */}
{conversations.length === 0 ? ( -
- -

No conversations found

+
+
+ +
+

+ No conversations yet +

+

+ Start recording audio to create your first conversation and build your personal knowledge base. +

) : ( conversations.map((conversation) => (
{/* Deleted Conversation Warning */} diff --git a/backends/advanced/webui/src/pages/ConversationsRouter.tsx b/backends/advanced/webui/src/pages/ConversationsRouter.tsx index c7e6e95c..3ec2e448 100644 --- a/backends/advanced/webui/src/pages/ConversationsRouter.tsx +++ b/backends/advanced/webui/src/pages/ConversationsRouter.tsx @@ -7,41 +7,11 @@ export default function ConversationsRouter() { return (
- {/* Tab Navigation */} -
- -
- {/* Content */} {activeTab === 'classic' ? ( - + ) : ( - + )}
) diff --git a/backends/advanced/webui/src/pages/ConversationsTimeline.tsx b/backends/advanced/webui/src/pages/ConversationsTimeline.tsx index 5c3f748f..fe51d66a 100644 --- a/backends/advanced/webui/src/pages/ConversationsTimeline.tsx +++ b/backends/advanced/webui/src/pages/ConversationsTimeline.tsx @@ -193,7 +193,12 @@ function ConversationCard({ conversation, formatDuration }: ConversationCardProp ) } -export default function ConversationsTimeline() { +interface ConversationsTimelineProps { + activeTab?: 'classic' | 'timeline' + setActiveTab?: (tab: 'classic' | 'timeline') => void +} + +export default function ConversationsTimeline({ activeTab = 'timeline', setActiveTab }: ConversationsTimelineProps = {}) { const [conversations, setConversations] = useState([]) const [loading, setLoading] = useState(true) const [error, setError] = useState(null) @@ -277,6 +282,38 @@ export default function ConversationsTimeline() {
+ {/* Tab Navigation */} + {setActiveTab && ( +
+ +
+ )} + {/* Timeline */} {conversations.length === 0 ? (
diff --git a/backends/advanced/webui/src/pages/LiveRecord.tsx b/backends/advanced/webui/src/pages/LiveRecord.tsx index 4b763746..c1c44705 100644 --- a/backends/advanced/webui/src/pages/LiveRecord.tsx +++ b/backends/advanced/webui/src/pages/LiveRecord.tsx @@ -11,24 +11,31 @@ export default function LiveRecord() { return (
{/* Header */} -
-
- -

- Live Audio Recording -

+
+
+
+ +
+
+

+ Live Audio Recording +

+

+ Record audio in real-time with streaming or batch processing +

+
{/* Mode Toggle */} -
+
{/* Mode Description */} -
-

+

+

{recording.mode === 'streaming' ? ( <> Streaming Mode: Audio is sent in real-time chunks and processed immediately. diff --git a/backends/advanced/webui/src/pages/LoginPage.tsx b/backends/advanced/webui/src/pages/LoginPage.tsx index 7093e73a..1ffcfe89 100644 --- a/backends/advanced/webui/src/pages/LoginPage.tsx +++ b/backends/advanced/webui/src/pages/LoginPage.tsx @@ -2,7 +2,7 @@ import React, { useState } from 'react' import { Navigate } from 'react-router-dom' import { useAuth } from '../contexts/AuthContext' import { BACKEND_URL } from '../services/api' -import { Music, Eye, EyeOff } from 'lucide-react' +import { Brain, Eye, EyeOff } from 'lucide-react' export default function LoginPage() { const [email, setEmail] = useState('') @@ -51,23 +51,36 @@ export default function LoginPage() { } return ( -

-
-
-
- +
+ {/* Decorative background elements */} +
+
+
+
+ +
+ {/* Logo & Header */} +
+
+
-

- Chronicle Dashboard +

+ Chronicle

-

- Sign in to your account +

+ AI-Powered Personal Audio System +

+

+ Sign in to access your dashboard

-
-
-
-
) diff --git a/backends/advanced/webui/src/pages/Queue.tsx b/backends/advanced/webui/src/pages/Queue.tsx index 3dc774f3..18521dcc 100644 --- a/backends/advanced/webui/src/pages/Queue.tsx +++ b/backends/advanced/webui/src/pages/Queue.tsx @@ -699,8 +699,8 @@ const Queue: React.FC = () => {
-

Queue Management

-

+

Queue Management

+

Last updated: {new Date(lastUpdate).toLocaleTimeString()} โ€ข Auto-refresh every 2s

@@ -748,71 +748,71 @@ const Queue: React.FC = () => { {/* Stats Cards */} {stats && (
-
+
- +
-

Total

+

Total

{stats.total_jobs}

-
+
-

Queued

+

Queued

{stats.queued_jobs}

-
+
0 ? 'animate-pulse' : ''}`} />
-

Processing

+

Processing

{stats.processing_jobs}

-
+
-

Completed

+

Completed

{stats.completed_jobs}

-
+
-

Failed

+

Failed

{stats.failed_jobs}

-
+
- +
-

Cancelled

-

{stats.cancelled_jobs}

+

Cancelled

+

{stats.cancelled_jobs}

-
+
-

Deferred

+

Deferred

{stats.deferred_jobs}

@@ -822,7 +822,7 @@ const Queue: React.FC = () => { {/* Streaming Status */} {streamingStatus && ( -
+

Audio Streaming & Conversations

@@ -917,7 +917,7 @@ const Queue: React.FC = () => { console.log(` - All listen jobs (active): ${allListenJobs.length}, showing latest: ${listenJobs.length}`); return ( -
+
{streamKey} Active @@ -925,21 +925,21 @@ const Queue: React.FC = () => {
- Stream Length: + Stream Length: {health.stream_length}
- Age: + Age: {(health.stream_age_seconds || 0).toFixed(0)}s
- Pending: + Pending: 0 ? 'text-yellow-600' : 'text-green-600'}`}> {health.total_pending}
{health.consumer_groups && health.consumer_groups.map((group) => ( -
+
{group.name}:
{group.consumers.map((consumer) => (
@@ -954,7 +954,7 @@ const Queue: React.FC = () => { {/* Current Speech Detection Job */} {listenJobs.length > 0 && ( -
+
Current Speech Detection:
{listenJobs.map((job) => { const runtime = job.started_at @@ -964,7 +964,7 @@ const Queue: React.FC = () => { const seconds = runtime % 60; return ( -
+
{getStatusIcon(job.status)} @@ -1019,17 +1019,17 @@ const Queue: React.FC = () => { if (!session) return null; return ( -
+
Speech Detection Events:
{session.last_event && (
- Last Event: + Last Event: {session.last_event.split(':')[0]}
)} {session.speaker_check_status && (
- Speaker Check: + Speaker Check: { )} {session.identified_speakers && (
- Speakers: + Speakers: {session.identified_speakers}
)} @@ -1134,7 +1134,7 @@ const Queue: React.FC = () => { if (conversationMap.size === 0) { return ( -
+
No active conversations
); @@ -1180,7 +1180,7 @@ const Queue: React.FC = () => { ) : ( )} - {clientId} + {clientId} {hasFailedJob ? ( {failedJobCount} Error{failedJobCount > 1 ? 's' : ''} @@ -1194,7 +1194,7 @@ const Queue: React.FC = () => { )}
-
+
Conversation: {conversationId.substring(0, 8)}... โ€ข {createdAt && `Started: ${new Date(createdAt).toLocaleTimeString()} โ€ข `} Words: {wordCount} @@ -1210,7 +1210,7 @@ const Queue: React.FC = () => { {/* Expanded Jobs Section */} {isExpanded && ( -
+
{/* Pipeline Timeline */}
Pipeline Timeline:
@@ -1290,7 +1290,7 @@ const Queue: React.FC = () => { return (
{/* Time axis */} -
+
{timeMarkers.map((marker, idx) => (
{ {job.status} - {job.queue} + {job.queue} {/* Show memory count badge on collapsed card */} {!expandedJobs.has(job.job_id) && job.job_type === 'process_memory_job' && job.result?.memories_created !== undefined && ( @@ -1405,7 +1405,7 @@ const Queue: React.FC = () => { {/* Show job-specific metadata */} {job.meta && ( -
+
{/* open_conversation_job metadata */} {job.job_type === 'open_conversation_job' && ( <> @@ -1474,7 +1474,7 @@ const Queue: React.FC = () => { {/* Show conversation_id if present */} {job.meta.conversation_id && ( -
+
Conv: {job.meta.conversation_id.substring(0, 8)}...
)} @@ -1510,9 +1510,9 @@ const Queue: React.FC = () => { {/* Completed Conversations - Grouped by conversation_id */}
-

Completed Conversations

+

Completed Conversations

- + setApiKeys(prev => ({ ...prev, openai_api_key: e.target.value }))} + placeholder="sk-..." + className="w-full px-3 py-2 text-sm border border-gray-300 dark:border-gray-600 rounded-md bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-blue-500" + /> +
+ For memory extraction and chat +
+
+ + {/* Deepgram API Key */} +
+ + setApiKeys(prev => ({ ...prev, deepgram_api_key: e.target.value }))} + placeholder="Enter Deepgram API key" + className="w-full px-3 py-2 text-sm border border-gray-300 dark:border-gray-600 rounded-md bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-blue-500" + /> +
+ For audio transcription +
+
+ + {/* Mistral API Key */} +
+ + setApiKeys(prev => ({ ...prev, mistral_api_key: e.target.value }))} + placeholder="Enter Mistral API key" + className="w-full px-3 py-2 text-sm border border-gray-300 dark:border-gray-600 rounded-md bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-blue-500" + /> +
+ Alternative transcription provider +
+
+
+ + {/* Save Button and Message */} +
+
+ {configMessage && ( +

+ {configMessage} +

+ )} +
+ +
+
+ + {/* Info Banner */} + {!configStatus.features.llm_enabled && configStatus.graceful_degradation.allow_missing_api_keys && ( +
+
+ +
+

+ Quick Start Mode Active +

+

+ The system is running with limited features. Add API keys above to enable memory extraction, transcription, and chat features. Services will automatically activate after saving and restarting. +

+
+
+
+ )} +
+ )} +
{/* Services Status */} {healthData?.services && ( diff --git a/backends/advanced/webui/src/services/api.ts b/backends/advanced/webui/src/services/api.ts index 323bd369..8d777e3d 100644 --- a/backends/advanced/webui/src/services/api.ts +++ b/backends/advanced/webui/src/services/api.ts @@ -172,6 +172,19 @@ export const systemApi = { // Memory Provider Management getMemoryProvider: () => api.get('/api/admin/memory/provider'), setMemoryProvider: (provider: string) => api.post('/api/admin/memory/provider', { provider }), + + // API Key and Configuration Management + getConfigurationStatus: () => api.get('/api/admin/config/status'), + updateApiKeys: (apiKeys: { + openai_api_key?: string + deepgram_api_key?: string + mistral_api_key?: string + }) => api.post('/api/admin/config/api-keys', apiKeys), + updateProviderConfig: (providerConfig: { + llm_provider?: string + transcription_provider?: string + memory_provider?: string + }) => api.post('/api/admin/config/providers', providerConfig), } export const queueApi = { From 903714c008e06eb0c66c4895c369702667e92078 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Thu, 18 Dec 2025 19:08:32 +0000 Subject: [PATCH 16/25] fix test env --- tests/setup/test_env.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/setup/test_env.py b/tests/setup/test_env.py index 8af9777d..aeb4d417 100644 --- a/tests/setup/test_env.py +++ b/tests/setup/test_env.py @@ -3,7 +3,6 @@ from pathlib import Path from dotenv import load_dotenv - # Load .env from backends/advanced directory to get COMPOSE_PROJECT_NAME backend_env_path = Path(__file__).resolve().parents[2] / "backends" / "advanced" / ".env" if backend_env_path.exists(): From 19266d8fc448ca237c590dec0f0cc3622d88ec17 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Sat, 20 Dec 2025 08:44:39 +0000 Subject: [PATCH 17/25] added config parser --- backends/advanced/compose/backend.yml | 1 + .../advanced_omi_backend/config/__init__.py | 46 ++++ .../config/config_parser.py | 62 +++++ 
.../config/config_schema.py | 117 ++++++++ .../config/settings_adapter.py | 259 ++++++++++++++++++ 5 files changed, 485 insertions(+) create mode 100644 backends/advanced/src/advanced_omi_backend/config/__init__.py create mode 100644 backends/advanced/src/advanced_omi_backend/config/config_parser.py create mode 100644 backends/advanced/src/advanced_omi_backend/config/config_schema.py create mode 100644 backends/advanced/src/advanced_omi_backend/config/settings_adapter.py diff --git a/backends/advanced/compose/backend.yml b/backends/advanced/compose/backend.yml index a7f22f02..0ccc178f 100644 --- a/backends/advanced/compose/backend.yml +++ b/backends/advanced/compose/backend.yml @@ -15,6 +15,7 @@ services: - ../data/audio_chunks:/app/audio_chunks - ../data/debug_dir:/app/debug_dir - ../data:/app/data + - ../config.yaml:/app/config.yaml environment: # Service URLs (Docker internal network) - REDIS_URL=redis://redis:6379/${REDIS_DATABASE:-0} diff --git a/backends/advanced/src/advanced_omi_backend/config/__init__.py b/backends/advanced/src/advanced_omi_backend/config/__init__.py new file mode 100644 index 00000000..c12417b8 --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/config/__init__.py @@ -0,0 +1,46 @@ +""" +Configuration management module. + +Provides YAML-based configuration with MongoDB caching, hot reload, and migration utilities. +Also re-exports legacy config functions for backward compatibility. 
+""" + +from .config_parser import ConfigParser, get_config_parser, init_config_parser +from .config_schema import ChronicleConfig + +# Re-export legacy config functions for backward compatibility +from advanced_omi_backend.legacy_config import ( + DATA_DIR, + CHUNK_DIR, + DEFAULT_DIARIZATION_SETTINGS, + DEFAULT_SPEECH_DETECTION_SETTINGS, + DEFAULT_CONVERSATION_STOP_SETTINGS, + DEFAULT_AUDIO_STORAGE_SETTINGS, + load_diarization_settings_from_file, + save_diarization_settings_to_file, + get_diarization_config_path, + get_speech_detection_settings, + get_conversation_stop_settings, + get_audio_storage_settings, +) + +__all__ = [ + # New config system + "ConfigParser", + "get_config_parser", + "init_config_parser", + "ChronicleConfig", + # Legacy config functions + "DATA_DIR", + "CHUNK_DIR", + "DEFAULT_DIARIZATION_SETTINGS", + "DEFAULT_SPEECH_DETECTION_SETTINGS", + "DEFAULT_CONVERSATION_STOP_SETTINGS", + "DEFAULT_AUDIO_STORAGE_SETTINGS", + "load_diarization_settings_from_file", + "save_diarization_settings_to_file", + "get_diarization_config_path", + "get_speech_detection_settings", + "get_conversation_stop_settings", + "get_audio_storage_settings", +] diff --git a/backends/advanced/src/advanced_omi_backend/config/config_parser.py b/backends/advanced/src/advanced_omi_backend/config/config_parser.py new file mode 100644 index 00000000..51a0e518 --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/config/config_parser.py @@ -0,0 +1,62 @@ +""" +Config Parser - Simple YAML-based configuration management. 
+""" + +from pathlib import Path +from typing import Optional +from ruamel.yaml import YAML + +from .config_schema import ChronicleConfig + +yaml = YAML() + + +class ConfigParser: + """Simple configuration parser for config.yaml using ruamel.yaml.""" + + def __init__(self, config_path: str = "config.yaml"): + self.config_path = Path(config_path) + + async def load(self) -> ChronicleConfig: + """Load and validate configuration from YAML file.""" + if not self.config_path.exists(): + return ChronicleConfig() + + with open(self.config_path) as f: + data = yaml.load(f) or {} + return ChronicleConfig(**data) + + async def save(self, config: ChronicleConfig) -> None: + """Save configuration to YAML file.""" + self.config_path.parent.mkdir(parents=True, exist_ok=True) + + with open(self.config_path, 'w') as f: + yaml.dump(config.model_dump(mode='json'), f) + + async def update(self, updates: dict, updated_by: str = "user") -> None: + """Update specific config fields and save.""" + config = await self.load() + + for key, value in updates.items(): + if hasattr(config, key): + setattr(config, key, value) + + await self.save(config) + + +# Global instance +_config_parser: Optional[ConfigParser] = None + + +def init_config_parser(config_path: str = "config.yaml") -> ConfigParser: + """Initialize global config parser.""" + global _config_parser + _config_parser = ConfigParser(config_path) + return _config_parser + + +def get_config_parser() -> ConfigParser: + """Get global config parser instance.""" + if _config_parser is None: + raise RuntimeError("ConfigParser not initialized") + return _config_parser diff --git a/backends/advanced/src/advanced_omi_backend/config/config_schema.py b/backends/advanced/src/advanced_omi_backend/config/config_schema.py new file mode 100644 index 00000000..1a9e042b --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/config/config_schema.py @@ -0,0 +1,117 @@ +""" +Pydantic schema for config.yaml structure. 
+ +Extends existing settings models with top-level configuration for wizard state, +authentication, and optional services. +""" + +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel, Field, PrivateAttr + +# Import all existing settings models +from advanced_omi_backend.settings_models import ( + ApiKeysSettings, + AudioProcessingSettings, + ConversationSettings, + DiarizationSettings, + InfrastructureSettings, + LLMSettings, + MiscSettings, + NetworkSettings, + ProviderSettings, + SpeechDetectionSettings, +) + + +class AuthConfig(BaseModel): + """Authentication and admin account configuration.""" + + secret_key: str = Field( + default="", + description="JWT signing key (auto-generated if empty)" + ) + admin_name: str = Field( + default="admin", + description="Admin account name" + ) + admin_email: str = Field( + default="admin@example.com", + description="Admin email address" + ) + admin_password_hash: str = Field( + default="", + description="Bcrypt password hash (never store plaintext)" + ) + + +class ChronicleConfig(BaseModel): + """ + Root configuration model for Chronicle. + + This is the complete config.yaml structure, combining all settings + categories with top-level metadata. 
+ """ + + # Metadata + version: str = Field( + default="1.0.0", + description="Config schema version" + ) + wizard_completed: bool = Field( + default=False, + description="Whether first-time setup wizard has been completed" + ) + + # Authentication + auth: AuthConfig = Field( + default_factory=AuthConfig, + description="Authentication configuration" + ) + + # Core Settings (from existing models) + speech_detection: SpeechDetectionSettings = Field( + default_factory=SpeechDetectionSettings, + description="Speech detection settings" + ) + conversation: ConversationSettings = Field( + default_factory=ConversationSettings, + description="Conversation management settings" + ) + audio_processing: AudioProcessingSettings = Field( + default_factory=AudioProcessingSettings, + description="Audio processing settings" + ) + diarization: DiarizationSettings = Field( + default_factory=DiarizationSettings, + description="Speaker diarization settings" + ) + llm: LLMSettings = Field( + default_factory=LLMSettings, + description="LLM provider and model settings" + ) + providers: ProviderSettings = Field( + default_factory=ProviderSettings, + description="Service provider selection" + ) + network: NetworkSettings = Field( + default_factory=NetworkSettings, + description="Network and CORS settings" + ) + infrastructure: InfrastructureSettings = Field( + default_factory=InfrastructureSettings, + description="Core infrastructure services" + ) + misc: MiscSettings = Field( + default_factory=MiscSettings, + description="Miscellaneous settings" + ) + api_keys: ApiKeysSettings = Field( + default_factory=ApiKeysSettings, + description="External service API keys" + ) + + # Internal metadata (not shown in UI, runtime only) + _updated_at: Optional[datetime] = PrivateAttr(default=None) + _updated_by: Optional[str] = PrivateAttr(default=None) diff --git a/backends/advanced/src/advanced_omi_backend/config/settings_adapter.py b/backends/advanced/src/advanced_omi_backend/config/settings_adapter.py 
new file mode 100644 index 00000000..5669d7f3 --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/config/settings_adapter.py @@ -0,0 +1,259 @@ +""" +Settings adapter - bridges ConfigParser to SettingsManager interface. + +This allows existing settings_routes.py to work without modification +while using config.yaml as the source of truth. +""" + +import logging +from typing import TypeVar + +from .config_parser import ConfigParser, get_config_parser +from .config_schema import ChronicleConfig +from advanced_omi_backend.settings_models import ( + AllSettings, + ApiKeysSettings, + AudioProcessingSettings, + ConversationSettings, + DiarizationSettings, + InfrastructureSettings, + LLMSettings, + MiscSettings, + NetworkSettings, + ProviderSettings, + SpeechDetectionSettings, +) + +logger = logging.getLogger(__name__) + +T = TypeVar('T') + + +class ConfigBasedSettingsManager: + """ + Settings manager that uses ConfigParser (config.yaml) instead of MongoDB. + + Implements the same interface as SettingsManager for backward compatibility. 
+ """ + + def __init__(self, config_parser: ConfigParser): + self.config_parser = config_parser + + async def initialize(self): + """Initialize settings (no-op for config-based system).""" + logger.info("ConfigBasedSettingsManager initialized (using config.yaml)") + + # Individual setting getters + + async def get_speech_detection(self) -> SpeechDetectionSettings: + """Get speech detection settings.""" + config = await self.config_parser.load() + return config.speech_detection + + async def get_conversation(self) -> ConversationSettings: + """Get conversation management settings.""" + config = await self.config_parser.load() + return config.conversation + + async def get_audio_processing(self) -> AudioProcessingSettings: + """Get audio processing settings.""" + config = await self.config_parser.load() + return config.audio_processing + + async def get_diarization(self) -> DiarizationSettings: + """Get diarization settings.""" + config = await self.config_parser.load() + return config.diarization + + async def get_llm(self) -> LLMSettings: + """Get LLM settings.""" + config = await self.config_parser.load() + return config.llm + + async def get_providers(self) -> ProviderSettings: + """Get provider settings.""" + config = await self.config_parser.load() + return config.providers + + async def get_network(self) -> NetworkSettings: + """Get network settings.""" + config = await self.config_parser.load() + return config.network + + async def get_infrastructure(self) -> InfrastructureSettings: + """Get infrastructure settings.""" + config = await self.config_parser.load() + return config.infrastructure + + async def get_misc(self) -> MiscSettings: + """Get miscellaneous settings.""" + config = await self.config_parser.load() + return config.misc + + async def get_api_keys(self) -> ApiKeysSettings: + """Get API keys settings.""" + config = await self.config_parser.load() + return config.api_keys + + async def get_all_settings(self) -> AllSettings: + """Get all settings 
combined.""" + config = await self.config_parser.load() + return AllSettings( + speech_detection=config.speech_detection, + conversation=config.conversation, + audio_processing=config.audio_processing, + diarization=config.diarization, + llm=config.llm, + providers=config.providers, + network=config.network, + infrastructure=config.infrastructure, + misc=config.misc, + api_keys=config.api_keys, + ) + + # Individual setting updaters + + async def update_speech_detection( + self, + settings: SpeechDetectionSettings, + updated_by: str = "user", + ): + """Update speech detection settings.""" + await self.config_parser.update( + {"speech_detection": settings.dict()}, + updated_by=updated_by, + ) + + async def update_conversation( + self, + settings: ConversationSettings, + updated_by: str = "user", + ): + """Update conversation management settings.""" + await self.config_parser.update( + {"conversation": settings.dict()}, + updated_by=updated_by, + ) + + async def update_audio_processing( + self, + settings: AudioProcessingSettings, + updated_by: str = "user", + ): + """Update audio processing settings.""" + await self.config_parser.update( + {"audio_processing": settings.dict()}, + updated_by=updated_by, + ) + + async def update_diarization( + self, + settings: DiarizationSettings, + updated_by: str = "user", + ): + """Update diarization settings.""" + await self.config_parser.update( + {"diarization": settings.dict()}, + updated_by=updated_by, + ) + + async def update_llm( + self, + settings: LLMSettings, + updated_by: str = "user", + ): + """Update LLM settings.""" + await self.config_parser.update( + {"llm": settings.dict()}, + updated_by=updated_by, + ) + + async def update_providers( + self, + settings: ProviderSettings, + updated_by: str = "user", + ): + """Update provider settings.""" + await self.config_parser.update( + {"providers": settings.dict()}, + updated_by=updated_by, + ) + + async def update_network( + self, + settings: NetworkSettings, + updated_by: 
str = "user", + ): + """Update network settings.""" + await self.config_parser.update( + {"network": settings.dict()}, + updated_by=updated_by, + ) + + async def update_infrastructure( + self, + settings: InfrastructureSettings, + updated_by: str = "user", + ): + """Update infrastructure settings.""" + await self.config_parser.update( + {"infrastructure": settings.dict()}, + updated_by=updated_by, + ) + + async def update_misc( + self, + settings: MiscSettings, + updated_by: str = "user", + ): + """Update miscellaneous settings.""" + await self.config_parser.update( + {"misc": settings.dict()}, + updated_by=updated_by, + ) + + async def update_api_keys( + self, + settings: ApiKeysSettings, + updated_by: str = "user", + ): + """Update API keys settings.""" + await self.config_parser.update( + {"api_keys": settings.dict()}, + updated_by=updated_by, + ) + + async def update_all_settings( + self, + settings: AllSettings, + updated_by: str = "user", + ): + """Update all settings at once.""" + config = await self.config_parser.load() + + # Update all sections + config.speech_detection = settings.speech_detection + config.conversation = settings.conversation + config.audio_processing = settings.audio_processing + config.diarization = settings.diarization + config.llm = settings.llm + config.providers = settings.providers + config.network = settings.network + config.infrastructure = settings.infrastructure + config.misc = settings.misc + config.api_keys = settings.api_keys + + # Set private attribute (runtime only, not saved to YAML) + config._updated_by = updated_by + + await self.config_parser.save(config) + + def invalidate_cache(self, key: str = None): + """ + Force settings to reload from file on next access. 
+ + Args: + key: Specific settings category (ignored - always reloads all) + """ + # Config parser auto-reloads based on file mtime + logger.info(f"Cache invalidation requested (config auto-reloads from file)") From fc4ed63f3c7eb6b4d97ecb058d02d12b9a7871a9 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Mon, 17 Nov 2025 18:19:11 +0000 Subject: [PATCH 18/25] Added mcp server and auth/apikey From 1045c7889396039458de61bda706aa2866812282 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Sat, 20 Dec 2025 09:35:41 +0000 Subject: [PATCH 19/25] Remove env_writer and complete config.yaml migration - Deleted env_writer.py (no longer needed) - Updated system_controller to read from config.yaml - Moved config files to config/ folder - Added config.defaults.yaml as committed template - Simplified config_parser.py using ruamel.yaml --- .gitignore | 4 + QUICK_START.md | 397 ++++++++++++++++++ backends/advanced/compose/backend.yml | 2 +- backends/advanced/compose/overrides/dev.yml | 4 + backends/advanced/config/config.defaults.yaml | 91 ++++ backends/advanced/pyproject.toml | 2 +- .../src/advanced_omi_backend/app_factory.py | 33 +- .../config/config_parser.py | 14 +- .../controllers/system_controller.py | 16 +- .../{config.py => legacy_config.py} | 0 10 files changed, 550 insertions(+), 13 deletions(-) create mode 100644 QUICK_START.md create mode 100644 backends/advanced/compose/overrides/dev.yml create mode 100644 backends/advanced/config/config.defaults.yaml rename backends/advanced/src/advanced_omi_backend/{config.py => legacy_config.py} (100%) diff --git a/.gitignore b/.gitignore index f0a73d86..6f3a2882 100644 --- a/.gitignore +++ b/.gitignore @@ -5,8 +5,12 @@ .env.secrets .env.api-keys .env.quick-start +.env.default .env.backup.* config.env.backup.* +backends/advanced/config/config.yaml +backends/advanced/config/config.yaml.backup +backends/advanced/config/config.yaml.lock **/memory_config.yaml !**/memory_config.yaml.template example/* diff --git a/QUICK_START.md 
b/QUICK_START.md new file mode 100644 index 00000000..69968d87 --- /dev/null +++ b/QUICK_START.md @@ -0,0 +1,397 @@ +# Chronicle Zero-Configuration Quick Start + +Get Chronicle running in under 5 minutes with automatic configuration, no API keys required upfront. + +## Overview + +The quick-start script provides a streamlined way to launch Chronicle with: +- **Auto-generated credentials** - Secure admin account created automatically +- **Graceful degradation** - System works without API keys, features disabled until configured +- **Separate infrastructure** - Database services persist across application restarts +- **Web-based configuration** - Add API keys through the dashboard UI + +## Prerequisites + +- **Docker** and **Docker Compose** installed +- **Git** for cloning the repository +- **Ports available**: 4000 (web UI), 9000 (backend), 27017 (MongoDB), 6379 (Redis), 6333/6334 (Qdrant) + +## Quick Start + +### 1. Clone and Start + +```bash +git clone https://github.com/chronicle-ai/chronicle.git +cd chronicle +./quick-start.sh +``` + +### 2. Access the Dashboard + +The script will display your login credentials. Navigate to: + +**http://localhost:3000** + +Log in with the credentials shown in the terminal output. + +### 3. Configure API Keys (Optional) + +Navigate to **System** page in the web dashboard and scroll to **API Key Configuration**: + +1. **OpenAI API Key** - For memory extraction and chat features +2. **Deepgram API Key** - For audio transcription +3. 
**Mistral API Key** - Alternative transcription provider (optional) + +Click **Save API Keys** and restart services: + +```bash +make restart +``` + +## Feature Availability + +### Without API Keys (Quick Start Mode) + +โœ… **Working:** +- User authentication and management +- Audio file uploads +- Basic system monitoring +- Database operations + +โš ๏ธ **Limited:** +- Audio transcription (disabled) +- Memory extraction (disabled) +- Chat features (disabled) + +### With API Keys (Full Features) + +โœ… **All features enabled:** +- Real-time audio transcription +- Automatic memory extraction +- Semantic memory search +- Chat interface with context +- Action item detection + +## Docker Compose Architecture + +Chronicle uses **two separate compose files** for infrastructure and application: + +### Infrastructure Layer (`docker-compose.infra.yml`) +Persistent database services in separate project (`infra`): +- **MongoDB** (`mongo` container) - User data and conversations +- **Redis** (`redis` container) - Job queues and caching +- **Qdrant** (`qdrant` container) - Vector storage for memories +- **Neo4j** (optional, `--profile neo4j`) - Graph database +- **Caddy** (optional, `--profile caddy`) - Reverse proxy + +### Application Layer (`docker-compose.yml`) +Application services in separate project (`chronicle`): +- **Backend API** - FastAPI server +- **Workers** - Background job processors +- **Web UI** - React dashboard + +### How They Work Together +- Infrastructure runs in its own project with named containers (`mongo`, `redis`, `qdrant`) +- Application runs in a separate project (`chronicle`) +- Both share the `chronicle-network` bridge network +- Application services reference infrastructure by container name +- You can stop/restart the app without affecting databases + +## Common Commands + +### Standard Docker Workflow + +```bash +# First time: Start infrastructure then application +docker compose -f docker-compose.infra.yml up -d +docker compose up -d + +# Daily 
development: Start application only (requires infra running) +docker compose up -d + +# Rebuild after code changes +docker compose build +docker compose up -d + +# Restart application (super fast!) +docker compose restart + +# Stop application only (keeps infrastructure running) +docker compose down + +# Stop everything (infrastructure + application) +docker compose down +docker compose -f docker-compose.infra.yml down + +# View application logs +docker compose logs -f +``` + +### Using Make Targets (Recommended) + +The Makefile simplifies the workflow: + +```bash +# Start Chronicle (auto-starts infrastructure if needed) +make up + +# Stop application only (keeps infrastructure) +make down + +# Stop everything (infrastructure + application) +make down-all + +# Rebuild application images +make build + +# Restart application only (fast!) +make restart + +# Restart everything +make restart-all + +# View application logs +make logs + +# View all logs (infrastructure + application) +make logs-all +``` + +### Infrastructure Control + +```bash +# Start infrastructure only +make infra-start +# or +docker compose --profile infra up -d mongo redis qdrant + +# Stop infrastructure (keeps data volumes) +make infra-stop +# or +docker compose stop mongo redis qdrant + +# Remove infrastructure and all data +make infra-clean +# or +docker compose --profile infra down -v +``` + +## Configuration Files + +### `.env.default` +Auto-generated by `quick-start.sh`, contains: +- Admin credentials +- Database connection strings +- Port configuration +- Feature flags +- API keys (added later via UI) + +**โš ๏ธ Security Note**: This file contains sensitive credentials and is git-ignored. Never commit it to version control. + +### `config-defaults.yml` +Sensible defaults for all services. Can be customized for advanced configuration. + +## Resetting Configuration + +To start over with fresh configuration: + +```bash +./quick-start.sh --reset +``` + +This will: +1. 
Prompt for new admin credentials +2. Generate new authentication secrets +3. Restart all services with fresh configuration + +## Troubleshooting + +### Services Won't Start + +**Check Docker is running:** +```bash +docker ps +``` + +**Check port availability:** +```bash +# On Linux/Mac +lsof -i :4000 +lsof -i :9000 + +# On Windows (PowerShell) +netstat -ano | findstr :4000 +``` + +**View service logs:** +```bash +# All services +docker compose logs -f + +# Specific service +docker compose logs -f friend-backend +``` + +### Network Warnings + +If you see: `WARN[0000] a network with name chronicle-network exists...` + +**Fix:** +```bash +docker network rm chronicle-network +./quick-start.sh +``` + +The script will recreate the network correctly. + +### Application Not Responding + +**Check backend health:** +```bash +curl http://localhost:8000/health +``` + +**Restart application:** +```bash +make restart +# or +docker compose restart +``` + +### Database Connection Issues + +**Check infrastructure is running:** +```bash +docker compose -f docker-compose.infra.yml ps +``` + +**Restart infrastructure:** +```bash +make infra-stop +make infra-start +``` + +### "Permission Denied" Errors + +**On Linux, try:** +```bash +sudo ./quick-start.sh +``` + +**For Docker permission issues:** +```bash +# Add your user to docker group +sudo usermod -aG docker $USER + +# Log out and back in, then try again +``` + +## Upgrading + +To upgrade Chronicle to the latest version: + +```bash +# Pull latest code +git pull origin main + +# Rebuild application +make build + +# Restart +make restart +``` + +**Note**: Infrastructure services (MongoDB, Redis, Qdrant) don't need rebuilding - your data persists across upgrades. 
+ +## Advanced Configuration + +### Custom Ports + +Edit `.env.default` to change ports: + +```bash +BACKEND_PORT=9000 # Backend API +WEBUI_PORT=4000 # Web dashboard +``` + +Then restart: +```bash +make restart +``` + +### Enable Neo4j Graph Database + +```bash +docker compose -f docker-compose.infra.yml --profile neo4j up -d +``` + +Access Neo4j browser at: **http://localhost:7474** +- Username: `neo4j` +- Password: `password` (or value of `NEO4J_PASSWORD` in `.env.default`) + +### Production Deployment + +For production, consider: + +1. **Use strong passwords**: Edit `.env.default` with secure credentials +2. **Enable HTTPS**: Configure Caddy for SSL/TLS +3. **Set feature flags**: + ```bash + ALLOW_MISSING_API_KEYS=false + LLM_REQUIRED=true + TRANSCRIPTION_REQUIRED=true + ``` +4. **Backup data volumes**: Regular backups of MongoDB, Qdrant, and Redis data + +## API Keys Reference + +### OpenAI +- **Purpose**: LLM for memory extraction and chat +- **Get key**: [platform.openai.com/api-keys](https://platform.openai.com/api-keys) +- **Cost**: ~$1-5/month typical usage with gpt-4o-mini +- **Format**: `sk-...` + +### Deepgram +- **Purpose**: Speech-to-text transcription +- **Get key**: [console.deepgram.com](https://console.deepgram.com/) +- **Free tier**: $200 credits +- **Format**: Alphanumeric string + +### Mistral +- **Purpose**: Alternative transcription (Voxtral models) +- **Get key**: [console.mistral.ai](https://console.mistral.ai/) +- **Format**: Alphanumeric string + +## Next Steps + +After quick-start setup: + +1. **Explore the Dashboard** + - View system health on **System** page + - Check **Conversations** for audio processing + - Browse **Memories** for extracted information + +2. **Test Audio Processing** + - Use **Upload** page to process audio files + - Or use **Live Recording** for microphone capture + +3. 
**Connect Mobile App** + - See [quickstart.md](quickstart.md#step-5-install-chronicle-on-your-phone) for phone setup + - Configure backend URL in app settings + +4. **Add Advanced Features** + - Speaker recognition service + - Offline ASR with Parakeet + - Distributed deployment with Tailscale + +## Further Reading + +- **Full Documentation**: [CLAUDE.md](CLAUDE.md) +- **Complete Setup Guide**: [quickstart.md](quickstart.md) +- **Architecture Details**: [backends/advanced/Docs/README.md](backends/advanced/Docs/README.md) +- **Docker/Kubernetes**: [README-K8S.md](README-K8S.md) + +## Getting Help + +- **GitHub Issues**: [github.com/chronicle-ai/chronicle/issues](https://github.com/chronicle-ai/chronicle/issues) +- **Discussions**: [github.com/chronicle-ai/chronicle/discussions](https://github.com/chronicle-ai/chronicle/discussions) diff --git a/backends/advanced/compose/backend.yml b/backends/advanced/compose/backend.yml index 0ccc178f..f4ea2abb 100644 --- a/backends/advanced/compose/backend.yml +++ b/backends/advanced/compose/backend.yml @@ -15,7 +15,7 @@ services: - ../data/audio_chunks:/app/audio_chunks - ../data/debug_dir:/app/debug_dir - ../data:/app/data - - ../config.yaml:/app/config.yaml + - ../config:/app/config environment: # Service URLs (Docker internal network) - REDIS_URL=redis://redis:6379/${REDIS_DATABASE:-0} diff --git a/backends/advanced/compose/overrides/dev.yml b/backends/advanced/compose/overrides/dev.yml new file mode 100644 index 00000000..deb405df --- /dev/null +++ b/backends/advanced/compose/overrides/dev.yml @@ -0,0 +1,4 @@ +# Development environment overrides +# This file is included by default in docker-compose.yml + +services: {} diff --git a/backends/advanced/config/config.defaults.yaml b/backends/advanced/config/config.defaults.yaml new file mode 100644 index 00000000..e5dbe937 --- /dev/null +++ b/backends/advanced/config/config.defaults.yaml @@ -0,0 +1,91 @@ +# Chronicle Default Configuration +# This file contains safe default values 
and serves as a template. +# Copy to config.yaml and customize for your deployment. +# +# DO NOT store secrets in this file - it is committed to git. +# Actual secrets should go in config.yaml (which is gitignored). + +version: 1.0.0 +wizard_completed: false + +# Authentication Configuration +auth: + secret_key: '' # Auto-generated on first run if empty + admin_name: admin + admin_email: admin@example.com + admin_password_hash: '' # Set via wizard or environment variable + +# Speech Detection Settings +speech_detection: + min_words: 5 + min_confidence: 0.5 + min_duration: 10.0 + +# Conversation Management Settings +conversation: + transcription_buffer_seconds: 120.0 + speech_inactivity_threshold: 60.0 + new_conversation_timeout_minutes: 1.5 + record_only_enrolled_speakers: true + +# Audio Processing Settings +audio_processing: + audio_cropping_enabled: true + min_speech_segment_duration: 1.0 + cropping_context_padding: 0.1 + +# Speaker Diarization Settings +diarization: + diarization_source: pyannote + similarity_threshold: 0.15 + min_duration: 0.5 + collar: 2.0 + min_duration_off: 1.5 + min_speakers: 2 + max_speakers: 6 + +# LLM Configuration +llm: + llm_provider: openai + openai_model: gpt-4o-mini + chat_llm_model: null + chat_temperature: 0.7 + ollama_model: llama3.1:latest + ollama_embedder_model: nomic-embed-text:latest + +# Service Providers +providers: + memory_provider: chronicle + transcription_provider: auto + +# Network Configuration +network: + host_ip: localhost + backend_public_port: 8000 + webui_port: 5173 + cors_origins: http://localhost:5173,http://localhost:3000,http://127.0.0.1:5173,http://127.0.0.1:3000 + +# Infrastructure Services +infrastructure: + mongodb_uri: mongodb://mongo:27017 + mongodb_database: chronicle + redis_url: redis://redis:6379/0 + qdrant_base_url: qdrant + qdrant_port: '6333' + neo4j_host: neo4j-mem0 + neo4j_user: neo4j + +# Miscellaneous Settings +misc: + debug_dir: ./data/debug_dir + langfuse_enable_telemetry: false + +# API 
Keys (leave empty - set via wizard or config.yaml) +api_keys: + openai_api_key: null + deepgram_api_key: null + mistral_api_key: null + hf_token: null + langfuse_public_key: null + langfuse_secret_key: null + ngrok_authtoken: null diff --git a/backends/advanced/pyproject.toml b/backends/advanced/pyproject.toml index 5af2ec2e..630c2df2 100644 --- a/backends/advanced/pyproject.toml +++ b/backends/advanced/pyproject.toml @@ -18,7 +18,7 @@ dependencies = [ "wyoming>=1.6.1", "aiohttp>=3.8.0", "fastapi-users[beanie]>=14.0.1", - "PyYAML>=6.0.1", + "ruamel.yaml>=0.18.0", "langfuse>=3.3.0", "spacy>=3.8.2", "en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.8.0/en_core_web_sm-3.8.0-py3-none-any.whl", diff --git a/backends/advanced/src/advanced_omi_backend/app_factory.py b/backends/advanced/src/advanced_omi_backend/app_factory.py index fdde55de..a8ba4cc9 100644 --- a/backends/advanced/src/advanced_omi_backend/app_factory.py +++ b/backends/advanced/src/advanced_omi_backend/app_factory.py @@ -15,6 +15,13 @@ from fastapi.staticfiles import StaticFiles from advanced_omi_backend.app_config import get_app_config +from advanced_omi_backend.config import ( + ChronicleConfig, + get_config_parser, + init_config_parser, +) +from advanced_omi_backend.config.settings_adapter import ConfigBasedSettingsManager +import advanced_omi_backend.settings_manager as settings_manager_module from advanced_omi_backend.auth import ( bearer_backend, cookie_backend, @@ -67,15 +74,31 @@ async def lifespan(app: FastAPI): application_logger.error(f"Failed to initialize Beanie: {e}") raise - # Initialize settings manager + # Initialize config parser (new config.yaml system) + try: + config_parser = init_config_parser("config/config.yaml") + + # Load config (auto-copies from config/config.defaults.yaml if needed) + chronicle_config = await config_parser.load() + application_logger.info(f"โœ… Configuration loaded 
(wizard_completed={chronicle_config.wizard_completed})") + + except Exception as e: + application_logger.error(f"Failed to initialize config parser: {e}") + raise + + # Initialize settings manager (for backward compatibility with settings_routes) try: - from advanced_omi_backend.settings_manager import init_settings_manager - settings_mgr = init_settings_manager(config.db) + config_parser = get_config_parser() + settings_mgr = ConfigBasedSettingsManager(config_parser) await settings_mgr.initialize() - application_logger.info("โœ… Settings manager initialized and loaded from environment/database") + + # Register as global settings manager + settings_manager_module._settings_manager = settings_mgr + + application_logger.info("โœ… Settings manager initialized (using config.yaml)") except Exception as e: application_logger.error(f"Failed to initialize settings manager: {e}") - # Don't raise - use fallback to environment variables if settings manager fails + raise # Create admin user if needed try: diff --git a/backends/advanced/src/advanced_omi_backend/config/config_parser.py b/backends/advanced/src/advanced_omi_backend/config/config_parser.py index 51a0e518..30fb59c1 100644 --- a/backends/advanced/src/advanced_omi_backend/config/config_parser.py +++ b/backends/advanced/src/advanced_omi_backend/config/config_parser.py @@ -2,6 +2,7 @@ Config Parser - Simple YAML-based configuration management. 
""" +import shutil from pathlib import Path from typing import Optional from ruamel.yaml import YAML @@ -14,13 +15,22 @@ class ConfigParser: """Simple configuration parser for config.yaml using ruamel.yaml.""" - def __init__(self, config_path: str = "config.yaml"): + def __init__(self, config_path: str = "config/config.yaml", defaults_path: str = None): self.config_path = Path(config_path) + # Auto-determine defaults path based on config path + if defaults_path is None: + defaults_path = str(self.config_path.parent / "config.defaults.yaml") + self.defaults_path = Path(defaults_path) async def load(self) -> ChronicleConfig: """Load and validate configuration from YAML file.""" if not self.config_path.exists(): - return ChronicleConfig() + # Try to copy from defaults + if self.defaults_path.exists(): + shutil.copy(self.defaults_path, self.config_path) + else: + # Fallback to empty config + return ChronicleConfig() with open(self.config_path) as f: data = yaml.load(f) or {} diff --git a/backends/advanced/src/advanced_omi_backend/controllers/system_controller.py b/backends/advanced/src/advanced_omi_backend/controllers/system_controller.py index 02888259..ea6d31d0 100644 --- a/backends/advanced/src/advanced_omi_backend/controllers/system_controller.py +++ b/backends/advanced/src/advanced_omi_backend/controllers/system_controller.py @@ -567,13 +567,21 @@ async def set_memory_provider(provider: str): async def get_api_key_status(): """Get current API key configuration status.""" try: - from advanced_omi_backend.utils.env_writer import get_env_writer + from advanced_omi_backend.config import get_config_parser - env_writer = get_env_writer() - status = env_writer.get_configuration_status() + config_parser = get_config_parser() + config = await config_parser.load() + + # Check which API keys are configured + api_keys = config.api_keys return { - **status, + "openai_configured": bool(api_keys.openai_api_key), + "deepgram_configured": bool(api_keys.deepgram_api_key), + 
"mistral_configured": bool(api_keys.mistral_api_key), + "huggingface_configured": bool(api_keys.hf_token), + "langfuse_configured": bool(api_keys.langfuse_public_key and api_keys.langfuse_secret_key), + "ngrok_configured": bool(api_keys.ngrok_authtoken), "status": "success" } diff --git a/backends/advanced/src/advanced_omi_backend/config.py b/backends/advanced/src/advanced_omi_backend/legacy_config.py similarity index 100% rename from backends/advanced/src/advanced_omi_backend/config.py rename to backends/advanced/src/advanced_omi_backend/legacy_config.py From 85a3d739d97cd61fecddd63acb0da43dd29a68b9 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Sat, 20 Dec 2025 12:34:32 +0000 Subject: [PATCH 20/25] Add dev branch marker --- .env.api-keys.template | 75 ++++++++++++++++++++++++++++++++++++++++++ DEV_README.md | 3 ++ 2 files changed, 78 insertions(+) create mode 100644 .env.api-keys.template create mode 100644 DEV_README.md diff --git a/.env.api-keys.template b/.env.api-keys.template new file mode 100644 index 00000000..c0b04700 --- /dev/null +++ b/.env.api-keys.template @@ -0,0 +1,75 @@ +# ======================================== +# Friend-Lite API Keys Template +# ======================================== +# Copy this file to .env.api-keys and fill in your actual values +# .env.api-keys is gitignored and should NEVER be committed +# +# Usage: cp .env.api-keys.template .env.api-keys +# +# IMPORTANT: This file contains API KEYS for external services +# These might be shared across environments or different per environment +# For environment-specific credentials, see .env.secrets.template +# ======================================== + +# ======================================== +# LLM API KEYS +# ======================================== + +# OpenAI API key +# Get from: https://platform.openai.com/api-keys +OPENAI_API_KEY=sk-your-openai-key-here + +# Mistral API key (optional - only if using Mistral transcription) +# Get from: https://console.mistral.ai/ 
+MISTRAL_API_KEY= + +# Groq API key (optional - only if using Groq as LLM provider) +# Get from: https://console.groq.com/ +GROQ_API_KEY= + +# Ollama (no API key needed - local/self-hosted) +# OLLAMA_BASE_URL is in .env (not secret) + +# ======================================== +# SPEECH-TO-TEXT API KEYS +# ======================================== + +# Deepgram API key +# Get from: https://console.deepgram.com/ +DEEPGRAM_API_KEY=your-deepgram-key-here + +# ======================================== +# MODEL PROVIDERS +# ======================================== + +# Hugging Face token for speaker recognition models +# Get from: https://huggingface.co/settings/tokens +HF_TOKEN=hf_your_huggingface_token_here + +# OpenAI compatible endpoints (optional) +# OPENAI_API_BASE= + +# ======================================== +# MEMORY PROVIDERS (OPTIONAL) +# ======================================== + +# Mem0 API key (if using hosted Mem0) +# MEM0_API_KEY= + +# OpenMemory MCP (no API key - self-hosted) +# Configuration is in .env (not secret) + +# ======================================== +# NOTES +# ======================================== +# +# Sharing API Keys Across Environments: +# - Development: Use separate API keys with lower rate limits +# - Staging: Can share with development or use production keys +# - Production: Always use dedicated production API keys +# +# Security Best Practices: +# - Rotate API keys regularly +# - Use API key restrictions where available (IP restrictions, etc.) +# - Monitor API usage for unusual activity +# - Never commit API keys to version control diff --git a/DEV_README.md b/DEV_README.md new file mode 100644 index 00000000..c5d25ebe --- /dev/null +++ b/DEV_README.md @@ -0,0 +1,3 @@ +# Development Branch + +This is ushadow/dev - integration point for all worktrees. 
From f3f46da87dd7023bed021e643ba5bcb17357fa9d Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Sat, 20 Dec 2025 12:37:30 +0000 Subject: [PATCH 21/25] Test: Update dev --- DEV_README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DEV_README.md b/DEV_README.md index c5d25ebe..962c4f8d 100644 --- a/DEV_README.md +++ b/DEV_README.md @@ -1,3 +1,4 @@ # Development Branch This is ushadow/dev - integration point for all worktrees. +test update From f792edc56ed51b0519814ab043617b39efcb6f17 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Sat, 20 Dec 2025 15:03:33 +0000 Subject: [PATCH 22/25] added offset to create multiple env --- .env.default | 80 +++++++++ .gitignore | 2 +- Makefile | 61 ++++--- backends/advanced/compose/backend.yml | 6 +- backends/advanced/compose/frontend.yml | 3 +- backends/advanced/docker-compose-test.yml | 193 +++++----------------- quick-start.sh | 150 +++++++++++------ 7 files changed, 258 insertions(+), 237 deletions(-) create mode 100644 .env.default diff --git a/.env.default b/.env.default new file mode 100644 index 00000000..05df89ef --- /dev/null +++ b/.env.default @@ -0,0 +1,80 @@ +# Chronicle Default Configuration +# This file is committed to the repository and provides base defaults. 
+# To customize for your environment, run ./quick-start.sh which will generate .env + +# ========================================== +# DOCKER COMPOSE PROJECT NAME +# ========================================== +COMPOSE_PROJECT_NAME=chronicle + +# ========================================== +# AUTHENTICATION & SECURITY +# ========================================== +# Run ./quick-start.sh to generate secure credentials +AUTH_SECRET_KEY= +ADMIN_NAME=admin +ADMIN_EMAIL=admin@example.com +ADMIN_PASSWORD= + +# ========================================== +# GRACEFUL DEGRADATION SETTINGS +# ========================================== +ALLOW_MISSING_API_KEYS=true +LLM_REQUIRED=false +TRANSCRIPTION_REQUIRED=false + +# ========================================== +# DATABASE CONFIGURATION (Shared Infrastructure) +# ========================================== +# All worktrees share the same MongoDB/Redis/Qdrant instances +# Isolation is achieved via database names (MongoDB) and database numbers (Redis) +MONGODB_URI=mongodb://mongo:27017 +MONGODB_DATABASE=chronicle +REDIS_URL=redis://redis:6379/0 +REDIS_DATABASE=0 +QDRANT_BASE_URL=qdrant +QDRANT_PORT=6333 + +# ========================================== +# NETWORK CONFIGURATION (Application Ports Only) +# ========================================== +# Port offset for running multiple worktree instances (default: 0) +# Each worktree should use a different offset: blue=0, gold=10, green=20, red=30 +PORT_OFFSET=0 +BACKEND_PORT=8000 +WEBUI_PORT=3000 +HOST_IP=localhost + +# CORS origins with port offset support +# For multi-worktree setups, this uses BACKEND_PORT and WEBUI_PORT variables +CORS_ORIGINS=http://localhost:${WEBUI_PORT:-3000},http://127.0.0.1:${WEBUI_PORT:-3000},http://localhost:${BACKEND_PORT:-8000},http://127.0.0.1:${BACKEND_PORT:-8000} +VITE_BACKEND_URL=http://localhost:${BACKEND_PORT:-8000} + +# ========================================== +# ENVIRONMENT IDENTIFICATION +# ========================================== 
+ENV_NAME=default + +# ========================================== +# TEST ENVIRONMENT PORTS (for parallel testing) +# ========================================== +# Tests use SHARED infrastructure (MongoDB:27017, Redis:6379, Qdrant:6333) +# Only backend/webui ports are offset to support parallel testing across worktrees +TEST_BACKEND_PORT=8001 +TEST_WEBUI_PORT=3001 + +# ========================================== +# SERVICE CONFIGURATION +# ========================================== +LLM_PROVIDER=openai +OPENAI_MODEL=gpt-4o-mini +MEMORY_PROVIDER=chronicle +TRANSCRIPTION_PROVIDER=deepgram + +# ========================================== +# API KEYS +# ========================================== +# Add your API keys here or run ./quick-start.sh +# OPENAI_API_KEY= +# DEEPGRAM_API_KEY= +# MISTRAL_API_KEY= diff --git a/.gitignore b/.gitignore index 6f3a2882..a53ddfb5 100644 --- a/.gitignore +++ b/.gitignore @@ -2,10 +2,10 @@ *.wav **/*.env !**/.env.template +backends/advanced/.env # Generated by quick-start.sh .env.secrets .env.api-keys .env.quick-start -.env.default .env.backup.* config.env.backup.* backends/advanced/config/config.yaml diff --git a/Makefile b/Makefile index be709d70..94fe5f09 100644 --- a/Makefile +++ b/Makefile @@ -235,46 +235,58 @@ up: ## ๐Ÿš€ Start Chronicle (infrastructure + application) else \ if ! 
docker ps --filter "name=^mongo$$" --filter "status=running" -q | grep -q .; then \ echo "๐Ÿ—๏ธ Infrastructure not running, starting it first..."; \ - docker compose -f docker-compose.infra.yml up -d; \ + docker compose -f compose/infrastructure-shared.yml up -d; \ sleep 3; \ fi; \ - docker compose --env-file .env.default up -d; \ + cd backends/advanced && docker compose up -d; \ echo "โœ… Chronicle started"; \ + echo ""; \ + WEBUI_PORT=$$(grep '^WEBUI_PORT=' backends/advanced/.env 2>/dev/null | cut -d= -f2 || echo "3000"); \ + echo "โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—"; \ + echo "โ•‘ โ•‘"; \ + echo "โ•‘ ๐Ÿš€ Open Chronicle WebUI: โ•‘"; \ + echo "โ•‘ โ•‘"; \ + echo "โ•‘ http://localhost:$$WEBUI_PORT โ•‘"; \ + echo "โ•‘ โ•‘"; \ + echo "โ•‘ (Click the link above or copy to browser) โ•‘"; \ + echo "โ•‘ โ•‘"; \ + echo "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•"; \ + echo ""; \ fi down: ## ๐Ÿ›‘ Stop Chronicle application only (keeps infrastructure running) @echo "๐Ÿ›‘ Stopping Chronicle application..." - @docker compose down + @cd backends/advanced && docker compose down @echo "โœ… Application stopped (infrastructure still running)" @echo "๐Ÿ’ก To stop everything: make down-all" down-all: ## ๐Ÿ›‘ Stop everything (infrastructure + application) @echo "๐Ÿ›‘ Stopping all services..." - @docker compose down - @docker compose -f docker-compose.infra.yml down + @cd backends/advanced && docker compose down + @docker compose -f compose/infrastructure-shared.yml down @echo "โœ… All services stopped" build: ## ๐Ÿ”จ Rebuild Chronicle application images @echo "๐Ÿ”จ Building Chronicle..." 
- @docker compose build + @cd backends/advanced && docker compose build restart: ## ๐Ÿ”„ Restart Chronicle application only @echo "๐Ÿ”„ Restarting Chronicle application..." - @docker compose restart + @cd backends/advanced && docker compose restart @echo "โœ… Application restarted" restart-all: ## ๐Ÿ”„ Restart everything (infrastructure + application) @echo "๐Ÿ”„ Restarting all services..." - @docker compose restart - @docker compose -f docker-compose.infra.yml restart + @cd backends/advanced && docker compose restart + @docker compose -f compose/infrastructure-shared.yml restart @echo "โœ… All services restarted" logs: ## ๐Ÿ“‹ View Chronicle application logs - @docker compose logs -f + @cd backends/advanced && docker compose logs -f logs-all: ## ๐Ÿ“‹ View all logs (infrastructure + application) - @docker compose logs -f & - @docker compose -f docker-compose.infra.yml logs -f + @cd backends/advanced && docker compose logs -f & + @docker compose -f compose/infrastructure-shared.yml logs -f quick-start: ## ๐Ÿš€ Start Chronicle with zero configuration (interactive setup) @./quick-start.sh @@ -284,38 +296,23 @@ quick-start-reset: ## ๐Ÿ”„ Reset and regenerate quick-start configuration quick-start-stop: ## ๐Ÿ›‘ Stop quick-start environment @echo "๐Ÿ›‘ Stopping application..." - @docker compose down + @cd backends/advanced && docker compose down @echo "โœ… Application stopped (data preserved)" quick-start-clean: ## ๐Ÿ—‘๏ธ Stop application and remove all data volumes @echo "๐Ÿ—‘๏ธ Stopping application and removing data..." 
- @docker compose down -v - @docker compose -f docker-compose.infra.yml down -v + @cd backends/advanced && docker compose down -v + @docker compose -f compose/infrastructure-shared.yml down -v @echo "โœ… Environment cleaned" quick-start-logs: ## ๐Ÿ“‹ View quick-start logs - @docker compose logs -f + @cd backends/advanced && docker compose logs -f quick-start-rebuild: ## ๐Ÿ”จ Rebuild and restart application (keeps infrastructure running) @echo "๐Ÿ”จ Rebuilding application..." - @docker compose up -d --build + @cd backends/advanced && docker compose up -d --build @echo "โœ… Application rebuilt and restarted" -infra-start: ## ๐Ÿ—๏ธ Start infrastructure only (MongoDB, Redis, Qdrant) - @echo "๐Ÿ—๏ธ Starting infrastructure..." - @docker compose -f docker-compose.infra.yml up -d - @echo "โœ… Infrastructure started" - -infra-stop: ## ๐Ÿ›‘ Stop infrastructure (keeps data) - @echo "๐Ÿ›‘ Stopping infrastructure..." - @docker compose -f docker-compose.infra.yml down - @echo "โœ… Infrastructure stopped (data preserved)" - -infra-clean: ## ๐Ÿ—‘๏ธ Stop infrastructure and remove all data - @echo "๐Ÿ—‘๏ธ Stopping infrastructure and removing data..." - @docker compose -f docker-compose.infra.yml down -v - @echo "โœ… Infrastructure cleaned" - # ======================================== # INTERACTIVE SETUP WIZARD # ======================================== diff --git a/backends/advanced/compose/backend.yml b/backends/advanced/compose/backend.yml index f4ea2abb..10d8ae0a 100644 --- a/backends/advanced/compose/backend.yml +++ b/backends/advanced/compose/backend.yml @@ -7,7 +7,8 @@ services: context: .. 
dockerfile: Dockerfile env_file: - - ../../../.env.default + - ../../../.env.default # Committed defaults + - ../.env # Generated overrides (if exists) ports: - "${BACKEND_PORT:-8000}:8000" volumes: @@ -40,7 +41,8 @@ services: dockerfile: Dockerfile command: ["./start-workers.sh"] env_file: - - ../../../.env.default + - ../../../.env.default # Committed defaults + - ../.env # Generated overrides (if exists) volumes: - ../src:/app/src - ../start-workers.sh:/app/start-workers.sh diff --git a/backends/advanced/compose/frontend.yml b/backends/advanced/compose/frontend.yml index 5da122d5..3c817f69 100644 --- a/backends/advanced/compose/frontend.yml +++ b/backends/advanced/compose/frontend.yml @@ -10,7 +10,8 @@ services: - VITE_BASE_PATH=${VITE_BASE_PATH:-/} - VITE_BACKEND_URL=${VITE_BACKEND_URL:-http://localhost:8000} env_file: - - ../../../.env.default + - ../../../.env.default # Committed defaults + - ../.env # Generated overrides (if exists) ports: - "${WEBUI_PORT:-3000}:80" depends_on: diff --git a/backends/advanced/docker-compose-test.yml b/backends/advanced/docker-compose-test.yml index f72ca54d..130655fd 100644 --- a/backends/advanced/docker-compose-test.yml +++ b/backends/advanced/docker-compose-test.yml @@ -1,6 +1,8 @@ # docker-compose-test.yml -# Isolated test environment for integration tests -# Uses different ports to avoid conflicts with development environment +# Test environment for integration tests +# Uses shared infrastructure (MongoDB:27017, Redis:6379, Qdrant:6333) with _test database names +# Only backend/webui use offset ports for parallel testing across worktrees +# Example: BLUE uses 8001/3001, GOLD uses 8011/3011 services: chronicle-backend-test: @@ -8,18 +10,19 @@ services: context: . 
dockerfile: Dockerfile ports: - - "8001:8000" # Avoid conflict with dev on 8000 + - "${TEST_BACKEND_PORT:-8001}:8000" # Default 8001, or 8000 + PORT_OFFSET + 1 volumes: - ./src:/app/src # Mount source code for easier development - ./data/test_audio_chunks:/app/audio_chunks - ./data/test_debug_dir:/app/debug_dir - ./data/test_data:/app/data environment: - # Override with test-specific settings - - MONGODB_URI=mongodb://mongo-test:27017/test_db - - QDRANT_BASE_URL=qdrant-test + # Use shared infrastructure with _test database suffix for isolation + - MONGODB_URI=mongodb://mongo:27017 + - MONGODB_DATABASE=${MONGODB_DATABASE:-chronicle}_test + - QDRANT_BASE_URL=qdrant - QDRANT_PORT=6333 - - REDIS_URL=redis://redis-test:6379/0 + - REDIS_URL=redis://redis:6379/${REDIS_DATABASE:-0} - DEBUG_DIR=/app/debug_dir # Import API keys from environment - DEEPGRAM_API_KEY=${DEEPGRAM_API_KEY} @@ -34,28 +37,20 @@ services: - ADMIN_EMAIL=test-admin@example.com # Transcription provider configuration - TRANSCRIPTION_PROVIDER=${TRANSCRIPTION_PROVIDER:-deepgram} - # - PARAKEET_ASR_URL=${PARAKEET_ASR_URL} # Memory provider configuration - MEMORY_PROVIDER=${MEMORY_PROVIDER:-chronicle} - OPENMEMORY_MCP_URL=${OPENMEMORY_MCP_URL:-http://host.docker.internal:8765} - OPENMEMORY_USER_ID=${OPENMEMORY_USER_ID:-openmemory} - - MYCELIA_URL=http://mycelia-backend-test:5173 - - MYCELIA_DB=mycelia_test + - MYCELIA_URL=http://mycelia-backend:5173 + - MYCELIA_DB=mycelia_${MONGODB_DATABASE:-chronicle}_test # Disable speaker recognition in test environment to prevent segment duplication - DISABLE_SPEAKER_RECOGNITION=false - SPEAKER_SERVICE_URL=https://localhost:8085 - - CORS_ORIGINS=http://localhost:3001,http://localhost:8001,https://localhost:3001,https://localhost:8001 + - CORS_ORIGINS=http://localhost:${TEST_WEBUI_PORT:-3001},http://localhost:${TEST_BACKEND_PORT:-8001},https://localhost:${TEST_WEBUI_PORT:-3001},https://localhost:${TEST_BACKEND_PORT:-8001} # Set low inactivity timeout for tests (2 seconds 
instead of 60) - SPEECH_INACTIVITY_THRESHOLD_SECONDS=2 # Wait for audio queue to drain before timing out (test mode) - WAIT_FOR_AUDIO_QUEUE_DRAIN=true - depends_on: - qdrant-test: - condition: service_started - mongo-test: - condition: service_healthy - redis-test: - condition: service_started healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8000/readiness"] interval: 10s @@ -63,63 +58,25 @@ services: retries: 5 start_period: 30s restart: unless-stopped + networks: + - chronicle-network webui-test: build: context: ./webui dockerfile: Dockerfile args: - - VITE_BACKEND_URL=http://localhost:8001 - - BACKEND_URL=http://localhost:8001 + - VITE_BACKEND_URL=http://localhost:${TEST_BACKEND_PORT:-8001} + - BACKEND_URL=http://localhost:${TEST_BACKEND_PORT:-8001} volumes: - ./webui/src:/app/src # Mount source code for easier development ports: - - "3001:80" # Avoid conflict with dev on 3000 + - "${TEST_WEBUI_PORT:-3001}:80" # Default 3001, or 3000 + PORT_OFFSET + 1 depends_on: chronicle-backend-test: condition: service_healthy - mongo-test: - condition: service_healthy - qdrant-test: - condition: service_started - redis-test: - condition: service_started - - qdrant-test: - image: qdrant/qdrant:latest - ports: - - "6337:6333" # gRPC - avoid conflict with dev 6333 - - "6338:6334" # HTTP - avoid conflict with dev 6334 - volumes: - - ./data/test_qdrant_data:/qdrant/storage - - mongo-test: - image: mongo:8.0.14 - ports: - - "27018:27017" # Avoid conflict with dev on 27017 - volumes: - - ./data/test_mongo_data:/data/db - # Use test database name to ensure isolation - command: mongod --dbpath /data/db --bind_ip_all - healthcheck: - test: ["CMD", "mongosh", "--eval", "db.runCommand('ping').ok", "--quiet"] - interval: 5s - timeout: 5s - retries: 10 - start_period: 10s - - redis-test: - image: redis:7-alpine - ports: - - "6380:6379" # Avoid conflict with dev on 6379 - volumes: - - ./data/test_redis_data:/data - command: redis-server --appendonly yes - healthcheck: - test: 
["CMD", "redis-cli", "ping"] - interval: 5s - timeout: 3s - retries: 5 + networks: + - chronicle-network workers-test: build: @@ -132,11 +89,12 @@ services: - ./data/test_debug_dir:/app/debug_dir - ./data/test_data:/app/data environment: - # Same environment as backend - - MONGODB_URI=mongodb://mongo-test:27017/test_db - - QDRANT_BASE_URL=qdrant-test + # Use shared infrastructure with _test database suffix for isolation + - MONGODB_URI=mongodb://mongo:27017 + - MONGODB_DATABASE=${MONGODB_DATABASE:-chronicle}_test + - QDRANT_BASE_URL=qdrant - QDRANT_PORT=6333 - - REDIS_URL=redis://redis-test:6379/0 + - REDIS_URL=redis://redis:6379/${REDIS_DATABASE:-0} - DEBUG_DIR=/app/debug_dir - DEEPGRAM_API_KEY=${DEEPGRAM_API_KEY} - OPENAI_API_KEY=${OPENAI_API_KEY} @@ -149,8 +107,8 @@ services: - MEMORY_PROVIDER=${MEMORY_PROVIDER:-chronicle} - OPENMEMORY_MCP_URL=${OPENMEMORY_MCP_URL:-http://host.docker.internal:8765} - OPENMEMORY_USER_ID=${OPENMEMORY_USER_ID:-openmemory} - - MYCELIA_URL=http://mycelia-backend-test:5173 - - MYCELIA_DB=mycelia_test + - MYCELIA_URL=http://mycelia-backend:5173 + - MYCELIA_DB=mycelia_${MONGODB_DATABASE:-chronicle}_test - DISABLE_SPEAKER_RECOGNITION=false - SPEAKER_SERVICE_URL=https://localhost:8085 # Set low inactivity timeout for tests (2 seconds instead of 60) @@ -160,90 +118,19 @@ services: depends_on: chronicle-backend-test: condition: service_healthy - mongo-test: - condition: service_healthy - redis-test: - condition: service_started - qdrant-test: - condition: service_started restart: unless-stopped + networks: + - chronicle-network - # Mycelia - AI memory and timeline service (test environment) - # mycelia-backend-test: - # build: - # context: ../../extras/mycelia/backend - # dockerfile: Dockerfile.simple - # ports: - # - "5100:5173" # Test backend port - # environment: - # # Shared JWT secret for Chronicle authentication (test key) - # - JWT_SECRET=test-jwt-signing-key-for-integration-tests - # - 
SECRET_KEY=test-jwt-signing-key-for-integration-tests - # # MongoDB connection (test database) - # - MONGO_URL=mongodb://mongo-test:27017 - # - MONGO_DB=mycelia_test - # - DATABASE_NAME=mycelia_test - # # Redis connection (ioredis uses individual host/port, not URL) - # - REDIS_HOST=redis-test - # - REDIS_PORT=6379 - # volumes: - # - ../../extras/mycelia/backend/app:/app/app # Mount source for development - # depends_on: - # mongo-test: - # condition: service_healthy - # redis-test: - # condition: service_started - # healthcheck: - # test: ["CMD", "deno", "eval", "fetch('http://localhost:5173/health').then(r => r.ok ? Deno.exit(0) : Deno.exit(1))"] - # interval: 30s - # timeout: 10s - # retries: 3 - # start_period: 5s - # restart: unless-stopped - # profiles: - # - mycelia - - # mycelia-frontend-test: - # build: - # context: ../../extras/mycelia - # dockerfile: frontend/Dockerfile.simple - # args: - # - VITE_API_URL=http://localhost:5100 - # ports: - # - "3002:8080" # Nginx serves on 8080 internally - # environment: - # - VITE_API_URL=http://localhost:5100 - # volumes: - # - ../../extras/mycelia/frontend/src:/app/src # Mount source for development - # depends_on: - # mycelia-backend-test: - # condition: service_healthy - # restart: unless-stopped - # profiles: - # - mycelia - - # caddy: - # image: caddy:2-alpine - # ports: - # - "443:443" - # - "80:80" # HTTP redirect to HTTPS - # volumes: - # - ./Caddyfile-test:/etc/caddy/Caddyfile:ro - # - ./data/caddy_data:/data - # - ./data/caddy_config:/config - # depends_on: - # webui-test: - # condition: service_started - # chronicle-backend-test: - # condition: service_healthy - # restart: unless-stopped +# Shared network configuration +networks: + chronicle-network: + name: chronicle-network + external: true -# CI Considerations (for future implementation): -# - GitHub Actions can run these services in isolated containers -# - Port conflicts won't exist in CI since each job runs in isolation -# - For CI, we could add: -# - 
--build flag for fresh builds -# - --force-recreate for clean state -# - Volume cleanup between test runs -# - Environment variables can be injected via GitHub secrets -# - Health checks ensure services are ready before tests run \ No newline at end of file +# Notes: +# - Tests use shared infrastructure (MongoDB:27017, Redis:6379, Qdrant:6333) +# - Database isolation via database names: {worktree}_test (e.g., chronicle_blue_test) +# - Redis isolation via database number from REDIS_DATABASE variable +# - Only backend/webui ports are offset for parallel testing +# - To run tests: ./run-test.sh (uses variables from .env.default) diff --git a/quick-start.sh b/quick-start.sh index 42826ee6..31d29c9d 100755 --- a/quick-start.sh +++ b/quick-start.sh @@ -14,7 +14,7 @@ BOLD='\033[1m' NC='\033[0m' # No Color # Configuration -ENV_FILE=".env.default" +ENV_FILE="backends/advanced/.env" # Overrides .env.default in backends/advanced CONFIG_FILE="config-defaults.yml" # Parse arguments @@ -69,14 +69,76 @@ if [[ ! 
-f "$ENV_FILE" ]] || [[ "$RESET_CONFIG" == true ]]; then echo "" ADMIN_PASSWORD="${INPUT_ADMIN_PASSWORD:-password-123}" - # Create .env.quick-start + # Prompt for environment name (for multi-worktree setups) + echo "" + echo -e "${BOLD}Environment Name${NC}" + echo -e "${YELLOW}For multi-worktree setups, give each environment a unique name${NC}" + echo -e "${YELLOW}Examples: chronicle, blue, gold, green, dev, staging${NC}" + echo "" + + read -p "Environment name [chronicle]: " INPUT_ENV_NAME + ENV_NAME="${INPUT_ENV_NAME:-chronicle}" + + # Convert to lowercase and replace spaces/special chars with hyphens + ENV_NAME=$(echo "$ENV_NAME" | tr '[:upper:]' '[:lower:]' | tr -cs '[:alnum:]' '-' | sed 's/-$//') + + # Prompt for port offset (for multi-worktree environments) + echo "" + echo -e "${BOLD}Port Configuration${NC}" + echo -e "${YELLOW}For multi-worktree setups, use different offsets for each environment${NC}" + echo -e "${YELLOW}Suggested: blue=0, gold=10, green=20, red=30${NC}" + echo "" + read -p "Port offset [0]: " INPUT_PORT_OFFSET + PORT_OFFSET="${INPUT_PORT_OFFSET:-0}" + + # Calculate application ports from offset (backend and frontend only) + BACKEND_PORT=$((8000 + PORT_OFFSET)) + WEBUI_PORT=$((3000 + PORT_OFFSET)) + + # Calculate Redis database number for isolation (shared Redis instance) + REDIS_DATABASE=$((PORT_OFFSET / 10)) + + # Calculate test environment ports (for parallel testing across worktrees) + # Tests use shared infrastructure (MongoDB, Redis, Qdrant) but need unique app ports + TEST_BACKEND_PORT=$((8001 + PORT_OFFSET)) + TEST_WEBUI_PORT=$((3001 + PORT_OFFSET)) + + # Set database and project names based on environment name + # Avoid chronicle-chronicle duplication + if [[ "$ENV_NAME" == "chronicle" ]]; then + MONGODB_DATABASE="chronicle" + COMPOSE_PROJECT_NAME="chronicle" + else + MONGODB_DATABASE="chronicle_${ENV_NAME}" + COMPOSE_PROJECT_NAME="chronicle-${ENV_NAME}" + fi + + echo "" + echo -e "${GREEN}โœ… Environment configured${NC}" + echo 
-e " Name: ${ENV_NAME}" + echo -e " Project: ${COMPOSE_PROJECT_NAME}" + echo -e " Backend: ${BACKEND_PORT}" + echo -e " WebUI: ${WEBUI_PORT}" + echo -e " Database: ${MONGODB_DATABASE}" + echo "" + + # Create minimal .env file with worktree-specific overrides cat > "$ENV_FILE" < - - + + + } /> @@ -96,8 +98,9 @@ function App() { } /> - - + + + diff --git a/backends/advanced/webui/src/hooks/useSimpleAudioRecording.ts b/backends/advanced/webui/src/hooks/useSimpleAudioRecording.ts index cb3e3eee..7b3f3075 100644 --- a/backends/advanced/webui/src/hooks/useSimpleAudioRecording.ts +++ b/backends/advanced/webui/src/hooks/useSimpleAudioRecording.ts @@ -1,9 +1,10 @@ import { useState, useRef, useCallback, useEffect } from 'react' import { BACKEND_URL } from '../services/api' import { getStorageKey } from '../utils/storage' +import { useRecording, RecordingMode, RecordingStep } from '../contexts/RecordingContext' -export type RecordingStep = 'idle' | 'mic' | 'websocket' | 'audio-start' | 'streaming' | 'stopping' | 'error' -export type RecordingMode = 'batch' | 'streaming' +// Re-export types for components that import from this hook +export type { RecordingMode, RecordingStep } export interface DebugStats { chunksSent: number @@ -37,12 +38,19 @@ export interface SimpleAudioRecordingReturn { } export const useSimpleAudioRecording = (): SimpleAudioRecordingReturn => { - // Basic state - const [currentStep, setCurrentStep] = useState('idle') - const [isRecording, setIsRecording] = useState(false) - const [recordingDuration, setRecordingDuration] = useState(0) - const [error, setError] = useState(null) - const [mode, setMode] = useState('streaming') + // Get recording state from global context + const recording = useRecording() + const { + currentStep, + isRecording, + duration: recordingDuration, + error, + mode, + setMode, + setCurrentStep, + setRecordingDuration, + setError + } = recording // Debug stats const [debugStats, setDebugStats] = useState({ @@ -397,14 +405,16 @@ 
export const useSimpleAudioRecording = (): SimpleAudioRecordingReturn => { setCurrentStep('streaming') // Step 4: Start audio streaming (includes processing delay) await startAudioStreaming(stream, ws) - - // All steps complete - mark as recording - setIsRecording(true) + + // All steps complete - mark as recording via context + // Note: startRecording is already called by context, we just update duration setRecordingDuration(0) - - // Start duration timer + + // Start duration timer (use functional update to avoid stale closures) + const startTime = Date.now() durationIntervalRef.current = setInterval(() => { - setRecordingDuration(prev => prev + 1) + const elapsed = Math.floor((Date.now() - startTime) / 1000) + setRecordingDuration(elapsed) }, 1000) console.log('๐ŸŽ‰ Recording started successfully!') @@ -449,21 +459,24 @@ export const useSimpleAudioRecording = (): SimpleAudioRecordingReturn => { // Cleanup resources cleanup() - - // Reset state - setIsRecording(false) + + // Reset state via context + recording.stopRecording() setRecordingDuration(0) - setCurrentStep('idle') - + console.log('โœ… Recording stopped') - }, [isRecording, cleanup]) + }, [isRecording, cleanup, recording]) - // Cleanup on unmount + // Cleanup on unmount - DO NOT cleanup if recording is active + // Recording should persist across page navigation useEffect(() => { return () => { - cleanup() + // Only cleanup if NOT currently recording + if (!isRecording) { + cleanup() + } } - }, [cleanup]) + }, [cleanup, isRecording]) return { currentStep, From d7ec81759358aab2a9c43e437271f664ada4d505 Mon Sep 17 00:00:00 2001 From: Stu Alexandere Date: Sat, 20 Dec 2025 19:52:28 +0000 Subject: [PATCH 24/25] fixed the record button and ui tweaks --- .../webui/src/contexts/RecordingContext.tsx | 132 ++++++++++++++++++ .../advanced/webui/src/pages/LoginPage.tsx | 9 +- backends/advanced/webui/src/pages/Queue.tsx | 26 ++-- 3 files changed, 146 insertions(+), 21 deletions(-) create mode 100644 
backends/advanced/webui/src/contexts/RecordingContext.tsx diff --git a/backends/advanced/webui/src/contexts/RecordingContext.tsx b/backends/advanced/webui/src/contexts/RecordingContext.tsx new file mode 100644 index 00000000..ab3bc191 --- /dev/null +++ b/backends/advanced/webui/src/contexts/RecordingContext.tsx @@ -0,0 +1,132 @@ +import { createContext, useContext, useState, useEffect, ReactNode, useCallback } from 'react' +import { getStorageKey } from '../utils/storage' + +export type RecordingMode = 'streaming' | 'batch' +export type RecordingStep = 'idle' | 'mic' | 'websocket' | 'audio-start' | 'streaming' | 'stopping' | 'error' + +interface RecordingState { + isRecording: boolean + mode: RecordingMode + duration: number + currentStep: RecordingStep + error: string | null +} + +interface RecordingContextType extends RecordingState { + startRecording: (mode: RecordingMode) => void + stopRecording: () => void + setMode: (mode: RecordingMode) => void + setRecordingDuration: (duration: number) => void + setCurrentStep: (step: RecordingStep) => void + setError: (error: string | null) => void + resetRecording: () => void +} + +const STORAGE_KEY = getStorageKey('recording_state') + +const defaultState: RecordingState = { + isRecording: false, + mode: 'streaming', + duration: 0, + currentStep: 'idle', + error: null, +} + +const RecordingContext = createContext(undefined) + +export function RecordingProvider({ children }: { children: ReactNode }) { + // Initialize from localStorage + const [state, setState] = useState(() => { + const saved = localStorage.getItem(STORAGE_KEY) + if (saved) { + try { + return JSON.parse(saved) + } catch (e) { + console.error('Failed to parse recording state from localStorage:', e) + } + } + return defaultState + }) + + // Persist to localStorage whenever state changes + useEffect(() => { + localStorage.setItem(STORAGE_KEY, JSON.stringify(state)) + }, [state]) + + // Listen for storage events to sync across tabs + useEffect(() => { + const 
handleStorageChange = (e: StorageEvent) => { + if (e.key === STORAGE_KEY && e.newValue) { + try { + const newState = JSON.parse(e.newValue) + setState(newState) + } catch (error) { + console.error('Failed to parse recording state from storage event:', error) + } + } + } + + window.addEventListener('storage', handleStorageChange) + return () => window.removeEventListener('storage', handleStorageChange) + }, []) + + const startRecording = useCallback((mode: RecordingMode) => { + setState(prev => ({ + ...prev, + isRecording: true, + mode, + duration: 0, + currentStep: 'mic', + error: null, + })) + }, []) + + const stopRecording = useCallback(() => { + setState(prev => ({ + ...prev, + isRecording: false, + currentStep: 'stopping', + })) + }, []) + + const setMode = useCallback((mode: RecordingMode) => { + setState(prev => ({ ...prev, mode })) + }, []) + + const setRecordingDuration = useCallback((duration: number) => { + setState(prev => ({ ...prev, duration })) + }, []) + + const setCurrentStep = useCallback((currentStep: RecordingStep) => { + setState(prev => ({ ...prev, currentStep })) + }, []) + + const setError = useCallback((error: string | null) => { + setState(prev => ({ ...prev, error })) + }, []) + + const resetRecording = useCallback(() => { + setState(defaultState) + }, []) + + const value: RecordingContextType = { + ...state, + startRecording, + stopRecording, + setMode, + setRecordingDuration, + setCurrentStep, + setError, + resetRecording, + } + + return {children} +} + +export function useRecording() { + const context = useContext(RecordingContext) + if (context === undefined) { + throw new Error('useRecording must be used within a RecordingProvider') + } + return context +} diff --git a/backends/advanced/webui/src/pages/LoginPage.tsx b/backends/advanced/webui/src/pages/LoginPage.tsx index 1ffcfe89..8924bd8c 100644 --- a/backends/advanced/webui/src/pages/LoginPage.tsx +++ b/backends/advanced/webui/src/pages/LoginPage.tsx @@ -68,7 +68,7 @@ export default 
function LoginPage() { Chronicle

- AI-Powered Personal Audio System + AI powered conversation system

Sign in to access your dashboard @@ -153,13 +153,6 @@ export default function LoginPage() { )} - - {/* Additional Info */} -

-

- Protected by enterprise-grade security -

-
{/* Footer */} diff --git a/backends/advanced/webui/src/pages/Queue.tsx b/backends/advanced/webui/src/pages/Queue.tsx index 18521dcc..b487341c 100644 --- a/backends/advanced/webui/src/pages/Queue.tsx +++ b/backends/advanced/webui/src/pages/Queue.tsx @@ -1064,7 +1064,7 @@ const Queue: React.FC = () => {
{/* Active Conversations - Grouped by conversation_id */}
-

Active Conversations

+

Active Conversations

{(() => { // Group all jobs by conversation_id with deduplication const allJobsRaw = Object.values(sessionJobs).flat().filter(job => job != null); @@ -1134,7 +1134,7 @@ const Queue: React.FC = () => { if (conversationMap.size === 0) { return ( -
+
No active conversations
); @@ -1201,7 +1201,7 @@ const Queue: React.FC = () => { {lastUpdate && ` โ€ข Updated: ${new Date(lastUpdate).toLocaleTimeString()}`}
{transcript && ( -
+
"{transcript.substring(0, 100)}{transcript.length > 100 ? '...' : ''}"
)} @@ -1213,7 +1213,7 @@ const Queue: React.FC = () => {
{/* Pipeline Timeline */}
-
Pipeline Timeline:
+
Pipeline Timeline:
{(() => { // Helper function to get display name from job type const getJobDisplayName = (jobType: string) => { @@ -1519,7 +1519,7 @@ const Queue: React.FC = () => { setCompletedConvTimeRange(Number(e.target.value)); setCompletedConvPage(1); // Reset to first page }} - className="text-xs border border-gray-300 rounded px-2 py-1" + className="text-xs border border-gray-300 dark:border-gray-600 rounded px-2 py-1 bg-white dark:bg-neutral-700 text-gray-900 dark:text-gray-100" > @@ -1597,7 +1597,7 @@ const Queue: React.FC = () => { if (conversationMap.size === 0) { return ( -
+
No completed conversations
); @@ -1629,7 +1629,7 @@ const Queue: React.FC = () => { if (conversationsArray.length === 0) { return ( -
+
No completed conversations in the selected time range
); @@ -2082,11 +2082,11 @@ const Queue: React.FC = () => {

Filters

- + setFilters({ ...filters, job_type: e.target.value })} - className="w-full border border-gray-300 rounded-md px-3 py-2" + className="w-full border border-gray-300 dark:border-gray-600 rounded-md px-3 py-2 bg-white dark:bg-neutral-700 text-gray-900 dark:text-gray-100" > @@ -2114,11 +2114,11 @@ const Queue: React.FC = () => {
- + setDisplayName(e.target.value)} + className={`input ${fieldErrors.displayName ? 'border-error-500 dark:border-error-500' : ''}`} + placeholder="Administrator" + /> + {fieldErrors.displayName && ( +

{fieldErrors.displayName}

+ )} +
+ + {/* Email Input */} +
+ + setEmail(e.target.value)} + className={`input ${fieldErrors.email ? 'border-error-500 dark:border-error-500' : ''}`} + placeholder="admin@example.com" + /> + {fieldErrors.email && ( +

{fieldErrors.email}

+ )} +
+ + {/* Password Input */} +
+ +
+ setPassword(e.target.value)} + className={`input pr-10 ${fieldErrors.password ? 'border-error-500 dark:border-error-500' : ''}`} + placeholder="Minimum 8 characters" + /> + +
+ {fieldErrors.password && ( +

{fieldErrors.password}

+ )} +
+ + {/* Confirm Password Input */} +
+ +
+ setConfirmPassword(e.target.value)} + className={`input pr-10 ${fieldErrors.confirmPassword ? 'border-error-500 dark:border-error-500' : ''}`} + placeholder="Re-enter your password" + /> + +
+ {fieldErrors.confirmPassword && ( +

{fieldErrors.confirmPassword}

+ )} +
+ + {/* Error Message */} + {error && ( +
+

+ {error} +

+
+ )} + + {/* Submit Button */} + + +
+ + {/* Footer */} +
+

+ Chronicle Dashboard v1.0 +

+
+
+
+ ) +} diff --git a/backends/advanced/webui/src/services/api.ts b/backends/advanced/webui/src/services/api.ts index 8d777e3d..9310f0fa 100644 --- a/backends/advanced/webui/src/services/api.ts +++ b/backends/advanced/webui/src/services/api.ts @@ -106,6 +106,16 @@ export const authApi = { getMe: () => api.get('/users/me'), } +export const setupApi = { + getSetupStatus: () => api.get('/api/setup/status'), + createAdmin: (setupData: { + display_name: string + email: string + password: string + confirm_password: string + }) => api.post('/api/setup/create-admin', setupData), +} + export const conversationsApi = { getAll: () => api.get('/api/conversations'), getById: (id: string) => api.get(`/api/conversations/${id}`), diff --git a/clear.sh b/clear.sh new file mode 100755 index 00000000..13741680 --- /dev/null +++ b/clear.sh @@ -0,0 +1,130 @@ +#!/bin/bash +set -e + +# Chronicle Admin Reset Script +# Removes admin users from database and clears auth variables for fresh setup + +echo "๐Ÿงน Chronicle Admin Reset" +echo "========================================" + +# Check we're in the right directory +if [ ! -f "docker-compose.yml" ] || [ ! -f "docker-compose.infra.yml" ]; then + echo "โŒ Error: Must be run from the GOLD directory" + echo " cd to the directory containing docker-compose.yml" + exit 1 +fi +echo "" +echo "โš ๏ธ WARNING: This will:" +echo " - Remove ALL admin users from the database" +echo " - Clear AUTH_SECRET_KEY from .env" +echo " - Clear ADMIN_PASSWORD from .env" +echo " - Allow you to run ./go.sh for a fresh setup" +echo "" +read -p "Are you sure? (yes/no): " -r +echo "" + +if [[ ! 
$REPLY =~ ^[Yy][Ee][Ss]$ ]]; then + echo "โŒ Aborted" + exit 0 +fi + +# Get database name - check where backend actually loads it from +# Priority: backends/advanced/.env > root .env > .env.default > hardcoded default +if [ -f backends/advanced/.env ]; then + MONGODB_DATABASE=$(grep "^MONGODB_DATABASE=" backends/advanced/.env | cut -d'=' -f2) +fi + +if [ -z "$MONGODB_DATABASE" ] && [ -f .env ]; then + MONGODB_DATABASE=$(grep "^MONGODB_DATABASE=" .env | cut -d'=' -f2) +fi + +if [ -z "$MONGODB_DATABASE" ] && [ -f .env.default ]; then + MONGODB_DATABASE=$(grep "^MONGODB_DATABASE=" .env.default | cut -d'=' -f2) +fi + +# Final fallback to backend's hardcoded default +if [ -z "$MONGODB_DATABASE" ]; then + MONGODB_DATABASE="friend-lite" +fi + +echo "๐Ÿ“ฆ Database: ${MONGODB_DATABASE}" +echo "" + +# Check if MongoDB is running +echo "๐Ÿ” Checking MongoDB connection..." +if ! docker ps | grep -q "mongo"; then + echo "โš ๏ธ MongoDB container is not running" + echo " Starting MongoDB..." + docker compose -f docker-compose.infra.yml up -d mongo + echo " Waiting for MongoDB to be ready..." + sleep 5 +fi + +# Remove admin users from MongoDB +echo "๐Ÿ—‘๏ธ Removing admin users from database..." +docker exec -i mongo mongosh "${MONGODB_DATABASE}" --quiet --eval ' +const beforeCount = db.users.countDocuments({ is_superuser: true }); +const result = db.users.deleteMany({ is_superuser: true }); +const afterCount = db.users.countDocuments({ is_superuser: true }); +print("โœ… Removed " + result.deletedCount + " admin user(s). Remaining admins: " + afterCount); +' || echo "โš ๏ธ MongoDB operation may have failed - check if container is running" + +echo "" +echo "๐Ÿ” Clearing auth variables from .env files..." 
+ +# Function to clear auth variables from a file +clear_auth_vars() { + local file=$1 + local cleared=false + + if [ -f "$file" ]; then + # Clear AUTH_SECRET_KEY + if grep -q "^AUTH_SECRET_KEY=" "$file"; then + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' "s|^AUTH_SECRET_KEY=.*|AUTH_SECRET_KEY=|" "$file" + else + sed -i "s|^AUTH_SECRET_KEY=.*|AUTH_SECRET_KEY=|" "$file" + fi + echo " โœ… AUTH_SECRET_KEY cleared from $file" + cleared=true + fi + + # Clear ADMIN_PASSWORD + if grep -q "^ADMIN_PASSWORD=" "$file"; then + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' "s|^ADMIN_PASSWORD=.*|ADMIN_PASSWORD=|" "$file" + else + sed -i "s|^ADMIN_PASSWORD=.*|ADMIN_PASSWORD=|" "$file" + fi + echo " โœ… ADMIN_PASSWORD cleared from $file" + cleared=true + fi + fi +} + +# Clear from root .env +clear_auth_vars ".env" + +# Clear from backends/advanced/.env (this is what the backend actually uses!) +clear_auth_vars "backends/advanced/.env" + +if [ ! -f .env ] && [ ! -f backends/advanced/.env ]; then + echo " โš ๏ธ No .env files found (will be created by go.sh)" +fi + +echo "" +echo "๐Ÿ”„ Restarting backend to invalidate active sessions..." +docker compose restart backend 2>/dev/null || echo " โš ๏ธ Backend not running (that's ok)" + +echo "" +echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" +echo "โœ… Admin reset complete!" +echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" +echo "" +echo "๐Ÿš€ Next steps:" +echo " 1. Clear your browser cache/localStorage (Cmd+Shift+R or hard refresh)" +echo " 2. Visit the web UI - you'll be redirected to /setup" +echo " 3. 
Create a new admin account" +echo "" +echo "๐Ÿ’ก Or run ./go.sh to restart everything fresh" +echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" diff --git a/go.sh b/go.sh new file mode 100755 index 00000000..27fdb218 --- /dev/null +++ b/go.sh @@ -0,0 +1,154 @@ +#!/bin/bash +set -e + +# Chronicle Quick Start - Web UI Setup Flow +# This script starts Chronicle and opens the web UI setup screen to create an admin account + +echo "๐Ÿš€ Chronicle Quick Start - Web UI Setup" +echo "========================================" + +# Check we're in the right directory +if [ ! -f "docker-compose.yml" ] || [ ! -f "docker-compose.infra.yml" ]; then + echo "โŒ Error: Must be run from the GOLD directory" + echo " cd to the directory containing docker-compose.yml" + exit 1 +fi + +# Check if .env exists, if not create from defaults +if [ ! -f .env ]; then + echo "๐Ÿ“ Creating .env from .env.default..." + cp .env.default .env +fi + +# Generate AUTH_SECRET_KEY if not set (check all .env files) +SECRET_KEY="" + +# Check if any .env file has a valid AUTH_SECRET_KEY +for env_file in backends/advanced/.env .env; do + if [ -f "$env_file" ] && grep -q "^AUTH_SECRET_KEY=.\+" "$env_file" 2>/dev/null; then + SECRET_KEY=$(grep "^AUTH_SECRET_KEY=" "$env_file" | cut -d'=' -f2) + echo "โœ… AUTH_SECRET_KEY already set in $env_file" + break + fi +done + +# Generate if not found +if [ -z "$SECRET_KEY" ]; then + echo "๐Ÿ” Generating secure AUTH_SECRET_KEY..." 
+ SECRET_KEY=$(openssl rand -base64 32) + echo "โœ… AUTH_SECRET_KEY generated" +fi + +# Ensure it's set in backends/advanced/.env (the one backend actually uses) +if [ -f backends/advanced/.env ]; then + if grep -q "^AUTH_SECRET_KEY=" backends/advanced/.env; then + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' "s|^AUTH_SECRET_KEY=.*|AUTH_SECRET_KEY=${SECRET_KEY}|" backends/advanced/.env + else + sed -i "s|^AUTH_SECRET_KEY=.*|AUTH_SECRET_KEY=${SECRET_KEY}|" backends/advanced/.env + fi + else + echo "AUTH_SECRET_KEY=${SECRET_KEY}" >> backends/advanced/.env + fi +fi + +# Also set in root .env for consistency +if [ -f .env ]; then + if grep -q "^AUTH_SECRET_KEY=" .env; then + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' "s|^AUTH_SECRET_KEY=.*|AUTH_SECRET_KEY=${SECRET_KEY}|" .env + else + sed -i "s|^AUTH_SECRET_KEY=.*|AUTH_SECRET_KEY=${SECRET_KEY}|" .env + fi + else + echo "AUTH_SECRET_KEY=${SECRET_KEY}" >> .env + fi +fi + +# Ensure ADMIN_PASSWORD is empty in all .env files (to trigger web UI setup) +for env_file in .env backends/advanced/.env; do + if [ -f "$env_file" ] && grep -q "^ADMIN_PASSWORD=.\+" "$env_file" 2>/dev/null; then + echo "โš ๏ธ ADMIN_PASSWORD is set in $env_file - clearing it to enable web UI setup..." + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' "s|^ADMIN_PASSWORD=.*|ADMIN_PASSWORD=|" "$env_file" + else + sed -i "s|^ADMIN_PASSWORD=.*|ADMIN_PASSWORD=|" "$env_file" + fi + fi +done + +echo "" +echo "๐Ÿณ Starting Docker services..." + +# Check if infrastructure is already running +if docker ps --filter "name=^mongo$" --filter "status=running" -q | grep -q .; then + echo " โœ… Infrastructure already running (reusing existing)" +else + echo " Starting infrastructure (MongoDB, Redis, Qdrant)..." + docker compose -f docker-compose.infra.yml up -d + echo " Waiting for infrastructure to be ready..." + sleep 3 +fi + +echo " Starting application services..." 
+# Clean up any orphaned containers from previous runs +docker compose down 2>/dev/null || true +docker compose up -d --build + +echo "" +echo "โณ Waiting for backend to be ready..." +MAX_WAIT=60 +WAITED=0 +BACKEND_PORT=$(grep "^BACKEND_PORT=" .env | cut -d'=' -f2 || echo "8000") + +while [ $WAITED -lt $MAX_WAIT ]; do + if curl -s "http://localhost:${BACKEND_PORT}/health" > /dev/null 2>&1; then + echo "โœ… Backend is ready!" + break + fi + sleep 2 + WAITED=$((WAITED + 2)) + echo " Waiting... (${WAITED}s/${MAX_WAIT}s)" +done + +if [ $WAITED -ge $MAX_WAIT ]; then + echo "โŒ Backend failed to start within ${MAX_WAIT} seconds" + echo " Check logs with: docker compose logs backend" + exit 1 +fi + +echo "" +echo "โœ… Chronicle is running!" +echo "" +echo "๐Ÿ“ฑ Opening web UI setup screen..." +echo " You'll be prompted to create your admin account" +echo "" + +# Get webui port +WEBUI_PORT=$(grep "^WEBUI_PORT=" .env | cut -d'=' -f2 || echo "3000") + +# Open browser to setup page +if command -v open > /dev/null; then + # macOS + open "http://localhost:${WEBUI_PORT}/setup" +elif command -v xdg-open > /dev/null; then + # Linux + xdg-open "http://localhost:${WEBUI_PORT}/setup" +elif command -v start > /dev/null; then + # Windows + start "http://localhost:${WEBUI_PORT}/setup" +else + echo " Please open your browser to: http://localhost:${WEBUI_PORT}/setup" +fi + +echo "" +echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" +echo "๐Ÿ“‹ Quick Reference:" +echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" +echo " Web UI Setup: http://localhost:${WEBUI_PORT}/setup" +echo " Web Dashboard: http://localhost:${WEBUI_PORT}" +echo " Backend API: http://localhost:${BACKEND_PORT}" +echo "" +echo " View logs: docker compose logs -f" +echo " Stop services: docker compose down" +echo 
"โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"