diff --git a/.env.secrets.template b/.env.secrets.template new file mode 100644 index 00000000..57599ebc --- /dev/null +++ b/.env.secrets.template @@ -0,0 +1,48 @@ +# ======================================== +# Friend-Lite Secrets Template +# ======================================== +# Copy this file to .env.secrets and fill in your actual values +# .env.secrets is gitignored and should NEVER be committed +# +# Usage: cp .env.secrets.template .env.secrets +# +# IMPORTANT: This file contains ENVIRONMENT-SPECIFIC credentials +# For API keys that might be shared, see .env.api-keys.template +# ======================================== + +# ======================================== +# AUTHENTICATION & SECURITY (Environment-Specific) +# ======================================== + +# JWT secret key - MUST be different per environment +# Generate with: openssl rand -base64 32 +AUTH_SECRET_KEY=your-super-secret-jwt-key-change-this-to-something-random + +# Admin account credentials - Should be different per environment +ADMIN_EMAIL=admin@example.com +ADMIN_PASSWORD=change-this-secure-password + +# ======================================== +# DATABASE CREDENTIALS (Environment-Specific) +# ======================================== + +# Neo4j password - Different per environment +NEO4J_PASSWORD=your-neo4j-password + +# MongoDB credentials (if using auth) +# MONGODB_USERNAME= +# MONGODB_PASSWORD= + +# Redis password (if using auth) +# REDIS_PASSWORD= + +# ======================================== +# EXTERNAL SERVICE CREDENTIALS (Environment-Specific) +# ======================================== + +# Ngrok authtoken (optional - for external access in dev/staging) +NGROK_AUTHTOKEN= + +# Langfuse telemetry (optional - different per environment) +LANGFUSE_PUBLIC_KEY= +LANGFUSE_SECRET_KEY= diff --git a/.gitignore b/.gitignore index b2b052b3..f0a73d86 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,11 @@ *.wav **/*.env !**/.env.template +.env.secrets +.env.api-keys +.env.quick-start 
+.env.backup.* +config.env.backup.* **/memory_config.yaml !**/memory_config.yaml.template example/* @@ -71,6 +76,7 @@ backends/advanced-backend/data/speaker_model_cache/ backends/charts/advanced-backend/env-configmap.yaml extras/openmemory-mcp/data/* +extras/openmemory/data/* .env.backup.* backends/advanced/nginx.conf @@ -82,3 +88,27 @@ log.html output.xml report.html .secrets +extras/openmemory-mcp/.env.openmemory +extras/openmemory/.env +certs + +# Environment-specific configuration files (added 2025-12-09) +environments/ +*.env.backup.* +backends/advanced/.env.* +!backends/advanced/.env.template + +# SSL certificates +*.crt +*.key + +# IDE and tool directories +.playwright-mcp/ +.serena/ + +# Docker compose data directories +**/compose/data/ + +# Deprecated compose files (moved to root compose/) +backends/advanced/compose/infrastructure.yml +backends/advanced/compose/mycelia.yml diff --git a/Makefile b/Makefile index 9c4dca6a..be709d70 100644 --- a/Makefile +++ b/Makefile @@ -1,25 +1,45 @@ # ======================================== -# Chronicle Management System +# Friend-Lite Management System # ======================================== -# Central management interface for Chronicle project +# Central management interface for Friend-Lite project # Handles configuration, deployment, and maintenance tasks -# Load environment variables from .env file +# Load environment variables from .env file (if it exists) ifneq (,$(wildcard ./.env)) include .env export $(shell sed 's/=.*//' .env | grep -v '^\s*$$' | grep -v '^\s*\#') endif -# Load configuration definitions -include config.env -# Export all variables from config.env -export $(shell sed 's/=.*//' config.env | grep -v '^\s*$$' | grep -v '^\s*\#') +# Load configuration definitions for Kubernetes +# Use config-k8s.env for K8s deployments +ifneq (,$(wildcard ./config-k8s.env)) + include config-k8s.env + export $(shell sed 's/=.*//' config-k8s.env | grep -v '^\s*$$' | grep -v '^\s*\#') +else + # Fallback to config.env 
for backwards compatibility + ifneq (,$(wildcard ./config.env)) + include config.env + export $(shell sed 's/=.*//' config.env | grep -v '^\s*$$' | grep -v '^\s*\#') + endif +endif + +# Load secrets (gitignored) - required for K8s secrets generation +ifneq (,$(wildcard ./.env.secrets)) + include .env.secrets + export $(shell sed 's/=.*//' .env.secrets | grep -v '^\s*$$' | grep -v '^\s*\#') +endif + +# Load API keys (gitignored) - required for K8s secrets generation +ifneq (,$(wildcard ./.env.api-keys)) + include .env.api-keys + export $(shell sed 's/=.*//' .env.api-keys | grep -v '^\s*$$' | grep -v '^\s*\#') +endif # Script directories SCRIPTS_DIR := scripts K8S_SCRIPTS_DIR := $(SCRIPTS_DIR)/k8s -.PHONY: help menu setup-k8s setup-infrastructure setup-rbac setup-storage-pvc config config-docker config-k8s config-all clean deploy deploy-docker deploy-k8s deploy-k8s-full deploy-infrastructure deploy-apps check-infrastructure check-apps build-backend up-backend down-backend k8s-status k8s-cleanup k8s-purge audio-manage mycelia-sync-status mycelia-sync-all mycelia-sync-user mycelia-check-orphans mycelia-reassign-orphans test-robot test-robot-integration test-robot-unit test-robot-endpoints test-robot-specific test-robot-clean +.PHONY: help menu wizard setup-secrets setup-tailscale configure-tailscale-serve setup-environment check-secrets setup-k8s setup-infrastructure setup-rbac setup-storage-pvc config config-k8s config-all clean deploy deploy-docker deploy-k8s deploy-k8s-full deploy-infrastructure deploy-apps check-infrastructure check-apps build-backend up-backend down-backend k8s-status k8s-cleanup k8s-purge audio-manage mycelia-sync-status mycelia-sync-all mycelia-sync-user mycelia-check-orphans mycelia-reassign-orphans mycelia-create-token test-robot test-robot-integration test-robot-unit test-robot-endpoints test-robot-specific test-robot-clean infra-start infra-stop infra-restart infra-logs infra-status infra-clean caddy-start caddy-stop caddy-restart caddy-logs 
caddy-status caddy-regenerate env-list env-start env-stop env-clean env-status # Default target .DEFAULT_GOAL := menu @@ -28,6 +48,39 @@ menu: ## Show interactive menu (default) @echo "🎯 Chronicle Management System" @echo "================================" @echo + @echo "πŸš€ Standard Docker Compose Commands:" + @echo " make up πŸš€ Start Chronicle (auto-starts infra if needed)" + @echo " make down πŸ›‘ Stop app only (keeps infra running)" + @echo " make down-all πŸ›‘ Stop everything (infra + app)" + @echo " make build πŸ”¨ Rebuild application images" + @echo " make restart πŸ”„ Restart app only" + @echo " make restart-all πŸ”„ Restart everything" + @echo " make logs πŸ“‹ View app logs" + @echo " make logs-all πŸ“‹ View all logs" + @echo + @echo " OR use docker compose directly:" + @echo " docker compose -f docker-compose.infra.yml up -d (start infra)" + @echo " docker compose up -d (start app)" + @echo " docker compose down (stop app only)" + @echo " docker compose -f docker-compose.infra.yml down (stop infra)" + @echo + @echo "⚑ Quick Start (First Time):" + @echo " quick-start πŸš€ Interactive setup with zero configuration" + @echo " quick-start-reset πŸ”„ Reset and regenerate configuration" + @echo + @echo "πŸ—οΈ Infrastructure Control:" + @echo " infra-start πŸ—οΈ Start infrastructure only (MongoDB, Redis, Qdrant)" + @echo " infra-stop πŸ›‘ Stop infrastructure (keeps data)" + @echo " infra-clean πŸ—‘οΈ Stop infrastructure and remove all data" + @echo + @echo "πŸ§™ Advanced Setup:" + @echo " installer πŸš€ Chronicle Install - Python-based installer" + @echo " wizard πŸ§™ Interactive setup wizard (secrets + Tailscale + environment)" + @echo " setup-secrets πŸ” Configure API keys and passwords" + @echo " setup-tailscale 🌐 Configure Tailscale for distributed deployment" + @echo " configure-tailscale-serve 🌐 Configure Tailscale serve routes (single environment)" + @echo " setup-environment πŸ“¦ Create a custom environment" + @echo @echo "πŸ“‹ Quick Actions:" 
@echo " setup-dev πŸ› οΈ Setup development environment (git hooks, pre-commit)" @echo " setup-k8s πŸ—οΈ Complete Kubernetes setup (registry + infrastructure + RBAC)" @@ -43,7 +96,6 @@ menu: ## Show interactive menu (default) @echo " test-robot-endpoints 🌐 Run endpoint tests only" @echo @echo "πŸ“ Configuration:" - @echo " config-docker 🐳 Generate Docker Compose .env files" @echo " config-k8s ☸️ Generate Kubernetes files (Skaffold env + ConfigMap/Secret)" @echo @echo "πŸš€ Deployment:" @@ -58,12 +110,29 @@ menu: ## Show interactive menu (default) @echo " clean 🧹 Clean up generated files" @echo @echo "πŸ”„ Mycelia Sync:" + @echo " mycelia-create-token πŸ”‘ Create Mycelia API token for a user" @echo " mycelia-sync-status πŸ“Š Show Mycelia OAuth sync status" - @echo " mycelia-sync-all πŸ”„ Sync all Chronicle users to Mycelia" + @echo " mycelia-sync-all πŸ”„ Sync all Friend-Lite users to Mycelia" @echo " mycelia-sync-user πŸ‘€ Sync specific user (EMAIL=user@example.com)" @echo " mycelia-check-orphans πŸ” Find orphaned Mycelia objects" @echo " mycelia-reassign-orphans ♻️ Reassign orphans (EMAIL=admin@example.com)" @echo + @echo "πŸ—οΈ Shared Infrastructure:" + @echo " infra-start πŸš€ Start shared infrastructure (MongoDB, Redis, Qdrant, optional Neo4j)" + @echo " infra-stop πŸ›‘ Stop infrastructure" + @echo " infra-restart πŸ”„ Restart infrastructure" + @echo " infra-status πŸ“Š Check infrastructure status" + @echo " infra-logs πŸ“‹ View infrastructure logs" + @echo " infra-clean πŸ—‘οΈ Clean all infrastructure data (DANGER!)" + @echo + @echo "🌐 Caddy Reverse Proxy (Shared Service):" + @echo " caddy-start πŸš€ Start shared Caddy (serves all environments)" + @echo " caddy-stop πŸ›‘ Stop Caddy" + @echo " caddy-restart πŸ”„ Restart Caddy" + @echo " caddy-status πŸ“Š Check if Caddy is running" + @echo " caddy-logs πŸ“‹ View Caddy logs" + @echo " caddy-regenerate πŸ”§ Regenerate Caddyfile from environments" + @echo @echo "Current configuration:" @echo " DOMAIN: 
$(DOMAIN)" @echo " DEPLOYMENT_MODE: $(DEPLOYMENT_MODE)" @@ -75,7 +144,7 @@ menu: ## Show interactive menu (default) @echo "πŸ’‘ Tip: Run 'make help' for detailed help on any target" help: ## Show detailed help for all targets - @echo "🎯 Chronicle Management System - Detailed Help" + @echo "🎯 Friend-Lite Management System - Detailed Help" @echo "================================================" @echo @echo "πŸ—οΈ KUBERNETES SETUP:" @@ -90,8 +159,7 @@ help: ## Show detailed help for all targets @echo " setup-storage-pvc Create shared models PVC" @echo @echo "πŸ“ CONFIGURATION:" - @echo " config Generate all configuration files (Docker + K8s)" - @echo " config-docker Generate Docker Compose .env files" + @echo " config Generate all configuration files (K8s)" @echo " config-k8s Generate Kubernetes files (Skaffold env + ConfigMap/Secret)" @echo @echo "πŸš€ DEPLOYMENT:" @@ -109,10 +177,11 @@ help: ## Show detailed help for all targets @echo " audio-manage Interactive audio file management" @echo @echo "πŸ”„ MYCELIA SYNC:" + @echo " mycelia-create-token Create Mycelia API token for a user" @echo " mycelia-sync-status Show Mycelia OAuth sync status for all users" - @echo " mycelia-sync-all Sync all Chronicle users to Mycelia OAuth" + @echo " mycelia-sync-all Sync all Friend-Lite users to Mycelia OAuth" @echo " mycelia-sync-user Sync specific user (EMAIL=user@example.com)" - @echo " mycelia-check-orphans Find Mycelia objects without Chronicle owner" + @echo " mycelia-check-orphans Find Mycelia objects without Friend-Lite owner" @echo " mycelia-reassign-orphans Reassign orphaned objects (EMAIL=admin@example.com)" @echo @echo "πŸ§ͺ ROBOT FRAMEWORK TESTING:" @@ -152,13 +221,188 @@ setup-dev: ## Setup development environment (git hooks, pre-commit) @echo "" @echo "βš™οΈ To skip hooks: git push --no-verify / git commit --no-verify" +# ======================================== +# QUICK START (Zero Configuration) +# ======================================== + +.PHONY: up down 
down-all build restart restart-all logs logs-all quick-start quick-start-reset quick-start-stop quick-start-clean quick-start-logs quick-start-rebuild infra-start infra-stop infra-clean + +up: ## πŸš€ Start Chronicle (infrastructure + application) + @echo "πŸš€ Starting Chronicle..." + @if [ ! -f .env.default ]; then \ + echo "⚠️ Configuration not found. Running quick-start.sh..."; \ + ./quick-start.sh; \ + else \ + if ! docker ps --filter "name=^mongo$$" --filter "status=running" -q | grep -q .; then \ + echo "πŸ—οΈ Infrastructure not running, starting it first..."; \ + docker compose -f docker-compose.infra.yml up -d; \ + sleep 3; \ + fi; \ + docker compose --env-file .env.default up -d; \ + echo "βœ… Chronicle started"; \ + fi + +down: ## πŸ›‘ Stop Chronicle application only (keeps infrastructure running) + @echo "πŸ›‘ Stopping Chronicle application..." + @docker compose down + @echo "βœ… Application stopped (infrastructure still running)" + @echo "πŸ’‘ To stop everything: make down-all" + +down-all: ## πŸ›‘ Stop everything (infrastructure + application) + @echo "πŸ›‘ Stopping all services..." + @docker compose down + @docker compose -f docker-compose.infra.yml down + @echo "βœ… All services stopped" + +build: ## πŸ”¨ Rebuild Chronicle application images + @echo "πŸ”¨ Building Chronicle..." + @docker compose build + +restart: ## πŸ”„ Restart Chronicle application only + @echo "πŸ”„ Restarting Chronicle application..." + @docker compose restart + @echo "βœ… Application restarted" + +restart-all: ## πŸ”„ Restart everything (infrastructure + application) + @echo "πŸ”„ Restarting all services..." 
+ @docker compose restart + @docker compose -f docker-compose.infra.yml restart + @echo "βœ… All services restarted" + +logs: ## πŸ“‹ View Chronicle application logs + @docker compose logs -f + +logs-all: ## πŸ“‹ View all logs (infrastructure + application) + @docker compose logs -f & + @docker compose -f docker-compose.infra.yml logs -f + +quick-start: ## πŸš€ Start Chronicle with zero configuration (interactive setup) + @./quick-start.sh + +quick-start-reset: ## πŸ”„ Reset and regenerate quick-start configuration + @./quick-start.sh --reset + +quick-start-stop: ## πŸ›‘ Stop quick-start environment + @echo "πŸ›‘ Stopping application..." + @docker compose down + @echo "βœ… Application stopped (data preserved)" + +quick-start-clean: ## πŸ—‘οΈ Stop application and remove all data volumes + @echo "πŸ—‘οΈ Stopping application and removing data..." + @docker compose down -v + @docker compose -f docker-compose.infra.yml down -v + @echo "βœ… Environment cleaned" + +quick-start-logs: ## πŸ“‹ View quick-start logs + @docker compose logs -f + +quick-start-rebuild: ## πŸ”¨ Rebuild and restart application (keeps infrastructure running) + @echo "πŸ”¨ Rebuilding application..." + @docker compose up -d --build + @echo "βœ… Application rebuilt and restarted" + +infra-start: ## πŸ—οΈ Start infrastructure only (MongoDB, Redis, Qdrant) + @echo "πŸ—οΈ Starting infrastructure..." + @docker compose -f docker-compose.infra.yml up -d + @echo "βœ… Infrastructure started" + +infra-stop: ## πŸ›‘ Stop infrastructure (keeps data) + @echo "πŸ›‘ Stopping infrastructure..." + @docker compose -f docker-compose.infra.yml down + @echo "βœ… Infrastructure stopped (data preserved)" + +infra-clean: ## πŸ—‘οΈ Stop infrastructure and remove all data + @echo "πŸ—‘οΈ Stopping infrastructure and removing data..." 
+ @docker compose -f docker-compose.infra.yml down -v + @echo "βœ… Infrastructure cleaned" + +# ======================================== +# INTERACTIVE SETUP WIZARD +# ======================================== + +.PHONY: installer wizard setup-secrets setup-tailscale setup-environment check-secrets + +installer: ## πŸš€ Chronicle Install - Python-based interactive installer (recommended) + @./chronicle-install.sh + +wizard: ## πŸ§™ Interactive setup wizard - guides through complete Friend-Lite setup + @echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + @echo "πŸ§™ Friend-Lite Setup Wizard" + @echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + @echo "" + @echo "This wizard will guide you through:" + @echo " 1. πŸ“¦ Creating your environment (name, ports, services)" + @echo " 2. πŸ” Configuring secrets (API keys based on your services)" + @echo " 3. 🌐 Optionally configuring Tailscale for remote access" + @echo " 4. πŸ”§ Finalizing setup (certificates, final configuration)" + @echo "" + @read -p "Press Enter to continue or Ctrl+C to exit..." + @echo "" + @$(MAKE) --no-print-directory setup-environment + @echo "" + @$(MAKE) --no-print-directory setup-secrets + @echo "" + @$(MAKE) --no-print-directory setup-tailscale + @echo "" + @$(MAKE) --no-print-directory finalize-setup + @echo "" + @echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + @echo "βœ… Setup Complete!" + @echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + @echo "" + @echo "πŸš€ Next Steps:" + @echo "" + @if [ -f ".env.secrets" ] && [ -d "environments" ]; then \ + LATEST_ENV=$$(ls -t environments/*.env 2>/dev/null | head -1 | xargs basename -s .env 2>/dev/null || echo "dev"); \ + echo " Start your environment:"; \ + echo " ./start-env.sh $$LATEST_ENV"; \ + echo ""; \ + echo " πŸ’‘ Your configured services will start automatically!"; \ + else \ + echo " ⚠️ Some setup steps were skipped. 
Run individual targets:"; \ + echo " make setup-secrets"; \ + echo " make setup-environment"; \ + fi + @echo "" + @echo "πŸ“š Documentation:" + @echo " β€’ ENVIRONMENTS.md - Environment system overview" + @echo " β€’ SSL_SETUP.md - Tailscale and SSL configuration" + @echo " β€’ SETUP.md - Detailed setup instructions" + @echo "" + +check-secrets: ## Check if secrets file exists and is configured + @if [ ! -f ".env.secrets" ]; then \ + echo "❌ .env.secrets not found"; \ + exit 1; \ + fi + @if ! grep -q "^AUTH_SECRET_KEY=" .env.secrets || grep -q "your-super-secret" .env.secrets; then \ + echo "❌ .env.secrets exists but needs configuration"; \ + exit 1; \ + fi + @echo "βœ… Secrets file configured" + +setup-secrets: ## πŸ” Interactive secrets setup (API keys, passwords) + @./scripts/setup-secrets.sh + +setup-tailscale: ## 🌐 Interactive Tailscale setup for distributed deployment + @./scripts/setup-tailscale.sh + +configure-tailscale-serve: ## 🌐 Configure Tailscale serve for an environment + @./scripts/configure-tailscale-serve.sh + +setup-environment: ## πŸ“¦ Create a custom environment configuration + @./scripts/setup-environment.sh + +finalize-setup: ## πŸ”§ Finalize setup (generate Caddyfile, provision certificates) + @./scripts/finalize-setup.sh + # ======================================== # KUBERNETES SETUP # ======================================== setup-k8s: ## Initial Kubernetes setup (registry + infrastructure) @echo "πŸ—οΈ Starting Kubernetes initial setup..." - @echo "This will set up the complete infrastructure for Chronicle" + @echo "This will set up the complete infrastructure for Friend-Lite" @echo @echo "πŸ“‹ Setup includes:" @echo " β€’ Insecure registry configuration" @@ -218,27 +462,25 @@ setup-storage-pvc: ## Set up shared models PVC config: config-all ## Generate all configuration files -config-docker: ## Generate Docker Compose configuration files - @echo "🐳 Generating Docker Compose configuration files..." 
- @CONFIG_FILE=config.env.dev python3 scripts/generate-docker-configs.py - @echo "βœ… Docker Compose configuration files generated" - config-k8s: ## Generate Kubernetes configuration files (ConfigMap/Secret only - no .env files) @echo "☸️ Generating Kubernetes configuration files..." @python3 scripts/generate-k8s-configs.py @echo "πŸ“¦ Applying ConfigMap and Secret to Kubernetes..." @kubectl apply -f k8s-manifests/configmap.yaml -n $(APPLICATION_NAMESPACE) 2>/dev/null || echo "⚠️ ConfigMap not applied (cluster not available?)" @kubectl apply -f k8s-manifests/secrets.yaml -n $(APPLICATION_NAMESPACE) 2>/dev/null || echo "⚠️ Secret not applied (cluster not available?)" - @echo "πŸ“¦ Copying ConfigMap and Secret to speech namespace..." - @kubectl get configmap chronicle-config -n $(APPLICATION_NAMESPACE) -o yaml | \ + @echo "πŸ“¦ Copying ConfigMap and Secrets to speech namespace..." + @kubectl get configmap friend-lite-config -n $(APPLICATION_NAMESPACE) -o yaml | \ sed -e '/namespace:/d' -e '/resourceVersion:/d' -e '/uid:/d' -e '/creationTimestamp:/d' | \ kubectl apply -n speech -f - 2>/dev/null || echo "⚠️ ConfigMap not copied to speech namespace" - @kubectl get secret chronicle-secrets -n $(APPLICATION_NAMESPACE) -o yaml | \ + @kubectl get secret friend-lite-secrets -n $(APPLICATION_NAMESPACE) -o yaml | \ sed -e '/namespace:/d' -e '/resourceVersion:/d' -e '/uid:/d' -e '/creationTimestamp:/d' | \ - kubectl apply -n speech -f - 2>/dev/null || echo "⚠️ Secret not copied to speech namespace" + kubectl apply -n speech -f - 2>/dev/null || echo "⚠️ Credentials secret not copied to speech namespace" + @kubectl get secret friend-lite-api-keys -n $(APPLICATION_NAMESPACE) -o yaml | \ + sed -e '/namespace:/d' -e '/resourceVersion:/d' -e '/uid:/d' -e '/creationTimestamp:/d' | \ + kubectl apply -n speech -f - 2>/dev/null || echo "⚠️ API keys secret not copied to speech namespace" @echo "βœ… Kubernetes configuration files generated" -config-all: config-docker config-k8s ## Generate 
all configuration files +config-all: config-k8s ## Generate all configuration files @echo "βœ… All configuration files generated" clean: ## Clean up generated configuration files @@ -269,7 +511,7 @@ else @exit 1 endif -deploy-docker: config-docker ## Deploy using Docker Compose +deploy-docker: ## Deploy using Docker Compose @echo "🐳 Deploying with Docker Compose..." @cd backends/advanced && docker-compose up -d @echo "βœ… Docker Compose deployment completed" @@ -314,7 +556,7 @@ build-backend: ## Build backend Docker image @echo "πŸ”¨ Building backend Docker image..." @cd backends/advanced && docker build -t advanced-backend:latest . -up-backend: config-docker ## Start backend services +up-backend: ## Start backend services @echo "πŸš€ Starting backend services..." @cd backends/advanced && docker-compose up -d @@ -353,13 +595,13 @@ audio-manage: ## Interactive audio file management mycelia-sync-status: ## Show Mycelia OAuth sync status for all users @echo "πŸ“Š Checking Mycelia OAuth sync status..." - @cd backends/advanced && uv run python scripts/sync_chronicle_mycelia.py --status + @cd backends/advanced && uv run python scripts/sync_friendlite_mycelia.py --status -mycelia-sync-all: ## Sync all Chronicle users to Mycelia OAuth - @echo "πŸ”„ Syncing all Chronicle users to Mycelia OAuth..." +mycelia-sync-all: ## Sync all Friend-Lite users to Mycelia OAuth + @echo "πŸ”„ Syncing all Friend-Lite users to Mycelia OAuth..." @echo "⚠️ This will create OAuth credentials for users without them" @read -p "Continue? (y/N): " confirm && [ "$$confirm" = "y" ] || exit 1 - @cd backends/advanced && uv run python scripts/sync_chronicle_mycelia.py --sync-all + @cd backends/advanced && uv run python scripts/sync_friendlite_mycelia.py --sync-all mycelia-sync-user: ## Sync specific user to Mycelia OAuth (usage: make mycelia-sync-user EMAIL=user@example.com) @echo "πŸ‘€ Syncing specific user to Mycelia OAuth..." 
@@ -367,11 +609,11 @@ mycelia-sync-user: ## Sync specific user to Mycelia OAuth (usage: make mycelia-s echo "❌ EMAIL parameter is required. Usage: make mycelia-sync-user EMAIL=user@example.com"; \ exit 1; \ fi - @cd backends/advanced && uv run python scripts/sync_chronicle_mycelia.py --email $(EMAIL) + @cd backends/advanced && uv run python scripts/sync_friendlite_mycelia.py --email $(EMAIL) -mycelia-check-orphans: ## Find Mycelia objects without Chronicle owner +mycelia-check-orphans: ## Find Mycelia objects without Friend-Lite owner @echo "πŸ” Checking for orphaned Mycelia objects..." - @cd backends/advanced && uv run python scripts/sync_chronicle_mycelia.py --check-orphans + @cd backends/advanced && uv run python scripts/sync_friendlite_mycelia.py --check-orphans mycelia-reassign-orphans: ## Reassign orphaned objects to user (usage: make mycelia-reassign-orphans EMAIL=admin@example.com) @echo "♻️ Reassigning orphaned Mycelia objects..." @@ -381,7 +623,40 @@ mycelia-reassign-orphans: ## Reassign orphaned objects to user (usage: make myce fi @echo "⚠️ This will reassign all orphaned objects to: $(EMAIL)" @read -p "Continue? (y/N): " confirm && [ "$$confirm" = "y" ] || exit 1 - @cd backends/advanced && uv run python scripts/sync_chronicle_mycelia.py --reassign-orphans --target-email $(EMAIL) + @cd backends/advanced && uv run python scripts/sync_friendlite_mycelia.py --reassign-orphans --target-email $(EMAIL) + +mycelia-create-token: ## Create Mycelia API token for a user in specified environment + @echo "πŸ”‘ Creating Mycelia API Token" + @echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + @echo "" + @# List available environments + @if [ ! -d "environments" ] || [ -z "$$(ls -A environments/*.env 2>/dev/null)" ]; then \ + echo "❌ No environments found. 
Create one with: make wizard"; \ + exit 1; \ + fi + @echo "πŸ“‹ Available environments:"; \ + ls -1 environments/*.env 2>/dev/null | sed 's|environments/||;s|.env$$||' | sed 's/^/ - /'; \ + echo "" + @# Ask for environment + @read -p "Environment name: " env_name; \ + if [ ! -f "environments/$$env_name.env" ]; then \ + echo "❌ Environment '$$env_name' not found"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "πŸ“¦ Checking if $$env_name environment is running..."; \ + echo ""; \ + source "environments/$$env_name.env"; \ + running=$$(docker ps --filter "name=$$COMPOSE_PROJECT_NAME-friend-backend-1" --format "{{.Names}}" 2>/dev/null); \ + if [ -z "$$running" ]; then \ + echo "⚠️ Environment not running. Start it first with:"; \ + echo " ./start-env.sh $$env_name"; \ + echo ""; \ + exit 1; \ + fi; \ + echo "βœ… Environment is running ($$COMPOSE_PROJECT_NAME)"; \ + echo ""; \ + cd backends/advanced && ENV_NAME=$$env_name uv run python scripts/create_mycelia_api_key.py # ======================================== # TESTING TARGETS @@ -428,3 +703,229 @@ test-robot-clean: ## Clean up Robot Framework test results @echo "🧹 Cleaning up Robot Framework test results..." 
@rm -rf results/ @echo "βœ… Test results cleaned" + +# ======================================== +# MULTI-ENVIRONMENT SUPPORT +# ======================================== + +env-list: ## List available environments + @echo "πŸ“‹ Available Environments:" + @echo "" + @ls -1 environments/*.env 2>/dev/null | sed 's|environments/||;s|.env$$||' | while read env; do \ + echo " β€’ $$env"; \ + if [ -f "environments/$$env.env" ]; then \ + grep '^# ' environments/$$env.env | head -1 | sed 's/^# / /'; \ + fi; \ + done + @echo "" + @echo "Usage: make env-start ENV=" + @echo " or: ./start-env.sh [options]" + +env-start: ## Start specific environment (usage: make env-start ENV=dev) + @if [ -z "$(ENV)" ]; then \ + echo "❌ ENV parameter required"; \ + echo "Usage: make env-start ENV=dev"; \ + echo ""; \ + $(MAKE) env-list; \ + exit 1; \ + fi + @./start-env.sh $(ENV) $(OPTS) + +env-stop: ## Stop specific environment (usage: make env-stop ENV=dev) + @if [ -z "$(ENV)" ]; then \ + echo "❌ ENV parameter required"; \ + echo "Usage: make env-stop ENV=dev"; \ + exit 1; \ + fi + @echo "πŸ›‘ Stopping environment: $(ENV)" + @COMPOSE_PROJECT_NAME=friend-lite-$(ENV) docker compose down + +env-clean: ## Clean specific environment data (usage: make env-clean ENV=dev) + @if [ -z "$(ENV)" ]; then \ + echo "❌ ENV parameter required"; \ + echo "Usage: make env-clean ENV=dev"; \ + exit 1; \ + fi + @echo "⚠️ This will delete all data for environment: $(ENV)" + @read -p "Continue? 
(y/N): " confirm && [ "$$confirm" = "y" ] || exit 1 + @source environments/$(ENV).env && rm -rf $$DATA_DIR + @COMPOSE_PROJECT_NAME=friend-lite-$(ENV) docker compose down -v + @echo "βœ… Environment $(ENV) cleaned" + +env-status: ## Show status of all environments + @echo "πŸ“Š Environment Status:" + @echo "" + @for env in $$(ls -1 environments/*.env 2>/dev/null | sed 's|environments/||;s|.env$$||'); do \ + echo "Environment: $$env"; \ + COMPOSE_PROJECT_NAME=friend-lite-$$env docker compose ps 2>/dev/null | grep -v "NAME" || echo " Not running"; \ + echo ""; \ + done + +# ======================================== +# SHARED INFRASTRUCTURE (MongoDB, Redis, Qdrant) +# ======================================== + +infra-start: ## Start shared infrastructure (MongoDB, Redis, Qdrant, optional Neo4j) + @echo "πŸš€ Starting shared infrastructure services..." + @echo "" + @# Check if network exists, create if not + @docker network inspect chronicle-network >/dev/null 2>&1 || docker network create chronicle-network + @# Check if Neo4j should be started (NEO4J_ENABLED in any environment) + @if grep -q "^NEO4J_ENABLED=true" environments/*.env 2>/dev/null; then \ + echo "πŸ”— Neo4j enabled in at least one environment - starting with Neo4j profile..."; \ + docker compose -p chronicle-infra -f compose/infrastructure-shared.yml --profile neo4j up -d; \ + else \ + docker compose -p chronicle-infra -f compose/infrastructure-shared.yml up -d; \ + fi + @echo "" + @echo "βœ… Infrastructure services started!" 
+ @echo "" + @echo " πŸ“Š MongoDB: mongodb://localhost:27017" + @echo " πŸ’Ύ Redis: redis://localhost:6379" + @echo " πŸ” Qdrant: http://localhost:6034" + @if docker ps --format '{{.Names}}' | grep -q '^chronicle-neo4j$$'; then \ + echo " πŸ”— Neo4j: http://localhost:7474 (bolt: 7687)"; \ + fi + @echo "" + @echo "πŸ’‘ These services are shared by all environments" + @echo " Each environment uses unique database names for isolation" + @echo "" + +infra-stop: ## Stop shared infrastructure + @echo "πŸ›‘ Stopping shared infrastructure..." + @echo "⚠️ This will affect ALL running environments!" + @read -p "Continue? (y/N): " confirm && [ "$$confirm" = "y" ] || exit 1 + @docker compose -p chronicle-infra -f compose/infrastructure-shared.yml down + @echo "βœ… Infrastructure stopped" + +infra-restart: ## Restart shared infrastructure + @echo "πŸ”„ Restarting shared infrastructure..." + @docker compose -p chronicle-infra -f compose/infrastructure-shared.yml restart + @echo "βœ… Infrastructure restarted" + +infra-logs: ## View infrastructure logs + @echo "πŸ“‹ Viewing infrastructure logs (press Ctrl+C to exit)..." 
+ @docker compose -p chronicle-infra -f compose/infrastructure-shared.yml logs -f + +infra-status: ## Check infrastructure status + @echo "πŸ“Š Infrastructure Status:" + @echo "" + @if docker ps --format '{{.Names}}' | grep -qE '(chronicle|friend-lite).*mongo'; then \ + echo "βœ… MongoDB is running"; \ + docker ps --format '{{.Names}} {{.Ports}}' | grep mongo | awk '{print " " $$1}'; \ + else \ + echo "❌ MongoDB is not running"; \ + fi + @echo "" + @if docker ps --format '{{.Names}}' | grep -qE '(chronicle|friend-lite).*redis'; then \ + echo "βœ… Redis is running"; \ + docker ps --format '{{.Names}} {{.Ports}}' | grep redis | awk '{print " " $$1}'; \ + else \ + echo "❌ Redis is not running"; \ + fi + @echo "" + @if docker ps --format '{{.Names}}' | grep -qE '(chronicle|friend-lite).*qdrant'; then \ + echo "βœ… Qdrant is running"; \ + docker ps --format '{{.Names}} {{.Ports}}' | grep qdrant | awk '{print " " $$1}'; \ + else \ + echo "❌ Qdrant is not running"; \ + fi + @echo "" + @if docker ps --format '{{.Names}}' | grep -q '^chronicle-neo4j$$'; then \ + echo "βœ… Neo4j is running"; \ + docker ps --format '{{.Names}} {{.Ports}}' | grep neo4j | awk '{print " " $$1}'; \ + else \ + echo "ℹ️ Neo4j is not running (optional)"; \ + fi + @echo "" + @if ! docker ps --format '{{.Names}}' | grep -qE '(chronicle|friend-lite).*(mongo|redis|qdrant)'; then \ + echo "πŸ’‘ Start infrastructure with: make infra-start"; \ + fi + +infra-clean: ## Clean infrastructure data (DANGER: deletes all databases!) + @echo "⚠️ WARNING: This will delete ALL data from ALL environments!" 
+ @echo " This includes:" + @echo " β€’ All MongoDB databases" + @echo " β€’ All Redis data" + @echo " β€’ All Qdrant collections" + @echo " β€’ All Neo4j graph databases (if enabled)" + @echo "" + @read -p "Type 'DELETE ALL DATA' to confirm: " confirm && [ "$$confirm" = "DELETE ALL DATA" ] || exit 1 + @docker compose -p chronicle-infra -f compose/infrastructure-shared.yml --profile neo4j down -v + @echo "βœ… Infrastructure data deleted" + +# ======================================== +# CADDY REVERSE PROXY (Shared Service) +# ======================================== + +caddy-start: ## Start shared Caddy reverse proxy (serves all environments) + @echo "πŸš€ Starting Caddy reverse proxy..." + @echo "" + @# Check if Caddyfile exists + @if [ ! -f "caddy/Caddyfile" ]; then \ + echo "⚠️ Caddyfile not found. Generating..."; \ + ./scripts/generate-caddyfile.sh; \ + echo ""; \ + fi + @# Start Caddy + @docker compose -f compose/caddy.yml up -d + @echo "" + @echo "βœ… Caddy reverse proxy started!" + @echo "" + @# Show access URLs + @if [ -f "config-docker.env" ]; then \ + source config-docker.env; \ + if [ -n "$$TAILSCALE_HOSTNAME" ]; then \ + echo "🌐 Access your environments at:"; \ + echo " https://$$TAILSCALE_HOSTNAME/"; \ + echo ""; \ + echo " Individual environments:"; \ + for env in $$(ls -1 environments/*.env 2>/dev/null | sed 's|environments/||;s|.env$$||'); do \ + echo " β€’ $$env: https://$$TAILSCALE_HOSTNAME/$$env/"; \ + done; \ + echo ""; \ + fi; \ + fi + +caddy-stop: ## Stop shared Caddy reverse proxy + @echo "πŸ›‘ Stopping Caddy reverse proxy..." + @docker compose -f compose/caddy.yml down + @echo "βœ… Caddy stopped" + +caddy-restart: ## Restart shared Caddy reverse proxy + @echo "πŸ”„ Restarting Caddy reverse proxy..." + @docker compose -f compose/caddy.yml restart + @echo "βœ… Caddy restarted" + +caddy-logs: ## View Caddy logs + @echo "πŸ“‹ Viewing Caddy logs (press Ctrl+C to exit)..." 
+ @docker compose -f compose/caddy.yml logs -f + +caddy-status: ## Check if Caddy is running + @echo "πŸ“Š Caddy Status:" + @echo "" + @if docker ps --format '{{.Names}}' | grep -qE '^(chronicle|friend-lite)-caddy'; then \ + echo "βœ… Caddy is running"; \ + docker ps --format '{{.Names}} {{.Ports}}' | grep caddy | awk '{print " " $$1}'; \ + echo ""; \ + if [ -f "config-docker.env" ]; then \ + source config-docker.env; \ + if [ -n "$$TAILSCALE_HOSTNAME" ]; then \ + echo "🌐 Access URL: https://$$TAILSCALE_HOSTNAME/"; \ + fi; \ + fi; \ + else \ + echo "❌ Caddy is not running"; \ + echo " Start with: make caddy-start"; \ + fi + @echo "" + +caddy-regenerate: ## Regenerate Caddyfile from current environments + @echo "πŸ”§ Regenerating Caddyfile..." + @./scripts/generate-caddyfile.sh + @echo "" + @echo "βœ… Caddyfile regenerated" + @echo "" + @echo "πŸ”„ Restart Caddy to apply changes:" + @echo " make caddy-restart" + diff --git a/README.md b/README.md index 34027891..a93dcd55 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Chronicle +# Chronicle (fork from https://github.com/chronicler-ai/chronicle) Self-hostable AI system that captures audio/video data from OMI devices and other sources to generate memories, action items, and contextual insights about your conversations and daily interactions. 
diff --git a/backends/advanced/Caddyfile.template b/backends/advanced/Caddyfile.template index ccb2983d..3041ec79 100644 --- a/backends/advanced/Caddyfile.template +++ b/backends/advanced/Caddyfile.template @@ -56,6 +56,15 @@ localhost TAILSCALE_IP { reverse_proxy chronicle-backend:8000 } + # MCP endpoints - proxy to backend + handle /mcp/* { + reverse_proxy friend-backend:8000 { + header_up X-Real-IP {remote_host} + header_up X-Forwarded-For {remote_host} + header_up X-Forwarded-Proto {scheme} + } + } + # Everything else - proxy to webui handle { reverse_proxy webui:80 @@ -100,6 +109,15 @@ localhost TAILSCALE_IP { # reverse_proxy chronicle-backend:8000 # } # +# # MCP endpoints +# handle /mcp/* { +# reverse_proxy friend-backend:8000 { +# header_up X-Real-IP {remote_host} +# header_up X-Forwarded-For {remote_host} +# header_up X-Forwarded-Proto {scheme} +# } +# } +# # # Everything else - webui # handle { # reverse_proxy webui:80 diff --git a/backends/advanced/compose/backend.yml b/backends/advanced/compose/backend.yml new file mode 100644 index 00000000..a7f22f02 --- /dev/null +++ b/backends/advanced/compose/backend.yml @@ -0,0 +1,58 @@ +# Backend Services +# Friend-Lite backend API and workers + +services: + backend: + build: + context: .. 
+ dockerfile: Dockerfile + env_file: + - ../../../.env.default + ports: + - "${BACKEND_PORT:-8000}:8000" + volumes: + - ../src:/app/src + - ../data/audio_chunks:/app/audio_chunks + - ../data/debug_dir:/app/debug_dir + - ../data:/app/data + environment: + # Service URLs (Docker internal network) + - REDIS_URL=redis://redis:6379/${REDIS_DATABASE:-0} + - MYCELIA_URL=${MYCELIA_URL:-http://mycelia-backend:5173} + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/readiness"] + interval: 30s + timeout: 30s + retries: 5 + start_period: 5s + restart: unless-stopped + networks: + - chronicle-network + + # Unified Worker Container + # Runs all workers in a single container: + # - 3 RQ workers (transcription, memory, default queues) + # - 1 Audio stream worker (Redis Streams consumer - must be single instance) + workers: + build: + context: .. + dockerfile: Dockerfile + command: ["./start-workers.sh"] + env_file: + - ../../../.env.default + volumes: + - ../src:/app/src + - ../start-workers.sh:/app/start-workers.sh + - ../data/audio_chunks:/app/audio_chunks + - ../data:/app/data + environment: + # Service URLs (Docker internal network) + - REDIS_URL=redis://redis:6379/${REDIS_DATABASE:-0} + restart: unless-stopped + networks: + - chronicle-network + +networks: + chronicle-network: + name: chronicle-network + external: true diff --git a/backends/advanced/compose/frontend.yml b/backends/advanced/compose/frontend.yml new file mode 100644 index 00000000..5da122d5 --- /dev/null +++ b/backends/advanced/compose/frontend.yml @@ -0,0 +1,26 @@ +# Frontend Services +# Web UI for Friend-Lite + +services: + webui: + build: + context: ../webui + dockerfile: Dockerfile + args: + - VITE_BASE_PATH=${VITE_BASE_PATH:-/} + - VITE_BACKEND_URL=${VITE_BACKEND_URL:-http://localhost:8000} + env_file: + - ../../../.env.default + ports: + - "${WEBUI_PORT:-3000}:80" + depends_on: + backend: + condition: service_healthy + restart: unless-stopped + networks: + - chronicle-network + 
+networks: + chronicle-network: + name: chronicle-network + external: true diff --git a/backends/advanced/docker-compose.yml b/backends/advanced/docker-compose.yml index 8d4bc42f..b4a54c73 100644 --- a/backends/advanced/docker-compose.yml +++ b/backends/advanced/docker-compose.yml @@ -1,241 +1,53 @@ -services: - chronicle-backend: - build: - context: . - dockerfile: Dockerfile - ports: - - "8000:8000" - env_file: - - .env - volumes: - - ./src:/app/src # Mount source code for development - - ./data/audio_chunks:/app/audio_chunks - - ./data/debug_dir:/app/debug_dir - - ./data:/app/data - environment: - - DEEPGRAM_API_KEY=${DEEPGRAM_API_KEY} - - MISTRAL_API_KEY=${MISTRAL_API_KEY} - - MISTRAL_MODEL=${MISTRAL_MODEL} - - TRANSCRIPTION_PROVIDER=${TRANSCRIPTION_PROVIDER} - - PARAKEET_ASR_URL=${PARAKEET_ASR_URL} - - OLLAMA_BASE_URL=${OLLAMA_BASE_URL} - - HF_TOKEN=${HF_TOKEN} - - SPEAKER_SERVICE_URL=${SPEAKER_SERVICE_URL} - - ADMIN_PASSWORD=${ADMIN_PASSWORD} - - ADMIN_EMAIL=${ADMIN_EMAIL} - - AUTH_SECRET_KEY=${AUTH_SECRET_KEY} - - LLM_PROVIDER=${LLM_PROVIDER} - - OPENAI_API_KEY=${OPENAI_API_KEY} - - OPENAI_BASE_URL=${OPENAI_BASE_URL} - - OPENAI_MODEL=${OPENAI_MODEL} - - NEO4J_HOST=${NEO4J_HOST} - - NEO4J_USER=${NEO4J_USER} - - NEO4J_PASSWORD=${NEO4J_PASSWORD} - - CORS_ORIGINS=http://localhost:3010,http://localhost:8000,http://192.168.1.153:3010,http://192.168.1.153:8000,https://localhost:3010,https://localhost:8000,https://100.105.225.45,https://localhost - - REDIS_URL=redis://redis:6379/0 - depends_on: - qdrant: - condition: service_started - mongo: - condition: service_healthy - redis: - condition: service_healthy - # neo4j-mem0: - # condition: service_started - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8000/readiness"] - interval: 30s - timeout: 30s - retries: 5 - start_period: 5s - restart: unless-stopped - - # Unified Worker Container - # No CUDA needed for chronicle-backend and workers, workers only orchestrate jobs and call external services - # 
Runs all workers in a single container for efficiency: - # - 3 RQ workers (transcription, memory, default queues) - # - 1 Audio stream worker (Redis Streams consumer - must be single to maintain sequential chunks) - workers: - build: - context: . - dockerfile: Dockerfile - command: ["./start-workers.sh"] - env_file: - - .env - volumes: - - ./src:/app/src - - ./start-workers.sh:/app/start-workers.sh - - ./data/audio_chunks:/app/audio_chunks - - ./data:/app/data - environment: - - DEEPGRAM_API_KEY=${DEEPGRAM_API_KEY} - - MISTRAL_API_KEY=${MISTRAL_API_KEY} - - MISTRAL_MODEL=${MISTRAL_MODEL} - - TRANSCRIPTION_PROVIDER=${TRANSCRIPTION_PROVIDER} - - OPENAI_API_KEY=${OPENAI_API_KEY} - - OPENAI_BASE_URL=${OPENAI_BASE_URL} - - OPENAI_MODEL=${OPENAI_MODEL} - - LLM_PROVIDER=${LLM_PROVIDER} - - REDIS_URL=redis://redis:6379/0 - depends_on: - redis: - condition: service_healthy - mongo: - condition: service_healthy - qdrant: - condition: service_started - restart: unless-stopped - - webui: - build: - context: ./webui - dockerfile: Dockerfile - args: - # Direct access (http://localhost:3010): - # - VITE_BACKEND_URL=http://localhost:8000 - # - BACKEND_URL=http://localhost:8000 - # For Caddy HTTPS (https://localhost), use: - - VITE_BACKEND_URL= - - BACKEND_URL= - ports: - # - "${WEBUI_PORT:-3010}:80" - - 3010:80 - depends_on: - chronicle-backend: - condition: service_healthy - restart: unless-stopped - - # Caddy reverse proxy - provides HTTPS for microphone access - # Access at: https://localhost (accepts self-signed cert warning) - # Only starts when HTTPS is configured (Caddyfile exists) - caddy: - image: caddy:2-alpine - ports: - - "443:443" - - "80:80" # HTTP redirect to HTTPS - volumes: - - ./Caddyfile:/etc/caddy/Caddyfile:ro - - caddy_data:/data - - caddy_config:/config - depends_on: - chronicle-backend: - condition: service_healthy - restart: unless-stopped - profiles: - - https - - # Development webui service (use with docker-compose --profile dev up) - webui-dev: - build: 
- context: ./webui - dockerfile: Dockerfile.dev - ports: - - "${WEBUI_DEV_PORT:-5173}:5173" - environment: - - VITE_BACKEND_URL=http://${HOST_IP}:${BACKEND_PUBLIC_PORT:-8000} - volumes: - - ./webui/src:/app/src - - ./webui/public:/app/public - depends_on: - chronicle-backend: - condition: service_healthy - profiles: - - dev - - qdrant: - image: qdrant/qdrant:latest - ports: - - "6033:6033" # gRPC - - "6034:6034" # HTTP - volumes: - - ./data/qdrant_data:/qdrant/storage - - - mongo: - image: mongo:8.0.14 - ports: - - "27017:27017" - volumes: - - mongo_data:/data/db - healthcheck: - test: ["CMD", "mongosh", "--quiet", "--eval", "db.adminCommand({ ping: 1 })"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 10s - - redis: - image: redis:7-alpine - ports: - - "6379:6379" # Avoid conflict with dev on 6379 - volumes: - - ./data/redis_data:/data - command: redis-server --appendonly yes - healthcheck: - test: ["CMD", "redis-cli", "ping"] - interval: 5s - timeout: 3s - retries: 5 - - ## Additional - - # neo4j-mem0: - # image: neo4j:5.15-community - # ports: - # - "7474:7474" # HTTP - # - "7687:7687" # Bolt - # environment: - # - NEO4J_AUTH=neo4j/${NEO4J_PASSWORD:-password} - # - NEO4J_PLUGINS=["apoc"] - # - NEO4J_dbms_security_procedures_unrestricted=apoc.* - # - NEO4J_dbms_security_procedures_allowlist=apoc.* - # volumes: - # - ./data/neo4j_data:/data - # - ./data/neo4j_logs:/logs - # restart: unless-stopped - - # ollama: - # image: ollama/ollama:latest - # container_name: ollama - # ports: - # - "11434:11434" - # volumes: - # - ollama_data:/root/.ollama - # deploy: - # resources: - # reservations: - # devices: - # - driver: nvidia - # count: all - # capabilities: [gpu] - - - - # Use tailscale instead - # UNCOMMENT OUT FOR LOCAL DEMO - EXPOSES to internet - # ngrok: - # image: ngrok/ngrok:latest - # depends_on: [chronicle-backend, proxy] - # ports: - # - "4040:4040" # Ngrok web interface - # environment: - # - NGROK_AUTHTOKEN=${NGROK_AUTHTOKEN} - # command: "http 
proxy:80 --url=${NGROK_URL} --basic-auth=${NGROK_BASIC_AUTH}" - -# Shared network for cross-project communication +# Friend-Lite Docker Compose +# Root compose file using modular includes +# +# Usage: +# Development: docker compose up +# With Mycelia: docker compose --profile mycelia up +# With HTTPS: docker compose --profile https up +# Testing: docker compose -f docker-compose.yml -f compose/overrides/test.yml up +# Production: docker compose -f docker-compose.yml -f compose/overrides/prod.yml up +# +# Structure: +# compose/backend.yml - Backend API and workers +# compose/frontend.yml - Web UI +# compose/overrides/ - Environment-specific overrides + +include: + # Application services (always included) + - compose/backend.yml + - compose/frontend.yml + + # Note: Infrastructure (MongoDB, Redis, Qdrant) is SHARED across all environments + # Start infrastructure once with: docker compose -f ../../compose/infrastructure-shared.yml up -d + # Each environment connects to shared infrastructure using unique database names + + # Note: Caddy is SHARED across all environments + # Start Caddy once with: docker compose -f ../../compose/caddy.yml up -d + # Caddy serves all environments via path-based routing + + # Note: Mycelia moved to root level (../../compose/mycelia.yml) + # To use Mycelia, run from project root: docker compose --profile mycelia up + + # Development overrides (default) + - compose/overrides/dev.yml + +# Shared network configuration networks: - default: + chronicle-network: name: chronicle-network + external: true +# Shared volume configuration volumes: - ollama_data: - driver: local mongo_data: driver: local caddy_data: driver: local caddy_config: driver: local + ollama_data: + driver: local neo4j_data: driver: local neo4j_logs: diff --git a/backends/advanced/src/advanced_omi_backend/app_config.py b/backends/advanced/src/advanced_omi_backend/app_config.py index 9857417e..bcd16c35 100644 --- a/backends/advanced/src/advanced_omi_backend/app_config.py +++ 
b/backends/advanced/src/advanced_omi_backend/app_config.py @@ -26,6 +26,9 @@ class AppConfig: """Centralized application configuration.""" def __init__(self): + # Graceful degradation mode + self.allow_missing_api_keys = os.getenv("ALLOW_MISSING_API_KEYS", "false").lower() == "true" + # MongoDB Configuration self.mongodb_uri = os.getenv("MONGODB_URI", "mongodb://mongo:27017") # default to legacy value to avoid breaking peoples .env @@ -51,19 +54,31 @@ def __init__(self): self.min_speech_segment_duration = float(os.getenv("MIN_SPEECH_SEGMENT_DURATION", "1.0")) self.cropping_context_padding = float(os.getenv("CROPPING_CONTEXT_PADDING", "0.1")) - # Transcription Configuration + # Transcription Configuration with graceful degradation self.transcription_provider_name = os.getenv("TRANSCRIPTION_PROVIDER") self.deepgram_api_key = os.getenv("DEEPGRAM_API_KEY") self.mistral_api_key = os.getenv("MISTRAL_API_KEY") + self.transcription_required = os.getenv("TRANSCRIPTION_REQUIRED", "true").lower() == "true" - # Get configured transcription provider - self.transcription_provider = get_transcription_provider(self.transcription_provider_name) + # Get configured transcription provider (with graceful degradation support) + self.transcription_provider = get_transcription_provider( + self.transcription_provider_name, + allow_missing_keys=self.allow_missing_api_keys + ) if self.transcription_provider: logger.info( f"βœ… Using {self.transcription_provider.name} transcription provider ({self.transcription_provider.mode})" ) + self.transcription_enabled = True else: - logger.warning("⚠️ No transcription provider configured - speech-to-text will not be available") + if self.transcription_required and not self.allow_missing_api_keys: + logger.error("❌ Transcription provider required but not configured") + else: + logger.warning("⚠️ Transcription disabled - No transcription provider configured") + if self.allow_missing_api_keys: + logger.warning(" Add Deepgram API key to enable 
transcription") + logger.warning(" Configure at: http://localhost:4000/system") + self.transcription_enabled = False # External Services Configuration self.qdrant_base_url = os.getenv("QDRANT_BASE_URL", "qdrant") @@ -88,6 +103,24 @@ def __init__(self): # Thread pool configuration self.max_workers = os.cpu_count() or 4 + # LLM Configuration with graceful degradation + self.llm_provider = os.getenv("LLM_PROVIDER", "openai") + self.openai_api_key = os.getenv("OPENAI_API_KEY") + self.llm_required = os.getenv("LLM_REQUIRED", "true").lower() == "true" + + if not self.openai_api_key: + if self.llm_required and not self.allow_missing_api_keys: + logger.error("❌ LLM provider required but OPENAI_API_KEY not configured") + else: + logger.warning("⚠️ LLM disabled - Memory extraction and chat features unavailable") + if self.allow_missing_api_keys: + logger.warning(" Add OpenAI API key to enable LLM features") + logger.warning(" Configure at: http://localhost:4000/system") + self.llm_enabled = False + else: + self.llm_enabled = True + logger.info(f"βœ… LLM enabled (provider: {self.llm_provider})") + # Memory service configuration self.memory_service_supports_threshold = self.memory_provider == "chronicle" diff --git a/backends/advanced/src/advanced_omi_backend/app_factory.py b/backends/advanced/src/advanced_omi_backend/app_factory.py index 7ccda184..fdde55de 100644 --- a/backends/advanced/src/advanced_omi_backend/app_factory.py +++ b/backends/advanced/src/advanced_omi_backend/app_factory.py @@ -37,6 +37,7 @@ from advanced_omi_backend.routers.modules.websocket_routes import router as websocket_router from advanced_omi_backend.services.audio_service import get_audio_stream_service from advanced_omi_backend.task_manager import init_task_manager, get_task_manager +from advanced_omi_backend.services.mcp_server import setup_mcp_server logger = logging.getLogger(__name__) application_logger = logging.getLogger("audio_processing") @@ -66,6 +67,16 @@ async def lifespan(app: FastAPI): 
application_logger.error(f"Failed to initialize Beanie: {e}") raise + # Initialize settings manager + try: + from advanced_omi_backend.settings_manager import init_settings_manager + settings_mgr = init_settings_manager(config.db) + await settings_mgr.initialize() + application_logger.info("βœ… Settings manager initialized and loaded from environment/database") + except Exception as e: + application_logger.error(f"Failed to initialize settings manager: {e}") + # Don't raise - use fallback to environment variables if settings manager fails + # Create admin user if needed try: await create_admin_user_if_needed() @@ -205,6 +216,10 @@ def create_app() -> FastAPI: tags=["users"], ) + # Setup MCP server for conversation access + setup_mcp_server(app) + logger.info("MCP server configured for conversation access") + # Mount static files LAST (mounts are catch-all patterns) CHUNK_DIR = Path("/app/audio_chunks") app.mount("/audio", StaticFiles(directory=CHUNK_DIR), name="audio") diff --git a/backends/advanced/src/advanced_omi_backend/controllers/system_controller.py b/backends/advanced/src/advanced_omi_backend/controllers/system_controller.py index 44067a49..02888259 100644 --- a/backends/advanced/src/advanced_omi_backend/controllers/system_controller.py +++ b/backends/advanced/src/advanced_omi_backend/controllers/system_controller.py @@ -562,3 +562,23 @@ async def set_memory_provider(provider: str): ) +# API Key and Environment Configuration Functions + +async def get_api_key_status(): + """Get current API key configuration status.""" + try: + from advanced_omi_backend.utils.env_writer import get_env_writer + + env_writer = get_env_writer() + status = env_writer.get_configuration_status() + + return { + **status, + "status": "success" + } + + except Exception as e: + logger.error(f"Error getting API key status: {e}") + return JSONResponse( + status_code=500, content={"error": f"Failed to get API key status: {str(e)}"} + ) diff --git 
a/backends/advanced/src/advanced_omi_backend/models/user.py b/backends/advanced/src/advanced_omi_backend/models/user.py index b0ced195..7998c5b3 100644 --- a/backends/advanced/src/advanced_omi_backend/models/user.py +++ b/backends/advanced/src/advanced_omi_backend/models/user.py @@ -25,6 +25,8 @@ class UserRead(BaseUser[PydanticObjectId]): display_name: Optional[str] = None registered_clients: dict[str, dict] = Field(default_factory=dict) primary_speakers: list[dict] = Field(default_factory=list) + api_key: Optional[str] = None + api_key_created_at: Optional[datetime] = None class UserUpdate(BaseUserUpdate): @@ -62,6 +64,9 @@ class User(BeanieBaseUser, Document): registered_clients: dict[str, dict] = Field(default_factory=dict) # Speaker processing filter configuration primary_speakers: list[dict] = Field(default_factory=list) + # API key for MCP access + api_key: Optional[str] = None + api_key_created_at: Optional[datetime] = None class Settings: name = "users" # Collection name in MongoDB - standardized from "fastapi_users" diff --git a/backends/advanced/src/advanced_omi_backend/routers/api_router.py b/backends/advanced/src/advanced_omi_backend/routers/api_router.py index 528713c0..e6abfe48 100644 --- a/backends/advanced/src/advanced_omi_backend/routers/api_router.py +++ b/backends/advanced/src/advanced_omi_backend/routers/api_router.py @@ -16,6 +16,7 @@ conversation_router, memory_router, queue_router, + settings_router, system_router, user_router, ) @@ -34,6 +35,7 @@ router.include_router(client_router) router.include_router(conversation_router) router.include_router(memory_router) +router.include_router(settings_router) router.include_router(system_router) router.include_router(queue_router) router.include_router(health_router) # Also include under /api for frontend compatibility diff --git a/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py b/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py index a5669b06..2cda0884 
100644 --- a/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py +++ b/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py @@ -12,6 +12,7 @@ - audio_routes: Audio file uploads and processing - health_routes: Health check endpoints - websocket_routes: WebSocket connection handling +- settings_routes: Application settings management """ from .audio_routes import router as audio_router @@ -21,6 +22,7 @@ from .health_routes import router as health_router from .memory_routes import router as memory_router from .queue_routes import router as queue_router +from .settings_routes import router as settings_router from .system_routes import router as system_router from .user_routes import router as user_router from .websocket_routes import router as websocket_router @@ -33,6 +35,7 @@ "health_router", "memory_router", "queue_router", + "settings_router", "system_router", "user_router", "websocket_router", diff --git a/backends/advanced/src/advanced_omi_backend/routers/modules/health_routes.py b/backends/advanced/src/advanced_omi_backend/routers/modules/health_routes.py index d94940ce..15bda29c 100644 --- a/backends/advanced/src/advanced_omi_backend/routers/modules/health_routes.py +++ b/backends/advanced/src/advanced_omi_backend/routers/modules/health_routes.py @@ -1,5 +1,5 @@ """ -Health check routes for Chronicle backend. +Health check routes for Friend-Lite backend. This module provides health check endpoints for monitoring the application's status. 
""" @@ -35,8 +35,9 @@ # Memory service memory_service = get_memory_service() -# Transcription provider -transcription_provider = get_transcription_provider() +# Transcription provider (with graceful degradation support) +allow_missing_keys = os.getenv("ALLOW_MISSING_API_KEYS", "false").lower() == "true" +transcription_provider = get_transcription_provider(allow_missing_keys=allow_missing_keys) # Qdrant Configuration QDRANT_BASE_URL = os.getenv("QDRANT_BASE_URL", "qdrant") @@ -116,15 +117,9 @@ async def health_check(): overall_healthy = True critical_services_healthy = True - + # Get configuration once at the start - memory_provider = os.getenv("MEMORY_PROVIDER", "chronicle").lower() - - # Map legacy provider names to current names - if memory_provider in ("friend-lite", "friend_lite"): - logger.debug(f"Mapping legacy provider '{memory_provider}' to 'chronicle'") - memory_provider = "chronicle" - + memory_provider = os.getenv("MEMORY_PROVIDER", "friend_lite") speaker_service_url = os.getenv("SPEAKER_SERVICE_URL") openmemory_mcp_url = os.getenv("OPENMEMORY_MCP_URL") @@ -135,12 +130,14 @@ async def health_check(): "status": "βœ… Connected", "healthy": True, "critical": True, + "url": MONGODB_URI, } except asyncio.TimeoutError: health_status["services"]["mongodb"] = { "status": "❌ Connection Timeout (5s)", "healthy": False, "critical": True, + "url": MONGODB_URI, } overall_healthy = False critical_services_healthy = False @@ -149,13 +146,14 @@ async def health_check(): "status": f"❌ Connection Failed: {str(e)}", "healthy": False, "critical": True, + "url": MONGODB_URI, } overall_healthy = False critical_services_healthy = False # Check Redis and RQ Workers (critical for queue processing) try: - from advanced_omi_backend.controllers.queue_controller import get_queue_health + from advanced_omi_backend.controllers.queue_controller import get_queue_health, REDIS_URL # Get queue health (includes Redis connection test and worker count) queue_health = await asyncio.wait_for( 
@@ -173,6 +171,7 @@ async def health_check(): "status": "βœ… Connected", "healthy": True, "critical": True, + "url": REDIS_URL, "worker_count": worker_count, "active_workers": active_workers, "idle_workers": idle_workers, @@ -183,6 +182,7 @@ async def health_check(): "status": f"❌ Connection Failed: {queue_health.get('redis_connection')}", "healthy": False, "critical": True, + "url": REDIS_URL, "worker_count": 0 } overall_healthy = False @@ -193,6 +193,7 @@ async def health_check(): "status": "❌ Connection Timeout (5s)", "healthy": False, "critical": True, + "url": os.getenv("REDIS_URL", "redis://localhost:6379/0"), "worker_count": 0 } overall_healthy = False @@ -202,6 +203,7 @@ async def health_check(): "status": f"❌ Connection Failed: {str(e)}", "healthy": False, "critical": True, + "url": os.getenv("REDIS_URL", "redis://localhost:6379/0"), "worker_count": 0 } overall_healthy = False @@ -213,61 +215,70 @@ async def health_check(): health_status["services"]["audioai"] = { "status": llm_health.get("status", "❌ Unknown"), "healthy": "βœ…" in llm_health.get("status", ""), - "base_url": llm_health.get("base_url", ""), + "url": llm_health.get("base_url", ""), "model": llm_health.get("default_model", ""), "provider": os.getenv("LLM_PROVIDER", "openai"), "critical": False, } except asyncio.TimeoutError: + llm_base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1") health_status["services"]["audioai"] = { "status": "⚠️ Connection Timeout (8s) - Service may not be running", "healthy": False, + "url": llm_base_url, "provider": os.getenv("LLM_PROVIDER", "openai"), "critical": False, } overall_healthy = False except Exception as e: + llm_base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1") health_status["services"]["audioai"] = { "status": f"⚠️ Connection Failed: {str(e)} - Service may not be running", "healthy": False, + "url": llm_base_url, "provider": os.getenv("LLM_PROVIDER", "openai"), "critical": False, } overall_healthy = False # Check 
memory service (provider-dependent) - if memory_provider == "chronicle": + if memory_provider == "friend_lite": + qdrant_url = f"http://{QDRANT_BASE_URL}:{QDRANT_PORT}" try: - # Test Chronicle memory service connection with timeout + # Test Friend-Lite memory service connection with timeout test_success = await asyncio.wait_for(memory_service.test_connection(), timeout=8.0) if test_success: health_status["services"]["memory_service"] = { - "status": "βœ… Chronicle Memory Connected", + "status": "βœ… Friend-Lite Memory Connected", "healthy": True, - "provider": "chronicle", + "provider": "friend_lite", + "url": qdrant_url, "critical": False, } else: health_status["services"]["memory_service"] = { - "status": "⚠️ Chronicle Memory Test Failed", + "status": "⚠️ Friend-Lite Memory Test Failed", "healthy": False, - "provider": "chronicle", + "provider": "friend_lite", + "url": qdrant_url, "critical": False, } overall_healthy = False except asyncio.TimeoutError: health_status["services"]["memory_service"] = { - "status": "⚠️ Chronicle Memory Timeout (8s) - Check Qdrant", + "status": "⚠️ Friend-Lite Memory Timeout (8s) - Check Qdrant", "healthy": False, - "provider": "chronicle", + "provider": "friend_lite", + "url": qdrant_url, "critical": False, } overall_healthy = False except Exception as e: health_status["services"]["memory_service"] = { - "status": f"⚠️ Chronicle Memory Failed: {str(e)}", + "status": f"⚠️ Friend-Lite Memory Failed: {str(e)}", "healthy": False, - "provider": "chronicle", + "provider": "friend_lite", + "url": qdrant_url, "critical": False, } overall_healthy = False @@ -277,10 +288,12 @@ async def health_check(): "status": "βœ… Using OpenMemory MCP", "healthy": True, "provider": "openmemory_mcp", + "url": openmemory_mcp_url or "Not configured", "critical": False, } elif memory_provider == "mycelia": # Mycelia memory service check + mycelia_url = os.getenv("MYCELIA_API_URL", "http://mycelia-backend:5100") try: # Test Mycelia memory service connection 
with timeout test_success = await asyncio.wait_for(memory_service.test_connection(), timeout=8.0) @@ -289,6 +302,7 @@ async def health_check(): "status": "βœ… Mycelia Memory Connected", "healthy": True, "provider": "mycelia", + "url": mycelia_url, "critical": False, } else: @@ -296,6 +310,7 @@ async def health_check(): "status": "⚠️ Mycelia Memory Test Failed", "healthy": False, "provider": "mycelia", + "url": mycelia_url, "critical": False, } overall_healthy = False @@ -304,6 +319,7 @@ async def health_check(): "status": "⚠️ Mycelia Memory Timeout (8s) - Check Mycelia service", "healthy": False, "provider": "mycelia", + "url": mycelia_url, "critical": False, } overall_healthy = False @@ -312,6 +328,7 @@ async def health_check(): "status": f"⚠️ Mycelia Memory Failed: {str(e)}", "healthy": False, "provider": "mycelia", + "url": mycelia_url, "critical": False, } overall_healthy = False diff --git a/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py b/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py new file mode 100644 index 00000000..3da73c4d --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py @@ -0,0 +1,564 @@ +""" +Application settings management routes. + +Provides endpoints for reading and updating dynamic application settings. +Settings changes take effect within the cache TTL (default: 5 seconds). 
+""" + +import logging + +from fastapi import APIRouter, Depends, HTTPException + +from advanced_omi_backend.auth import current_active_user, current_superuser +from advanced_omi_backend.settings_manager import get_settings_manager, SettingsManager +from advanced_omi_backend.settings_models import ( + AllSettings, + ApiKeysSettings, + AudioProcessingSettings, + ConversationSettings, + DiarizationSettings, + InfrastructureSettings, + LLMSettings, + MiscSettings, + NetworkSettings, + ProviderSettings, + SpeechDetectionSettings, +) +from advanced_omi_backend.users import User + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/settings", tags=["settings"]) + + +# All Settings (Combined) + + +@router.get("", response_model=AllSettings) +async def get_all_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Get all application settings. + + Available to all authenticated users for read access. + """ + return await settings_mgr.get_all_settings() + + +@router.put("", response_model=AllSettings) +async def update_all_settings( + settings: AllSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update all application settings at once. + + Admin only. Changes take effect within the cache TTL. 
+ """ + await settings_mgr.update_all_settings(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_all_settings() + + +# Speech Detection Settings + + +@router.get("/speech-detection", response_model=SpeechDetectionSettings) +async def get_speech_detection_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get speech detection settings.""" + return await settings_mgr.get_speech_detection() + + +@router.put("/speech-detection", response_model=SpeechDetectionSettings) +async def update_speech_detection_settings( + settings: SpeechDetectionSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update speech detection settings. Admin only. + + These settings control when audio sessions are converted to conversations. + """ + await settings_mgr.update_speech_detection(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_speech_detection() + + +# Conversation Settings + + +@router.get("/conversation", response_model=ConversationSettings) +async def get_conversation_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get conversation management settings.""" + return await settings_mgr.get_conversation() + + +@router.put("/conversation", response_model=ConversationSettings) +async def update_conversation_settings( + settings: ConversationSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update conversation management settings. Admin only. + + Controls conversation timeouts, transcription buffering, and speaker enrollment. 
+ """ + await settings_mgr.update_conversation(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_conversation() + + +# Audio Processing Settings + + +@router.get("/audio-processing", response_model=AudioProcessingSettings) +async def get_audio_processing_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get audio processing settings.""" + return await settings_mgr.get_audio_processing() + + +@router.put("/audio-processing", response_model=AudioProcessingSettings) +async def update_audio_processing_settings( + settings: AudioProcessingSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update audio processing settings. Admin only. + + Controls audio cropping, silence removal, and segment duration. + """ + await settings_mgr.update_audio_processing(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_audio_processing() + + +# Diarization Settings + + +@router.get("/diarization", response_model=DiarizationSettings) +async def get_diarization_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get speaker diarization settings.""" + return await settings_mgr.get_diarization() + + +@router.put("/diarization", response_model=DiarizationSettings) +async def update_diarization_settings( + settings: DiarizationSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update speaker diarization settings. Admin only. + + Controls how speakers are identified and segments are separated. 
+ """ + await settings_mgr.update_diarization(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_diarization() + + +# LLM Settings + + +@router.get("/llm", response_model=LLMSettings) +async def get_llm_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get LLM provider and model settings.""" + return await settings_mgr.get_llm() + + +@router.put("/llm", response_model=LLMSettings) +async def update_llm_settings( + settings: LLMSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update LLM settings. Admin only. + + Controls which LLM provider and models to use for processing and chat. + """ + await settings_mgr.update_llm(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_llm() + + +# Provider Settings + + +@router.get("/providers", response_model=ProviderSettings) +async def get_provider_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get service provider settings.""" + return await settings_mgr.get_providers() + + +@router.put("/providers", response_model=ProviderSettings) +async def update_provider_settings( + settings: ProviderSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update service provider settings. Admin only. + + Controls which memory and transcription providers to use. 
+ """ + await settings_mgr.update_providers(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_providers() + + +# Network Settings + + +@router.get("/network", response_model=NetworkSettings) +async def get_network_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get network and public access settings.""" + return await settings_mgr.get_network() + + +@router.put("/network", response_model=NetworkSettings) +async def update_network_settings( + settings: NetworkSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update network settings. Admin only. + + Controls public endpoints, CORS, and network access configuration. + """ + await settings_mgr.update_network(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_network() + + +# Infrastructure Settings + + +@router.get("/infrastructure", response_model=InfrastructureSettings) +async def get_infrastructure_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get infrastructure settings.""" + return await settings_mgr.get_infrastructure() + + +@router.put("/infrastructure", response_model=InfrastructureSettings) +async def update_infrastructure_settings( + settings: InfrastructureSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update infrastructure settings. Admin only. + + Controls MongoDB, Redis, Qdrant, and Neo4j connection settings. 
+ """ + await settings_mgr.update_infrastructure(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_infrastructure() + + +# Miscellaneous Settings + + +@router.get("/misc", response_model=MiscSettings) +async def get_misc_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get miscellaneous settings.""" + return await settings_mgr.get_misc() + + +@router.put("/misc", response_model=MiscSettings) +async def update_misc_settings( + settings: MiscSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update miscellaneous settings. Admin only. + + Controls debug options and telemetry. + """ + await settings_mgr.update_misc(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_misc() + + +# API Keys Settings + + +@router.get("/api-keys", response_model=ApiKeysSettings) +async def get_api_keys_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get API keys settings.""" + return await settings_mgr.get_api_keys() + + +@router.put("/api-keys", response_model=ApiKeysSettings) +async def update_api_keys_settings( + settings: ApiKeysSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update API keys settings. Admin only. + + Controls external service API keys. + """ + await settings_mgr.update_api_keys(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_api_keys() + + +@router.get("/api-keys/load-from-file", response_model=ApiKeysSettings) +async def load_api_keys_from_file( + file_path: str = ".env.api-keys", + current_user: User = Depends(current_superuser), +): + """ + Load API keys from a file. Admin only. 
+ + Args: + file_path: Path to the API keys file (default: .env.api-keys) + + Returns: + API keys loaded from the file + """ + from advanced_omi_backend.utils.api_keys_manager import read_api_keys_from_file + + try: + keys_dict = read_api_keys_from_file(file_path) + return ApiKeysSettings(**keys_dict) + except Exception as e: + logger.error(f"Error loading API keys from file {file_path}: {e}") + raise HTTPException( + status_code=500, + detail=f"Failed to load API keys from {file_path}: {str(e)}" + ) + + +@router.post("/api-keys/save") +async def save_api_keys( + settings: ApiKeysSettings, + save_to_file: bool = True, + save_to_database: bool = True, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Save API keys to file and/or database. Admin only. + + Args: + settings: API keys to save + save_to_file: Save to .env.api-keys file (default: True) + save_to_database: Save to MongoDB (default: True) + """ + from advanced_omi_backend.utils.api_keys_manager import write_api_keys_to_file + + results = {"file": False, "database": False, "errors": []} + + # Save to file + if save_to_file: + try: + keys_dict = { + "openai_api_key": settings.openai_api_key, + "deepgram_api_key": settings.deepgram_api_key, + "mistral_api_key": settings.mistral_api_key, + "hf_token": settings.hf_token, + "langfuse_public_key": settings.langfuse_public_key, + "langfuse_secret_key": settings.langfuse_secret_key, + "ngrok_authtoken": settings.ngrok_authtoken, + } + success = write_api_keys_to_file(keys_dict, ".env.api-keys") + results["file"] = success + if not success: + results["errors"].append("Failed to write to .env.api-keys file") + except Exception as e: + logger.error(f"Error writing API keys to file: {e}") + results["errors"].append(f"File write error: {str(e)}") + + # Save to database + if save_to_database: + try: + await settings_mgr.update_api_keys(settings, updated_by=str(current_user.id)) + 
results["database"] = True + except Exception as e: + logger.error(f"Error saving API keys to database: {e}") + results["errors"].append(f"Database save error: {str(e)}") + + return { + "success": results["file"] or results["database"], + "saved_to": { + "file": results["file"], + "database": results["database"], + }, + "errors": results["errors"], + "settings": await settings_mgr.get_api_keys(), + } + + +# Cache Management + + +@router.post("/cache/invalidate") +async def invalidate_settings_cache( + category: str = None, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Invalidate settings cache. Admin only. + + Forces settings to reload from database on next access. + If category is provided, only invalidates that category. + """ + settings_mgr.invalidate_cache(category) + return { + "status": "success", + "message": f"Cache invalidated for {category if category else 'all settings'}", + } + + +# Infrastructure Status + + +@router.get("/infrastructure/status") +async def get_infrastructure_status( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Get infrastructure service connection status. + + Returns URLs and connection status for MongoDB, Redis, Qdrant, Neo4j. + Uses editable settings from database. 
+ """ + from advanced_omi_backend.app_config import get_app_config + + # Get infrastructure settings from database + infra_settings = await settings_mgr.get_infrastructure() + config = get_app_config() + + status = { + "mongodb": { + "url": infra_settings.mongodb_uri, + "database": infra_settings.mongodb_database, + "connected": False, + }, + "redis": { + "url": infra_settings.redis_url, + "connected": False, + }, + "qdrant": { + "url": f"http://{infra_settings.qdrant_base_url}:{infra_settings.qdrant_port}", + "connected": False, + }, + "neo4j": { + "host": infra_settings.neo4j_host, + "user": infra_settings.neo4j_user, + "connected": False, + }, + } + + # Check MongoDB + try: + await config.mongo_client.admin.command('ping') + status["mongodb"]["connected"] = True + except Exception as e: + logger.debug(f"MongoDB connection check failed: {e}") + + # Check Redis + try: + from advanced_omi_backend.controllers.queue_controller import redis_conn + redis_conn.ping() + status["redis"]["connected"] = True + except Exception as e: + logger.debug(f"Redis connection check failed: {e}") + + # Check Qdrant + try: + import httpx + async with httpx.AsyncClient() as client: + response = await client.get(f"{status['qdrant']['url']}/", timeout=2.0) + status["qdrant"]["connected"] = response.status_code == 200 + except Exception as e: + logger.debug(f"Qdrant connection check failed: {e}") + + # Neo4j check (optional service) + # We don't check Neo4j connection as it's optional and may not be configured + + return status + + +@router.get("/api-keys/status") +async def get_api_keys_status( + current_user: User = Depends(current_superuser), +): + """ + Get API keys configuration status. Admin only. + + Returns which API keys are configured (but not the actual keys). 
+ """ + import os + + keys_status = { + "openai": { + "name": "OpenAI API Key", + "configured": bool(os.getenv("OPENAI_API_KEY")), + "env_var": "OPENAI_API_KEY", + }, + "deepgram": { + "name": "Deepgram API Key", + "configured": bool(os.getenv("DEEPGRAM_API_KEY")), + "env_var": "DEEPGRAM_API_KEY", + }, + "mistral": { + "name": "Mistral API Key", + "configured": bool(os.getenv("MISTRAL_API_KEY")), + "env_var": "MISTRAL_API_KEY", + }, + "hf_token": { + "name": "HuggingFace Token", + "configured": bool(os.getenv("HF_TOKEN")), + "env_var": "HF_TOKEN", + }, + "langfuse_public": { + "name": "Langfuse Public Key", + "configured": bool(os.getenv("LANGFUSE_PUBLIC_KEY")), + "env_var": "LANGFUSE_PUBLIC_KEY", + }, + "langfuse_secret": { + "name": "Langfuse Secret Key", + "configured": bool(os.getenv("LANGFUSE_SECRET_KEY")), + "env_var": "LANGFUSE_SECRET_KEY", + }, + "ngrok": { + "name": "Ngrok Auth Token", + "configured": bool(os.getenv("NGROK_AUTHTOKEN")), + "env_var": "NGROK_AUTHTOKEN", + }, + } + + return keys_status diff --git a/backends/advanced/src/advanced_omi_backend/routers/modules/system_routes.py b/backends/advanced/src/advanced_omi_backend/routers/modules/system_routes.py index e51c036c..9e0b1953 100644 --- a/backends/advanced/src/advanced_omi_backend/routers/modules/system_routes.py +++ b/backends/advanced/src/advanced_omi_backend/routers/modules/system_routes.py @@ -143,3 +143,10 @@ async def set_memory_provider( ): """Set memory provider and restart backend services. Admin only.""" return await system_controller.set_memory_provider(provider) +# API Key and Configuration Management Endpoints + +@router.get("/admin/config/status") +async def get_configuration_status(current_user: User = Depends(current_superuser)): + """Get current API key configuration and feature status. 
Admin only."""
+    return await system_controller.get_api_key_status()
+
diff --git a/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py b/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py
index 12ed5c63..233ddd68 100644
--- a/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py
+++ b/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py
@@ -5,10 +5,12 @@
 """
 
 import logging
+import secrets
+from datetime import UTC, datetime
 
-from fastapi import APIRouter, Depends
+from fastapi import APIRouter, Depends, HTTPException
 
-from advanced_omi_backend.auth import current_superuser
+from advanced_omi_backend.auth import current_active_user, current_superuser
 from advanced_omi_backend.controllers import user_controller
 from advanced_omi_backend.users import User, UserCreate, UserUpdate
 
@@ -44,3 +46,42 @@ async def delete_user(
 ):
     """Delete a user and optionally their associated data. Admin only."""
     return await user_controller.delete_user(user_id, delete_conversations, delete_memories)
+
+
+@router.post("/me/api-key")
+async def generate_api_key(current_user: User = Depends(current_active_user)):
+    """Generate a new API key for the current user."""
+    try:
+        # Generate a secure random API key (32 random bytes -> ~43-char URL-safe base64 string)
+        new_api_key = secrets.token_urlsafe(32)
+
+        # Update user with new API key
+        current_user.api_key = new_api_key
+        current_user.api_key_created_at = datetime.now(UTC)
+        await current_user.save()
+
+        logger.info(f"Generated new API key for user {current_user.id}")
+
+        return {
+            "api_key": new_api_key,
+            "created_at": current_user.api_key_created_at.isoformat()
+        }
+    except Exception as e:
+        logger.error(f"Failed to generate API key for user {current_user.id}: {e}")
+        raise HTTPException(status_code=500, detail="Failed to generate API key")
+
+
+@router.delete("/me/api-key")
+async def revoke_api_key(current_user: User = Depends(current_active_user)):
+    """Revoke the 
current user's API key.""" + try: + current_user.api_key = None + current_user.api_key_created_at = None + await current_user.save() + + logger.info(f"Revoked API key for user {current_user.id}") + + return {"status": "success", "message": "API key revoked"} + except Exception as e: + logger.error(f"Failed to revoke API key for user {current_user.id}: {e}") + raise HTTPException(status_code=500, detail="Failed to revoke API key") diff --git a/backends/advanced/src/advanced_omi_backend/services/mcp_server.py b/backends/advanced/src/advanced_omi_backend/services/mcp_server.py new file mode 100644 index 00000000..27288599 --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/services/mcp_server.py @@ -0,0 +1,532 @@ +""" +MCP Server for Friend-Lite conversations. + +This module implements an MCP (Model Context Protocol) server that provides +conversation access tools for LLMs to retrieve conversation data, transcripts, +and audio files. + +Key features: +- List conversations with filtering and pagination +- Get detailed conversation data including transcripts and segments +- Access conversation audio files as resources +- User-scoped access with proper authentication +""" + +import base64 +import contextvars +import json +import logging +from pathlib import Path +from typing import Optional, List + +from fastapi import FastAPI, Request +from fastapi.routing import APIRouter +from mcp.server.fastmcp import FastMCP +from mcp.server.sse import SseServerTransport + +from advanced_omi_backend.config import CHUNK_DIR +from advanced_omi_backend.models.conversation import Conversation +from advanced_omi_backend.models.user import User + +logger = logging.getLogger(__name__) + +# Initialize MCP +mcp = FastMCP("friend-lite-conversations") + +# Context variables for user_id +user_id_var: contextvars.ContextVar[str] = contextvars.ContextVar("user_id") + +# Create a router for MCP endpoints +mcp_router = APIRouter(prefix="/mcp") + +# Initialize SSE transport +sse = 
SseServerTransport("/mcp/messages/") + + +async def resolve_user_identifier(identifier: str) -> Optional[str]: + """ + Resolve a user identifier (email or user_id) to a user_id. + + Args: + identifier: Either an email address or a MongoDB ObjectId string + + Returns: + User ID string if found, None otherwise + """ + try: + # First try to find by email (case-insensitive) + user = await User.find_one(User.email == identifier.lower()) + if user: + logger.info(f"Resolved email '{identifier}' to user_id: {user.id}") + return str(user.id) + + # If not found by email, assume it's already a user_id + # Verify it exists + from bson import ObjectId + try: + user = await User.find_one(User.id == ObjectId(identifier)) + if user: + logger.info(f"Verified user_id: {identifier}") + return str(user.id) + except: + pass + + logger.warning(f"Could not resolve user identifier: {identifier}") + return None + except Exception as e: + logger.error(f"Error resolving user identifier '{identifier}': {e}") + return None + + +@mcp.tool(description="List all conversations. Returns conversation_id, title, summary, created_at, client_id, segment_count, memory_count, and has_audio. Supports date filtering and pagination.") +async def list_conversations( + limit: int = 20, + offset: int = 0, + order_by: str = "created_at_desc", + start_date: Optional[str] = None, + end_date: Optional[str] = None +) -> str: + """ + List conversations with optional date filtering. 
+ + Args: + limit: Maximum number of conversations to return (default: 20, max: 100) + offset: Number of conversations to skip for pagination (default: 0) + order_by: Sort order - "created_at_desc" (newest first) or "created_at_asc" (oldest first) + start_date: Optional ISO 8601 date string (e.g., "2025-01-01T00:00:00Z") - filter conversations after this date + end_date: Optional ISO 8601 date string (e.g., "2025-12-31T23:59:59Z") - filter conversations before this date + + Returns: + JSON string with list of conversations and pagination info + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Validate and limit parameters + limit = min(max(1, limit), 100) # Clamp between 1 and 100 + offset = max(0, offset) + + # Build base query + # If uid is "all", return all conversations (temporary for development) + # In the future, this will filter by speaker identity + if uid == "all": + query = Conversation.find_all() + else: + query = Conversation.find(Conversation.user_id == uid) + + # Apply date filtering if provided + from datetime import datetime + + if start_date: + try: + start_dt = datetime.fromisoformat(start_date.replace('Z', '+00:00')) + query = query.find(Conversation.start_datetime >= start_dt) + except ValueError as e: + logger.warning(f"Invalid start_date format: {start_date}, error: {e}") + return json.dumps({"error": f"Invalid start_date format: {start_date}. Use ISO 8601 format."}, indent=2) + + if end_date: + try: + end_dt = datetime.fromisoformat(end_date.replace('Z', '+00:00')) + query = query.find(Conversation.start_datetime <= end_dt) + except ValueError as e: + logger.warning(f"Invalid end_date format: {end_date}, error: {e}") + return json.dumps({"error": f"Invalid end_date format: {end_date}. 
Use ISO 8601 format."}, indent=2) + + # Get total count with same filters + total_count = await query.count() + + # Apply sorting + if order_by == "created_at_asc": + query = query.sort(Conversation.start_datetime) + else: # Default to newest first + query = query.sort(-Conversation.start_datetime) + + # Apply pagination + conversations = await query.skip(offset).limit(limit).to_list() + + # Format conversations for response + formatted_convs = [] + for conv in conversations: + + formatted_convs.append({ + "conversation_id": conv.conversation_id, + "title": conv.title, + "summary": conv.summary, + "start_datetime": conv.start_datetime.isoformat(), + "end_datetime": conv.end_datetime.isoformat() if conv.end_datetime else None, + "segment_count": len(conv.segments), + "memory_count": conv.memory_count, + "client_id": conv.client_id, + }) + + + result = { + "conversations": formatted_convs, + "pagination": { + "total": total_count, + "limit": limit, + "offset": offset, + "returned": len(formatted_convs), + "has_more": (offset + len(formatted_convs)) < total_count + } + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.exception(f"Error listing conversations: {e}") + return json.dumps({"error": f"Failed to list conversations: {str(e)}"}, indent=2) + + +@mcp.tool(description="Get detailed information about a specific conversation including full transcript, speaker segments, memories, and version history. Use the conversation_id from list_conversations.") +async def get_conversation(conversation_id: str) -> str: + """ + Get detailed conversation data. 
+ + Args: + conversation_id: The unique conversation identifier + + Returns: + JSON string with complete conversation details + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Format conversation data with explicit fields + conv_data = { + # Core identifiers + "conversation_id": conversation.conversation_id, + "audio_uuid": conversation.audio_uuid, + "user_id": conversation.user_id, + "client_id": conversation.client_id, + + # Metadata + "start_datetime": conversation.start_datetime.isoformat(), + "end_datetime": conversation.end_datetime.isoformat() if conversation.end_datetime else None, + "title": conversation.title, + "summary": conversation.summary, + # "detailed_summary": conversation.detailed_summary, + + # Transcript data + "transcript": conversation.transcript, + + # Memory data + "memory_count": conversation.memory_count, + + # Audio paths + "has_audio": bool(conversation.audio_path), + "has_cropped_audio": bool(conversation.cropped_audio_path), + + # Version information + "active_transcript_version": conversation.active_transcript_version, + "active_memory_version": conversation.active_memory_version, + "transcript_versions_count": len(conversation.transcript_versions), + "memory_versions_count": len(conversation.memory_versions) + } + + return json.dumps(conv_data, indent=2) + + except Exception as e: + logger.exception(f"Error getting conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get conversation: 
{str(e)}"}, indent=2) + + +@mcp.tool(description="Get speaker segments from a conversation. Returns detailed timing and speaker information for each segment of the transcript.") +async def get_segments_from_conversation(conversation_id: str) -> str: + """ + Get speaker segments from a conversation. + + Args: + conversation_id: The unique conversation identifier + + Returns: + JSON string with speaker segments including timing and text + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Format segments + segments_data = { + "conversation_id": conversation_id, + "segment_count": len(conversation.segments), + "segments": [ + { + "start": seg.start, + "end": seg.end, + "duration": seg.end - seg.start, + "text": seg.text, + "speaker": seg.speaker, + "confidence": seg.confidence + } for seg in conversation.segments + ] + } + + return json.dumps(segments_data, indent=2) + + except Exception as e: + logger.exception(f"Error getting segments for conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get segments: {str(e)}"}, indent=2) + + +@mcp.resource(uri="conversation://{conversation_id}/audio", name="Conversation Audio", description="Get the audio file for a conversation") +async def get_conversation_audio(conversation_id: str) -> str: + """ + Get audio file for a conversation. 
+ + Args: + conversation_id: The unique conversation identifier + + Returns: + Base64-encoded audio data with metadata + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Default to regular audio (not cropped) + audio_type = "audio" + + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Get the appropriate audio path + if audio_type == "cropped_audio": + audio_path = conversation.cropped_audio_path + if not audio_path: + return json.dumps({"error": "No cropped audio available for this conversation"}, indent=2) + else: # Default to regular audio + audio_path = conversation.audio_path + if not audio_path: + return json.dumps({"error": "No audio file available for this conversation"}, indent=2) + + # Resolve full path + full_path = CHUNK_DIR / audio_path + + if not full_path.exists(): + return json.dumps({"error": f"Audio file not found at path: {audio_path}"}, indent=2) + + # Read and encode audio file + with open(full_path, "rb") as f: + audio_data = f.read() + + audio_base64 = base64.b64encode(audio_data).decode('utf-8') + + result = { + "conversation_id": conversation_id, + "audio_type": audio_type, + "file_path": str(audio_path), + "file_size_bytes": len(audio_data), + "mime_type": "audio/wav", # Friend-Lite stores audio as WAV + "audio_base64": audio_base64 + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.exception(f"Error getting audio for conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get audio: {str(e)}"}, indent=2) + + 
+@mcp.resource(uri="conversation://{conversation_id}/cropped_audio", name="Conversation Cropped Audio", description="Get the cropped (speech-only) audio file for a conversation") +async def get_conversation_cropped_audio(conversation_id: str) -> str: + """ + Get cropped audio file for a conversation. + + Args: + conversation_id: The unique conversation identifier + + Returns: + Base64-encoded cropped audio data with metadata + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Get cropped audio path + audio_path = conversation.cropped_audio_path + if not audio_path: + return json.dumps({"error": "No cropped audio available for this conversation"}, indent=2) + + # Resolve full path + full_path = CHUNK_DIR / audio_path + + if not full_path.exists(): + return json.dumps({"error": f"Audio file not found at path: {audio_path}"}, indent=2) + + # Read and encode audio file + with open(full_path, "rb") as f: + audio_data = f.read() + + audio_base64 = base64.b64encode(audio_data).decode('utf-8') + + result = { + "conversation_id": conversation_id, + "audio_type": "cropped_audio", + "file_path": str(audio_path), + "file_size_bytes": len(audio_data), + "mime_type": "audio/wav", + "audio_base64": audio_base64 + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.exception(f"Error getting cropped audio for conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get cropped audio: {str(e)}"}, indent=2) + + 
+@mcp_router.get("/conversations/sse") +async def handle_sse(request: Request): + """ + Handle SSE connections with Bearer token authentication. + + The access token should be provided in the Authorization header: + Authorization: Bearer + + Note: For development, this bypasses user authentication and returns all conversations. + In the future, this will validate speaker identity from conversations. + """ + from fastapi.responses import JSONResponse + + # Extract access token from Authorization header + auth_header = request.headers.get("authorization") + if not auth_header: + logger.error("No Authorization header provided") + return JSONResponse( + status_code=401, + content={"error": "Authorization header required. Use: Authorization: Bearer "} + ) + + # Parse Bearer token + parts = auth_header.split() + if len(parts) != 2 or parts[0].lower() != "bearer": + logger.error(f"Invalid Authorization header format: {auth_header}") + return JSONResponse( + status_code=401, + content={"error": "Invalid Authorization header. 
Use format: Authorization: Bearer "} + ) + + access_token = parts[1] + if not access_token: + logger.error("Empty access token") + return JSONResponse( + status_code=401, + content={"error": "Access token cannot be empty"} + ) + + # For now, use "all" as the user_id to bypass filtering + # This will be replaced with speaker-based permissions later + logger.info(f"MCP connection established with access token: {access_token[:min(8, len(access_token))]}...") + user_token = user_id_var.set("all") + + try: + # Handle SSE connection + async with sse.connect_sse( + request.scope, + request.receive, + request._send, + ) as (read_stream, write_stream): + await mcp._mcp_server.run( + read_stream, + write_stream, + mcp._mcp_server.create_initialization_options(), + ) + finally: + # Clean up context variables + user_id_var.reset(user_token) + + +@mcp_router.post("/messages/") +async def handle_get_message(request: Request): + return await handle_post_message(request) + + +@mcp_router.post("/conversations/sse/{user_id}/messages/") +async def handle_post_message_with_user(request: Request): + return await handle_post_message(request) + + +async def handle_post_message(request: Request): + """Handle POST messages for SSE""" + try: + body = await request.body() + + # Create a simple receive function that returns the body + async def receive(): + return {"type": "http.request", "body": body, "more_body": False} + + # Create a simple send function that does nothing + async def send(message): + return {} + + # Call handle_post_message with the correct arguments + await sse.handle_post_message(request.scope, receive, send) + + # Return a success response + return {"status": "ok"} + finally: + pass + + +def setup_mcp_server(app: FastAPI): + """Setup MCP server with the FastAPI application""" + mcp._mcp_server.name = "friend-lite-conversations" + + # Include MCP router in the FastAPI app + app.include_router(mcp_router) + + logger.info("Friend-Lite MCP server initialized with 
conversation tools") diff --git a/backends/advanced/src/advanced_omi_backend/services/memory/config.py b/backends/advanced/src/advanced_omi_backend/services/memory/config.py index f3943f29..1bf0908b 100644 --- a/backends/advanced/src/advanced_omi_backend/services/memory/config.py +++ b/backends/advanced/src/advanced_omi_backend/services/memory/config.py @@ -141,8 +141,13 @@ def create_mycelia_config( return config -def build_memory_config_from_env() -> MemoryConfig: - """Build memory configuration from environment variables and YAML config.""" +def build_memory_config_from_env(allow_missing_keys: bool = False) -> MemoryConfig: + """Build memory configuration from environment variables and YAML config. + + Args: + allow_missing_keys: If True, allow missing API keys and disable features gracefully. + If False, raise errors when required keys are missing. + """ try: # Determine memory provider memory_provider = os.getenv("MEMORY_PROVIDER", "chronicle").lower() @@ -226,25 +231,30 @@ def build_memory_config_from_env() -> MemoryConfig: if llm_provider == "openai": openai_api_key = os.getenv("OPENAI_API_KEY") if not openai_api_key: - raise ValueError("OPENAI_API_KEY required for OpenAI provider") - - # Use environment variables for model, fall back to config, then defaults - model = os.getenv("OPENAI_MODEL") or memory_config.get("llm_settings", {}).get("model") or "gpt-4o-mini" - embedding_model = memory_config.get("llm_settings", {}).get("embedding_model") or "text-embedding-3-small" - base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1") - memory_logger.info(f"πŸ”§ Memory config: LLM={model}, Embedding={embedding_model}, Base URL={base_url}") - - llm_config = create_openai_config( - api_key=openai_api_key, - model=model, - embedding_model=embedding_model, - base_url=base_url, - temperature=memory_config.get("llm_settings", {}).get("temperature", 0.1), - max_tokens=memory_config.get("llm_settings", {}).get("max_tokens", 2000) - ) - llm_provider_enum = 
LLMProvider.OPENAI - embedding_dims = get_embedding_dims(llm_config) - memory_logger.info(f"πŸ”§ Setting Embedder dims {embedding_dims}") + if allow_missing_keys: + memory_logger.warning("OPENAI_API_KEY not set - memory extraction will be disabled") + llm_config = None + llm_provider_enum = None + else: + raise ValueError("OPENAI_API_KEY required for OpenAI provider") + else: + # Use environment variables for model, fall back to config, then defaults + model = os.getenv("OPENAI_MODEL") or memory_config.get("llm_settings", {}).get("model") or "gpt-4o-mini" + embedding_model = memory_config.get("llm_settings", {}).get("embedding_model") or "text-embedding-3-small" + base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1") + memory_logger.info(f"πŸ”§ Memory config: LLM={model}, Embedding={embedding_model}, Base URL={base_url}") + + llm_config = create_openai_config( + api_key=openai_api_key, + model=model, + embedding_model=embedding_model, + base_url=base_url, + temperature=memory_config.get("llm_settings", {}).get("temperature", 0.1), + max_tokens=memory_config.get("llm_settings", {}).get("max_tokens", 2000) + ) + llm_provider_enum = LLMProvider.OPENAI + embedding_dims = get_embedding_dims(llm_config) + memory_logger.info(f"πŸ”§ Setting Embedder dims {embedding_dims}") elif llm_provider == "ollama": base_url = os.getenv("OLLAMA_BASE_URL") diff --git a/backends/advanced/src/advanced_omi_backend/services/memory/service_factory.py b/backends/advanced/src/advanced_omi_backend/services/memory/service_factory.py index 5607d8ff..f5ca9268 100644 --- a/backends/advanced/src/advanced_omi_backend/services/memory/service_factory.py +++ b/backends/advanced/src/advanced_omi_backend/services/memory/service_factory.py @@ -7,6 +7,7 @@ import asyncio import logging +import os import threading from typing import Optional @@ -92,7 +93,9 @@ def get_memory_service() -> MemoryServiceBase: if _memory_service is None: try: # Build configuration from environment - config = 
build_memory_config_from_env() + # Check for graceful degradation mode + allow_missing_keys = os.getenv("ALLOW_MISSING_API_KEYS", "false").lower() == "true" + config = build_memory_config_from_env(allow_missing_keys=allow_missing_keys) # Create appropriate service implementation _memory_service = create_memory_service(config) diff --git a/backends/advanced/src/advanced_omi_backend/services/transcription/__init__.py b/backends/advanced/src/advanced_omi_backend/services/transcription/__init__.py index 1c4a3b59..48f43eb5 100644 --- a/backends/advanced/src/advanced_omi_backend/services/transcription/__init__.py +++ b/backends/advanced/src/advanced_omi_backend/services/transcription/__init__.py @@ -27,6 +27,7 @@ def get_transcription_provider( provider_name: Optional[str] = None, mode: Optional[str] = None, + allow_missing_keys: bool = False, ) -> Optional[BaseTranscriptionProvider]: """ Factory function to get the appropriate transcription provider. @@ -35,12 +36,16 @@ def get_transcription_provider( provider_name: Name of the provider ('deepgram', 'parakeet'). If None, will auto-select based on available configuration. mode: Processing mode ('streaming', 'batch'). If None, defaults to 'batch'. + allow_missing_keys: If True, return None instead of raising error when + provider is requested but API key is not configured. + Enables graceful degradation mode. Returns: An instance of BaseTranscriptionProvider, or None if no provider is configured. Raises: - RuntimeError: If a specific provider is requested but not properly configured. + RuntimeError: If a specific provider is requested but not properly configured + (only when allow_missing_keys=False). 
""" deepgram_key = os.getenv("DEEPGRAM_API_KEY") parakeet_url = os.getenv("PARAKEET_ASR_URL") @@ -55,6 +60,11 @@ def get_transcription_provider( # Handle specific provider requests if provider_name == "deepgram": if not deepgram_key: + if allow_missing_keys: + logger.debug( + "Deepgram provider requested but DEEPGRAM_API_KEY not configured (graceful degradation mode)" + ) + return None raise RuntimeError( "Deepgram transcription provider requested but DEEPGRAM_API_KEY not configured" ) @@ -66,6 +76,11 @@ def get_transcription_provider( elif provider_name == "parakeet": if not parakeet_url: + if allow_missing_keys: + logger.debug( + "Parakeet provider requested but PARAKEET_ASR_URL not configured (graceful degradation mode)" + ) + return None raise RuntimeError( "Parakeet ASR provider requested but PARAKEET_ASR_URL not configured" ) @@ -80,8 +95,8 @@ def get_transcription_provider( # Check TRANSCRIPTION_PROVIDER environment variable first env_provider = os.getenv("TRANSCRIPTION_PROVIDER") if env_provider: - # Recursively call with the specified provider - return get_transcription_provider(env_provider, mode) + # Recursively call with the specified provider (pass allow_missing_keys through) + return get_transcription_provider(env_provider, mode, allow_missing_keys) # Auto-select: prefer Deepgram if available, fallback to Parakeet if deepgram_key: diff --git a/backends/advanced/src/advanced_omi_backend/settings_manager.py b/backends/advanced/src/advanced_omi_backend/settings_manager.py new file mode 100644 index 00000000..67873334 --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/settings_manager.py @@ -0,0 +1,485 @@ +""" +Dynamic settings manager with MongoDB storage and caching. + +Settings are loaded from environment variables on first initialization, +then stored in MongoDB. Subsequent loads use MongoDB as the source of truth. +Changes take effect within the cache TTL (default: 5 seconds). 
+""" + +import logging +import os +import time +from typing import Dict, Any, Optional, TypeVar, Type + +from motor.motor_asyncio import AsyncIOMotorDatabase + +from advanced_omi_backend.settings_models import ( + AllSettings, + ApiKeysSettings, + AudioProcessingSettings, + ConversationSettings, + DiarizationSettings, + InfrastructureSettings, + LLMSettings, + MiscSettings, + NetworkSettings, + ProviderSettings, + SpeechDetectionSettings, + TranscriptionProvider, +) + +logger = logging.getLogger(__name__) + +T = TypeVar('T') + + +class SettingsManager: + """ + Manages dynamic application settings with MongoDB storage and caching. + + Settings are stored in the 'application_settings' collection with documents: + { + "_id": "speech_detection", # Setting category + "values": {...}, # Pydantic model dict + "updated_at": datetime, + "updated_by": "user_id or 'system'" + } + """ + + def __init__(self, db: AsyncIOMotorDatabase, cache_ttl: int = 5): + """ + Initialize settings manager. + + Args: + db: MongoDB database instance + cache_ttl: Cache TTL in seconds (default: 5) + """ + self.db = db + self.settings_col = db["application_settings"] + self.cache_ttl = cache_ttl + + # Cache storage + self._cache: Dict[str, Any] = {} + self._cache_time: Dict[str, float] = {} + + # Initialization flag + self._initialized = False + + async def initialize(self): + """ + Initialize settings from environment variables if not already in MongoDB. + + This is called once on application startup to migrate existing env vars + to the database. 
+ """ + if self._initialized: + return + + logger.info("Initializing settings manager...") + + # Check if settings already exist in DB + count = await self.settings_col.count_documents({}) + + if count == 0: + # First time setup - load from env vars + logger.info("No settings found in database, initializing from environment variables") + await self._initialize_from_env() + else: + logger.info(f"Found {count} setting categories in database") + + self._initialized = True + + async def _initialize_from_env(self): + """Initialize all settings from environment variables.""" + + # Speech detection + speech_detection = SpeechDetectionSettings( + min_words=int(os.getenv("SPEECH_DETECTION_MIN_WORDS", "5")), + min_confidence=float(os.getenv("SPEECH_DETECTION_MIN_CONFIDENCE", "0.5")), + min_duration=float(os.getenv("SPEECH_DETECTION_MIN_DURATION", "10.0")), + ) + await self._save_to_db("speech_detection", speech_detection.dict(), "system") + + # Conversation settings + conversation = ConversationSettings( + transcription_buffer_seconds=float(os.getenv("TRANSCRIPTION_BUFFER_SECONDS", "120")), + speech_inactivity_threshold=float(os.getenv("SPEECH_INACTIVITY_THRESHOLD_SECONDS", "60")), + new_conversation_timeout_minutes=float(os.getenv("NEW_CONVERSATION_TIMEOUT_MINUTES", "1.5")), + record_only_enrolled_speakers=os.getenv("RECORD_ONLY_ENROLLED_SPEAKERS", "true").lower() == "true", + ) + await self._save_to_db("conversation", conversation.dict(), "system") + + # Audio processing + audio_processing = AudioProcessingSettings( + audio_cropping_enabled=os.getenv("AUDIO_CROPPING_ENABLED", "true").lower() == "true", + min_speech_segment_duration=float(os.getenv("MIN_SPEECH_SEGMENT_DURATION", "1.0")), + cropping_context_padding=float(os.getenv("CROPPING_CONTEXT_PADDING", "0.1")), + ) + await self._save_to_db("audio_processing", audio_processing.dict(), "system") + + # Diarization (load from existing config or defaults) + from advanced_omi_backend.config import _diarization_settings + if 
_diarization_settings: + diarization = DiarizationSettings(**_diarization_settings) + else: + diarization = DiarizationSettings() + await self._save_to_db("diarization", diarization.dict(), "system") + + # LLM settings + llm = LLMSettings( + llm_provider=os.getenv("LLM_PROVIDER", "openai"), + openai_model=os.getenv("OPENAI_MODEL", "gpt-4o-mini"), + chat_llm_model=os.getenv("CHAT_LLM_MODEL"), + chat_temperature=float(os.getenv("CHAT_TEMPERATURE", "0.7")), + ollama_model=os.getenv("OLLAMA_MODEL", "llama3.1:latest"), + ollama_embedder_model=os.getenv("OLLAMA_EMBEDDER_MODEL", "nomic-embed-text:latest"), + ) + await self._save_to_db("llm", llm.dict(), "system") + + # Provider settings + transcription_provider = os.getenv("TRANSCRIPTION_PROVIDER", "auto") + # Map empty string to "auto" + if not transcription_provider: + transcription_provider = "auto" + + providers = ProviderSettings( + memory_provider=os.getenv("MEMORY_PROVIDER", "chronicle"), + transcription_provider=transcription_provider, + ) + await self._save_to_db("providers", providers.dict(), "system") + + # Network settings + network = NetworkSettings( + host_ip=os.getenv("HOST_IP", "localhost"), + backend_public_port=int(os.getenv("BACKEND_PUBLIC_PORT", "8000")), + webui_port=int(os.getenv("WEBUI_PORT", "5173")), + cors_origins=os.getenv("CORS_ORIGINS", "http://localhost:5173,http://localhost:3000"), + ) + await self._save_to_db("network", network.dict(), "system") + + # Infrastructure settings + from advanced_omi_backend.app_config import get_app_config + config = get_app_config() + infrastructure = InfrastructureSettings( + mongodb_uri=config.mongodb_uri, + mongodb_database=config.mongodb_database, + redis_url=config.redis_url, + qdrant_base_url=config.qdrant_base_url, + qdrant_port=config.qdrant_port, + neo4j_host=os.getenv("NEO4J_HOST", "neo4j-mem0"), + neo4j_user=os.getenv("NEO4J_USER", "neo4j"), + ) + await self._save_to_db("infrastructure", infrastructure.dict(), "system") + + # Misc settings + misc = 
MiscSettings( + debug_dir=os.getenv("DEBUG_DIR", "./data/debug_dir"), + langfuse_enable_telemetry=os.getenv("LANGFUSE_ENABLE_TELEMETRY", "false").lower() == "true", + ) + await self._save_to_db("misc", misc.dict(), "system") + + # API Keys settings - read from .env.api-keys file first, fallback to env vars + from advanced_omi_backend.utils.api_keys_manager import read_api_keys_from_file + + file_keys = read_api_keys_from_file(".env.api-keys") + api_keys = ApiKeysSettings( + openai_api_key=file_keys.get("openai_api_key") or os.getenv("OPENAI_API_KEY"), + deepgram_api_key=file_keys.get("deepgram_api_key") or os.getenv("DEEPGRAM_API_KEY"), + mistral_api_key=file_keys.get("mistral_api_key") or os.getenv("MISTRAL_API_KEY"), + hf_token=file_keys.get("hf_token") or os.getenv("HF_TOKEN"), + langfuse_public_key=file_keys.get("langfuse_public_key") or os.getenv("LANGFUSE_PUBLIC_KEY"), + langfuse_secret_key=file_keys.get("langfuse_secret_key") or os.getenv("LANGFUSE_SECRET_KEY"), + ngrok_authtoken=file_keys.get("ngrok_authtoken") or os.getenv("NGROK_AUTHTOKEN"), + ) + await self._save_to_db("api_keys", api_keys.dict(), "system") + + logger.info("βœ… Initialized all settings from environment variables") + + async def _get_from_cache_or_db( + self, + key: str, + model_class: Type[T], + ) -> T: + """ + Get settings from cache or database. 
+ + Args: + key: Settings category key + model_class: Pydantic model class + + Returns: + Instance of model_class with current settings + """ + # Check cache freshness + if key in self._cache: + age = time.time() - self._cache_time.get(key, 0) + if age < self.cache_ttl: + return self._cache[key] + + # Load from DB + doc = await self.settings_col.find_one({"_id": key}) + + if doc and "values" in doc: + settings = model_class(**doc["values"]) + else: + # Use defaults if not found + logger.warning(f"Settings '{key}' not found in database, using defaults") + settings = model_class() + + # Update cache + self._cache[key] = settings + self._cache_time[key] = time.time() + + return settings + + async def _save_to_db(self, key: str, values: dict, updated_by: str = "user"): + """ + Save settings to database. + + Args: + key: Settings category key + values: Settings values as dict + updated_by: User ID or 'system' + """ + from datetime import datetime + + await self.settings_col.update_one( + {"_id": key}, + { + "$set": { + "values": values, + "updated_at": datetime.utcnow(), + "updated_by": updated_by, + } + }, + upsert=True, + ) + + async def _update_settings( + self, + key: str, + settings: T, + updated_by: str = "user", + ): + """ + Update settings in database and cache. 
+ + Args: + key: Settings category key + settings: Pydantic model instance + updated_by: User ID or 'system' + """ + # Save to DB + await self._save_to_db(key, settings.dict(), updated_by) + + # Update cache immediately + self._cache[key] = settings + self._cache_time[key] = time.time() + + logger.info(f"Updated settings '{key}' (by: {updated_by})") + + # Speech Detection Settings + + async def get_speech_detection(self) -> SpeechDetectionSettings: + """Get speech detection settings.""" + return await self._get_from_cache_or_db("speech_detection", SpeechDetectionSettings) + + async def update_speech_detection( + self, + settings: SpeechDetectionSettings, + updated_by: str = "user", + ): + """Update speech detection settings.""" + await self._update_settings("speech_detection", settings, updated_by) + + # Conversation Settings + + async def get_conversation(self) -> ConversationSettings: + """Get conversation management settings.""" + return await self._get_from_cache_or_db("conversation", ConversationSettings) + + async def update_conversation( + self, + settings: ConversationSettings, + updated_by: str = "user", + ): + """Update conversation management settings.""" + await self._update_settings("conversation", settings, updated_by) + + # Audio Processing Settings + + async def get_audio_processing(self) -> AudioProcessingSettings: + """Get audio processing settings.""" + return await self._get_from_cache_or_db("audio_processing", AudioProcessingSettings) + + async def update_audio_processing( + self, + settings: AudioProcessingSettings, + updated_by: str = "user", + ): + """Update audio processing settings.""" + await self._update_settings("audio_processing", settings, updated_by) + + # Diarization Settings + + async def get_diarization(self) -> DiarizationSettings: + """Get diarization settings.""" + return await self._get_from_cache_or_db("diarization", DiarizationSettings) + + async def update_diarization( + self, + settings: DiarizationSettings, + updated_by: 
str = "user", + ): + """Update diarization settings.""" + await self._update_settings("diarization", settings, updated_by) + + # LLM Settings + + async def get_llm(self) -> LLMSettings: + """Get LLM settings.""" + return await self._get_from_cache_or_db("llm", LLMSettings) + + async def update_llm( + self, + settings: LLMSettings, + updated_by: str = "user", + ): + """Update LLM settings.""" + await self._update_settings("llm", settings, updated_by) + + # Provider Settings + + async def get_providers(self) -> ProviderSettings: + """Get provider settings.""" + return await self._get_from_cache_or_db("providers", ProviderSettings) + + async def update_providers( + self, + settings: ProviderSettings, + updated_by: str = "user", + ): + """Update provider settings.""" + await self._update_settings("providers", settings, updated_by) + + # Network Settings + + async def get_network(self) -> NetworkSettings: + """Get network settings.""" + return await self._get_from_cache_or_db("network", NetworkSettings) + + async def update_network( + self, + settings: NetworkSettings, + updated_by: str = "user", + ): + """Update network settings.""" + await self._update_settings("network", settings, updated_by) + + # Infrastructure Settings + + async def get_infrastructure(self) -> InfrastructureSettings: + """Get infrastructure settings.""" + return await self._get_from_cache_or_db("infrastructure", InfrastructureSettings) + + async def update_infrastructure( + self, + settings: InfrastructureSettings, + updated_by: str = "user", + ): + """Update infrastructure settings.""" + await self._update_settings("infrastructure", settings, updated_by) + + # Misc Settings + + async def get_misc(self) -> MiscSettings: + """Get miscellaneous settings.""" + return await self._get_from_cache_or_db("misc", MiscSettings) + + async def update_misc( + self, + settings: MiscSettings, + updated_by: str = "user", + ): + """Update miscellaneous settings.""" + await self._update_settings("misc", settings, 
updated_by) + + # API Keys Settings + + async def get_api_keys(self) -> ApiKeysSettings: + """Get API keys settings.""" + return await self._get_from_cache_or_db("api_keys", ApiKeysSettings) + + async def update_api_keys( + self, + settings: ApiKeysSettings, + updated_by: str = "user", + ): + """Update API keys settings.""" + await self._update_settings("api_keys", settings, updated_by) + + # Combined Settings + + async def get_all_settings(self) -> AllSettings: + """Get all settings combined.""" + return AllSettings( + speech_detection=await self.get_speech_detection(), + conversation=await self.get_conversation(), + audio_processing=await self.get_audio_processing(), + diarization=await self.get_diarization(), + llm=await self.get_llm(), + providers=await self.get_providers(), + network=await self.get_network(), + infrastructure=await self.get_infrastructure(), + misc=await self.get_misc(), + api_keys=await self.get_api_keys(), + ) + + async def update_all_settings( + self, + settings: AllSettings, + updated_by: str = "user", + ): + """Update all settings at once.""" + await self.update_speech_detection(settings.speech_detection, updated_by) + await self.update_conversation(settings.conversation, updated_by) + await self.update_audio_processing(settings.audio_processing, updated_by) + await self.update_diarization(settings.diarization, updated_by) + await self.update_llm(settings.llm, updated_by) + await self.update_providers(settings.providers, updated_by) + await self.update_network(settings.network, updated_by) + await self.update_infrastructure(settings.infrastructure, updated_by) + await self.update_misc(settings.misc, updated_by) + await self.update_api_keys(settings.api_keys, updated_by) + + def invalidate_cache(self, key: Optional[str] = None): + """ + Force settings to reload from database on next access. 
+ + Args: + key: Specific settings category to invalidate, or None for all + """ + if key: + self._cache_time[key] = 0 + logger.info(f"Invalidated cache for '{key}'") + else: + self._cache_time.clear() + logger.info("Invalidated all settings cache") + + +# Global settings manager instance (initialized in main.py) +_settings_manager: Optional[SettingsManager] = None + + +def init_settings_manager(db: AsyncIOMotorDatabase): + """Initialize the global settings manager.""" + global _settings_manager + _settings_manager = SettingsManager(db) + return _settings_manager + + +def get_settings_manager() -> SettingsManager: + """Get the global settings manager instance.""" + if _settings_manager is None: + raise RuntimeError("Settings manager not initialized. Call init_settings_manager() first.") + return _settings_manager diff --git a/backends/advanced/src/advanced_omi_backend/settings_models.py b/backends/advanced/src/advanced_omi_backend/settings_models.py new file mode 100644 index 00000000..68742f1e --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/settings_models.py @@ -0,0 +1,320 @@ +""" +Pydantic models for dynamic application settings. + +These settings can be changed by users through the UI and take effect +without requiring a server restart (within the cache TTL). 
+""" + +from enum import Enum +from typing import Optional + +from pydantic import BaseModel, Field, validator + + +class LLMProvider(str, Enum): + """Supported LLM providers.""" + OPENAI = "openai" + OLLAMA = "ollama" + + +class MemoryProvider(str, Enum): + """Supported memory providers.""" + CHRONICLE = "chronicle" + OPENMEMORY_MCP = "openmemory_mcp" + MYCELIA = "mycelia" + + +class TranscriptionProvider(str, Enum): + """Supported transcription providers.""" + DEEPGRAM = "deepgram" + MISTRAL = "mistral" + PARAKEET = "parakeet" + AUTO = "auto" + + +class DiarizationSource(str, Enum): + """Supported diarization sources.""" + PYANNOTE = "pyannote" + DEEPGRAM = "deepgram" + + +class SpeechDetectionSettings(BaseModel): + """Speech detection settings for conversation creation.""" + + min_words: int = Field( + default=5, + ge=1, + le=100, + description="Minimum words required to create a conversation" + ) + min_confidence: float = Field( + default=0.5, + ge=0.0, + le=1.0, + description="Minimum word confidence threshold (0.0-1.0)" + ) + min_duration: float = Field( + default=10.0, + ge=0.0, + description="Minimum speech duration in seconds" + ) + + +class ConversationSettings(BaseModel): + """Conversation management settings.""" + + transcription_buffer_seconds: float = Field( + default=120.0, + ge=10.0, + le=600.0, + description="Trigger transcription every N seconds" + ) + speech_inactivity_threshold: float = Field( + default=60.0, + ge=10.0, + le=600.0, + description="Close conversation after N seconds of no speech" + ) + new_conversation_timeout_minutes: float = Field( + default=1.5, + ge=0.1, + le=60.0, + description="Timeout for creating new conversations (minutes)" + ) + record_only_enrolled_speakers: bool = Field( + default=True, + description="Only create conversations when enrolled speakers are detected" + ) + + +class AudioProcessingSettings(BaseModel): + """Audio processing settings.""" + + audio_cropping_enabled: bool = Field( + default=True, + 
description="Enable automatic silence removal from audio" + ) + min_speech_segment_duration: float = Field( + default=1.0, + ge=0.1, + le=10.0, + description="Minimum speech segment duration in seconds" + ) + cropping_context_padding: float = Field( + default=0.1, + ge=0.0, + le=1.0, + description="Context padding around speech segments" + ) + + +class DiarizationSettings(BaseModel): + """Speaker diarization settings.""" + + diarization_source: DiarizationSource = Field( + default=DiarizationSource.PYANNOTE, + description="Diarization service to use" + ) + similarity_threshold: float = Field( + default=0.15, + ge=0.0, + le=1.0, + description="Speaker similarity threshold" + ) + min_duration: float = Field( + default=0.5, + ge=0.0, + description="Minimum segment duration" + ) + collar: float = Field( + default=2.0, + ge=0.0, + description="Collar for segment merging (seconds)" + ) + min_duration_off: float = Field( + default=1.5, + ge=0.0, + description="Minimum silence duration between segments" + ) + min_speakers: int = Field( + default=2, + ge=1, + le=10, + description="Minimum number of speakers" + ) + max_speakers: int = Field( + default=6, + ge=1, + le=20, + description="Maximum number of speakers" + ) + + @validator('max_speakers') + def validate_max_speakers(cls, v, values): + """Ensure max_speakers >= min_speakers.""" + if 'min_speakers' in values and v < values['min_speakers']: + raise ValueError('max_speakers must be >= min_speakers') + return v + + +class LLMSettings(BaseModel): + """LLM provider and model settings.""" + + llm_provider: LLMProvider = Field( + default=LLMProvider.OPENAI, + description="LLM provider to use" + ) + openai_model: str = Field( + default="gpt-4o-mini", + description="OpenAI model for general tasks" + ) + chat_llm_model: Optional[str] = Field( + default=None, + description="Model for chat (defaults to openai_model if not set)" + ) + chat_temperature: float = Field( + default=0.7, + ge=0.0, + le=2.0, + description="Temperature 
for chat responses" + ) + ollama_model: Optional[str] = Field( + default="llama3.1:latest", + description="Ollama model name" + ) + ollama_embedder_model: Optional[str] = Field( + default="nomic-embed-text:latest", + description="Ollama embedder model name" + ) + + +class ProviderSettings(BaseModel): + """Service provider selection settings.""" + + memory_provider: MemoryProvider = Field( + default=MemoryProvider.CHRONICLE, + description="Memory provider to use" + ) + transcription_provider: TranscriptionProvider = Field( + default=TranscriptionProvider.AUTO, + description="Transcription provider (auto-selects if 'auto')" + ) + + +class NetworkSettings(BaseModel): + """Network and public access settings.""" + + host_ip: str = Field( + default="localhost", + description="Public IP/hostname for browser access" + ) + backend_public_port: int = Field( + default=8000, + ge=1, + le=65535, + description="Backend API public port" + ) + webui_port: int = Field( + default=5173, + ge=1, + le=65535, + description="WebUI port" + ) + cors_origins: str = Field( + default="http://localhost:5173,http://localhost:3000,http://127.0.0.1:5173,http://127.0.0.1:3000", + description="Comma-separated list of CORS origins" + ) + + +class InfrastructureSettings(BaseModel): + """Core infrastructure service settings.""" + + mongodb_uri: str = Field( + default="mongodb://mongo:27017", + description="MongoDB connection URI" + ) + mongodb_database: str = Field( + default="friend-lite", + description="MongoDB database name" + ) + redis_url: str = Field( + default="redis://localhost:6379/0", + description="Redis connection URL" + ) + qdrant_base_url: str = Field( + default="qdrant", + description="Qdrant base URL/hostname" + ) + qdrant_port: str = Field( + default="6333", + description="Qdrant port" + ) + neo4j_host: str = Field( + default="neo4j-mem0", + description="Neo4j host" + ) + neo4j_user: str = Field( + default="neo4j", + description="Neo4j username" + ) + + +class MiscSettings(BaseModel): 
+ """Miscellaneous settings.""" + + debug_dir: str = Field( + default="./data/debug_dir", + description="Directory for debug files" + ) + langfuse_enable_telemetry: bool = Field( + default=False, + description="Enable Langfuse telemetry" + ) + + +class ApiKeysSettings(BaseModel): + """External service API keys.""" + + openai_api_key: Optional[str] = Field( + default=None, + description="OpenAI API Key" + ) + deepgram_api_key: Optional[str] = Field( + default=None, + description="Deepgram API Key" + ) + mistral_api_key: Optional[str] = Field( + default=None, + description="Mistral API Key" + ) + hf_token: Optional[str] = Field( + default=None, + description="HuggingFace Token" + ) + langfuse_public_key: Optional[str] = Field( + default=None, + description="Langfuse Public Key" + ) + langfuse_secret_key: Optional[str] = Field( + default=None, + description="Langfuse Secret Key" + ) + ngrok_authtoken: Optional[str] = Field( + default=None, + description="Ngrok Auth Token" + ) + + +class AllSettings(BaseModel): + """Combined model for all application settings.""" + + speech_detection: SpeechDetectionSettings = Field(default_factory=SpeechDetectionSettings) + conversation: ConversationSettings = Field(default_factory=ConversationSettings) + audio_processing: AudioProcessingSettings = Field(default_factory=AudioProcessingSettings) + diarization: DiarizationSettings = Field(default_factory=DiarizationSettings) + llm: LLMSettings = Field(default_factory=LLMSettings) + providers: ProviderSettings = Field(default_factory=ProviderSettings) + network: NetworkSettings = Field(default_factory=NetworkSettings) + infrastructure: InfrastructureSettings = Field(default_factory=InfrastructureSettings) + misc: MiscSettings = Field(default_factory=MiscSettings) + api_keys: ApiKeysSettings = Field(default_factory=ApiKeysSettings) diff --git a/backends/advanced/src/advanced_omi_backend/utils/api_keys_manager.py b/backends/advanced/src/advanced_omi_backend/utils/api_keys_manager.py new 
file mode 100644 index 00000000..1eca417d --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/utils/api_keys_manager.py @@ -0,0 +1,168 @@ +""" +API Keys Manager - Handle reading/writing API keys from file and database. +""" + +import logging +import os +from pathlib import Path +from typing import Dict, Optional + +logger = logging.getLogger(__name__) + + +def mask_api_key(key: Optional[str]) -> Optional[str]: + """ + Mask an API key for display purposes. + + Shows first 7 chars and last 4 chars, masks the middle. + Example: sk-1234567890abcdef -> sk-1234***cdef + """ + if not key or len(key) < 12: + return None + + return f"{key[:7]}****{key[-4:]}" + + +def read_api_keys_from_file(file_path: str = ".env.api-keys") -> Dict[str, Optional[str]]: + """ + Read API keys from .env.api-keys file. + + Returns: + Dictionary of API key values (not masked) + """ + keys = { + "openai_api_key": None, + "deepgram_api_key": None, + "mistral_api_key": None, + "hf_token": None, + "langfuse_public_key": None, + "langfuse_secret_key": None, + "ngrok_authtoken": None, + } + + # Check if file exists + if not os.path.exists(file_path): + logger.warning(f"API keys file not found: {file_path}") + return keys + + try: + with open(file_path, 'r') as f: + for line in f: + line = line.strip() + # Skip comments and empty lines + if not line or line.startswith('#'): + continue + + # Parse key=value + if '=' in line: + key, value = line.split('=', 1) + key = key.strip() + value = value.strip() + + # Map env var names to our field names + if key == "OPENAI_API_KEY" and value: + keys["openai_api_key"] = value + elif key == "DEEPGRAM_API_KEY" and value: + keys["deepgram_api_key"] = value + elif key == "MISTRAL_API_KEY" and value: + keys["mistral_api_key"] = value + elif key == "HF_TOKEN" and value: + keys["hf_token"] = value + elif key == "LANGFUSE_PUBLIC_KEY" and value: + keys["langfuse_public_key"] = value + elif key == "LANGFUSE_SECRET_KEY" and value: + keys["langfuse_secret_key"] = 
value + elif key == "NGROK_AUTHTOKEN" and value: + keys["ngrok_authtoken"] = value + + logger.info(f"Loaded API keys from {file_path}") + return keys + + except Exception as e: + logger.error(f"Error reading API keys file: {e}") + return keys + + +def write_api_keys_to_file(keys: Dict[str, Optional[str]], file_path: str = ".env.api-keys") -> bool: + """ + Write API keys to .env.api-keys file. + + Args: + keys: Dictionary of API key values + file_path: Path to the .env.api-keys file + + Returns: + True if successful, False otherwise + """ + try: + # Read template for structure/comments + template_path = f"{file_path}.template" + template_lines = [] + + if os.path.exists(template_path): + with open(template_path, 'r') as f: + template_lines = f.readlines() + + # Build output content + output_lines = [] + + if template_lines: + # Use template structure + for line in template_lines: + stripped = line.strip() + + # Keep comments and empty lines + if not stripped or stripped.startswith('#'): + output_lines.append(line) + continue + + # Parse key=value from template + if '=' in stripped: + key_name = stripped.split('=', 1)[0].strip() + + # Replace with actual values if provided + if key_name == "OPENAI_API_KEY": + value = keys.get("openai_api_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "DEEPGRAM_API_KEY": + value = keys.get("deepgram_api_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "MISTRAL_API_KEY": + value = keys.get("mistral_api_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "HF_TOKEN": + value = keys.get("hf_token", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "LANGFUSE_PUBLIC_KEY": + value = keys.get("langfuse_public_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "LANGFUSE_SECRET_KEY": + value = keys.get("langfuse_secret_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "NGROK_AUTHTOKEN": + value = 
keys.get("ngrok_authtoken", "") + output_lines.append(f"{key_name}={value}\n") + else: + # Keep other keys from template unchanged + output_lines.append(line) + else: + # No template - create simple format + output_lines.append("# API Keys\n\n") + output_lines.append(f"OPENAI_API_KEY={keys.get('openai_api_key', '')}\n") + output_lines.append(f"DEEPGRAM_API_KEY={keys.get('deepgram_api_key', '')}\n") + output_lines.append(f"MISTRAL_API_KEY={keys.get('mistral_api_key', '')}\n") + output_lines.append(f"HF_TOKEN={keys.get('hf_token', '')}\n") + output_lines.append(f"LANGFUSE_PUBLIC_KEY={keys.get('langfuse_public_key', '')}\n") + output_lines.append(f"LANGFUSE_SECRET_KEY={keys.get('langfuse_secret_key', '')}\n") + output_lines.append(f"NGROK_AUTHTOKEN={keys.get('ngrok_authtoken', '')}\n") + + # Write to file + with open(file_path, 'w') as f: + f.writelines(output_lines) + + logger.info(f"Wrote API keys to {file_path}") + return True + + except Exception as e: + logger.error(f"Error writing API keys file: {e}") + return False diff --git a/backends/advanced/webui/Dockerfile b/backends/advanced/webui/Dockerfile index 3b2f28d8..5004f822 100644 --- a/backends/advanced/webui/Dockerfile +++ b/backends/advanced/webui/Dockerfile @@ -1,5 +1,5 @@ # Multi-stage build for React app -FROM node:18-alpine AS build +FROM node:22-alpine AS build # Set working directory WORKDIR /app @@ -7,8 +7,8 @@ WORKDIR /app # Copy package files COPY package.json package-lock.json ./ -# Install dependencies -RUN npm ci +# Install dependencies (with legacy-peer-deps for react-gantt-timeline compatibility) +RUN npm install --legacy-peer-deps # Copy source code COPY . . @@ -17,9 +17,15 @@ COPY . . 
ARG VITE_ALLOWED_HOSTS ENV VITE_ALLOWED_HOSTS=${VITE_ALLOWED_HOSTS} +ARG VITE_BASE_PATH +ENV VITE_BASE_PATH=${VITE_BASE_PATH:-/} + ARG VITE_BACKEND_URL ENV VITE_BACKEND_URL=${VITE_BACKEND_URL} +# Debug: Print BASE_PATH value (forces cache invalidation when changed) +RUN echo "Building with VITE_BASE_PATH=${VITE_BASE_PATH}" + # Build the application RUN npm run build diff --git a/backends/advanced/webui/package-lock.json b/backends/advanced/webui/package-lock.json index ead72812..1090d0bb 100644 --- a/backends/advanced/webui/package-lock.json +++ b/backends/advanced/webui/package-lock.json @@ -32,7 +32,7 @@ "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.5", "postcss": "^8.4.32", - "sass-embedded": "^1.83.0", + "sass-embedded": "^1.80.7", "tailwindcss": "^3.3.0", "typescript": "^5.2.2", "vite": "^5.0.8" diff --git a/backends/advanced/webui/package.json b/backends/advanced/webui/package.json index b933d8db..250df867 100644 --- a/backends/advanced/webui/package.json +++ b/backends/advanced/webui/package.json @@ -34,7 +34,7 @@ "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.5", "postcss": "^8.4.32", - "sass-embedded": "^1.83.0", + "sass-embedded": "^1.80.7", "tailwindcss": "^3.3.0", "typescript": "^5.2.2", "vite": "^5.0.8" diff --git a/backends/advanced/webui/src/App.tsx b/backends/advanced/webui/src/App.tsx index fca59623..4c9add41 100644 --- a/backends/advanced/webui/src/App.tsx +++ b/backends/advanced/webui/src/App.tsx @@ -13,6 +13,7 @@ import System from './pages/System' import Upload from './pages/Upload' import Queue from './pages/Queue' import LiveRecord from './pages/LiveRecord' +import Settings from './pages/Settings' import ProtectedRoute from './components/auth/ProtectedRoute' import { ErrorBoundary, PageErrorBoundary } from './components/ErrorBoundary' @@ -89,6 +90,11 @@ function App() { } /> + + + + } /> diff --git a/backends/advanced/webui/src/components/header/HeaderRecordButton.tsx 
b/backends/advanced/webui/src/components/header/HeaderRecordButton.tsx new file mode 100644 index 00000000..491f2f95 --- /dev/null +++ b/backends/advanced/webui/src/components/header/HeaderRecordButton.tsx @@ -0,0 +1,124 @@ +import { useEffect, useRef } from 'react' +import { Mic, Square } from 'lucide-react' +import { useSimpleAudioRecording } from '../../hooks/useSimpleAudioRecording' + +export default function HeaderRecordButton() { + const recording = useSimpleAudioRecording() + const canvasRef = useRef(null) + const animationRef = useRef() + + // Waveform visualization + useEffect(() => { + if (!recording.isRecording || !recording.analyser || !canvasRef.current) { + // Clear animation when not recording + if (animationRef.current) { + cancelAnimationFrame(animationRef.current) + } + // Clear canvas + if (canvasRef.current) { + const canvas = canvasRef.current + const ctx = canvas.getContext('2d') + if (ctx) { + ctx.clearRect(0, 0, canvas.width, canvas.height) + } + } + return + } + + const canvas = canvasRef.current + const ctx = canvas.getContext('2d') + if (!ctx) return + + const analyser = recording.analyser + analyser.fftSize = 32 // Smaller for compact visualization + const bufferLength = analyser.frequencyBinCount + const dataArray = new Uint8Array(bufferLength) + + const draw = () => { + animationRef.current = requestAnimationFrame(draw) + + analyser.getByteFrequencyData(dataArray) + + // Clear canvas + ctx.clearRect(0, 0, canvas.width, canvas.height) + + const barWidth = canvas.width / bufferLength + let x = 0 + + for (let i = 0; i < bufferLength; i++) { + const barHeight = (dataArray[i] / 255) * canvas.height * 0.8 + + // Gradient color based on intensity + const intensity = dataArray[i] / 255 + const r = Math.floor(59 + intensity * 40) + const g = Math.floor(130 + intensity * 70) + const b = Math.floor(246 - intensity * 50) + + ctx.fillStyle = `rgb(${r}, ${g}, ${b})` + ctx.fillRect(x, canvas.height - barHeight, barWidth - 1, barHeight) + + x += 
barWidth + } + } + + draw() + + return () => { + if (animationRef.current) { + cancelAnimationFrame(animationRef.current) + } + } + }, [recording.isRecording, recording.analyser]) + + const handleClick = async () => { + if (recording.isRecording) { + recording.stopRecording() + } else { + await recording.startRecording() + } + } + + return ( + + ) +} diff --git a/backends/advanced/webui/src/components/layout/Layout.tsx b/backends/advanced/webui/src/components/layout/Layout.tsx index 5995f823..ab0d7dc2 100644 --- a/backends/advanced/webui/src/components/layout/Layout.tsx +++ b/backends/advanced/webui/src/components/layout/Layout.tsx @@ -1,12 +1,28 @@ import { Link, useLocation, Outlet } from 'react-router-dom' -import { Music, MessageSquare, MessageCircle, Brain, Users, Upload, Settings, LogOut, Sun, Moon, Shield, Radio, Layers, Calendar } from 'lucide-react' +import { useState, useRef, useEffect } from 'react' +import { MessageSquare, MessageCircle, Brain, Users, Upload, Settings, LogOut, Sun, Moon, Shield, Radio, Layers, Calendar, Search, Bell, User, ChevronDown } from 'lucide-react' import { useAuth } from '../../contexts/AuthContext' import { useTheme } from '../../contexts/ThemeContext' +import HeaderRecordButton from '../header/HeaderRecordButton' export default function Layout() { const location = useLocation() const { user, logout, isAdmin } = useAuth() const { isDark, toggleTheme } = useTheme() + const [userMenuOpen, setUserMenuOpen] = useState(false) + const [searchQuery, setSearchQuery] = useState('') + const userMenuRef = useRef(null) + + // Close dropdown when clicking outside + useEffect(() => { + function handleClickOutside(event: MouseEvent) { + if (userMenuRef.current && !userMenuRef.current.contains(event.target as Node)) { + setUserMenuOpen(false) + } + } + document.addEventListener('mousedown', handleClickOutside) + return () => document.removeEventListener('mousedown', handleClickOutside) + }, []) const navigationItems = [ { path: 
'/live-record', label: 'Live Record', icon: Radio }, @@ -15,83 +31,211 @@ export default function Layout() { { path: '/memories', label: 'Memories', icon: Brain }, { path: '/timeline', label: 'Timeline', icon: Calendar }, { path: '/users', label: 'User Management', icon: Users }, + { path: '/settings', label: 'Settings', icon: Settings }, ...(isAdmin ? [ { path: '/upload', label: 'Upload Audio', icon: Upload }, { path: '/queue', label: 'Queue Management', icon: Layers }, - { path: '/system', label: 'System State', icon: Settings }, + { path: '/system', label: 'System State', icon: Shield }, ] : []), ] return ( -
+
{/* Header */} -
-
+
+
-
- -

- Chronicle Dashboard -

+ {/* Logo & Brand */} +
+
+ +
+
+

+ Chronicle +

+

AI Memory System

+
-
+ + {/* Search Bar */} +
+
+ + setSearchQuery(e.target.value)} + className="w-full pl-10 pr-4 py-2 bg-neutral-100 dark:bg-neutral-700/50 border border-transparent rounded-lg text-sm text-neutral-900 dark:text-neutral-100 placeholder-neutral-500 focus:outline-none focus:ring-2 focus:ring-primary-500 focus:border-transparent transition-all" + /> +
+
+ + {/* Header Actions */} +
+ {/* Record Button */} + + + {/* Divider */} +
+ + {/* Search Icon (Mobile) */} + + + {/* Notifications */} + + + {/* Theme Toggle */} - - {/* User info */} -
-
- {isAdmin && } - {user?.name || user?.email} -
+ + {/* User Menu */} +
+ + + {/* Dropdown Menu */} + {userMenuOpen && ( +
+ {/* User Info */} +
+
+
+ +
+
+

+ {user?.name || 'User'} +

+

+ {user?.email} +

+
+
+ {isAdmin && ( + Admin + )} +
+ + {/* Menu Items */} +
+ setUserMenuOpen(false)} + > + + Settings + +
+ + {/* Logout */} +
+ +
+
+ )}
- -
-
-
+ {/* Main Container */} +
+
{/* Sidebar Navigation */}
{/* Main Content */} -
-
+
+
@@ -99,10 +243,13 @@ export default function Layout() {
{/* Footer */} -