From 01de652c68b040a36553add9e220b572e867ddeb Mon Sep 17 00:00:00 2001
From: Ankush Malaker <43288948+AnkushMalaker@users.noreply.github.com>
Date: Sat, 10 Jan 2026 09:33:07 +0530
Subject: [PATCH 1/5] Enhance setup documentation and convenience scripts
- Updated the interactive setup wizard instructions to recommend using the convenience script `./wizard.sh` for easier configuration.
- Added detailed instructions for uploading and processing existing audio files via the API, including example commands for single and multiple file uploads.
- Introduced a new section on HAVPE relay configuration for ESP32 audio streaming, providing environment variable setup and command examples.
- Clarified the distributed deployment setup, including GPU and backend separation instructions, and added benefits of using Tailscale for networking.
- Removed outdated `getting-started.md`, `SETUP_SCRIPTS.md`, and the backend-specific `Docs/quickstart.md` files to streamline documentation and avoid redundancy.
---
CLAUDE.md | 143 +++++-
Docs/getting-started.md | 731 ---------------------------
README.md | 120 +++++
backends/advanced/Docs/quickstart.md | 729 --------------------------
backends/advanced/SETUP_SCRIPTS.md | 160 ------
config/README.md | 7 +-
quickstart.md | 17 +-
7 files changed, 275 insertions(+), 1632 deletions(-)
delete mode 100644 Docs/getting-started.md
delete mode 100644 backends/advanced/Docs/quickstart.md
delete mode 100644 backends/advanced/SETUP_SCRIPTS.md
diff --git a/CLAUDE.md b/CLAUDE.md
index abe20db6..7f5f5507 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -26,20 +26,21 @@ Chronicle includes an **interactive setup wizard** for easy configuration. The w
### Quick Start
```bash
-# Run the interactive setup wizard from project root
-uv run python wizard.py
+# Run the interactive setup wizard from project root (recommended)
+./wizard.sh
-# Or use the quickstart guide for step-by-step instructions
-# See quickstart.md for detailed walkthrough
+# Or use direct command:
+uv run --with-requirements setup-requirements.txt python wizard.py
+
+# For step-by-step instructions, see quickstart.md
```
+**Note on Convenience Scripts**: Chronicle provides wrapper scripts (`./wizard.sh`, `./start.sh`, `./restart.sh`, `./stop.sh`, `./status.sh`) that simplify the longer `uv run --with-requirements setup-requirements.txt python` commands. Use these for everyday operations.
+
### Setup Documentation
For detailed setup instructions and troubleshooting, see:
- **[@quickstart.md](quickstart.md)**: Beginner-friendly step-by-step setup guide
- **[@Docs/init-system.md](Docs/init-system.md)**: Complete initialization system architecture and design
-- **[@Docs/getting-started.md](Docs/getting-started.md)**: Technical quickstart with advanced configuration
-- **[@backends/advanced/SETUP_SCRIPTS.md](backends/advanced/SETUP_SCRIPTS.md)**: Setup scripts reference and usage examples
-- **[@backends/advanced/Docs/quickstart.md](backends/advanced/Docs/quickstart.md)**: Backend-specific setup guide
### Wizard Architecture
The initialization system uses a **root orchestrator pattern**:
@@ -381,6 +382,134 @@ docker compose down -v
docker compose up --build -d
```
+## Add Existing Data
+
+### Audio File Upload & Processing
+
+The system supports processing existing audio files through the file upload API. This allows you to import and process pre-recorded conversations without requiring a live WebSocket connection.
+
+**Upload and Process WAV Files:**
+```bash
+export USER_TOKEN="your-jwt-token"
+
+# Upload single WAV file
+curl -X POST "http://localhost:8000/api/process-audio-files" \
+ -H "Authorization: Bearer $USER_TOKEN" \
+ -F "files=@/path/to/audio.wav" \
+ -F "device_name=file_upload"
+
+# Upload multiple WAV files
+curl -X POST "http://localhost:8000/api/process-audio-files" \
+ -H "Authorization: Bearer $USER_TOKEN" \
+ -F "files=@/path/to/recording1.wav" \
+ -F "files=@/path/to/recording2.wav" \
+ -F "device_name=import_batch"
+```
+
+**Response Example:**
+```json
+{
+ "message": "Successfully processed 2 audio files",
+ "processed_files": [
+ {
+ "filename": "recording1.wav",
+ "sample_rate": 16000,
+ "channels": 1,
+ "duration_seconds": 120.5,
+ "size_bytes": 3856000
+ },
+ {
+ "filename": "recording2.wav",
+ "sample_rate": 44100,
+ "channels": 2,
+ "duration_seconds": 85.2,
+ "size_bytes": 7532800
+ }
+ ],
+ "client_id": "user01-import_batch"
+}
+```
+
+## HAVPE Relay Configuration
+
+For ESP32 audio streaming using the HAVPE relay (`extras/havpe-relay/`):
+
+```bash
+# Environment variables for HAVPE relay
+export AUTH_USERNAME="user@example.com" # Email address
+export AUTH_PASSWORD="your-password"
+export DEVICE_NAME="havpe" # Device identifier
+
+# Run the relay
+cd extras/havpe-relay
+uv run python main.py --backend-url http://your-server:8000 --backend-ws-url ws://your-server:8000
+```
+
+The relay will automatically:
+- Authenticate using `AUTH_USERNAME` (email address)
+- Generate client ID as `objectid_suffix-havpe`
+- Forward ESP32 audio to the backend with proper authentication
+- Handle token refresh and reconnection
+
+## Distributed Deployment
+
+### Single Machine vs Distributed Setup
+
+**Single Machine (Default):**
+```bash
+# Everything on one machine
+docker compose up --build -d
+```
+
+**Distributed Setup (GPU + Backend separation):**
+
+#### GPU Machine Setup
+```bash
+# Start GPU-accelerated services
+cd extras/asr-services
+docker compose up moonshine -d
+
+cd extras/speaker-recognition
+docker compose up --build -d
+
+# Ollama with GPU support
+docker run -d --gpus=all -p 11434:11434 \
+ -v ollama:/root/.ollama \
+ ollama/ollama:latest
+```
+
+#### Backend Machine Configuration
+```bash
+# .env configuration for distributed services
+OLLAMA_BASE_URL=http://[gpu-machine-tailscale-ip]:11434
+SPEAKER_SERVICE_URL=http://[gpu-machine-tailscale-ip]:8085
+PARAKEET_ASR_URL=http://[gpu-machine-tailscale-ip]:8080
+
+# Start lightweight backend services
+docker compose up --build -d
+```
+
+#### Tailscale Networking
+```bash
+# Install on each machine
+curl -fsSL https://tailscale.com/install.sh | sh
+sudo tailscale up
+
+# Find machine IPs
+tailscale ip -4
+```
+
+**Benefits of Distributed Setup:**
+- GPU services on dedicated hardware
+- Lightweight backend on VPS/Raspberry Pi
+- Automatic Tailscale IP support (100.x.x.x) - no CORS configuration needed
+- Encrypted inter-service communication
+
+**Service Examples:**
+- GPU machine: LLM inference, ASR, speaker recognition
+- Backend machine: FastAPI, WebUI, databases
+- Database machine: MongoDB, Qdrant (optional separation)
+
## Development Notes
### Package Management
diff --git a/Docs/getting-started.md b/Docs/getting-started.md
deleted file mode 100644
index a923c99c..00000000
--- a/Docs/getting-started.md
+++ /dev/null
@@ -1,731 +0,0 @@
-# Getting Started
-
-# Chronicle Backend Quickstart Guide
-
-> ๐ **New to chronicle?** This is your starting point! After reading this, continue with [architecture.md](./architecture.md) for technical details.
-
-## Overview
-
-Chronicle is an eco-system of services to support "AI wearable" agents/functionality.
-At the moment, the basic functionalities are:
-- Audio capture (via WebSocket, from OMI device, files, or a laptop)
-- Audio transcription
-- **Advanced memory system** with pluggable providers (Chronicle native or OpenMemory MCP)
-- **Enhanced memory extraction** with individual fact storage and smart updates
-- **Semantic memory search** with relevance threshold filtering and live results
-- Action item extraction
-- Modern React web dashboard with live recording and advanced search features
-- Comprehensive user management with JWT authentication
-
-**Core Implementation**: See `src/advanced_omi_backend/main.py` for the complete FastAPI application and WebSocket handling.
-
-## Prerequisites
-
-- Docker and Docker Compose
-- API keys for your chosen providers (see setup script)
-
-## Quick Start
-
-### Step 1: Interactive Setup (Recommended)
-
-Run the interactive setup wizard to configure all services with guided prompts:
-```bash
-cd backends/advanced
-./init.sh
-```
-
-**The setup wizard will guide you through:**
-- **Authentication**: Admin email/password setup
-- **Transcription Provider**: Choose Deepgram, Mistral, or Offline (Parakeet)
-- **LLM Provider**: Choose OpenAI or Ollama for memory extraction
-- **Memory Provider**: Choose Chronicle Native or OpenMemory MCP
-- **Optional Services**: Speaker Recognition and other extras
-- **Network Configuration**: Ports and host settings
-
-**Example flow:**
-```
-๐ Chronicle Interactive Setup
-===============================================
-
-โบ Authentication Setup
-----------------------
-Admin email [admin@example.com]: john@company.com
-Admin password (min 8 chars): ********
-
-โบ Speech-to-Text Configuration
--------------------------------
-Choose your transcription provider:
- 1) Deepgram (recommended - high quality, requires API key)
- 2) Mistral (Voxtral models - requires API key)
- 3) Offline (Parakeet ASR - requires GPU, runs locally)
- 4) None (skip transcription setup)
-Enter choice (1-4) [1]: 1
-
-Get your API key from: https://console.deepgram.com/
-Deepgram API key: dg_xxxxxxxxxxxxx
-
-โบ LLM Provider Configuration
-----------------------------
-Choose your LLM provider for memory extraction:
- 1) OpenAI (GPT-4, GPT-3.5 - requires API key)
- 2) Ollama (local models - requires Ollama server)
- 3) Skip (no memory extraction)
-Enter choice (1-3) [1]: 1
-```
-
-### Step 2: HTTPS Setup (Optional)
-
-For microphone access and secure connections, set up HTTPS:
-```bash
-cd backends/advanced
-./setup-https.sh 100.83.66.30 # Your Tailscale/network IP
-```
-
-This creates SSL certificates and configures nginx for secure access.
-
-### Step 3: Start the System
-
-**Start all services:**
-```bash
-cd backends/advanced
-docker compose up --build -d
-```
-
-This starts:
-- **Backend API**: `http://localhost:8000`
-- **Web Dashboard**: `http://localhost:5173`
-- **MongoDB**: `localhost:27017`
-- **Qdrant**: `localhost:6333`
-
-### Step 4: Optional Services
-
-**If you configured optional services during setup, start them:**
-
-```bash
-# OpenMemory MCP (if selected)
-cd ../../extras/openmemory-mcp && docker compose up -d
-
-# Parakeet ASR (if selected for offline transcription)
-cd ../../extras/asr-services && docker compose up parakeet -d
-
-# Speaker Recognition (if enabled)
-cd ../../extras/speaker-recognition && docker compose up --build -d
-```
-
-### Manual Configuration (Alternative)
-
-If you prefer manual configuration, copy the `.env.template` file to `.env` and configure the required values:
-
-**Required Environment Variables:**
-```bash
-AUTH_SECRET_KEY=your-super-secret-jwt-key-here
-ADMIN_PASSWORD=your-secure-admin-password
-ADMIN_EMAIL=admin@example.com
-```
-
-**Memory Provider Configuration:**
-```bash
-# Memory Provider (Choose One)
-# Option 1: Chronicle Native (Default - Recommended)
-MEMORY_PROVIDER=chronicle
-
-# Option 2: OpenMemory MCP (Cross-client compatibility)
-# MEMORY_PROVIDER=openmemory_mcp
-# OPENMEMORY_MCP_URL=http://host.docker.internal:8765
-# OPENMEMORY_CLIENT_NAME=chronicle
-# OPENMEMORY_USER_ID=openmemory
-```
-
-**LLM Configuration (Choose One):**
-```bash
-# Option 1: OpenAI (Recommended for best memory extraction)
-LLM_PROVIDER=openai
-OPENAI_API_KEY=your-openai-api-key-here
-OPENAI_MODEL=gpt-4o-mini
-
-# Option 2: Local Ollama
-LLM_PROVIDER=ollama
-OLLAMA_BASE_URL=http://ollama:11434
-```
-
-**Transcription Services (Choose One):**
-```bash
-# Option 1: Deepgram (Recommended for best transcription quality)
-TRANSCRIPTION_PROVIDER=deepgram
-DEEPGRAM_API_KEY=your-deepgram-api-key-here
-
-# Option 2: Mistral (Voxtral models for transcription)
-TRANSCRIPTION_PROVIDER=mistral
-MISTRAL_API_KEY=your-mistral-api-key-here
-MISTRAL_MODEL=voxtral-mini-2507
-
-# Option 3: Local ASR service
-PARAKEET_ASR_URL=http://host.docker.internal:8080
-```
-
-**Important Notes:**
-- **OpenAI is strongly recommended** for LLM processing as it provides much better memory extraction and eliminates JSON parsing errors
-- **TRANSCRIPTION_PROVIDER** determines which service to use:
- - `deepgram`: Uses Deepgram's Nova-3 model for high-quality transcription
- - `mistral`: Uses Mistral's Voxtral models for transcription
- - If not set, system falls back to offline ASR service
-- The system requires either online API keys or offline ASR service configuration
-
-### Testing Your Setup (Optional)
-
-After configuration, verify everything works with the integration test suite:
-```bash
-./run-test.sh
-
-# Alternative: Manual test with detailed logging
-source .env && export DEEPGRAM_API_KEY OPENAI_API_KEY && \
- uv run robot --outputdir ../../test-results --loglevel INFO ../../tests/integration/integration_test.robot
-```
-This end-to-end test validates the complete audio processing pipeline using Robot Framework.
-
-## Using the System
-
-### Web Dashboard
-
-1. Open `http://localhost:5173`
-2. **Login** using the sidebar:
- - **Admin**: `admin@example.com` / `your-admin-password`
- - **Create new users** via admin interface
-
-### Dashboard Features
-
-- **Conversations**: View audio recordings, transcripts, and cropped audio
-- **Memories**: Advanced memory search with semantic search, relevance threshold filtering, and memory count display
-- **Live Recording**: Real-time audio recording with WebSocket streaming (HTTPS required)
-- **User Management**: Create/delete users and their data
-- **Client Management**: View active connections and close conversations
-- **System Monitoring**: Debug tools and system health monitoring
-
-### Audio Client Connection
-
-Connect audio clients via WebSocket with authentication:
-
-**WebSocket URLs:**
-```javascript
-// Opus audio stream
-ws://your-server-ip:8000/ws?token=YOUR_JWT_TOKEN&device_name=YOUR_DEVICE_NAME
-
-// PCM audio stream
-ws://your-server-ip:8000/ws_pcm?token=YOUR_JWT_TOKEN&device_name=YOUR_DEVICE_NAME
-```
-
-**Authentication Methods:**
-The system uses email-based authentication with JWT tokens:
-
-```bash
-# Login with email
-curl -X POST "http://localhost:8000/auth/jwt/login" \
- -H "Content-Type: application/x-www-form-urlencoded" \
- -d "username=admin@example.com&password=your-admin-password"
-
-# Response: {"access_token": "eyJhbGciOiJIUzI1NiIs...", "token_type": "bearer"}
-```
-
-**Authentication Flow:**
-1. **User Registration**: Admin creates users via API or dashboard
-2. **Login**: Users authenticate with email and password
-3. **Token Usage**: Include JWT token in API calls and WebSocket connections
-4. **Data Access**: Users can only access their own data (admins see all)
-
-For detailed authentication documentation, see [`auth.md`](./auth.md).
-
-**Create User Account:**
-```bash
-export ADMIN_TOKEN="your-admin-token"
-
-# Create user
-curl -X POST "http://localhost:8000/api/create_user" \
- -H "Authorization: Bearer $ADMIN_TOKEN" \
- -H "Content-Type: application/json" \
- -d '{"email": "user@example.com", "password": "userpass", "display_name": "John Doe"}'
-
-# Response includes the user_id (MongoDB ObjectId)
-# {"message": "User user@example.com created successfully", "user": {"id": "507f1f77bcf86cd799439011", ...}}
-```
-
-**Client ID Format:**
-The system automatically generates client IDs using the last 6 characters of the MongoDB ObjectId plus device name (e.g., `439011-phone`, `439011-desktop`). This ensures proper user-client association and data isolation.
-
-## Add Existing Data
-
-### Audio File Upload & Processing
-
-The system supports processing existing audio files through the file upload API. This allows you to import and process pre-recorded conversations without requiring a live WebSocket connection.
-
-**Upload and Process WAV Files:**
-```bash
-export USER_TOKEN="your-jwt-token"
-
-# Upload single WAV file
-curl -X POST "http://localhost:8000/api/process-audio-files" \
- -H "Authorization: Bearer $USER_TOKEN" \
- -F "files=@/path/to/audio.wav" \
- -F "device_name=file_upload"
-
-# Upload multiple WAV files
-curl -X POST "http://localhost:8000/api/process-audio-files" \
- -H "Authorization: Bearer $USER_TOKEN" \
- -F "files=@/path/to/recording1.wav" \
- -F "files=@/path/to/recording2.wav" \
- -F "device_name=import_batch"
-```
-
-**Response Example:**
-```json
-{
- "message": "Successfully processed 2 audio files",
- "processed_files": [
- {
- "filename": "recording1.wav",
- "sample_rate": 16000,
- "channels": 1,
- "duration_seconds": 120.5,
- "size_bytes": 3856000
- },
- {
- "filename": "recording2.wav",
- "sample_rate": 44100,
- "channels": 2,
- "duration_seconds": 85.2,
- "size_bytes": 7532800
- }
- ],
- "client_id": "user01-import_batch"
-}
-```
-
-## System Features
-
-### Audio Processing
-- **Real-time streaming**: WebSocket audio ingestion
-- **Multiple formats**: Opus and PCM audio support
-- **Per-client processing**: Isolated conversation management
-- **Speech detection**: Automatic silence removal
-- **Audio cropping**: Extract only speech segments
-
-**Implementation**: See `src/advanced_omi_backend/main.py` for WebSocket endpoints and `src/advanced_omi_backend/processors.py` for audio processing pipeline.
-
-### Transcription Options
-- **Deepgram API**: Cloud-based batch processing, high accuracy (recommended)
-- **Mistral API**: Voxtral models for transcription with REST API processing
-- **Self-hosted ASR**: Local Wyoming protocol services with real-time processing
-- **Collection timeout**: 1.5 minute collection for optimal online processing quality
-
-### Conversation Management
-- **Automatic chunking**: 60-second audio segments
-- **Conversation timeouts**: Auto-close after 1.5 minutes of silence
-- **Speaker identification**: Track multiple speakers per conversation
-- **Manual controls**: Close conversations via API or dashboard
-
-### Memory & Intelligence
-
-#### Pluggable Memory System
-- **Two memory providers**: Choose between Chronicle native or OpenMemory MCP
-- **Chronicle Provider**: Full control with custom extraction, individual fact storage, smart deduplication
-- **OpenMemory MCP Provider**: Cross-client compatibility (Claude Desktop, Cursor, Windsurf), professional processing
-
-#### Enhanced Memory Processing
-- **Individual fact storage**: No more generic transcript fallbacks
-- **Smart memory updates**: LLM-driven ADD/UPDATE/DELETE actions
-- **Enhanced prompts**: Improved fact extraction with granular, specific memories
-- **User-centric storage**: All memories keyed by database user_id
-- **Semantic search**: Vector-based memory retrieval with embeddings
-- **Configurable extraction**: YAML-based configuration for memory extraction
-- **Debug tracking**: SQLite-based tracking of transcript โ memory conversion
-- **Client metadata**: Device information preserved for debugging and reference
-- **User isolation**: All data scoped to individual users with multi-device support
-
-**Implementation**:
-- **Memory System**: `src/advanced_omi_backend/memory/memory_service.py` + `src/advanced_omi_backend/controllers/memory_controller.py`
-- **Configuration**: memory settings in `config/config.yml` (memory section)
-
-### Authentication & Security
-- **Email Authentication**: Login with email and password
-- **JWT tokens**: Secure API and WebSocket authentication with 1-hour expiration
-- **Role-based access**: Admin vs regular user permissions
-- **Data isolation**: Users can only access their own data
-- **Client ID Management**: Automatic client-user association via `objectid_suffix-device_name` format
-- **Multi-device support**: Single user can connect multiple devices
-- **Security headers**: Proper CORS, cookie security, and token validation
-
-**Implementation**: See `src/advanced_omi_backend/auth.py` for authentication logic, `src/advanced_omi_backend/users.py` for user management, and [`auth.md`](./auth.md) for comprehensive documentation.
-
-## Verification
-
-```bash
-# System health check
-curl http://localhost:8000/health
-
-# Web dashboard
-open http://localhost:3000
-
-# View active clients (requires auth token)
-curl -H "Authorization: Bearer your-token" http://localhost:8000/api/clients/active
-```
-
-## HAVPE Relay Configuration
-
-For ESP32 audio streaming using the HAVPE relay (`extras/havpe-relay/`):
-
-```bash
-# Environment variables for HAVPE relay
-export AUTH_USERNAME="user@example.com" # Email address
-export AUTH_PASSWORD="your-password"
-export DEVICE_NAME="havpe" # Device identifier
-
-# Run the relay
-cd extras/havpe-relay
-python main.py --backend-url http://your-server:8000 --backend-ws-url ws://your-server:8000
-```
-
-The relay will automatically:
-- Authenticate using `AUTH_USERNAME` (email address)
-- Generate client ID as `objectid_suffix-havpe`
-- Forward ESP32 audio to the backend with proper authentication
-- Handle token refresh and reconnection
-
-## Development tip
-uv sync --group (whatever group you want to sync)
-(for example, deepgram, etc.)
-
-## Troubleshooting
-
-**Service Issues:**
-- Check logs: `docker compose logs chronicle-backend`
-- Restart services: `docker compose restart`
-- View all services: `docker compose ps`
-
-**Authentication Issues:**
-- Verify `AUTH_SECRET_KEY` is set and long enough (minimum 32 characters)
-- Check admin credentials match `.env` file
-- Ensure user email/password combinations are correct
-
-**Transcription Issues:**
-- **Deepgram**: Verify API key is valid and `TRANSCRIPTION_PROVIDER=deepgram`
-- **Mistral**: Verify API key is valid and `TRANSCRIPTION_PROVIDER=mistral`
-- **Self-hosted**: Ensure ASR service is running on port 8765
-- Check transcription service connection in health endpoint
-
-**Memory Issues:**
-- Ensure Ollama is running and model is pulled
-- Check Qdrant connection in health endpoint
-- Memory processing happens at conversation end
-
-**Connection Issues:**
-- Use server's IP address, not localhost for mobile clients
-- Ensure WebSocket connections include authentication token
-- Check firewall/port settings for remote connections
-
-## Distributed Deployment
-
-### Single Machine vs Distributed Setup
-
-**Single Machine (Default):**
-```bash
-# Everything on one machine
-docker compose up --build -d
-```
-
-**Distributed Setup (GPU + Backend separation):**
-
-#### GPU Machine Setup
-```bash
-# Start GPU-accelerated services
-cd extras/asr-services
-docker compose up moonshine -d
-
-cd extras/speaker-recognition
-docker compose up --build -d
-
-# Ollama with GPU support
-docker run -d --gpus=all -p 11434:11434 \
- -v ollama:/root/.ollama \
- ollama/ollama:latest
-```
-
-#### Backend Machine Configuration
-```bash
-# .env configuration for distributed services
-OLLAMA_BASE_URL=http://[gpu-machine-tailscale-ip]:11434
-SPEAKER_SERVICE_URL=http://[gpu-machine-tailscale-ip]:8085
-PARAKEET_ASR_URL=http://[gpu-machine-tailscale-ip]:8080
-
-# Start lightweight backend services
-docker compose up --build -d
-```
-
-#### Tailscale Networking
-```bash
-# Install on each machine
-curl -fsSL https://tailscale.com/install.sh | sh
-sudo tailscale up
-
-# Find machine IPs
-tailscale ip -4
-```
-
-**Benefits of Distributed Setup:**
-- GPU services on dedicated hardware
-- Lightweight backend on VPS/Raspberry Pi
-- Automatic Tailscale IP support (100.x.x.x) - no CORS configuration needed
-- Encrypted inter-service communication
-
-**Service Examples:**
-- GPU machine: LLM inference, ASR, speaker recognition
-- Backend machine: FastAPI, WebUI, databases
-- Database machine: MongoDB, Qdrant (optional separation)
-
-## Data Architecture
-
-The chronicle backend uses a **user-centric data architecture**:
-
-- **All memories are keyed by database user_id** (not client_id)
-- **Client information is stored in metadata** for reference and debugging
-- **User email is included** for easy identification in admin interfaces
-- **Multi-device support**: Users can access their data from any registered device
-
-For detailed information, see [User Data Architecture](user-data-architecture.md).
-
-## Memory Provider Selection
-
-### Choosing a Memory Provider
-
-Chronicle offers two memory backends:
-
-#### 1. Chronicle Native
-```bash
-# In your .env file
-MEMORY_PROVIDER=chronicle
-LLM_PROVIDER=openai
-OPENAI_API_KEY=your-openai-key-here
-```
-
-**Benefits:**
-- Full control over memory processing
-- Individual fact storage with no fallbacks
-- Custom prompts and extraction logic
-- Smart deduplication algorithms
-- LLM-driven memory updates (ADD/UPDATE/DELETE)
-- No external dependencies
-
-#### 2. OpenMemory MCP
-```bash
-# First, start the external server
-cd extras/openmemory-mcp
-docker compose up -d
-
-# Then configure Chronicle
-MEMORY_PROVIDER=openmemory_mcp
-OPENMEMORY_MCP_URL=http://host.docker.internal:8765
-```
-
-**Benefits:**
-- Cross-client compatibility (works with Claude Desktop, Cursor, etc.)
-- Professional memory processing
-- Web UI at http://localhost:8765
-- Battle-tested deduplication
-
-**Use OpenMemory MCP when:**
-- You want cross-client memory sharing
-- You're already using OpenMemory in other tools
-- You prefer external expertise over custom logic
-
-**See [MEMORY_PROVIDERS.md](../MEMORY_PROVIDERS.md) for detailed comparison**
-
-## Memory & Action Item Configuration
-
-> ๐ฏ **New to memory configuration?** Read our [Memory Configuration Guide](./memory-configuration-guide.md) for a step-by-step setup guide with examples.
-
-The system uses **centralized configuration** via `config/config.yml` for all models (LLM, embeddings, vector store) and memory extraction settings.
-
-### Configuration File Location
-- **Path**: repository `config/config.yml` (override with `CONFIG_FILE` env var)
-- **Hot-reload**: Changes are applied on next processing cycle (no restart required)
-- **Fallback**: If file is missing, system uses safe defaults with environment variables
-
-### LLM Provider & Model Configuration
-
-โญ **OpenAI is STRONGLY RECOMMENDED** for optimal memory extraction performance.
-
-The system supports **multiple LLM providers** - configure via environment variables:
-
-```bash
-# In your .env file
-LLM_PROVIDER=openai # RECOMMENDED: Use "openai" for best results
-OPENAI_API_KEY=your-openai-api-key
-OPENAI_MODEL=gpt-4o-mini # RECOMMENDED: "gpt-5-mini" for better memory extraction
-
-# Alternative: Local Ollama (may have reduced memory quality)
-LLM_PROVIDER=ollama
-OLLAMA_BASE_URL=http://ollama:11434
-OLLAMA_MODEL=gemma3n:e4b # Fallback if YAML config fails to load
-```
-
-**Why OpenAI is recommended:**
-- **Enhanced memory extraction**: Creates multiple granular memories instead of fallback transcripts
-- **Better fact extraction**: More reliable JSON parsing and structured output
-- **No more "fallback memories"**: Eliminates generic transcript-based memory entries
-- **Improved conversation understanding**: Better context awareness and detail extraction
-
-**YAML Configuration** (provider-specific models):
-```yaml
-memory_extraction:
- enabled: true
- prompt: |
- Extract anything relevant about this conversation that would be valuable to remember.
- Focus on key topics, people, decisions, dates, and emotional context.
- llm_settings:
- # Model selection based on LLM_PROVIDER:
- # - Ollama: "gemma3n:e4b", "llama3.1:latest", "llama3.2:latest", etc.
- # - OpenAI: "gpt-5-mini" (recommended for JSON reliability), "gpt-5-mini", "gpt-3.5-turbo", etc.
- model: "gemma3n:e4b"
- temperature: 0.1
-
-fact_extraction:
- enabled: false # Disabled to avoid JSON parsing issues
- # RECOMMENDATION: Enable with OpenAI GPT-4o for better JSON reliability
- llm_settings:
- model: "gemma3n:e4b" # Auto-switches based on LLM_PROVIDER
- temperature: 0.0 # Lower for factual accuracy
-```
-
-**Provider-Specific Behavior:**
-- **Ollama**: Uses local models with Ollama embeddings (nomic-embed-text)
-- **OpenAI**: Uses OpenAI models with OpenAI embeddings (text-embedding-3-small)
-- **Embeddings**: Automatically selected based on provider (768 dims for Ollama, 1536 for OpenAI)
-
-#### Fixing JSON Parsing Errors
-
-If you experience JSON parsing errors in fact extraction:
-
-1. **Switch to OpenAI GPT-4o** (recommended solution):
- ```bash
- # In your .env file
- LLM_PROVIDER=openai
- OPENAI_API_KEY=your-openai-api-key
- OPENAI_MODEL=gpt-4o-mini
- ```
-
-2. **Enable fact extraction** with reliable JSON output:
- ```yaml
- # In config/config.yml (memory section)
- fact_extraction:
- enabled: true # Safe to enable with GPT-4o
- ```
-
-3. **Monitor logs** for JSON parsing success:
- ```bash
- # Check for JSON parsing errors
- docker logs advanced-backend | grep "JSONDecodeError"
-
- # Verify OpenAI usage
- docker logs advanced-backend | grep "OpenAI response"
- ```
-
-**Why GPT-4o helps with JSON errors:**
-- More consistent JSON formatting
-- Better instruction following for structured output
-- Reduced malformed JSON responses
-- Built-in JSON mode for reliable parsing
-
-#### Testing OpenAI Configuration
-
-To verify your OpenAI setup is working:
-
-1. **Check logs for OpenAI usage**:
- ```bash
- # Start the backend and check logs
- docker logs advanced-backend | grep -i "openai"
-
- # You should see:
- # "Using OpenAI provider with model: gpt-5-mini"
- ```
-
-2. **Test memory extraction** with a conversation:
- ```bash
- # The health endpoint includes LLM provider info
- curl http://localhost:8000/health
-
- # Response should include: "llm_provider": "openai"
- ```
-
-3. **Monitor memory processing**:
- ```bash
- # After a conversation ends, check for successful processing
- docker logs advanced-backend | grep "memory processing"
- ```
-
-If you see errors about missing API keys or models, verify your `.env` file has:
-```bash
-LLM_PROVIDER=openai
-OPENAI_API_KEY=sk-your-actual-api-key-here
-OPENAI_MODEL=gpt-4o-mini
-```
-
-### Quality Control Settings
-```yaml
-quality_control:
- min_conversation_length: 50 # Skip very short conversations
- max_conversation_length: 50000 # Skip extremely long conversations
- skip_low_content: true # Skip conversations with mostly filler words
- min_content_ratio: 0.3 # Minimum meaningful content ratio
- skip_patterns: # Regex patterns to skip
- - "^(um|uh|hmm|yeah|ok|okay)\\s*$"
- - "^test\\s*$"
- - "^testing\\s*$"
-```
-
-### Processing & Performance
-```yaml
-processing:
- parallel_processing: true # Enable concurrent processing
- max_concurrent_tasks: 3 # Limit concurrent LLM requests
- processing_timeout: 300 # Timeout for memory extraction (seconds)
- retry_failed: true # Retry failed extractions
- max_retries: 2 # Maximum retry attempts
- retry_delay: 5 # Delay between retries (seconds)
-```
-
-### Debug & Monitoring
-```yaml
-debug:
- enabled: true
- db_path: "/app/debug/memory_debug.db"
- log_level: "INFO" # DEBUG, INFO, WARNING, ERROR
- log_full_conversations: false # Privacy consideration
- log_extracted_memories: true # Log successful extractions
-```
-
-### Configuration Validation
-The system validates configuration on startup and provides detailed error messages for invalid settings. Use the debug API to verify your configuration:
-
-```bash
-# Check current configuration
-curl -H "Authorization: Bearer $ADMIN_TOKEN" \
- http://localhost:8000/api/debug/memory/config
-```
-
-### API Endpoints for Debugging
-- `GET /api/debug/memory/stats` - Processing statistics
-- `GET /api/debug/memory/sessions` - Recent memory sessions
-- `GET /api/debug/memory/session/{audio_uuid}` - Detailed session info
-- `GET /api/debug/memory/config` - Current configuration
-- `GET /api/debug/memory/pipeline/{audio_uuid}` - Pipeline trace
-
-**Implementation**: See `src/advanced_omi_backend/routers/modules/system_routes.py` for debug endpoints and system utilities.
-
-## Next Steps
-
-- **Configure Google OAuth** for easy user login
-- **Set up Ollama** for local memory processing
-- **Deploy ASR service** for self-hosted transcription
-- **Connect audio clients** using the WebSocket API
-- **Explore the dashboard** to manage conversations and users
-- **Review the user data architecture** for understanding data organization
-- **Customize memory extraction** by editing the `memory` section in `config/config.yml`
-- **Monitor processing performance** using debug API endpoints
diff --git a/README.md b/README.md
index f44e266f..b70f4255 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,126 @@ Run setup wizard, start services, access at http://localhost:5173
- **๐๏ธ [Architecture Details](Docs/features.md)** - Technical deep dive
- **๐ณ [Docker/K8s](README-K8S.md)** - Container deployment
+## Project Structure
+
+```
+chronicle/
+├── app/                      # React Native mobile app
+│   ├── app/                  # App components and screens
+│   └── plugins/              # Expo plugins
+├── backends/
+│   ├── advanced/             # Main AI backend (FastAPI)
+│   │   ├── src/              # Backend source code
+│   │   ├── init.py           # Interactive setup wizard
+│   │   └── docker-compose.yml
+│   ├── simple/               # Basic backend implementation
+│   └── other-backends/       # Example implementations
+├── extras/
+│   ├── speaker-recognition/  # Voice identification service
+│   ├── asr-services/         # Offline speech-to-text (Parakeet)
+│   └── openmemory-mcp/       # External memory server
+├── Docs/                     # Technical documentation
+├── config/                   # Central configuration files
+├── tests/                    # Integration & unit tests
+├── wizard.py                 # Root setup orchestrator
+├── services.py               # Service lifecycle manager
+└── *.sh                      # Convenience scripts (wrappers)
+```
+
+## Service Architecture
+
+```
+┌──────────────────────────────────────────────────────────┐
+│                     Chronicle System                     │
+├──────────────────────────────────────────────────────────┤
+│                                                          │
+│  ┌──────────────┐    ┌──────────────┐    ┌────────────┐  │
+│  │  Mobile App  │───►│   Backend    │───►│  MongoDB   │  │
+│  │   (React     │    │  (FastAPI)   │    │            │  │
+│  │   Native)    │    │              │    └────────────┘  │
+│  └──────────────┘    └──────────────┘                    │
+│                             │                            │
+│                             ▼                            │
+│        ┌────────────────────┴────────────────┐           │
+│        │                    │                │           │
+│  ┌─────▼─────┐        ┌─────▼─────┐   ┌──────▼──────┐    │
+│  │ Deepgram  │        │  OpenAI   │   │   Qdrant    │    │
+│  │   STT     │        │   LLM     │   │  (Vector    │    │
+│  │           │        │           │   │   Store)    │    │
+│  └───────────┘        └───────────┘   └─────────────┘    │
+│                                                          │
+│  Optional Services:                                      │
+│  ┌──────────────┐    ┌──────────────┐    ┌────────────┐  │
+│  │   Speaker    │    │   Parakeet   │    │   Ollama   │  │
+│  │ Recognition  │    │ (Local ASR)  │    │   (Local   │  │
+│  │              │    │              │    │    LLM)    │  │
+│  └──────────────┘    └──────────────┘    └────────────┘  │
+└──────────────────────────────────────────────────────────┘
+```
+
+## Quick Command Reference
+
+### Setup & Configuration
+```bash
+# Interactive setup wizard (recommended for first-time users)
+./wizard.sh
+
+# Full command (what the script wraps)
+uv run --with-requirements setup-requirements.txt python wizard.py
+```
+
+**Note**: Convenience scripts (*.sh) are wrappers around `wizard.py` and `services.py` that simplify the longer `uv run` commands.
+
+### Service Management
+```bash
+# Start all configured services
+./start.sh
+
+# Restart all services (preserves containers)
+./restart.sh
+
+# Check service status
+./status.sh
+
+# Stop all services
+./stop.sh
+```
+
+
+<details>
+<summary>Full commands (click to expand)</summary>
+
+```bash
+# What the convenience scripts wrap
+uv run --with-requirements setup-requirements.txt python services.py start --all --build
+uv run --with-requirements setup-requirements.txt python services.py restart --all
+uv run --with-requirements setup-requirements.txt python services.py status
+uv run --with-requirements setup-requirements.txt python services.py stop --all
+```
+
+</details>
+
+### Development
+```bash
+# Backend development
+cd backends/advanced
+uv run python src/main.py
+
+# Run tests
+./run-test.sh
+
+# Mobile app
+cd app
+npm start
+```
+
+### Health Checks
+```bash
+# Backend health
+curl http://localhost:8000/health
+
+# Web dashboard
+open http://localhost:5173
+```
+
## Vision
This fits as a small part of the larger idea of "Have various sensors feeding the state of YOUR world to computers/AI and get some use out of it"
diff --git a/backends/advanced/Docs/quickstart.md b/backends/advanced/Docs/quickstart.md
deleted file mode 100644
index 0d681978..00000000
--- a/backends/advanced/Docs/quickstart.md
+++ /dev/null
@@ -1,729 +0,0 @@
-# Chronicle Backend Quickstart Guide
-
-> ๐ **New to chronicle?** This is your starting point! After reading this, continue with [architecture.md](./architecture.md) for technical details.
-
-## Overview
-
-Chronicle is an eco-system of services to support "AI wearable" agents/functionality.
-At the moment, the basic functionalities are:
-- Audio capture (via WebSocket, from OMI device, files, or a laptop)
-- Audio transcription
-- **Advanced memory system** with pluggable providers (Chronicle native or OpenMemory MCP)
-- **Enhanced memory extraction** with individual fact storage and smart updates
-- **Semantic memory search** with relevance threshold filtering and live results
-- Action item extraction
-- Modern React web dashboard with live recording and advanced search features
-- Comprehensive user management with JWT authentication
-
-**Core Implementation**: See `src/advanced_omi_backend/main.py` for the complete FastAPI application and WebSocket handling.
-
-## Prerequisites
-
-- Docker and Docker Compose
-- API keys for your chosen providers (see setup script)
-
-## Quick Start
-
-### Step 1: Interactive Setup (Recommended)
-
-Run the interactive setup wizard to configure all services with guided prompts:
-```bash
-cd backends/advanced
-./init.sh
-```
-
-**The setup wizard will guide you through:**
-- **Authentication**: Admin email/password setup
-- **Transcription Provider**: Choose Deepgram, Mistral, or Offline (Parakeet)
-- **LLM Provider**: Choose OpenAI or Ollama for memory extraction
-- **Memory Provider**: Choose Chronicle Native or OpenMemory MCP
-- **Optional Services**: Speaker Recognition and other extras
-- **Network Configuration**: Ports and host settings
-
-**Example flow:**
-```
-๐ Chronicle Interactive Setup
-===============================================
-
-โบ Authentication Setup
-----------------------
-Admin email [admin@example.com]: john@company.com
-Admin password (min 8 chars): ********
-
-โบ Speech-to-Text Configuration
--------------------------------
-Choose your transcription provider:
- 1) Deepgram (recommended - high quality, requires API key)
- 2) Mistral (Voxtral models - requires API key)
- 3) Offline (Parakeet ASR - requires GPU, runs locally)
- 4) None (skip transcription setup)
-Enter choice (1-4) [1]: 1
-
-Get your API key from: https://console.deepgram.com/
-Deepgram API key: dg_xxxxxxxxxxxxx
-
-โบ LLM Provider Configuration
-----------------------------
-Choose your LLM provider for memory extraction:
- 1) OpenAI (GPT-4, GPT-3.5 - requires API key)
- 2) Ollama (local models - requires Ollama server)
- 3) Skip (no memory extraction)
-Enter choice (1-3) [1]: 1
-```
-
-### Step 2: HTTPS Setup (Optional)
-
-For microphone access and secure connections, set up HTTPS:
-```bash
-cd backends/advanced
-./setup-https.sh 100.83.66.30 # Your Tailscale/network IP
-```
-
-This creates SSL certificates and configures nginx for secure access.
-
-### Step 3: Start the System
-
-**Start all services:**
-```bash
-cd backends/advanced
-docker compose up --build -d
-```
-
-This starts:
-- **Backend API**: `http://localhost:8000`
-- **Web Dashboard**: `http://localhost:5173`
-- **MongoDB**: `localhost:27017`
-- **Qdrant**: `localhost:6333`
-
-### Step 4: Optional Services
-
-**If you configured optional services during setup, start them:**
-
-```bash
-# OpenMemory MCP (if selected)
-cd ../../extras/openmemory-mcp && docker compose up -d
-
-# Parakeet ASR (if selected for offline transcription)
-cd ../../extras/asr-services && docker compose up parakeet -d
-
-# Speaker Recognition (if enabled)
-cd ../../extras/speaker-recognition && docker compose up --build -d
-```
-
-### Manual Configuration (Alternative)
-
-If you prefer manual configuration, copy the `.env.template` file to `.env` and configure the required values:
-
-**Required Environment Variables:**
-```bash
-AUTH_SECRET_KEY=your-super-secret-jwt-key-here
-ADMIN_PASSWORD=your-secure-admin-password
-ADMIN_EMAIL=admin@example.com
-```
-
-**Memory Provider Configuration:**
-```bash
-# Memory Provider (Choose One)
-# Option 1: Chronicle Native (Default - Recommended)
-MEMORY_PROVIDER=chronicle
-
-# Option 2: OpenMemory MCP (Cross-client compatibility)
-# MEMORY_PROVIDER=openmemory_mcp
-# OPENMEMORY_MCP_URL=http://host.docker.internal:8765
-# OPENMEMORY_CLIENT_NAME=chronicle
-# OPENMEMORY_USER_ID=openmemory
-```
-
-**LLM Configuration (Choose One):**
-```bash
-# Option 1: OpenAI (Recommended for best memory extraction)
-LLM_PROVIDER=openai
-OPENAI_API_KEY=your-openai-api-key-here
-OPENAI_MODEL=gpt-4o-mini
-
-# Option 2: Local Ollama
-LLM_PROVIDER=ollama
-OLLAMA_BASE_URL=http://ollama:11434
-```
-
-**Transcription Services (Choose One):**
-```bash
-# Option 1: Deepgram (Recommended for best transcription quality)
-TRANSCRIPTION_PROVIDER=deepgram
-DEEPGRAM_API_KEY=your-deepgram-api-key-here
-
-# Option 2: Mistral (Voxtral models for transcription)
-TRANSCRIPTION_PROVIDER=mistral
-MISTRAL_API_KEY=your-mistral-api-key-here
-MISTRAL_MODEL=voxtral-mini-2507
-
-# Option 3: Local ASR service
-PARAKEET_ASR_URL=http://host.docker.internal:8080
-```
-
-**Important Notes:**
-- **OpenAI is strongly recommended** for LLM processing as it provides much better memory extraction and eliminates JSON parsing errors
-- **TRANSCRIPTION_PROVIDER** determines which service to use:
- - `deepgram`: Uses Deepgram's Nova-3 model for high-quality transcription
- - `mistral`: Uses Mistral's Voxtral models for transcription
- - If not set, system falls back to offline ASR service
-- The system requires either online API keys or offline ASR service configuration
-
-### Testing Your Setup (Optional)
-
-After configuration, verify everything works with the integration test suite:
-```bash
-./run-test.sh
-
-# Alternative: Manual test with detailed logging
-source .env && export DEEPGRAM_API_KEY OPENAI_API_KEY && \
- uv run robot --outputdir ../../test-results --loglevel INFO ../../tests/integration/integration_test.robot
-```
-This end-to-end test validates the complete audio processing pipeline using Robot Framework.
-
-## Using the System
-
-### Web Dashboard
-
-1. Open `http://localhost:5173`
-2. **Login** using the sidebar:
- - **Admin**: `admin@example.com` / `your-admin-password`
- - **Create new users** via admin interface
-
-### Dashboard Features
-
-- **Conversations**: View audio recordings, transcripts, and cropped audio
-- **Memories**: Advanced memory search with semantic search, relevance threshold filtering, and memory count display
-- **Live Recording**: Real-time audio recording with WebSocket streaming (HTTPS required)
-- **User Management**: Create/delete users and their data
-- **Client Management**: View active connections and close conversations
-- **System Monitoring**: Debug tools and system health monitoring
-
-### Audio Client Connection
-
-Connect audio clients via WebSocket with authentication:
-
-**WebSocket URLs:**
-```javascript
-// Opus audio stream
-ws://your-server-ip:8000/ws?token=YOUR_JWT_TOKEN&device_name=YOUR_DEVICE_NAME
-
-// PCM audio stream
-ws://your-server-ip:8000/ws_pcm?token=YOUR_JWT_TOKEN&device_name=YOUR_DEVICE_NAME
-```
-
-**Authentication Methods:**
-The system uses email-based authentication with JWT tokens:
-
-```bash
-# Login with email
-curl -X POST "http://localhost:8000/auth/jwt/login" \
- -H "Content-Type: application/x-www-form-urlencoded" \
- -d "username=admin@example.com&password=your-admin-password"
-
-# Response: {"access_token": "eyJhbGciOiJIUzI1NiIs...", "token_type": "bearer"}
-```
-
-**Authentication Flow:**
-1. **User Registration**: Admin creates users via API or dashboard
-2. **Login**: Users authenticate with email and password
-3. **Token Usage**: Include JWT token in API calls and WebSocket connections
-4. **Data Access**: Users can only access their own data (admins see all)
-
-For detailed authentication documentation, see [`auth.md`](./auth.md).
-
-**Create User Account:**
-```bash
-export ADMIN_TOKEN="your-admin-token"
-
-# Create user
-curl -X POST "http://localhost:8000/api/create_user" \
- -H "Authorization: Bearer $ADMIN_TOKEN" \
- -H "Content-Type: application/json" \
- -d '{"email": "user@example.com", "password": "userpass", "display_name": "John Doe"}'
-
-# Response includes the user_id (MongoDB ObjectId)
-# {"message": "User user@example.com created successfully", "user": {"id": "507f1f77bcf86cd799439011", ...}}
-```
-
-**Client ID Format:**
-The system automatically generates client IDs using the last 6 characters of the MongoDB ObjectId plus device name (e.g., `439011-phone`, `439011-desktop`). This ensures proper user-client association and data isolation.
-
-## Add Existing Data
-
-### Audio File Upload & Processing
-
-The system supports processing existing audio files through the file upload API. This allows you to import and process pre-recorded conversations without requiring a live WebSocket connection.
-
-**Upload and Process WAV Files:**
-```bash
-export USER_TOKEN="your-jwt-token"
-
-# Upload single WAV file
-curl -X POST "http://localhost:8000/api/audio/upload" \
- -H "Authorization: Bearer $USER_TOKEN" \
- -F "files=@/path/to/audio.wav" \
- -F "device_name=file_upload"
-
-# Upload multiple WAV files
-curl -X POST "http://localhost:8000/api/audio/upload" \
- -H "Authorization: Bearer $USER_TOKEN" \
- -F "files=@/path/to/recording1.wav" \
- -F "files=@/path/to/recording2.wav" \
- -F "device_name=import_batch"
-```
-
-**Response Example:**
-```json
-{
- "message": "Successfully processed 2 audio files",
- "processed_files": [
- {
- "filename": "recording1.wav",
- "sample_rate": 16000,
- "channels": 1,
- "duration_seconds": 120.5,
- "size_bytes": 3856000
- },
- {
- "filename": "recording2.wav",
- "sample_rate": 44100,
- "channels": 2,
- "duration_seconds": 85.2,
- "size_bytes": 7532800
- }
- ],
- "client_id": "user01-import_batch"
-}
-```
-
-## System Features
-
-### Audio Processing
-- **Real-time streaming**: WebSocket audio ingestion
-- **Multiple formats**: Opus and PCM audio support
-- **Per-client processing**: Isolated conversation management
-- **Speech detection**: Automatic silence removal
-- **Audio cropping**: Extract only speech segments
-
-**Implementation**: See `src/advanced_omi_backend/main.py` for WebSocket endpoints and `src/advanced_omi_backend/processors.py` for audio processing pipeline.
-
-### Transcription Options
-- **Deepgram API**: Cloud-based batch processing, high accuracy (recommended)
-- **Mistral API**: Voxtral models for transcription with REST API processing
-- **Self-hosted ASR**: Local Wyoming protocol services with real-time processing
-- **Collection timeout**: 1.5 minute collection for optimal online processing quality
-
-### Conversation Management
-- **Automatic chunking**: 60-second audio segments
-- **Conversation timeouts**: Auto-close after 1.5 minutes of silence
-- **Speaker identification**: Track multiple speakers per conversation
-- **Manual controls**: Close conversations via API or dashboard
-
-### Memory & Intelligence
-
-#### Pluggable Memory System
-- **Two memory providers**: Choose between Chronicle native or OpenMemory MCP
-- **Chronicle Provider**: Full control with custom extraction, individual fact storage, smart deduplication
-- **OpenMemory MCP Provider**: Cross-client compatibility (Claude Desktop, Cursor, Windsurf), professional processing
-
-#### Enhanced Memory Processing
-- **Individual fact storage**: No more generic transcript fallbacks
-- **Smart memory updates**: LLM-driven ADD/UPDATE/DELETE actions
-- **Enhanced prompts**: Improved fact extraction with granular, specific memories
-- **User-centric storage**: All memories keyed by database user_id
-- **Semantic search**: Vector-based memory retrieval with embeddings
-- **Configurable extraction**: YAML-based configuration for memory extraction
-- **Debug tracking**: SQLite-based tracking of transcript โ memory conversion
-- **Client metadata**: Device information preserved for debugging and reference
-- **User isolation**: All data scoped to individual users with multi-device support
-
-**Implementation**:
-- **Memory System**: `src/advanced_omi_backend/memory/memory_service.py` + `src/advanced_omi_backend/controllers/memory_controller.py`
-- **Configuration**: `config/config.yml` (memory + models) in repo root
-
-### Authentication & Security
-- **Email Authentication**: Login with email and password
-- **JWT tokens**: Secure API and WebSocket authentication with 1-hour expiration
-- **Role-based access**: Admin vs regular user permissions
-- **Data isolation**: Users can only access their own data
-- **Client ID Management**: Automatic client-user association via `objectid_suffix-device_name` format
-- **Multi-device support**: Single user can connect multiple devices
-- **Security headers**: Proper CORS, cookie security, and token validation
-
-**Implementation**: See `src/advanced_omi_backend/auth.py` for authentication logic, `src/advanced_omi_backend/users.py` for user management, and [`auth.md`](./auth.md) for comprehensive documentation.
-
-## Verification
-
-```bash
-# System health check
-curl http://localhost:8000/health
-
-# Web dashboard
-open http://localhost:3000
-
-# View active clients (requires auth token)
-curl -H "Authorization: Bearer your-token" http://localhost:8000/api/clients/active
-```
-
-## HAVPE Relay Configuration
-
-For ESP32 audio streaming using the HAVPE relay (`extras/havpe-relay/`):
-
-```bash
-# Environment variables for HAVPE relay
-export AUTH_USERNAME="user@example.com" # Email address
-export AUTH_PASSWORD="your-password"
-export DEVICE_NAME="havpe" # Device identifier
-
-# Run the relay
-cd extras/havpe-relay
-python main.py --backend-url http://your-server:8000 --backend-ws-url ws://your-server:8000
-```
-
-The relay will automatically:
-- Authenticate using `AUTH_USERNAME` (email address)
-- Generate client ID as `objectid_suffix-havpe`
-- Forward ESP32 audio to the backend with proper authentication
-- Handle token refresh and reconnection
-
-## Development tip
-uv sync --group (whatever group you want to sync)
-(for example, deepgram, etc.)
-
-## Troubleshooting
-
-**Service Issues:**
-- Check logs: `docker compose logs chronicle-backend`
-- Restart services: `docker compose restart`
-- View all services: `docker compose ps`
-
-**Authentication Issues:**
-- Verify `AUTH_SECRET_KEY` is set and long enough (minimum 32 characters)
-- Check admin credentials match `.env` file
-- Ensure user email/password combinations are correct
-
-**Transcription Issues:**
-- **Deepgram**: Verify API key is valid and `TRANSCRIPTION_PROVIDER=deepgram`
-- **Mistral**: Verify API key is valid and `TRANSCRIPTION_PROVIDER=mistral`
-- **Self-hosted**: Ensure ASR service is running on port 8765
-- Check transcription service connection in health endpoint
-
-**Memory Issues:**
-- Ensure Ollama is running and model is pulled
-- Check Qdrant connection in health endpoint
-- Memory processing happens at conversation end
-
-**Connection Issues:**
-- Use server's IP address, not localhost for mobile clients
-- Ensure WebSocket connections include authentication token
-- Check firewall/port settings for remote connections
-
-## Distributed Deployment
-
-### Single Machine vs Distributed Setup
-
-**Single Machine (Default):**
-```bash
-# Everything on one machine
-docker compose up --build -d
-```
-
-**Distributed Setup (GPU + Backend separation):**
-
-#### GPU Machine Setup
-```bash
-# Start GPU-accelerated services
-cd extras/asr-services
-docker compose up moonshine -d
-
-cd extras/speaker-recognition
-docker compose up --build -d
-
-# Ollama with GPU support
-docker run -d --gpus=all -p 11434:11434 \
- -v ollama:/root/.ollama \
- ollama/ollama:latest
-```
-
-#### Backend Machine Configuration
-```bash
-# .env configuration for distributed services
-OLLAMA_BASE_URL=http://[gpu-machine-tailscale-ip]:11434
-SPEAKER_SERVICE_URL=http://[gpu-machine-tailscale-ip]:8085
-PARAKEET_ASR_URL=http://[gpu-machine-tailscale-ip]:8080
-
-# Start lightweight backend services
-docker compose up --build -d
-```
-
-#### Tailscale Networking
-```bash
-# Install on each machine
-curl -fsSL https://tailscale.com/install.sh | sh
-sudo tailscale up
-
-# Find machine IPs
-tailscale ip -4
-```
-
-**Benefits of Distributed Setup:**
-- GPU services on dedicated hardware
-- Lightweight backend on VPS/Raspberry Pi
-- Automatic Tailscale IP support (100.x.x.x) - no CORS configuration needed
-- Encrypted inter-service communication
-
-**Service Examples:**
-- GPU machine: LLM inference, ASR, speaker recognition
-- Backend machine: FastAPI, WebUI, databases
-- Database machine: MongoDB, Qdrant (optional separation)
-
-## Data Architecture
-
-The chronicle backend uses a **user-centric data architecture**:
-
-- **All memories are keyed by database user_id** (not client_id)
-- **Client information is stored in metadata** for reference and debugging
-- **User email is included** for easy identification in admin interfaces
-- **Multi-device support**: Users can access their data from any registered device
-
-For detailed information, see [User Data Architecture](user-data-architecture.md).
-
-## Memory Provider Selection
-
-### Choosing a Memory Provider
-
-Chronicle offers two memory backends:
-
-#### 1. Chronicle Native
-```bash
-# In your .env file
-MEMORY_PROVIDER=chronicle
-LLM_PROVIDER=openai
-OPENAI_API_KEY=your-openai-key-here
-```
-
-**Benefits:**
-- Full control over memory processing
-- Individual fact storage with no fallbacks
-- Custom prompts and extraction logic
-- Smart deduplication algorithms
-- LLM-driven memory updates (ADD/UPDATE/DELETE)
-- No external dependencies
-
-#### 2. OpenMemory MCP
-```bash
-# First, start the external server
-cd extras/openmemory-mcp
-docker compose up -d
-
-# Then configure Chronicle
-MEMORY_PROVIDER=openmemory_mcp
-OPENMEMORY_MCP_URL=http://host.docker.internal:8765
-```
-
-**Benefits:**
-- Cross-client compatibility (works with Claude Desktop, Cursor, etc.)
-- Professional memory processing
-- Web UI at http://localhost:8765
-- Battle-tested deduplication
-
-**Use OpenMemory MCP when:**
-- You want cross-client memory sharing
-- You're already using OpenMemory in other tools
-- You prefer external expertise over custom logic
-
-**See [MEMORY_PROVIDERS.md](../MEMORY_PROVIDERS.md) for detailed comparison**
-
-## Memory & Action Item Configuration
-
-> ๐ฏ **New to memory configuration?** Read our [Memory Configuration Guide](./memory-configuration-guide.md) for a step-by-step setup guide with examples.
-
-The system uses **centralized configuration** via `config/config.yml` for all memory extraction and model settings.
-
-### Configuration File Location
-- **Path**: `config/config.yml` in repo root
-- **Hot-reload**: Changes are applied on next processing cycle (no restart required)
-- **Fallback**: If file is missing, system uses safe defaults with environment variables
-
-### LLM Provider & Model Configuration
-
-โญ **OpenAI is STRONGLY RECOMMENDED** for optimal memory extraction performance.
-
-The system supports **multiple LLM providers** - configure via environment variables:
-
-```bash
-# In your .env file
-LLM_PROVIDER=openai # RECOMMENDED: Use "openai" for best results
-OPENAI_API_KEY=your-openai-api-key
-OPENAI_MODEL=gpt-4o-mini # RECOMMENDED: "gpt-5-mini" for better memory extraction
-
-# Alternative: Local Ollama (may have reduced memory quality)
-LLM_PROVIDER=ollama
-OLLAMA_BASE_URL=http://ollama:11434
-OLLAMA_MODEL=gemma3n:e4b # Fallback if YAML config fails to load
-```
-
-**Why OpenAI is recommended:**
-- **Enhanced memory extraction**: Creates multiple granular memories instead of fallback transcripts
-- **Better fact extraction**: More reliable JSON parsing and structured output
-- **No more "fallback memories"**: Eliminates generic transcript-based memory entries
-- **Improved conversation understanding**: Better context awareness and detail extraction
-
-**YAML Configuration** (provider-specific models):
-```yaml
-memory_extraction:
- enabled: true
- prompt: |
- Extract anything relevant about this conversation that would be valuable to remember.
- Focus on key topics, people, decisions, dates, and emotional context.
- llm_settings:
- # Model selection based on LLM_PROVIDER:
- # - Ollama: "gemma3n:e4b", "llama3.1:latest", "llama3.2:latest", etc.
- # - OpenAI: "gpt-5-mini" (recommended for JSON reliability), "gpt-5-mini", "gpt-3.5-turbo", etc.
- model: "gemma3n:e4b"
- temperature: 0.1
-
-fact_extraction:
- enabled: false # Disabled to avoid JSON parsing issues
- # RECOMMENDATION: Enable with OpenAI GPT-4o for better JSON reliability
- llm_settings:
- model: "gemma3n:e4b" # Auto-switches based on LLM_PROVIDER
- temperature: 0.0 # Lower for factual accuracy
-```
-
-**Provider-Specific Behavior:**
-- **Ollama**: Uses local models with Ollama embeddings (nomic-embed-text)
-- **OpenAI**: Uses OpenAI models with OpenAI embeddings (text-embedding-3-small)
-- **Embeddings**: Automatically selected based on provider (768 dims for Ollama, 1536 for OpenAI)
-
-#### Fixing JSON Parsing Errors
-
-If you experience JSON parsing errors in fact extraction:
-
-1. **Switch to OpenAI GPT-4o** (recommended solution):
- ```bash
- # In your .env file
- LLM_PROVIDER=openai
- OPENAI_API_KEY=your-openai-api-key
- OPENAI_MODEL=gpt-4o-mini
- ```
-
-2. **Enable fact extraction** with reliable JSON output:
- ```yaml
- # In config/config.yml (memory section)
- fact_extraction:
- enabled: true # Safe to enable with GPT-4o
- ```
-
-3. **Monitor logs** for JSON parsing success:
- ```bash
- # Check for JSON parsing errors
- docker logs advanced-backend | grep "JSONDecodeError"
-
- # Verify OpenAI usage
- docker logs advanced-backend | grep "OpenAI response"
- ```
-
-**Why GPT-4o helps with JSON errors:**
-- More consistent JSON formatting
-- Better instruction following for structured output
-- Reduced malformed JSON responses
-- Built-in JSON mode for reliable parsing
-
-#### Testing OpenAI Configuration
-
-To verify your OpenAI setup is working:
-
-1. **Check logs for OpenAI usage**:
- ```bash
- # Start the backend and check logs
- docker logs advanced-backend | grep -i "openai"
-
- # You should see:
- # "Using OpenAI provider with model: gpt-5-mini"
- ```
-
-2. **Test memory extraction** with a conversation:
- ```bash
- # The health endpoint includes LLM provider info
- curl http://localhost:8000/health
-
- # Response should include: "llm_provider": "openai"
- ```
-
-3. **Monitor memory processing**:
- ```bash
- # After a conversation ends, check for successful processing
- docker logs advanced-backend | grep "memory processing"
- ```
-
-If you see errors about missing API keys or models, verify your `.env` file has:
-```bash
-LLM_PROVIDER=openai
-OPENAI_API_KEY=sk-your-actual-api-key-here
-OPENAI_MODEL=gpt-4o-mini
-```
-
-### Quality Control Settings
-```yaml
-quality_control:
- min_conversation_length: 50 # Skip very short conversations
- max_conversation_length: 50000 # Skip extremely long conversations
- skip_low_content: true # Skip conversations with mostly filler words
- min_content_ratio: 0.3 # Minimum meaningful content ratio
- skip_patterns: # Regex patterns to skip
- - "^(um|uh|hmm|yeah|ok|okay)\\s*$"
- - "^test\\s*$"
- - "^testing\\s*$"
-```
-
-### Processing & Performance
-```yaml
-processing:
- parallel_processing: true # Enable concurrent processing
- max_concurrent_tasks: 3 # Limit concurrent LLM requests
- processing_timeout: 300 # Timeout for memory extraction (seconds)
- retry_failed: true # Retry failed extractions
- max_retries: 2 # Maximum retry attempts
- retry_delay: 5 # Delay between retries (seconds)
-```
-
-### Debug & Monitoring
-```yaml
-debug:
- enabled: true
- db_path: "/app/debug/memory_debug.db"
- log_level: "INFO" # DEBUG, INFO, WARNING, ERROR
- log_full_conversations: false # Privacy consideration
- log_extracted_memories: true # Log successful extractions
-```
-
-### Configuration Validation
-The system validates configuration on startup and provides detailed error messages for invalid settings. Use the debug API to verify your configuration:
-
-```bash
-# Check current configuration
-curl -H "Authorization: Bearer $ADMIN_TOKEN" \
- http://localhost:8000/api/debug/memory/config
-```
-
-### API Endpoints for Debugging
-- `GET /api/debug/memory/stats` - Processing statistics
-- `GET /api/debug/memory/sessions` - Recent memory sessions
-- `GET /api/debug/memory/session/{audio_uuid}` - Detailed session info
-- `GET /api/debug/memory/config` - Current configuration
-- `GET /api/debug/memory/pipeline/{audio_uuid}` - Pipeline trace
-
-**Implementation**: See `src/advanced_omi_backend/routers/modules/system_routes.py` for debug endpoints and system utilities.
-
-## Next Steps
-
-- **Configure Google OAuth** for easy user login
-- **Set up Ollama** for local memory processing
-- **Deploy ASR service** for self-hosted transcription
-- **Connect audio clients** using the WebSocket API
-- **Explore the dashboard** to manage conversations and users
-- **Review the user data architecture** for understanding data organization
-- **Customize memory extraction** by editing the `memory` section in `config/config.yml`
-- **Monitor processing performance** using debug API endpoints
diff --git a/backends/advanced/SETUP_SCRIPTS.md b/backends/advanced/SETUP_SCRIPTS.md
deleted file mode 100644
index b45c8910..00000000
--- a/backends/advanced/SETUP_SCRIPTS.md
+++ /dev/null
@@ -1,160 +0,0 @@
-# Setup Scripts Guide
-
-This document explains the different setup scripts available in Friend-Lite and when to use each one.
-
-## Script Overview
-
-| Script | Purpose | When to Use |
-|--------|---------|-------------|
-| `init.py` | **Main interactive setup wizard** | **Recommended for all users** - First time setup with guided configuration (located at repo root). Memory now configured in `config/config.yml`. |
-| `setup-https.sh` | HTTPS certificate generation | **Optional** - When you need secure connections for microphone access |
-
-## Main Setup Script: `init.py`
-
-**Purpose**: Interactive wizard that configures all services with guided prompts.
-
-### What it does:
-- ✅ **Authentication Setup**: Admin email/password with secure key generation
-- ✅ **Transcription Provider Selection**: Choose between Deepgram, Mistral, or Offline (Parakeet)
-- ✅ **LLM Provider Configuration**: Choose between OpenAI (recommended) or Ollama
-- ✅ **Memory Provider Setup**: Choose between Friend-Lite Native or OpenMemory MCP
-- ✅ **API Key Collection**: Prompts for required keys with helpful links to obtain them
-- ✅ **Optional Services**: Speaker Recognition, network configuration
-- ✅ **Configuration Validation**: Creates complete .env with all settings
-
-### Usage:
-```bash
-# From repository root
-python backends/advanced/init.py
-```
-
-### Example Flow:
-```
-🚀 Friend-Lite Interactive Setup
-===============================================
-
-► Authentication Setup
-----------------------
-Admin email [admin@example.com]: john@company.com
-Admin password (min 8 chars): ********
-✅ Admin account configured
-
-► Speech-to-Text Configuration
--------------------------------
-Choose your transcription provider:
- 1) Deepgram (recommended - high quality, requires API key)
- 2) Mistral (Voxtral models - requires API key)
- 3) Offline (Parakeet ASR - requires GPU, runs locally)
- 4) None (skip transcription setup)
-Enter choice (1-4) [1]: 1
-
-Get your API key from: https://console.deepgram.com/
-Deepgram API key: dg_xxxxxxxxxxxxx
-✅ Deepgram configured
-
-► LLM Provider Configuration
-----------------------------
-Choose your LLM provider for memory extraction:
- 1) OpenAI (GPT-4, GPT-3.5 - requires API key)
- 2) Ollama (local models - requires Ollama server)
- 3) Skip (no memory extraction)
-Enter choice (1-3) [1]: 1
-
-Get your API key from: https://platform.openai.com/api-keys
-OpenAI API key: sk-xxxxxxxxxxxxx
-OpenAI model [gpt-4o-mini]: gpt-4o-mini
-✅ OpenAI configured
-
-...continues through all configuration sections...
-
-► Configuration Summary
------------------------
-✅ Admin Account: john@company.com
-✅ Transcription: deepgram
-✅ LLM Provider: openai
-✅ Memory Provider: friend_lite
-✅ Backend URL: http://localhost:8000
-✅ Dashboard URL: http://localhost:5173
-
-► Next Steps
-------------
-1. Start the main services:
- docker compose up --build -d
-
-2. Access the dashboard:
- http://localhost:5173
-
-Setup complete! 🎉
-```
-
-## HTTPS Setup Script: `setup-https.sh`
-
-**Purpose**: Generate SSL certificates and configure nginx for secure HTTPS access.
-
-### When needed:
-- **Microphone access** from browsers (HTTPS required)
-- **Remote access** via Tailscale or network
-- **Production deployments** requiring secure connections
-
-### Usage:
-```bash
-cd backends/advanced
-./setup-https.sh 100.83.66.30 # Your Tailscale or network IP
-```
-
-### What it does:
-- Generates self-signed SSL certificates for your IP
-- Configures nginx proxy for HTTPS access
-- Configures nginx for automatic HTTPS access
-- Provides HTTPS URLs for dashboard access
-
-### After HTTPS setup:
-```bash
-# Start services with HTTPS
-docker compose up --build -d
-
-# Access via HTTPS
-https://localhost/
-https://100.83.66.30/ # Your configured IP
-```
-
-
-## Recommended Setup Flow
-
-### New Users (Recommended):
-1. **Run main setup**: `python backends/advanced/init.py`
-2. **Start services**: `docker compose up --build -d`
-3. **Optional HTTPS**: `./setup-https.sh your-ip` (if needed)
-
-### Manual Configuration (Advanced):
-1. **Copy template**: `cp .env.template .env`
-2. **Edit manually**: Configure all providers and keys
-3. **Start services**: `docker compose up --build -d`
-
-## Script Locations
-
-Setup scripts are located as follows:
-```
-. # Project root
-โโโ init.py # Main interactive setup wizard (repo root)
-โโโ backends/advanced/
- โโโ setup-https.sh # HTTPS certificate generation
- โโโ .env.template # Environment template
- โโโ docker-compose.yml
-```
-
-## Getting Help
-
-- **Setup Issues**: See `Docs/quickstart.md` for detailed documentation
-- **Configuration**: See `MEMORY_PROVIDERS.md` for provider comparisons
-- **Troubleshooting**: Check `CLAUDE.md` for common issues
-- **HTTPS Problems**: Ensure your IP is accessible and not behind firewall
-
-## Key Benefits of New Setup
-
-✅ **No more guessing**: Interactive prompts guide you through every choice
-✅ **API key validation**: Links provided to obtain required keys
-✅ **Provider selection**: Choose best services for your needs
-✅ **Complete configuration**: Creates working .env with all settings
-✅ **Next steps guidance**: Clear instructions for starting services
-✅ **No manual editing**: Reduces errors from manual .env editing
diff --git a/config/README.md b/config/README.md
index e3a5cf3c..e4f3cf36 100644
--- a/config/README.md
+++ b/config/README.md
@@ -20,6 +20,9 @@ This directory contains Chronicle's centralized configuration files.
```bash
# Option 1: Run the interactive wizard (recommended)
+./wizard.sh
+
+# Or use direct command:
uv run --with-requirements setup-requirements.txt python wizard.py
# Option 2: Manual setup
@@ -102,5 +105,5 @@ The setup wizard automatically backs up `config.yml` before making changes:
For detailed configuration guides, see:
- `/Docs/memory-configuration-guide.md` - Memory settings
-- `/backends/advanced/Docs/quickstart.md` - Setup guide
-- `/CLAUDE.md` - Project overview
+- `/quickstart.md` - Setup guide
+- `/CLAUDE.md` - Project overview and technical reference
diff --git a/quickstart.md b/quickstart.md
index 0608ada9..86d4851b 100644
--- a/quickstart.md
+++ b/quickstart.md
@@ -147,9 +147,15 @@ If you choose Mycelia as your memory provider during setup wizard, the wizard wi
**Run the setup wizard:**
```bash
+# Using convenience script (recommended)
+./wizard.sh
+
+# Or use direct command:
uv run --with-requirements setup-requirements.txt python wizard.py
```
+**Note**: Convenience scripts (`./wizard.sh`, `./start.sh`, `./restart.sh`, `./stop.sh`, `./status.sh`) are wrappers around `wizard.py` and `services.py` that simplify the longer `uv run` commands.
+
### What the Setup Wizard Will Ask You
The wizard will ask questions - here's what to answer:
@@ -289,9 +295,14 @@ Before connecting your phone, make sure everything works:
### Service Issues
**General Service Management:**
-- **Services not responding**: Try restarting with `./restart.sh` or `uv run --with-requirements setup-requirements.txt python services.py restart --all`
-- **Check service status**: Use `uv run --with-requirements setup-requirements.txt python services.py status`
-- **Stop all services**: Use `uv run --with-requirements setup-requirements.txt python services.py stop --all`
+- **Services not responding**: Try restarting with `./restart.sh`
+- **Check service status**: Use `./status.sh`
+- **Stop all services**: Use `./stop.sh`
+
+*Full commands (what the convenience scripts wrap):*
+- Restart: `uv run --with-requirements setup-requirements.txt python services.py restart --all`
+- Status: `uv run --with-requirements setup-requirements.txt python services.py status`
+- Stop: `uv run --with-requirements setup-requirements.txt python services.py stop --all`
**Cloud Services (Deepgram/OpenAI):**
- **Transcription not working**: Check Deepgram API key is correct
From 26fdd4c8ed6a393d9716d10644454d1df9e5af92 Mon Sep 17 00:00:00 2001
From: Ankush Malaker <43288948+AnkushMalaker@users.noreply.github.com>
Date: Sat, 10 Jan 2026 09:33:29 +0530
Subject: [PATCH 2/5] Update setup instructions and enhance service management
scripts
- Replaced direct command instructions with convenience scripts (`./wizard.sh` and `./start.sh`) for easier setup and service management.
- Added detailed usage of convenience scripts for checking service status, restarting, and stopping services.
- Clarified the distinction between convenience scripts and direct command usage for improved user guidance.
---
Docs/features.md | 8 ++++----
Docs/init-system.md | 41 ++++++++++++++++++++++++++++------------
Docs/ports-and-access.md | 34 ++++++++++++++++++++++++---------
3 files changed, 58 insertions(+), 25 deletions(-)
diff --git a/Docs/features.md b/Docs/features.md
index 57e3413f..0332c6ee 100644
--- a/Docs/features.md
+++ b/Docs/features.md
@@ -171,8 +171,8 @@ Backends and ASR services use standardized audio streaming:
### Single Machine (Recommended for beginners)
1. **Clone the repository**
-2. **Run interactive setup**: `uv run --with-requirements setup-requirements.txt python init.py`
-3. **Start all services**: `python services.py start --all --build`
+2. **Run interactive setup**: `./wizard.sh`
+3. **Start all services**: `./start.sh`
4. **Access WebUI**: `http://localhost:5173` for the React web dashboard
### Distributed Setup (Advanced users with multiple machines)
@@ -215,8 +215,8 @@ Backends and ASR services use standardized audio streaming:
### For Production Use
1. Use **Advanced Backend** for full features
-2. Run the orchestrated setup: `uv run --with-requirements setup-requirements.txt python init.py`
-3. Start all services: `python services.py start --all --build`
+2. Run the orchestrated setup: `./wizard.sh`
+3. Start all services: `./start.sh`
4. Access the Web UI at http://localhost:5173 for conversation management
### For OMI Users
diff --git a/Docs/init-system.md b/Docs/init-system.md
index 3df6316c..14d7cb3f 100644
--- a/Docs/init-system.md
+++ b/Docs/init-system.md
@@ -38,7 +38,10 @@ The root orchestrator handles service selection and delegates configuration to i
Set up multiple services together with automatic URL coordination:
```bash
-# From project root
+# From project root (using convenience script)
+./wizard.sh
+
+# Or use direct command:
uv run --with-requirements setup-requirements.txt python wizard.py
```
@@ -136,7 +139,28 @@ Services use `host.docker.internal` for inter-container communication:
Chronicle now separates **configuration** from **service lifecycle management**:
### Unified Service Management
-Use the `services.py` script for all service operations:
+
+**Convenience Scripts (Recommended):**
+```bash
+# Start all configured services
+./start.sh
+
+# Check service status
+./status.sh
+
+# Restart all services
+./restart.sh
+
+# Stop all services
+./stop.sh
+```
+
+**Note**: Convenience scripts wrap the longer `uv run --with-requirements setup-requirements.txt python` commands for ease of use.
+
+
+Full commands (click to expand)
+
+Use the `services.py` script directly for more control:
```bash
# Start all configured services
@@ -161,19 +185,12 @@ uv run --with-requirements setup-requirements.txt python services.py stop --all
uv run --with-requirements setup-requirements.txt python services.py stop asr-services openmemory-mcp
```
-**Convenience Scripts:**
-```bash
-# Quick start (from project root)
-./start.sh
-
-# Quick restart (from project root)
-./restart.sh
-```
+
**Important Notes:**
- **Restart** restarts containers without rebuilding - use for configuration changes (.env updates)
-- **For code changes**, use `stop` + `start --build` to rebuild images
-- Example: `uv run --with-requirements setup-requirements.txt python services.py stop --all && uv run --with-requirements setup-requirements.txt python services.py start --all --build`
+- **For code changes**, use `./stop.sh` then `./start.sh` to rebuild images
+- Convenience scripts handle common operations; use direct commands for specific service selection
### Manual Service Management
You can also manage services individually:
diff --git a/Docs/ports-and-access.md b/Docs/ports-and-access.md
index 6e7a095e..00f5ee64 100644
--- a/Docs/ports-and-access.md
+++ b/Docs/ports-and-access.md
@@ -7,11 +7,11 @@
git clone
cd chronicle
-# Configure all services
-uv run --with-requirements setup-requirements.txt python init.py
+# Configure all services (using convenience script)
+./wizard.sh
-# Start all configured services
-uv run --with-requirements setup-requirements.txt python services.py start --all --build
+# Start all configured services
+./start.sh
```
### 2. Service Access Points
@@ -91,6 +91,26 @@ REACT_UI_HTTPS=true
## Service Management Commands
+**Convenience Scripts (Recommended):**
+```bash
+# Check what's running
+./status.sh
+
+# Start all configured services
+./start.sh
+
+# Restart all services
+./restart.sh
+
+# Stop all services
+./stop.sh
+```
+
+**Note**: Convenience scripts wrap the longer `uv run --with-requirements setup-requirements.txt python` commands for ease of use.
+
+
+Full commands (click to expand)
+
```bash
# Check what's running
uv run --with-requirements setup-requirements.txt python services.py status
@@ -111,11 +131,7 @@ uv run --with-requirements setup-requirements.txt python services.py restart bac
uv run --with-requirements setup-requirements.txt python services.py stop --all
```
-**Convenience Scripts:**
-```bash
-./start.sh # Quick start all configured services
-./restart.sh # Quick restart all configured services
-```
+
**Important:** Use `restart` for configuration changes (.env updates). For code changes, use `stop` + `start --build` to rebuild images.
From d3807f72d3ce5b17b64feeda1b81d8ed31fb3009 Mon Sep 17 00:00:00 2001
From: Ankush Malaker <43288948+AnkushMalaker@users.noreply.github.com>
Date: Sat, 10 Jan 2026 11:45:12 +0530
Subject: [PATCH 3/5] Update speaker recognition models and documentation
- Changed the speaker diarization model from `pyannote/speaker-diarization-3.1` to `pyannote/speaker-diarization-community-1` across multiple files for consistency.
- Updated README files to reflect the new model and its usage instructions, ensuring users have the correct links and information for setup.
- Enhanced clarity in configuration settings related to speaker recognition.
---
backends/advanced/Docs/README_speaker_enrollment.md | 4 ++--
extras/speaker-recognition/README.md | 5 ++---
extras/speaker-recognition/scripts/download-pyannote.py | 2 +-
.../src/simple_speaker_recognition/core/audio_backend.py | 2 +-
4 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/backends/advanced/Docs/README_speaker_enrollment.md b/backends/advanced/Docs/README_speaker_enrollment.md
index 1aec9706..6f705d67 100644
--- a/backends/advanced/Docs/README_speaker_enrollment.md
+++ b/backends/advanced/Docs/README_speaker_enrollment.md
@@ -175,9 +175,9 @@ python enroll_speaker.py --identify "audio_chunk_test_recognition_67890.wav"
Edit `speaker_recognition/speaker_recognition.py` to adjust:
- `SIMILARITY_THRESHOLD = 0.85`: Cosine similarity threshold for identification
-- `device`: CUDA device for GPU acceleration
+- `device`: CUDA device for GPU acceleration
- Embedding model: Currently uses `speechbrain/spkrec-ecapa-voxceleb`
-- Diarization model: Currently uses `pyannote/speaker-diarization-3.1`
+- Diarization model: Currently uses `pyannote/speaker-diarization-community-1`
### Audio Settings
diff --git a/extras/speaker-recognition/README.md b/extras/speaker-recognition/README.md
index 4bfbc810..e3d114db 100644
--- a/extras/speaker-recognition/README.md
+++ b/extras/speaker-recognition/README.md
@@ -15,9 +15,8 @@ cp .env.template .env
# Edit .env and add your Hugging Face token
```
Get your HF token from https://huggingface.co/settings/tokens
-Accept the terms and conditions for
-https://huggingface.co/pyannote/speaker-diarization-3.1
-https://huggingface.co/pyannote/segmentation-3.0
+Accept the terms and conditions for
+https://huggingface.co/pyannote/speaker-diarization-community-1
### 2. Choose CPU or GPU setup
diff --git a/extras/speaker-recognition/scripts/download-pyannote.py b/extras/speaker-recognition/scripts/download-pyannote.py
index b2c51394..8d7cfcc6 100755
--- a/extras/speaker-recognition/scripts/download-pyannote.py
+++ b/extras/speaker-recognition/scripts/download-pyannote.py
@@ -33,7 +33,7 @@ def download_models():
# Import and download models
logger.info("Downloading speaker diarization model...")
- Pipeline.from_pretrained('pyannote/speaker-diarization-3.1', token=hf_token)
+ Pipeline.from_pretrained('pyannote/speaker-diarization-community-1', token=hf_token)
logger.info("Downloading speaker embedding model...")
PretrainedSpeakerEmbedding('pyannote/wespeaker-voxceleb-resnet34-LM', token=hf_token)
diff --git a/extras/speaker-recognition/src/simple_speaker_recognition/core/audio_backend.py b/extras/speaker-recognition/src/simple_speaker_recognition/core/audio_backend.py
index 040c8ac8..ad286c25 100644
--- a/extras/speaker-recognition/src/simple_speaker_recognition/core/audio_backend.py
+++ b/extras/speaker-recognition/src/simple_speaker_recognition/core/audio_backend.py
@@ -20,7 +20,7 @@ class AudioBackend:
def __init__(self, hf_token: str, device: torch.device):
self.device = device
self.diar = Pipeline.from_pretrained(
- "pyannote/speaker-diarization-3.1", token=hf_token
+ "pyannote/speaker-diarization-community-1", token=hf_token
).to(device)
# Configure pipeline with proper segmentation parameters to reduce over-segmentation
From f308073b8d01468f45b12769b70c3d43bbf67c85 Mon Sep 17 00:00:00 2001
From: Ankush Malaker <43288948+AnkushMalaker@users.noreply.github.com>
Date: Sat, 10 Jan 2026 12:28:16 +0530
Subject: [PATCH 4/5] Enhance transcription provider selection and update HTTPS
documentation
- Added a new function in `wizard.py` to prompt users for their preferred transcription provider, allowing options for Deepgram, Parakeet ASR, or none.
- Updated the service setup logic to automatically include ASR services if Parakeet is selected.
- Introduced a new documentation file on SSL certificates and HTTPS setup, detailing the importance of HTTPS for secure connections and microphone access.
- Removed outdated HTTPS setup documentation from `backends/advanced/Docs/HTTPS_SETUP.md` to streamline resources.
---
Docs/ssl-certificates.md | 73 ++++++++
backends/advanced/Docs/HTTPS_SETUP.md | 255 --------------------------
backends/advanced/init.py | 44 ++++-
extras/asr-services/init.py | 9 +
extras/speaker-recognition/init.py | 9 +
wizard.py | 57 +++++-
6 files changed, 176 insertions(+), 271 deletions(-)
create mode 100644 Docs/ssl-certificates.md
delete mode 100644 backends/advanced/Docs/HTTPS_SETUP.md
diff --git a/Docs/ssl-certificates.md b/Docs/ssl-certificates.md
new file mode 100644
index 00000000..1980c833
--- /dev/null
+++ b/Docs/ssl-certificates.md
@@ -0,0 +1,73 @@
+# SSL Certificates & HTTPS
+
+Chronicle uses automatic HTTPS setup for secure microphone access and remote connections.
+
+## Why HTTPS is Needed
+
+Modern browsers require HTTPS for:
+- **Microphone access** over network (not localhost)
+- **Secure WebSocket connections** (WSS)
+- **Remote access** via Tailscale/VPN
+- **Production deployments**
+
+## SSL Implementation
+
+### Advanced Backend → Caddy
+
+The main backend uses **Caddy** for automatic HTTPS:
+
+**Configuration**: `backends/advanced/Caddyfile`
+**Activation**: Caddy starts when using `--profile https` or when wizard enables HTTPS
+**Certificate**: Self-signed for local/Tailscale IPs, automatic Let's Encrypt for domains
+
+**Ports**:
+- `443` - HTTPS (main access)
+- `80` - HTTP (redirects to HTTPS)
+
+**Access**: `https://localhost` or `https://your-tailscale-ip`
+
+### Speaker Recognition → nginx
+
+The speaker recognition service uses **nginx** for HTTPS:
+
+**Configuration**: `extras/speaker-recognition/nginx.conf`
+**Certificate**: Self-signed via `ssl/generate-ssl.sh`
+
+**Ports**:
+- `8444` - HTTPS
+- `8081` - HTTP (redirects to HTTPS)
+
+**Access**: `https://localhost:8444`
+
+## Setup via Wizard
+
+When you run `./wizard.sh`, the setup wizard:
+1. Asks if you want to enable HTTPS
+2. Prompts for your Tailscale IP or domain
+3. Generates SSL certificates automatically
+4. Configures Caddy/nginx as needed
+5. Updates CORS settings for HTTPS origins
+
+**No manual setup required** - the wizard handles everything.
+
+## Browser Certificate Warnings
+
+Since we use self-signed certificates for local/Tailscale IPs, browsers will show security warnings:
+
+1. Click "Advanced"
+2. Click "Proceed to localhost (unsafe)" or similar
+3. Microphone access will now work
+
+For production with real domains, Caddy automatically obtains valid Let's Encrypt certificates.
+
+## Troubleshooting
+
+**HTTPS not working**:
+- Check Caddy/nginx containers are running: `docker compose ps`
+- Verify certificates exist: `ls backends/advanced/ssl/` or `ls extras/speaker-recognition/ssl/`
+- Check you're using `https://` not `http://`
+
+**Microphone not accessible**:
+- Ensure you're accessing via HTTPS (not HTTP)
+- Accept browser certificate warning
+- Verify you're not using `localhost` from remote device (use Tailscale IP instead)
diff --git a/backends/advanced/Docs/HTTPS_SETUP.md b/backends/advanced/Docs/HTTPS_SETUP.md
deleted file mode 100644
index 54852a20..00000000
--- a/backends/advanced/Docs/HTTPS_SETUP.md
+++ /dev/null
@@ -1,255 +0,0 @@
-# HTTPS Setup for Chronicle Advanced Backend
-
-This guide explains how to set up HTTPS/SSL access for Chronicle Advanced Backend, enabling secure microphone access and network connectivity.
-
-## Why HTTPS is Needed
-
-Modern browsers require HTTPS for:
-- **Microphone access** over network connections (not localhost)
-- **Secure WebSocket connections** (WSS)
-- **Tailscale/VPN access** with audio features
-- **Production deployments**
-
-## Quick Setup
-
-### 1. Initialize HTTPS with Your IP
-
-Run the initialization script with your Tailscale or network IP:
-
-```bash
-cd backends/advanced
-./init.sh 100.83.66.30 # Replace with your actual IP
-```
-
-This script will:
-- Generate SSL certificates for localhost and your IP
-- Create nginx configuration files
-- Update CORS settings for HTTPS origins
-
-### 2. Start with HTTPS Proxy
-
-```bash
-# HTTPS with nginx proxy (REQUIRED for network microphone access)
-docker compose up --build -d
-
-# HTTP only (no nginx, localhost microphone access only)
-docker compose up --build -d
-```
-
-**NOTE**: The nginx service now starts automatically with the standard docker compose command, providing immediate HTTPS access when SSL certificates are configured.
-
-### 3. Access the Services
-
-#### Chronicle Advanced Backend (Primary - ports 80/443)
-- **HTTPS:** https://localhost/ or https://your-ip/ (accept SSL certificate)
-- **HTTP:** http://localhost/ (redirects to HTTPS)
-- **Features:** Dashboard, Live Recording, Conversations, Memories
-
-#### Speaker Recognition Service (Secondary - ports 8081/8444)
-- **HTTPS:** https://localhost:8444/ or https://your-ip:8444/ (accept SSL certificate)
-- **HTTP:** http://localhost:8081/ (redirects to HTTPS)
-- **Features:** Speaker enrollment, audio analysis, live inference
-
-## Port Allocation
-
-### Advanced Backend (Primary Service)
-- **Port 80:** HTTP (redirects to HTTPS)
-- **Port 443:** HTTPS with nginx proxy
-- **Port 5173:** Direct Vite dev server (development only)
-- **Port 8000:** Direct backend API (development only)
-
-### Speaker Recognition (Secondary Service)
-- **Port 8081:** HTTP (redirects to HTTPS)
-- **Port 8444:** HTTPS with nginx proxy
-- **Port 5175:** Direct React dev server (internal)
-- **Port 8085:** Direct API service (internal)
-
-## Manual Setup
-
-### SSL Certificate Generation
-
-If you need to regenerate certificates:
-
-```bash
-cd ssl
-./generate-ssl.sh 100.83.66.30 # Your IP address
-```
-
-### Environment Configuration
-
-Update your `.env` file to include HTTPS origins:
-
-```bash
-CORS_ORIGINS=https://localhost,https://127.0.0.1,https://100.83.66.30
-```
-
-## Docker Compose Profiles
-
-### With HTTPS Configuration (Network Access)
-**Services started:**
-- ✅ nginx (ports 443/80) - SSL termination and proxy
-- ✅ webui (port 5173, internal) - Vite dev server
-- ✅ chronicle-backend (port 8000, internal)
-- ✅ mongo, qdrant (databases)
-
-**Access:** https://localhost/ or https://your-ip/
-**Microphone:** Works over network with HTTPS
-
-### Without HTTPS Configuration (Default - Localhost Only)
-**Services started:**
-- ✅ nginx (ports 443/80) - but without SSL certificates
-- ✅ webui (port 5173, direct access) - Vite dev server
-- ✅ chronicle-backend (port 8000)
-- ✅ mongo, qdrant (databases)
-
-**Access:** http://localhost:5173
-**Microphone:** Only works on localhost (browser security)
-
-## Nginx Configuration
-
-The setup uses a single nginx configuration:
-
-### Single Config (`nginx.conf.template`)
-- Proxies to `webui:5173` for the Vite dev server
-- Handles WebSocket connections for audio streaming
-- SSL termination with proper headers
-- Supports Vite HMR (Hot Module Replacement) over WSS
-- Always provides development experience with hot reload
-
-## WebSocket Endpoints
-
-All WebSocket endpoints are proxied through nginx with SSL:
-
-- **`wss://your-ip/ws_pcm`** - Primary audio streaming (Wyoming protocol + PCM)
-- **`wss://your-ip/ws_omi`** - OMI device audio streaming (Wyoming protocol + Opus)
-- **`wss://your-ip/ws`** - Legacy audio streaming (Opus packets)
-
-**Note:** When accessed through HTTPS proxy, all API calls use relative URLs automatically.
-
-## Browser Certificate Trust
-
-Since we use self-signed certificates, browsers will show security warnings:
-
-### Chrome/Edge
-1. Visit https://localhost/
-2. Click "Advanced" → "Proceed to localhost (unsafe)"
-3. Or add certificate to trusted store
-
-### Firefox
-1. Visit https://localhost/
-2. Click "Advanced" → "Accept the Risk and Continue"
-
-### Safari
-1. Visit https://localhost/
-2. Click "Show Details" → "visit this website"
-
-## Troubleshooting
-
-### Certificate Issues
-
-**Problem:** "SSL certificate problem: self signed certificate"
-**Solution:**
-```bash
-# Regenerate certificates
-cd ssl
-./generate-ssl.sh your-ip
-docker compose restart nginx
-```
-
-### WebSocket Connection Fails
-
-**Problem:** WSS connection refused
-**Solution:**
-1. Check nginx is running: `docker compose ps nginx`
-2. Verify certificate: `curl -k https://localhost/health`
-3. Check logs: `docker compose logs nginx`
-
-### CORS Errors
-
-**Problem:** "Cross-Origin Request Blocked"
-**Solution:**
-1. Update CORS_ORIGINS in `.env` to include your HTTPS origin
-2. Restart backend: `docker compose restart chronicle-backend`
-
-### Microphone Access Denied
-
-**Problem:** Browser blocks microphone access
-**Solution:**
-1. Ensure you're using HTTPS (not HTTP)
-2. Accept SSL certificate warnings
-3. Grant microphone permissions when prompted
-
-## Port Reference
-
-### HTTPS Setup (Production)
-- **443** - HTTPS (nginx โ webui:80)
-- **80** - HTTP redirect to HTTPS
-
-### HTTPS Setup (Development)
-- **8443** - HTTPS (nginx-dev โ webui-dev:5173)
-- **8080** - HTTP redirect to HTTPS
-
-### Standard Setup
-- **3000** - HTTP (webui production)
-- **5173** - HTTP (webui development)
-- **8000** - HTTP (chronicle-backend)
-
-## Live Recording Feature
-
-The Live Recording feature automatically adapts to your connection:
-
-- **HTTP + localhost:** Uses `ws://localhost:8000/ws_pcm`
-- **HTTPS:** Uses `wss://your-domain/ws_pcm`
-- **Microphone access:** Requires HTTPS for network connections
-
-Access at:
-- Local: https://localhost/live-record
-- Network: https://your-ip/live-record
-
-## Security Considerations
-
-### Self-Signed Certificates
-- Only for development and local network use
-- Use proper CA certificates for production
-- Consider Let's Encrypt for public deployments
-
-### Network Security
-- HTTPS encrypts all traffic including WebSocket data
-- Nginx handles SSL termination
-- Backend services remain on internal Docker network
-
-### Browser Security
-- Modern browsers block microphone access over HTTP (except localhost)
-- WSS required for secure WebSocket connections over network
-- CORS properly configured for cross-origin requests
-
-## Production Deployment
-
-For production deployments:
-
-1. **Use proper SSL certificates** (Let's Encrypt, commercial CA)
-2. **Update nginx configuration** with your domain name
-3. **Configure DNS** to point to your server
-4. **Use production docker compose profile**:
- ```bash
- docker compose up -d
- ```
-
-## Integration with Other Services
-
-### Speaker Recognition
-If using the speaker recognition service alongside Chronicle:
-
-```bash
-# Use different HTTPS ports to avoid conflicts
-# Speaker Recognition: 443/80
-# Chronicle: 8443/8080
-docker compose up -d
-```
-
-### Tailscale Integration
-The setup is optimized for Tailscale usage:
-
-- SSL certificates include your Tailscale IP
-- CORS automatically supports 100.x.x.x IP range
-- WebSocket connections work over Tailscale network
\ No newline at end of file
diff --git a/backends/advanced/init.py b/backends/advanced/init.py
index fe04fd15..a802124e 100644
--- a/backends/advanced/init.py
+++ b/backends/advanced/init.py
@@ -7,6 +7,7 @@
import argparse
import getpass
import os
+import platform
import secrets
import shutil
import subprocess
@@ -15,7 +16,6 @@
from pathlib import Path
from typing import Any, Dict
-import yaml
from dotenv import get_key, set_key
from rich.console import Console
from rich.panel import Panel
@@ -157,13 +157,36 @@ def setup_transcription(self):
self.console.print("[blue][INFO][/blue] API keys are stored in .env")
self.console.print()
- choices = {
- "1": "Deepgram (recommended - high quality, cloud-based)",
- "2": "Offline (Parakeet ASR - requires GPU, runs locally)",
- "3": "None (skip transcription setup)"
- }
+ # Check if transcription provider was provided via command line
+ if hasattr(self.args, 'transcription_provider') and self.args.transcription_provider:
+ provider = self.args.transcription_provider
+ self.console.print(f"[green][SUCCESS][/green] Transcription provider configured via wizard: {provider}")
+
+ # Map provider to choice
+ if provider == "deepgram":
+ choice = "1"
+ elif provider == "parakeet":
+ choice = "2"
+ elif provider == "none":
+ choice = "3"
+ else:
+ choice = "1" # Default to Deepgram
+ else:
+ # Interactive prompt
+ is_macos = platform.system() == 'Darwin'
+
+ if is_macos:
+ parakeet_desc = "Offline (Parakeet ASR - CPU-based, runs locally)"
+ else:
+ parakeet_desc = "Offline (Parakeet ASR - GPU recommended, runs locally)"
+
+ choices = {
+ "1": "Deepgram (recommended - high quality, cloud-based)",
+ "2": parakeet_desc,
+ "3": "None (skip transcription setup)"
+ }
- choice = self.prompt_choice("Choose your transcription provider:", choices, "1")
+ choice = self.prompt_choice("Choose your transcription provider:", choices, "1")
if choice == "1":
self.console.print("[blue][INFO][/blue] Deepgram selected")
@@ -690,10 +713,13 @@ def run(self):
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(description="Chronicle Advanced Backend Setup")
- parser.add_argument("--speaker-service-url",
+ parser.add_argument("--speaker-service-url",
help="Speaker Recognition service URL (default: prompt user)")
- parser.add_argument("--parakeet-asr-url",
+ parser.add_argument("--parakeet-asr-url",
help="Parakeet ASR service URL (default: prompt user)")
+ parser.add_argument("--transcription-provider",
+ choices=["deepgram", "parakeet", "none"],
+ help="Transcription provider (default: prompt user)")
parser.add_argument("--enable-https", action="store_true",
help="Enable HTTPS configuration (default: prompt user)")
parser.add_argument("--server-ip",
diff --git a/extras/asr-services/init.py b/extras/asr-services/init.py
index 911c527b..d65043cf 100755
--- a/extras/asr-services/init.py
+++ b/extras/asr-services/init.py
@@ -6,6 +6,7 @@
import argparse
import os
+import platform
import shutil
import subprocess
import sys
@@ -131,10 +132,18 @@ def setup_cuda_version(self):
"""Configure PyTorch CUDA version"""
self.print_section("PyTorch CUDA Version Configuration")
+ # Detect macOS (Darwin) and auto-default to CPU
+ is_macos = platform.system() == 'Darwin'
+
# Check if provided via command line
if hasattr(self.args, 'pytorch_cuda_version') and self.args.pytorch_cuda_version:
cuda_version = self.args.pytorch_cuda_version
self.console.print(f"[green][SUCCESS][/green] PyTorch CUDA version configured from command line: {cuda_version}")
+ elif is_macos:
+ # Auto-default to CPU on macOS
+ cuda_version = "cpu"
+ self.console.print("[blue][INFO][/blue] Detected macOS - GPU acceleration not available (Apple Silicon/Intel)")
+ self.console.print("[green][SUCCESS][/green] Using CPU-only PyTorch build")
else:
# Detect system CUDA version and suggest as default
detected_cuda = self.detect_cuda_version()
diff --git a/extras/speaker-recognition/init.py b/extras/speaker-recognition/init.py
index 8267e35b..b69e04ee 100755
--- a/extras/speaker-recognition/init.py
+++ b/extras/speaker-recognition/init.py
@@ -7,6 +7,7 @@
import argparse
import getpass
import os
+import platform
import shutil
import subprocess
import sys
@@ -188,10 +189,18 @@ def setup_compute_mode(self):
"""Configure compute mode (CPU/GPU)"""
self.print_section("Compute Mode Configuration")
+ # Detect macOS (Darwin) and auto-default to CPU
+ is_macos = platform.system() == 'Darwin'
+
# Check if provided via command line
if hasattr(self.args, 'compute_mode') and self.args.compute_mode:
compute_mode = self.args.compute_mode
self.console.print(f"[green][SUCCESS][/green] Compute mode configured from command line: {compute_mode}")
+ elif is_macos:
+ # Auto-default to CPU on macOS
+ compute_mode = "cpu"
+ self.console.print("[blue][INFO][/blue] Detected macOS - GPU acceleration not available (Apple Silicon/Intel)")
+ self.console.print("[green][SUCCESS][/green] Using CPU mode")
else:
choices = {
"1": "CPU-only (works everywhere)",
diff --git a/wizard.py b/wizard.py
index a2e2b2f7..31a436cd 100755
--- a/wizard.py
+++ b/wizard.py
@@ -14,7 +14,7 @@
from dotenv import get_key
from rich import print as rprint
from rich.console import Console
-from rich.prompt import Confirm
+from rich.prompt import Confirm, Prompt
console = Console()
@@ -153,18 +153,22 @@ def cleanup_unselected_services(selected_services):
console.print(f"๐งน [dim]Backed up {service_name} configuration to {backup_file.name} (service not selected)[/dim]")
def run_service_setup(service_name, selected_services, https_enabled=False, server_ip=None,
- obsidian_enabled=False, neo4j_password=None):
+ obsidian_enabled=False, neo4j_password=None, transcription_provider='deepgram'):
"""Execute individual service setup script"""
if service_name == 'advanced':
service = SERVICES['backend'][service_name]
-
+
# For advanced backend, pass URLs of other selected services and HTTPS config
cmd = service['cmd'].copy()
if 'speaker-recognition' in selected_services:
cmd.extend(['--speaker-service-url', 'http://speaker-service:8085'])
if 'asr-services' in selected_services:
cmd.extend(['--parakeet-asr-url', 'http://host.docker.internal:8767'])
-
+
+ # Pass transcription provider choice from wizard
+ if transcription_provider:
+ cmd.extend(['--transcription-provider', transcription_provider])
+
# Add HTTPS configuration
if https_enabled and server_ip:
cmd.extend(['--enable-https', '--server-ip', server_ip])
@@ -331,6 +335,37 @@ def setup_config_file():
else:
console.print("โน๏ธ [blue]config/config.yml already exists, keeping existing configuration[/blue]")
+def select_transcription_provider():
+ """Ask user which transcription provider they want"""
+ console.print("\n๐ค [bold cyan]Transcription Provider[/bold cyan]")
+ console.print("Choose your speech-to-text provider:")
+ console.print()
+
+ choices = {
+ "1": "Deepgram (cloud-based, high quality, requires API key)",
+ "2": "Parakeet ASR (offline, runs locally, requires GPU)",
+ "3": "None (skip transcription setup)"
+ }
+
+ for key, desc in choices.items():
+ console.print(f" {key}) {desc}")
+ console.print()
+
+ while True:
+ try:
+ choice = Prompt.ask("Enter choice", default="1")
+ if choice in choices:
+ if choice == "1":
+ return "deepgram"
+ elif choice == "2":
+ return "parakeet"
+ elif choice == "3":
+ return "none"
+ console.print(f"[red]Invalid choice. Please select from {list(choices.keys())}[/red]")
+ except EOFError:
+ console.print("Using default: Deepgram")
+ return "deepgram"
+
def main():
"""Main orchestration logic"""
console.print("๐ [bold green]Welcome to Chronicle![/bold green]\n")
@@ -344,9 +379,17 @@ def main():
# Show what's available
show_service_status()
+ # Ask about transcription provider FIRST (determines which services are needed)
+ transcription_provider = select_transcription_provider()
+
# Service Selection
selected_services = select_services()
-
+
+ # Auto-add asr-services if Parakeet was chosen
+ if transcription_provider == "parakeet" and 'asr-services' not in selected_services:
+ console.print("[blue][INFO][/blue] Auto-adding ASR services for Parakeet transcription")
+ selected_services.append('asr-services')
+
if not selected_services:
console.print("\n[yellow]No services selected. Exiting.[/yellow]")
return
@@ -442,10 +485,10 @@ def main():
success_count = 0
failed_services = []
-
+
for service in selected_services:
if run_service_setup(service, selected_services, https_enabled, server_ip,
- obsidian_enabled, neo4j_password):
+ obsidian_enabled, neo4j_password, transcription_provider):
success_count += 1
else:
failed_services.append(service)
From 2f7e46eabd9868a30b420d9e04bb2bdf369afc1b Mon Sep 17 00:00:00 2001
From: Ankush Malaker <43288948+AnkushMalaker@users.noreply.github.com>
Date: Sat, 10 Jan 2026 12:31:58 +0530
Subject: [PATCH 5/5] Remove HTTPS setup scripts and related configurations
- Deleted `init-https.sh`, `setup-https.sh`, and `nginx.conf.template` as part of the transition to a new HTTPS setup process.
- Updated `README.md` to reflect the new automatic HTTPS configuration via the setup wizard.
- Adjusted `init.py` to remove references to the deleted HTTPS scripts and ensure proper handling of Caddyfile generation for SSL.
- Streamlined documentation to clarify the new approach for HTTPS setup and configuration management.
---
backends/advanced/Docs/UI.md | 2 +-
backends/advanced/README.md | 27 +--
backends/advanced/init-https.sh | 96 --------
backends/advanced/init.py | 27 +--
backends/advanced/nginx.conf.template | 221 -----------------
backends/advanced/setup-https.sh | 336 --------------------------
6 files changed, 9 insertions(+), 700 deletions(-)
delete mode 100755 backends/advanced/init-https.sh
delete mode 100644 backends/advanced/nginx.conf.template
delete mode 100755 backends/advanced/setup-https.sh
diff --git a/backends/advanced/Docs/UI.md b/backends/advanced/Docs/UI.md
index 6447a2a0..02bdf943 100644
--- a/backends/advanced/Docs/UI.md
+++ b/backends/advanced/Docs/UI.md
@@ -10,7 +10,7 @@ The Chronicle web dashboard provides a comprehensive interface for managing conv
### Dashboard URL
- **HTTP**: `http://localhost:5173` (development) or `http://localhost:3000` (production)
-- **HTTPS**: `https://localhost/` (with HTTPS configuration via `init-https.sh`)
+- **HTTPS**: `https://localhost/` (automatic via setup wizard - see [Docs/ssl-certificates.md](../../../Docs/ssl-certificates.md))
- **Live Recording**: Available at `/live-record` page for real-time audio streaming
- **Network Access**: Configure `BACKEND_PUBLIC_URL` for remote device access via Tailscale/LAN
diff --git a/backends/advanced/README.md b/backends/advanced/README.md
index d493241c..0f5a4490 100644
--- a/backends/advanced/README.md
+++ b/backends/advanced/README.md
@@ -34,15 +34,10 @@ Modern React-based web dashboard located in `./webui/` with:
- **Transcription Provider**: Choose between Deepgram, Mistral, or Offline (Parakeet)
- **LLM Provider**: Choose between OpenAI (recommended) or Ollama for memory extraction
- **Memory Provider**: Choose between Friend-Lite Native or OpenMemory MCP
+- **HTTPS Configuration**: Optional SSL setup for microphone access (uses Caddy)
- **Optional Services**: Speaker Recognition, network configuration
- **API Keys**: Prompts for all required keys with helpful links
-**HTTPS Setup (Optional):**
-```bash
-# For microphone access and secure connections
-./setup-https.sh your-tailscale-ip
-```
-
#### 2. Start Services
**HTTP Mode (Default - No SSL required):**
@@ -55,25 +50,13 @@ docker compose up --build -d
**HTTPS Mode (For network access and microphone features):**
```bash
-# Start with nginx SSL proxy - requires SSL setup first (see below)
-docker compose up --build -d
+# Start with HTTPS (requires Caddy configuration from wizard)
+docker compose --profile https up --build -d
```
- **Web Dashboard**: https://localhost/ or https://your-ip/
- **Backend API**: https://localhost/api/ or https://your-ip/api/
-#### 3. HTTPS Setup (Optional - For Network Access & Microphone Features)
-
-For network access and microphone features, HTTPS can be configured during initialization or separately:
-
-```bash
-# If not done during init.sh, run HTTPS setup
-./init-https.sh 100.83.66.30 # Replace with your IP
-
-# Start with HTTPS proxy
-docker compose up --build -d
-```
-
-#### Access URLs
+#### 3. Access URLs
**Friend-Lite Advanced Backend (Primary - ports 80/443):**
- **HTTPS Dashboard**: https://localhost/ or https://your-ip/
@@ -91,7 +74,7 @@ docker compose up --build -d
- ๐ **Network Access** from other devices via Tailscale/LAN
- ๐ **Automatic protocol detection** - Frontend auto-configures for HTTP/HTTPS
-See [Docs/HTTPS_SETUP.md](Docs/HTTPS_SETUP.md) for detailed configuration.
+See [Docs/ssl-certificates.md](../../Docs/ssl-certificates.md) for how SSL is configured.
## Testing
diff --git a/backends/advanced/init-https.sh b/backends/advanced/init-https.sh
deleted file mode 100755
index d1c1b5af..00000000
--- a/backends/advanced/init-https.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-set -e
-
-# Initialize Chronicle Advanced Backend with HTTPS proxy
-# Usage: ./init.sh
-
-if [ $# -ne 1 ]; then
- echo "Usage: $0 "
- echo "Example: $0 100.83.66.30"
- echo ""
- echo "This script will:"
- echo " 1. Generate SSL certificates for localhost and your Tailscale IP"
- echo " 2. Create nginx.conf from template"
- echo " 3. Set up HTTPS proxy for the backend"
- exit 1
-fi
-
-TAILSCALE_IP="$1"
-
-# Validate IP format (basic check)
-if ! echo "$TAILSCALE_IP" | grep -E '^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$' > /dev/null; then
- echo "Error: Invalid IP format. Expected format: xxx.xxx.xxx.xxx"
- exit 1
-fi
-
-echo "๐ Initializing Chronicle Advanced Backend with Tailscale IP: $TAILSCALE_IP"
-echo ""
-
-# Check if nginx.conf.template exists
-if [ ! -f "nginx.conf.template" ]; then
- echo "โ Error: nginx.conf.template not found"
- echo " Make sure you're running this from the backends/advanced directory"
- exit 1
-fi
-
-# Generate SSL certificates
-echo "๐ Step 1: Generating SSL certificates..."
-if [ -f "ssl/generate-ssl.sh" ]; then
- ./ssl/generate-ssl.sh "$TAILSCALE_IP"
- echo "โ
SSL certificates generated"
-else
- echo "โ Error: ssl/generate-ssl.sh not found"
- exit 1
-fi
-
-echo ""
-
-# Create nginx.conf from template
-echo "๐ Step 2: Creating nginx configuration..."
-sed "s/TAILSCALE_IP/$TAILSCALE_IP/g" nginx.conf.template > nginx.conf
-echo "โ
nginx.conf created with IP: $TAILSCALE_IP"
-
-echo ""
-
-# Update .env file with HTTPS CORS origins
-echo "๐ Step 3: Updating CORS origins..."
-if [ -f ".env" ]; then
- # Update existing .env file
- if grep -q "CORS_ORIGINS" .env; then
- # Update existing CORS_ORIGINS line
- sed -i "s/CORS_ORIGINS=.*/CORS_ORIGINS=https:\/\/localhost,https:\/\/localhost:443,https:\/\/127.0.0.1,https:\/\/$TAILSCALE_IP/" .env
- else
- # Add CORS_ORIGINS line
- echo "CORS_ORIGINS=https://localhost,https://localhost:443,https://127.0.0.1,https://$TAILSCALE_IP" >> .env
- fi
- echo "โ
Updated CORS origins in .env file"
-else
- echo "โ ๏ธ No .env file found. You may need to:"
- echo " 1. Copy .env.template to .env"
- echo " 2. Add: CORS_ORIGINS=https://localhost,https://localhost:443,https://127.0.0.1,https://$TAILSCALE_IP"
-fi
-
-echo ""
-echo "๐ Step 4: Memory configuration now lives in config.yml (memory section)"
-
-echo ""
-echo "๐ Initialization complete!"
-echo ""
-echo "Next steps:"
-echo " 1. Start the services:"
-echo " docker compose up --build -d"
-echo ""
-echo " 2. Access the dashboard:"
-echo " ๐ https://localhost/ (accept SSL certificate)"
-echo " ๐ https://$TAILSCALE_IP/"
-echo ""
-echo " 3. Test live recording:"
-echo " ๐ฑ Navigate to Live Record page"
-echo " ๐ค Microphone access will work over HTTPS"
-echo ""
-echo "๐ง Services included:"
-echo " - Chronicle Backend: Internal (proxied through nginx)"
-echo " - Web Dashboard: https://localhost/ or https://$TAILSCALE_IP/"
-echo " - WebSocket Audio: wss://localhost/ws_pcm or wss://$TAILSCALE_IP/ws_pcm"
-echo ""
-echo "๐ For more details, see: Docs/HTTPS_SETUP.md"
diff --git a/backends/advanced/init.py b/backends/advanced/init.py
index a802124e..dddbfdcb 100644
--- a/backends/advanced/init.py
+++ b/backends/advanced/init.py
@@ -469,30 +469,7 @@ def setup_https(self):
except subprocess.CalledProcessError:
self.console.print("[yellow][WARNING][/yellow] SSL certificate generation failed")
else:
- self.console.print(f"[yellow][WARNING][/yellow] SSL script not found at {ssl_script}")
-
- # Generate nginx.conf from template
- self.console.print("[blue][INFO][/blue] Creating nginx configuration...")
- nginx_template = script_dir / "nginx.conf.template"
- if nginx_template.exists():
- try:
- with open(nginx_template, 'r') as f:
- nginx_content = f.read()
-
- # Replace TAILSCALE_IP with server_ip
- nginx_content = nginx_content.replace('TAILSCALE_IP', server_ip)
-
- with open('nginx.conf', 'w') as f:
- f.write(nginx_content)
-
- self.console.print(f"[green][SUCCESS][/green] nginx.conf created for: {server_ip}")
- self.config["HTTPS_ENABLED"] = "true"
- self.config["SERVER_IP"] = server_ip
-
- except Exception as e:
- self.console.print(f"[yellow][WARNING][/yellow] nginx.conf generation failed: {e}")
- else:
- self.console.print("[yellow][WARNING][/yellow] nginx.conf.template not found")
+            self.console.print(f"[yellow][WARNING][/yellow] SSL script not found at {ssl_script}")
# Generate Caddyfile from template
self.console.print("[blue][INFO][/blue] Creating Caddyfile configuration...")
@@ -519,6 +496,8 @@ def setup_https(self):
f.write(caddyfile_content)
self.console.print(f"[green][SUCCESS][/green] Caddyfile created for: {server_ip}")
+ self.config["HTTPS_ENABLED"] = "true"
+ self.config["SERVER_IP"] = server_ip
except Exception as e:
self.console.print(f"[red]โ ERROR: Caddyfile generation failed: {e}[/red]")
diff --git a/backends/advanced/nginx.conf.template b/backends/advanced/nginx.conf.template
deleted file mode 100644
index e5a3e025..00000000
--- a/backends/advanced/nginx.conf.template
+++ /dev/null
@@ -1,221 +0,0 @@
-worker_processes 1;
-
-events {
- worker_connections 1024;
-}
-
-http {
- # Basic settings
- sendfile on;
- tcp_nopush on;
- tcp_nodelay on;
- keepalive_timeout 65;
- types_hash_max_size 2048;
- client_max_body_size 100M;
-
- # MIME types
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
-
- # Logging
- access_log /var/log/nginx/access.log;
- error_log /var/log/nginx/error.log;
-
- # Gzip compression
- gzip on;
- gzip_vary on;
- gzip_min_length 10240;
- gzip_proxied expired no-cache no-store private auth;
- gzip_types
- text/plain
- text/css
- text/xml
- text/javascript
- application/x-javascript
- application/xml+rss
- application/javascript
- application/json;
-
- # WebSocket proxy settings
- map $http_upgrade $connection_upgrade {
- default upgrade;
- '' close;
- }
-
- # Upstream services
- upstream chronicle_backend {
- server chronicle-backend:8000;
- }
-
- upstream friend_webui {
- server webui:5173;
- }
-
- # HTTPS Server
- server {
- listen 443 ssl http2;
- server_name localhost TAILSCALE_IP;
-
- # SSL Configuration
- ssl_certificate /etc/nginx/ssl/server.crt;
- ssl_certificate_key /etc/nginx/ssl/server.key;
- ssl_protocols TLSv1.2 TLSv1.3;
- ssl_ciphers ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
- ssl_prefer_server_ciphers off;
-
- # Security headers
- add_header X-Frame-Options DENY;
- add_header X-Content-Type-Options nosniff;
- add_header X-XSS-Protection "1; mode=block";
-
- # Backend API endpoints
- location /api/ {
- proxy_pass http://chronicle_backend/api/;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_redirect off;
- }
-
- # Authentication endpoints
- location /auth/ {
- proxy_pass http://chronicle_backend/auth/;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_redirect off;
- }
-
- # Users endpoints
- location /users/ {
- proxy_pass http://chronicle_backend/users/;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_redirect off;
- }
-
- # WebSocket endpoints for audio streaming
- location /ws_pcm {
- proxy_pass http://chronicle_backend/ws_pcm;
- proxy_http_version 1.1;
- proxy_set_header Upgrade $http_upgrade;
- proxy_set_header Connection $connection_upgrade;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_cache_bypass $http_upgrade;
- proxy_read_timeout 86400;
- proxy_send_timeout 86400;
- proxy_connect_timeout 60s;
- proxy_buffering off;
- proxy_request_buffering off;
- }
-
- location /ws_omi {
- proxy_pass http://chronicle_backend/ws_omi;
- proxy_http_version 1.1;
- proxy_set_header Upgrade $http_upgrade;
- proxy_set_header Connection $connection_upgrade;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_cache_bypass $http_upgrade;
- proxy_read_timeout 86400;
- proxy_send_timeout 86400;
- proxy_connect_timeout 60s;
- proxy_buffering off;
- proxy_request_buffering off;
- }
-
- # Legacy WebSocket endpoint
- location /ws {
- proxy_pass http://chronicle_backend/ws;
- proxy_http_version 1.1;
- proxy_set_header Upgrade $http_upgrade;
- proxy_set_header Connection $connection_upgrade;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_cache_bypass $http_upgrade;
- proxy_read_timeout 86400;
- proxy_send_timeout 86400;
- proxy_connect_timeout 60s;
- proxy_buffering off;
- proxy_request_buffering off;
- }
-
- # Health check endpoints
- location /health {
- proxy_pass http://chronicle_backend/health;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- }
-
- # Readiness check endpoint
- location /readiness {
- proxy_pass http://chronicle_backend/readiness;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- }
-
- # Audio file serving
- location /audio/ {
- proxy_pass http://chronicle_backend/audio/;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_redirect off;
-
- # Add headers for audio file serving
- proxy_set_header Accept-Ranges bytes;
- proxy_cache_bypass $http_range;
- }
-
- # Vite HMR WebSocket (specific path)
- location /@vite/client {
- proxy_pass http://friend_webui/@vite/client;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_http_version 1.1;
- proxy_set_header Upgrade $http_upgrade;
- proxy_set_header Connection $connection_upgrade;
- proxy_cache_bypass $http_upgrade;
- }
-
- # Frontend Vite dev server (with HMR support)
- location / {
- proxy_pass http://friend_webui/;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_redirect off;
-
- # Handle WebSocket upgrade for Vite HMR
- proxy_http_version 1.1;
- proxy_set_header Upgrade $http_upgrade;
- proxy_set_header Connection $connection_upgrade;
- }
- }
-
- # HTTP redirect to HTTPS
- server {
- listen 80;
- server_name localhost TAILSCALE_IP;
- return 301 https://$host$request_uri;
- }
-}
\ No newline at end of file
diff --git a/backends/advanced/setup-https.sh b/backends/advanced/setup-https.sh
deleted file mode 100755
index b565cddc..00000000
--- a/backends/advanced/setup-https.sh
+++ /dev/null
@@ -1,336 +0,0 @@
-#!/bin/bash
-set -e
-
-# Chronicle Advanced Backend Initialization Script
-# Comprehensive setup for all configuration files and optional services
-
-# Colors for output
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-CYAN='\033[0;36m'
-NC='\033[0m' # No Color
-
-print_info() {
- echo -e "${BLUE}[INFO]${NC} $1"
-}
-
-print_success() {
- echo -e "${GREEN}[SUCCESS]${NC} $1"
-}
-
-print_warning() {
- echo -e "${YELLOW}[WARNING]${NC} $1"
-}
-
-print_error() {
- echo -e "${RED}[ERROR]${NC} $1"
-}
-
-print_header() {
- echo ""
- echo -e "${CYAN}===============================================${NC}"
- echo -e "${CYAN}$1${NC}"
- echo -e "${CYAN}===============================================${NC}"
- echo ""
-}
-
-# Reusable backup helper function
-backup_with_timestamp() {
- local filepath="$1"
-
- # Verify the file exists
- if [ ! -f "$filepath" ]; then
- print_error "Cannot backup '$filepath': file does not exist"
- return 1
- fi
-
- # Generate timestamp (POSIX-safe fallback if needed)
- local timestamp
- if command -v date >/dev/null 2>&1; then
- timestamp=$(date +%Y%m%d_%H%M%S 2>/dev/null) || timestamp=$(date +%Y%m%d_%H%M%S)
- else
- # POSIX fallback - use current time in seconds since epoch
- timestamp="$(date +%s)"
- fi
-
- local backup_path="${filepath}.${timestamp}.backup"
-
- # Create the backup
- if cp "$filepath" "$backup_path"; then
- echo "$backup_path"
- return 0
- else
- print_error "Failed to create backup of '$filepath'"
- return 1
- fi
-}
-
-# Check if we're in the right directory
-if [ ! -f "pyproject.toml" ] || [ ! -d "src" ]; then
- print_error "Please run this script from the backends/advanced directory"
- exit 1
-fi
-
-print_header "Chronicle Advanced Backend Initialization"
-echo "This script will help you set up the Chronicle backend with all necessary configurations."
-echo ""
-
-# Function to prompt yes/no
-prompt_yes_no() {
- local prompt="$1"
- local default="$2"
- local response
-
- if [ "$default" = "y" ]; then
- prompt="$prompt [Y/n]: "
- else
- prompt="$prompt [y/N]: "
- fi
-
- read -p "$prompt" response
- response=${response:-$default}
-
- if [[ "$response" =~ ^[Yy]$ ]]; then
- return 0
- else
- return 1
- fi
-}
-
-# Step 1: Handle .env file
-print_header "Step 1: Environment Configuration"
-if [ -f ".env" ]; then
- print_info ".env file already exists"
- if prompt_yes_no "Do you want to update it from template?" "n"; then
- backup_path=$(backup_with_timestamp ".env")
- if [ $? -eq 0 ]; then
- print_info "Backed up existing .env to $backup_path"
- cp .env.template .env
- print_success ".env created from template"
- print_warning "Please edit .env to add your API keys and configuration"
- else
- print_error "Failed to backup .env file, aborting update"
- fi
- fi
-else
- if [ -f ".env.template" ]; then
- cp .env.template .env
- print_success ".env file created from template"
- print_warning "IMPORTANT: Edit .env file to add your API keys:"
- echo " - DEEPGRAM_API_KEY (for speech-to-text)"
- echo " - OPENAI_API_KEY (for memory extraction)"
- echo " - ADMIN_EMAIL and ADMIN_PASSWORD"
- echo ""
- if prompt_yes_no "Would you like to edit .env now?" "y"; then
- ${EDITOR:-nano} .env
- fi
- else
- print_error ".env.template not found!"
- exit 1
- fi
-fi
-
-# Step 2: Memory configuration
-print_header "Step 2: Memory Configuration"
-print_info "Memory settings are managed in config.yml (memory section)."
-
-# Step 3: Diarization configuration
-print_header "Step 3: Diarization Configuration"
-if [ -f "diarization_config.json" ]; then
- print_info "diarization_config.json already exists"
- if prompt_yes_no "Do you want to reset it from template?" "n"; then
- backup_path=$(backup_with_timestamp "diarization_config.json")
- if [ $? -eq 0 ]; then
- print_info "Backed up existing diarization_config.json to $backup_path"
- cp diarization_config.json.template diarization_config.json
- print_success "diarization_config.json reset from template"
- else
- print_error "Failed to backup diarization_config.json file, aborting reset"
- fi
- fi
-else
- if [ -f "diarization_config.json.template" ]; then
- cp diarization_config.json.template diarization_config.json
- print_success "diarization_config.json created from template"
- else
- print_error "diarization_config.json.template not found!"
- exit 1
- fi
-fi
-
-# Step 4: HTTPS Setup (optional)
-print_header "Step 4: HTTPS Configuration (Optional)"
-echo "HTTPS is required for:"
-echo " - Microphone access from browsers"
-echo " - Remote access via network/Tailscale"
-echo " - Secure WebSocket connections"
-echo ""
-
-if prompt_yes_no "Do you want to set up HTTPS?" "n"; then
- if [ -f "init-https.sh" ]; then
- echo ""
- print_info "Please enter your Tailscale IP or network IP"
- print_info "Example: 100.83.66.30"
- read -p "IP Address: " TAILSCALE_IP
-
- if [ -n "$TAILSCALE_IP" ]; then
- ./init-https.sh "$TAILSCALE_IP"
- HTTPS_ENABLED=true
- else
- print_warning "Skipping HTTPS setup - no IP provided"
- HTTPS_ENABLED=false
- fi
- else
- print_warning "init-https.sh not found, skipping HTTPS setup"
- HTTPS_ENABLED=false
- fi
-else
- print_info "Skipping HTTPS setup"
- HTTPS_ENABLED=false
-fi
-
-# Step 5: Optional Services
-print_header "Step 5: Optional Services (extras/)"
-
-echo "Configure additional services from extras/:"
-echo ""
-
-# Helper function to update or add environment variable in .env file
-update_env_var() {
- local key=$1
- local value=$2
-
- # Use Python to safely update the .env file
- python3 -c "
-import sys
-import re
-
-key = '$key'
-value = '$value'
-env_file = '.env'
-
-# Read existing .env file
-try:
- with open(env_file, 'r') as f:
- lines = f.readlines()
-except FileNotFoundError:
- lines = []
-
-# Check if key exists (uncommented)
-updated = False
-for i, line in enumerate(lines):
- # Skip comments
- if line.strip().startswith('#'):
- continue
- # Check for existing key
- if re.match(f'^\\s*{re.escape(key)}=', line):
- lines[i] = f'{key}={value}\\n'
- updated = True
- break
-
-# If not found, append to end
-if not updated:
- if lines and not lines[-1].endswith('\\n'):
- lines.append('\\n')
- lines.append(f'{key}={value}\\n')
-
-# Write back to file
-with open(env_file, 'w') as f:
- f.writelines(lines)
-"
-}
-
-# OpenMemory MCP (Memory Provider)
-if prompt_yes_no "Use OpenMemory MCP for memory management? (cross-client compatible)" "n"; then
- update_env_var "MEMORY_PROVIDER" "openmemory_mcp"
- print_success "Configured for OpenMemory MCP"
- OPENMEMORY_ENABLED=true
-else
- OPENMEMORY_ENABLED=false
-fi
-
-# Parakeet ASR (Offline Transcription)
-if prompt_yes_no "Use Parakeet for offline transcription? (requires GPU)" "n"; then
- update_env_var "PARAKEET_ASR_URL" "http://host.docker.internal:8767"
- print_success "Configured for Parakeet ASR"
- PARAKEET_ENABLED=true
-else
- PARAKEET_ENABLED=false
-fi
-
-# Speaker Recognition
-if prompt_yes_no "Enable Speaker Recognition service?" "n"; then
- update_env_var "SPEAKER_SERVICE_URL" "http://host.docker.internal:8001"
- print_success "Configured for Speaker Recognition"
- SPEAKER_ENABLED=true
-else
- SPEAKER_ENABLED=false
-fi
-
-# Step 6: Summary and Next Steps
-print_header "Setup Complete!"
-
-echo "Configuration Summary:"
-echo "----------------------"
-echo "โ
Environment file (.env) configured"
-echo "โ
Memory configuration (config.yml) ready"
-echo "โ
Diarization configuration (diarization_config.json) ready"
-
-if [ "$HTTPS_ENABLED" = true ]; then
- echo "โ
HTTPS configured with SSL certificates"
-fi
-
-echo ""
-echo "Next Steps:"
-echo "-----------"
-
-if [ "$HTTPS_ENABLED" = true ]; then
- echo "1. Start the services with HTTPS:"
- echo " ${CYAN}docker compose up --build -d${NC}"
- echo ""
- echo "2. Access the dashboard:"
- echo " ๐ https://localhost/"
- echo " ๐ https://$TAILSCALE_IP/"
-else
- echo "1. Start the services:"
- echo " ${CYAN}docker compose up --build -d${NC}"
- echo ""
- echo "2. Access the dashboard:"
- echo " ๐ http://localhost:5173"
-fi
-
-echo ""
-echo "3. Check service health:"
-echo " ${CYAN}curl http://localhost:8000/health${NC}"
-
-echo ""
-if [ "$OPENMEMORY_ENABLED" = true ] || [ "$PARAKEET_ENABLED" = true ] || [ "$SPEAKER_ENABLED" = true ]; then
- echo "Start Optional Services:"
- echo "------------------------"
-
- if [ "$OPENMEMORY_ENABLED" = true ]; then
- echo "OpenMemory MCP:"
- echo " ${CYAN}cd ../../extras/openmemory-mcp && docker compose up -d${NC}"
- fi
-
- if [ "$PARAKEET_ENABLED" = true ]; then
- echo "Parakeet ASR:"
- echo " ${CYAN}cd ../../extras/asr-services && docker compose up parakeet -d${NC}"
- fi
-
- if [ "$SPEAKER_ENABLED" = true ]; then
- echo "Speaker Recognition:"
- echo " ${CYAN}cd ../../extras/speaker-recognition && docker compose up --build -d${NC}"
- fi
-fi
-
-echo ""
-echo "For more information, see:"
-echo " - Docs/quickstart.md"
-echo " - Docs/memory-configuration-guide.md"
-echo " - MEMORY_PROVIDERS.md"
-
-echo ""
-print_success "Initialization complete! ๐"