Skip to content

Commit 24a3419

Browse files
committed
Enhance Docker command handling and configuration management
- Updated `run_compose_command` to support separate build commands for services, including profile management for the backend and speaker-recognition services.
- Improved error handling and output streaming during Docker command execution.
- Added an `ensure_docker_network` function to verify and create the required Docker network before starting services.
- Refactored configuration files to use `oc.env` for environment-variable management, ensuring better compatibility and flexibility across environments.
1 parent 42eb911 commit 24a3419

File tree

5 files changed

+161
-109
lines changed

5 files changed

+161
-109
lines changed

services.py

Lines changed: 138 additions & 86 deletions
Original file line numberDiff line numberDiff line change
@@ -80,27 +80,104 @@ def run_compose_command(service_name, command, build=False):
8080
console.print(f"[red]❌ Docker compose file not found: {compose_file}[/red]")
8181
return False
8282

83+
# Step 1: If build is requested, run build separately first (no timeout for CUDA builds)
84+
if build and command == 'up':
85+
# Build command - need to specify profiles for build too
86+
build_cmd = ['docker', 'compose']
87+
88+
# Add profiles to build command (needed for profile-specific services)
89+
if service_name == 'backend':
90+
caddyfile_path = service_path / 'Caddyfile'
91+
if caddyfile_path.exists() and caddyfile_path.is_file():
92+
build_cmd.extend(['--profile', 'https'])
93+
94+
obsidian_enabled = False
95+
config_data = load_config_yml()
96+
if config_data:
97+
memory_config = config_data.get('memory', {})
98+
obsidian_config = memory_config.get('obsidian', {})
99+
if obsidian_config.get('enabled', False):
100+
obsidian_enabled = True
101+
102+
if not obsidian_enabled:
103+
env_file = service_path / '.env'
104+
if env_file.exists():
105+
env_values = dotenv_values(env_file)
106+
if env_values.get('OBSIDIAN_ENABLED', 'false').lower() == 'true':
107+
obsidian_enabled = True
108+
109+
if obsidian_enabled:
110+
build_cmd.extend(['--profile', 'obsidian'])
111+
112+
elif service_name == 'speaker-recognition':
113+
env_file = service_path / '.env'
114+
if env_file.exists():
115+
env_values = dotenv_values(env_file)
116+
compute_mode = env_values.get('COMPUTE_MODE', 'cpu')
117+
build_cmd.extend(['--profile', compute_mode])
118+
119+
build_cmd.append('build')
120+
121+
# Run build with streaming output (no timeout)
122+
console.print(f"[cyan]🔨 Building {service_name} (this may take several minutes for CUDA/GPU builds)...[/cyan]")
123+
try:
124+
process = subprocess.Popen(
125+
build_cmd,
126+
cwd=service_path,
127+
stdout=subprocess.PIPE,
128+
stderr=subprocess.STDOUT,
129+
text=True,
130+
bufsize=1
131+
)
132+
133+
if process.stdout is None:
134+
raise RuntimeError("Process stdout is None - unable to read command output")
135+
136+
for line in process.stdout:
137+
line = line.rstrip()
138+
if not line:
139+
continue
140+
141+
if 'error' in line.lower() or 'failed' in line.lower():
142+
console.print(f" [red]{line}[/red]")
143+
elif 'Successfully' in line or 'built' in line.lower():
144+
console.print(f" [green]{line}[/green]")
145+
elif 'Building' in line or 'Step' in line:
146+
console.print(f" [cyan]{line}[/cyan]")
147+
elif 'warning' in line.lower():
148+
console.print(f" [yellow]{line}[/yellow]")
149+
else:
150+
console.print(f" [dim]{line}[/dim]")
151+
152+
process.wait()
153+
154+
if process.returncode != 0:
155+
console.print(f"\n[red]❌ Build failed for {service_name}[/red]")
156+
return False
157+
158+
console.print(f"[green]✅ Build completed for {service_name}[/green]")
159+
160+
except Exception as e:
161+
console.print(f"[red]❌ Error building {service_name}: {e}[/red]")
162+
return False
163+
164+
# Step 2: Run the actual command (up/down/restart/status)
83165
cmd = ['docker', 'compose']
84166

85-
# For backend service, check if HTTPS is configured (Caddyfile exists)
167+
# Add profiles for backend service
86168
if service_name == 'backend':
87169
caddyfile_path = service_path / 'Caddyfile'
88170
if caddyfile_path.exists() and caddyfile_path.is_file():
89-
# Enable HTTPS profile to start Caddy service
90171
cmd.extend(['--profile', 'https'])
91172

92-
# Check if Obsidian/Neo4j is enabled
93173
obsidian_enabled = False
94-
95-
# Method 1: Check config.yml (preferred)
96174
config_data = load_config_yml()
97175
if config_data:
98176
memory_config = config_data.get('memory', {})
99177
obsidian_config = memory_config.get('obsidian', {})
100178
if obsidian_config.get('enabled', False):
101179
obsidian_enabled = True
102180

103-
# Method 2: Fallback to .env for backward compatibility
104181
if not obsidian_enabled:
105182
env_file = service_path / '.env'
106183
if env_file.exists():
@@ -114,30 +191,22 @@ def run_compose_command(service_name, command, build=False):
114191

115192
# Handle speaker-recognition service specially
116193
if service_name == 'speaker-recognition' and command in ['up', 'down']:
117-
# Read configuration to determine profile
118194
env_file = service_path / '.env'
119195
if env_file.exists():
120196
env_values = dotenv_values(env_file)
121197
compute_mode = env_values.get('COMPUTE_MODE', 'cpu')
122198

123-
# Add profile flag for both up and down commands
124-
if compute_mode == 'gpu':
125-
cmd.extend(['--profile', 'gpu'])
126-
else:
127-
cmd.extend(['--profile', 'cpu'])
199+
cmd.extend(['--profile', compute_mode])
128200

129201
if command == 'up':
130202
https_enabled = env_values.get('REACT_UI_HTTPS', 'false')
131203
if https_enabled.lower() == 'true':
132-
# HTTPS mode: start with profile for all services (includes nginx)
133204
cmd.extend(['up', '-d'])
134205
else:
135-
# HTTP mode: start specific services with profile (no nginx)
136206
cmd.extend(['up', '-d', 'speaker-service-gpu' if compute_mode == 'gpu' else 'speaker-service-cpu', 'web-ui'])
137207
elif command == 'down':
138208
cmd.extend(['down'])
139209
else:
140-
# Fallback: no profile
141210
if command == 'up':
142211
cmd.extend(['up', '-d'])
143212
elif command == 'down':
@@ -152,90 +221,73 @@ def run_compose_command(service_name, command, build=False):
152221
cmd.extend(['restart'])
153222
elif command == 'status':
154223
cmd.extend(['ps'])
155-
156-
if command == 'up' and build:
157-
cmd.append('--build')
158-
224+
159225
try:
160-
# For commands that need real-time output (build), stream to console
161-
if build and command == 'up':
162-
console.print(f"[dim]Building {service_name} containers...[/dim]")
163-
process = subprocess.Popen(
164-
cmd,
165-
cwd=service_path,
166-
stdout=subprocess.PIPE,
167-
stderr=subprocess.STDOUT,
168-
text=True,
169-
bufsize=1
170-
)
171-
172-
# Simply stream all output with coloring
173-
all_output = []
174-
175-
if process.stdout is None:
176-
raise RuntimeError("Process stdout is None - unable to read command output")
177-
for line in process.stdout:
178-
line = line.rstrip()
179-
if not line:
180-
continue
181-
182-
# Store for error context
183-
all_output.append(line)
184-
185-
# Print with appropriate coloring
186-
if 'error' in line.lower() or 'failed' in line.lower():
187-
console.print(f" [red]{line}[/red]")
188-
elif 'Successfully' in line or 'Started' in line or 'Created' in line:
189-
console.print(f" [green]{line}[/green]")
190-
elif 'Building' in line or 'Creating' in line:
191-
console.print(f" [cyan]{line}[/cyan]")
192-
elif 'warning' in line.lower():
193-
console.print(f" [yellow]{line}[/yellow]")
194-
else:
195-
console.print(f" [dim]{line}[/dim]")
196-
197-
# Wait for process to complete
198-
process.wait()
199-
200-
# If build failed, show error summary
201-
if process.returncode != 0:
202-
console.print(f"\n[red]❌ Build failed for {service_name}[/red]")
203-
return False
204-
226+
# Run the command with timeout (build already done if needed)
227+
result = subprocess.run(
228+
cmd,
229+
cwd=service_path,
230+
capture_output=True,
231+
text=True,
232+
check=False,
233+
timeout=120 # 2 minute timeout
234+
)
235+
236+
if result.returncode == 0:
205237
return True
206238
else:
207-
# For non-build commands, run silently unless there's an error
208-
result = subprocess.run(
209-
cmd,
210-
cwd=service_path,
211-
capture_output=True,
212-
text=True,
213-
check=False,
214-
timeout=120 # 2 minute timeout for service status checks
215-
)
216-
217-
if result.returncode == 0:
218-
return True
219-
else:
220-
console.print(f"[red]❌ Command failed[/red]")
221-
if result.stderr:
222-
console.print("[red]Error output:[/red]")
223-
# Show all error output
224-
for line in result.stderr.splitlines():
225-
console.print(f" [dim]{line}[/dim]")
226-
return False
227-
239+
console.print(f"[red]❌ Command failed[/red]")
240+
if result.stderr:
241+
console.print("[red]Error output:[/red]")
242+
for line in result.stderr.splitlines():
243+
console.print(f" [dim]{line}[/dim]")
244+
return False
245+
228246
except subprocess.TimeoutExpired:
229247
console.print(f"[red]❌ Command timed out after 2 minutes for {service_name}[/red]")
230248
return False
231249
except Exception as e:
232250
console.print(f"[red]❌ Error running command: {e}[/red]")
233251
return False
234252

253+
def ensure_docker_network():
    """Ensure the shared 'chronicle-network' Docker network exists.

    Inspects the network with `docker network inspect` and creates it when
    missing.

    Returns:
        bool: True if the network already existed or was created
        successfully; False on any failure (docker error, missing docker
        binary, etc.).
    """
    try:
        # `docker network inspect` exits non-zero when the network is absent.
        result = subprocess.run(
            ['docker', 'network', 'inspect', 'chronicle-network'],
            capture_output=True,
            check=False
        )

        if result.returncode != 0:
            # Network doesn't exist yet — create it.
            console.print("[blue]📡 Creating chronicle-network...[/blue]")
            subprocess.run(
                ['docker', 'network', 'create', 'chronicle-network'],
                check=True,
                capture_output=True,
                text=True  # so e.stderr below is str, not bytes
            )
            console.print("[green]✅ chronicle-network created[/green]")
        else:
            console.print("[dim]📡 chronicle-network already exists[/dim]")
        return True
    except subprocess.CalledProcessError as e:
        # capture_output swallows docker's stderr; surface it so the user
        # sees the real failure reason instead of just the exit status.
        detail = e.stderr.strip() if isinstance(e.stderr, str) and e.stderr else ''
        console.print(f"[red]❌ Failed to create network: {detail or e}[/red]")
        return False
    except Exception as e:
        # Covers a missing docker binary (FileNotFoundError), permission
        # problems, and other unexpected errors.
        console.print(f"[red]❌ Error checking/creating network: {e}[/red]")
        return False
235282
def start_services(services, build=False):
236283
"""Start specified services"""
237284
console.print(f"🚀 [bold]Starting {len(services)} services...[/bold]")
238-
285+
286+
# Ensure Docker network exists before starting services
287+
if not ensure_docker_network():
288+
console.print("[red]❌ Cannot start services without Docker network[/red]")
289+
return
290+
239291
success_count = 0
240292
for service_name in services:
241293
if service_name not in SERVICES:

tests/configs/deepgram-openai.yml

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ memory:
2323
timeout_seconds: 1200
2424
models:
2525
- api_family: openai
26-
api_key: ${OPENAI_API_KEY:-}
26+
api_key: ${oc.env:OPENAI_API_KEY,}
2727
description: OpenAI GPT-4o-mini
2828
model_name: gpt-4o-mini
2929
model_output: json
@@ -35,7 +35,7 @@ models:
3535
model_url: https://api.openai.com/v1
3636
name: openai-llm
3737
- api_family: openai
38-
api_key: ${OPENAI_API_KEY:-}
38+
api_key: ${oc.env:OPENAI_API_KEY,}
3939
description: OpenAI text-embedding-3-small
4040
embedding_dimensions: 1536
4141
model_name: text-embedding-3-small
@@ -48,14 +48,14 @@ models:
4848
description: Qdrant vector database
4949
model_params:
5050
collection_name: omi_memories
51-
host: ${QDRANT_BASE_URL:-qdrant}
52-
port: ${QDRANT_PORT:-6333}
51+
host: ${oc.env:QDRANT_BASE_URL,qdrant}
52+
port: ${oc.env:QDRANT_PORT,6333}
5353
model_provider: qdrant
5454
model_type: vector_store
55-
model_url: http://${QDRANT_BASE_URL:-qdrant}:${QDRANT_PORT:-6333}
55+
model_url: http://${oc.env:QDRANT_BASE_URL,qdrant}:${oc.env:QDRANT_PORT,6333}
5656
name: vs-qdrant
5757
- api_family: http
58-
api_key: ${DEEPGRAM_API_KEY:-}
58+
api_key: ${oc.env:DEEPGRAM_API_KEY,}
5959
description: Deepgram Nova 3 (batch)
6060
model_provider: deepgram
6161
model_type: stt
@@ -64,7 +64,7 @@ models:
6464
operations:
6565
stt_transcribe:
6666
headers:
67-
Authorization: Token ${DEEPGRAM_API_KEY:-}
67+
Authorization: Token ${oc.env:DEEPGRAM_API_KEY,}
6868
Content-Type: audio/raw
6969
method: POST
7070
path: /listen
@@ -84,7 +84,7 @@ models:
8484
words: results.channels[0].alternatives[0].words
8585
type: json
8686
- api_family: websocket
87-
api_key: ${DEEPGRAM_API_KEY:-}
87+
api_key: ${oc.env:DEEPGRAM_API_KEY,}
8888
description: Deepgram Nova 3 (streaming)
8989
model_provider: deepgram
9090
model_type: stt_stream

tests/configs/mock-services.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,11 +38,11 @@ models:
3838
description: Qdrant vector database (local)
3939
model_params:
4040
collection_name: omi_memories
41-
host: ${QDRANT_BASE_URL:-qdrant}
42-
port: ${QDRANT_PORT:-6333}
41+
host: ${oc.env:QDRANT_BASE_URL,qdrant}
42+
port: ${oc.env:QDRANT_PORT,6333}
4343
model_provider: qdrant
4444
model_type: vector_store
45-
model_url: http://${QDRANT_BASE_URL:-qdrant}:${QDRANT_PORT:-6333}
45+
model_url: http://${oc.env:QDRANT_BASE_URL,qdrant}:${oc.env:QDRANT_PORT,6333}
4646
name: vs-qdrant
4747
speaker_recognition:
4848
enabled: false

tests/configs/parakeet-ollama.yml

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ models:
1515
api_family: openai
1616
model_name: llama3.1:latest
1717
model_url: http://localhost:11434/v1
18-
api_key: ${OPENAI_API_KEY:-ollama}
18+
api_key: ${oc.env:OPENAI_API_KEY,ollama}
1919
model_params:
2020
temperature: 0.2
2121
max_tokens: 2000
@@ -28,7 +28,7 @@ models:
2828
api_family: openai
2929
model_name: nomic-embed-text:latest
3030
model_url: http://localhost:11434/v1
31-
api_key: ${OPENAI_API_KEY:-ollama}
31+
api_key: ${oc.env:OPENAI_API_KEY,ollama}
3232
embedding_dimensions: 768
3333
model_output: vector
3434

@@ -37,18 +37,18 @@ models:
3737
model_type: vector_store
3838
model_provider: qdrant
3939
api_family: qdrant
40-
model_url: http://${QDRANT_BASE_URL:-qdrant}:${QDRANT_PORT:-6333}
40+
model_url: http://${oc.env:QDRANT_BASE_URL,qdrant}:${oc.env:QDRANT_PORT,6333}
4141
model_params:
42-
host: ${QDRANT_BASE_URL:-qdrant}
43-
port: ${QDRANT_PORT:-6333}
42+
host: ${oc.env:QDRANT_BASE_URL,qdrant}
43+
port: ${oc.env:QDRANT_PORT,6333}
4444
collection_name: omi_memories
4545

4646
- name: stt-parakeet-batch
4747
description: Parakeet NeMo ASR (batch) - local offline transcription
4848
model_type: stt
4949
model_provider: parakeet
5050
api_family: http
51-
model_url: ${PARAKEET_ASR_URL:-http://localhost:8767}
51+
model_url: ${oc.env:PARAKEET_ASR_URL,http://localhost:8767}
5252
api_key: ''
5353
operations:
5454
stt_transcribe:

0 commit comments

Comments
 (0)