-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: cx.lib
More file actions
687 lines (621 loc) · 25 KB
/
cx.lib
File metadata and controls
687 lines (621 loc) · 25 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
#!/usr/bin/env bash
# Shared utilities for cx* scripts
#
# Source this file: source cx.lib
# Resolve the directory containing this library so sibling tools
# (cxconfig, kpush, kdeploy, lib.sh) can be located regardless of the
# caller's working directory. BASH_SOURCE[0] is used instead of $0
# because this file is sourced, not executed.
CXLIB_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly CXLIB_DIR
# Pull in the base helper library that lives next to this file.
# NOTE(review): assumes lib.sh always exists beside cx.lib — confirm.
source "$CXLIB_DIR/lib.sh"
# ─────────────────────────────────────────────────────────────
# Debug logging — always on, writes to stderr
# ─────────────────────────────────────────────────────────────
_dbg() {
  # Debug trace on stderr, tagged with the calling function's name;
  # calls made from the top level of a script are reported as "main".
  local caller="${FUNCNAME[1]:-main}"
  printf '[cx.lib:%s] %s\n' "$caller" "$*" >&2
}
# Announce that the library finished loading (debug channel only).
_dbg "sourced cx.lib, CXLIB_DIR=$CXLIB_DIR"
# ─────────────────────────────────────────────────────────────
# Terminal detection
# ─────────────────────────────────────────────────────────────
# cxlibIsTty is non-empty when stdout is a terminal; step output is then
# "progressive" (name printed first, elapsed time appended on the same
# line) and failures are colored red. When stdout is redirected (CI,
# pipes, log files) the variables stay empty, selecting plain batch
# output with no ANSI escape sequences.
cxlibIsTty=""
cxlibRed=""
cxlibReset=""
if [[ -t 1 ]]; then
cxlibIsTty=1
cxlibRed=$'\033[31m'
cxlibReset=$'\033[0m'
fi
_dbg "terminal detection: cxlibIsTty=${cxlibIsTty:-<empty>}"
# ─────────────────────────────────────────────────────────────
# Output helpers
# ─────────────────────────────────────────────────────────────
# Print "warning: <message>" on stderr.
# Fix: warnings previously went to stdout while err_msg used stderr, so a
# caller capturing a command's stdout would swallow warnings into its data.
# printf is used instead of echo so a message starting with "-" is not
# misparsed as an echo option.
warn_msg() {
  printf 'warning: %s\n' "$*" >&2
}
# Emit "error: <message>" on stderr.
err_msg() {
  printf '%s\n' "error: $*" >&2
}
# Announce that a step has begun. On a TTY the name is printed without a
# trailing newline so cxlibStepDone can append the elapsed time to the same
# line; otherwise a self-contained "start <name>" line is emitted for logs.
cxlibStepProgress() {
  local step_name="$1"
  if [[ -z "$cxlibIsTty" ]]; then
    printf "start %s\n" "$step_name"
  else
    printf "%s" "$step_name"
  fi
}
# Finish a step line started by cxlibStepProgress.
# In progressive (TTY) mode, appends " - <time>" to the name already printed,
# or a red ✗ (plus the log file path when given) on failure. In batch mode,
# prints a complete "done"/"FAILED" line that repeats the step name.
# Args: $1 step name, $2 formatted elapsed time, $3 exit status,
#       $4 optional log file path shown on failure.
# Fix: the color variables were embedded in a printf FORMAT string
# (ShellCheck SC2059); they are now passed as arguments so a stray '%'
# in an escape sequence can never be interpreted as a format directive.
cxlibStepDone() {
  local name="$1" time_str="$2" status="$3" logfile="${4:-}"
  if [[ -n "$cxlibIsTty" ]]; then
    if [[ $status -eq 0 ]]; then
      printf " - %s\n" "$time_str"
    elif [[ -n "$logfile" ]]; then
      printf " %s✗%s log file: %s\n" "$cxlibRed" "$cxlibReset" "$logfile"
    else
      printf " %s%s%s\n" "$cxlibRed" "✗" "$cxlibReset"
    fi
  else
    if [[ $status -eq 0 ]]; then
      printf "done %s - %s\n" "$name" "$time_str"
    elif [[ -n "$logfile" ]]; then
      printf "FAILED %s log file: %s\n" "$name" "$logfile"
    else
      printf "FAILED %s\n" "$name"
    fi
  fi
}
# ─────────────────────────────────────────────────────────────
# Timing
# ─────────────────────────────────────────────────────────────
# Epoch second captured by timer_start; read later by timer_elapsed.
cxlibTimerStart=""
# Record the current wall-clock time as the timer's starting point.
timer_start() {
  cxlibTimerStart="$(date +%s)"
  _dbg "timer started at $cxlibTimerStart"
}
# Render an elapsed-seconds count as "Xm Ys", or plain "Xs" under a minute.
cxlibFormatElapsed() {
  local total="$1"
  local minutes=$(( total / 60 ))
  local seconds=$(( total % 60 ))
  if (( minutes == 0 )); then
    echo "${seconds}s"
  else
    echo "${minutes}m ${seconds}s"
  fi
}
# Print the time elapsed since timer_start, formatted via cxlibFormatElapsed.
# Fix: if timer_start was never called, cxlibTimerStart is empty and the
# original arithmetic $((end - )) was a syntax error that aborted the
# expression; the start now defaults to "now", degrading to "0s".
timer_elapsed() {
  local end
  end=$(date +%s)
  cxlibFormatElapsed $(( end - ${cxlibTimerStart:-$end} ))
}
# ─────────────────────────────────────────────────────────────
# Architecture detection
# ─────────────────────────────────────────────────────────────
# Map the host's `uname -m` machine name onto the linux/ARCH platform
# string that Docker and buildx expect for --platform flags.
host_arch() {
  local machine platform
  machine="$(uname -m)"
  _dbg "uname -m = $machine"
  case "$machine" in
    x86_64 | amd64)
      platform="linux/amd64"
      _dbg "-> linux/amd64"
      ;;
    arm64 | aarch64)
      platform="linux/arm64"
      _dbg "-> linux/arm64"
      ;;
    *)
      # Unknown machine name: pass it through untranslated.
      platform="linux/$machine"
      _dbg "-> linux/$machine (unmapped)"
      ;;
  esac
  echo "$platform"
}
# Succeed (return 0) when $1 names a platform different from the host's,
# meaning a buildx cross-compile pass is required. An empty or matching
# target returns 1 (no cross-build needed).
needs_cross_build() {
  local target="${1:-}"
  local host
  host="$(host_arch)"
  _dbg "target=${target:-<empty>} host=$host"
  if [[ -z "$target" || "$target" == "$host" ]]; then
    _dbg "-> cross-build not needed"
    return 1
  fi
  _dbg "-> cross-build NEEDED"
  return 0
}
# Ask the currently configured kubectl cluster which CPU architecture its
# first node reports, and translate that into a Docker platform string.
# Falls back to linux/amd64 when kubectl is missing, unconfigured, or
# returns no nodes, so CONTAINER_ARCH can always be derived automatically
# and builds target the actual cluster without manual overrides.
cluster_arch() {
  local node_arch
  _dbg "querying kubectl for node architecture..."
  node_arch=$(kubectl get nodes \
    -o jsonpath='{.items[0].status.nodeInfo.architecture}' \
    2>/dev/null) || true
  _dbg "kubectl raw arch=${node_arch:-<empty>}"
  case "${node_arch:-}" in
    amd64 | x86_64)
      _dbg "-> linux/amd64"
      echo "linux/amd64"
      ;;
    arm64 | aarch64)
      _dbg "-> linux/arm64"
      echo "linux/arm64"
      ;;
    "")
      _dbg "-> linux/amd64 (fallback, kubectl unavailable or no nodes)"
      echo "linux/amd64"
      ;;
    *)
      _dbg "-> linux/${node_arch} (unmapped)"
      echo "linux/${node_arch}"
      ;;
  esac
}
# Detect the target container architecture from the configured cluster.
# Override by setting CONTAINER_ARCH in the environment before running,
# for example CONTAINER_ARCH=linux/arm64 cxship app.
# The ":=" expansion only invokes cluster_arch (a kubectl round-trip)
# when CONTAINER_ARCH is unset or empty, so a caller-supplied value is
# honored at zero cost. Exported so child processes launched by the cx
# scripts see the same target platform.
: "${CONTAINER_ARCH:=$(cluster_arch)}"
export CONTAINER_ARCH
_dbg "CONTAINER_ARCH=$CONTAINER_ARCH"
# ─────────────────────────────────────────────────────────────
# Project detection
# ─────────────────────────────────────────────────────────────
# Locate the project root and print it (prints an empty string when no
# project is found). Strategy: trust `git rev-parse --show-toplevel`
# first, since it handles worktrees correctly; when there is no repo or
# the git root lacks a pom.xml, walk upward from $PWD and keep the
# highest directory that contains a pom.xml (the outermost Maven module).
find_project_root() {
  local git_root
  git_root=$(git rev-parse --show-toplevel 2>/dev/null) || true
  _dbg "git_root=${git_root:-<empty>}"
  if [[ -n "$git_root" && -f "$git_root/pom.xml" ]]; then
    _dbg "-> $git_root (git root has pom.xml)"
    echo "$git_root"
    return
  fi
  local candidate="$PWD" topmost=""
  _dbg "walking up from $candidate looking for pom.xml..."
  while [[ "$candidate" != "/" ]]; do
    if [[ -f "$candidate/pom.xml" ]]; then
      _dbg " found pom.xml in $candidate"
      topmost="$candidate"
    fi
    candidate="$(dirname "$candidate")"
  done
  _dbg "-> ${topmost:-<none found>}"
  echo "$topmost"
}
# Resolve the project root via find_project_root, aborting the script with
# an error when no project (pom.xml) can be located.
# Sets the global PROJECT_ROOT on success.
require_project_root() {
  _dbg "resolving PROJECT_ROOT..."
  PROJECT_ROOT=$(find_project_root)
  [[ -n "$PROJECT_ROOT" ]] || {
    err_msg "Not in a project directory (no pom.xml found)"
    echo "Run this command from a project root or subdirectory" >&2
    exit 1
  }
  _dbg "PROJECT_ROOT=$PROJECT_ROOT"
}
# ─────────────────────────────────────────────────────────────
# Locking (prevent parallel builds)
# ─────────────────────────────────────────────────────────────
# Acquire exclusive lock for the current project
# Usage: acquire_lock [lock_name]
# Default lock_name is "build"
# Dies if another process holds the lock.
# In a git worktree, all worktrees share the same lock because they
# share Maven artifacts and Docker image tags.
acquire_lock() {
local lock_name="${1:-build}"
local lock_key="$PROJECT_ROOT"
_dbg "lock_name=$lock_name lock_key=$lock_key"
# Key the lock on the repository's shared .git common directory so all
# worktrees of the same repo contend for one lock.
local common_dir
common_dir=$(git -C "$PROJECT_ROOT" rev-parse --git-common-dir 2>/dev/null) || true
if [[ -n "$common_dir" ]]; then
# --git-common-dir may be relative to the project root; normalize to
# an absolute path so the hash below is stable across callers.
lock_key=$(cd "$PROJECT_ROOT" && cd "$common_dir" && pwd)
_dbg "worktree detected, lock_key adjusted to $lock_key"
fi
# Short stable hash keeps the lock path readable and unique per project;
# cksum is POSIX, avoiding a dependency on md5/sha tools.
local project_hash
project_hash=$(printf '%s' "$lock_key" | cksum | awk '{print $1}' | cut -c1-8)
local lock_dir="/tmp/${lock_name}-${project_hash}.lock"
local pid_file="$lock_dir/pid"
_dbg "lock_dir=$lock_dir pid=$$"
# %q-escape the path once so it is safe inside the trap command string.
local lock_cleanup_trap=""
printf -v lock_cleanup_trap 'rm -rf %q' "$lock_dir"
# mkdir is atomic: it either creates the directory (lock acquired) or
# fails because another process already holds the lock.
if mkdir "$lock_dir" 2>/dev/null; then
echo "$$" > "$pid_file"
trap "$lock_cleanup_trap" EXIT
_dbg "lock acquired (fresh)"
return
fi
# Lock exists: discover the holder and whether it is still alive.
local lock_pid=""
if [[ -f "$pid_file" ]]; then
lock_pid=$(<"$pid_file")
fi
_dbg "lock_dir exists, held by pid=${lock_pid:-<unknown>}"
# kill -0 only probes for process existence; it sends no signal.
if [[ -n "$lock_pid" ]] && kill -0 "$lock_pid" 2>/dev/null; then
_dbg "holder pid $lock_pid is alive -> aborting"
err_msg "Another $lock_name is running for this project"
echo "Lock file: $lock_dir" >&2
exit 1
fi
# Holder died before its EXIT trap could clean up: remove the stale
# lock and retry the atomic mkdir exactly once.
_dbg "stale lock (pid $lock_pid dead), removing and retrying"
rm -rf "$lock_dir" 2>/dev/null || true
if mkdir "$lock_dir" 2>/dev/null; then
echo "$$" > "$pid_file"
trap "$lock_cleanup_trap" EXIT
_dbg "lock acquired (after stale cleanup)"
return
fi
# Another process won the race between our rm and mkdir — give up.
_dbg "lock acquire failed on retry"
err_msg "Another $lock_name is running for this project"
echo "Lock file: $lock_dir" >&2
exit 1
}
# ─────────────────────────────────────────────────────────────
# Component helpers
# ─────────────────────────────────────────────────────────────
# Translate a user-facing component name to its folder name.
# Only "edge" is aliased (folder "cx-edge"); everything else passes through.
map_component_name_to_folder() {
  local name="$1"
  case "$name" in
    edge)
      _dbg "mapped 'edge' -> 'cx-edge'"
      echo "cx-edge"
      ;;
    *)
      echo "$name"
      ;;
  esac
}
# Inverse of map_component_name_to_folder: show folder "cx-edge" to the
# user as "edge"; every other name is printed unchanged.
display_component_name() {
  case "$1" in
    cx-edge) echo "edge" ;;
    *) echo "$1" ;;
  esac
}
# Join the display names of the given components with commas.
format_components() {
  local names=() item
  for item in "$@"; do
    names+=("$(display_component_name "$item")")
  done
  local IFS=','
  echo "${names[*]}"
}
# Print all known components (display names) as a space-separated string,
# as reported by the cxconfig helper. cxconfig errors are ignored.
list_components() {
  local shown=() name
  while IFS= read -r name; do
    shown+=("$(display_component_name "$name")")
  done < <("$CXLIB_DIR/cxconfig" components 2>/dev/null)
  printf "%s " "${shown[@]}"
}
# Usage: valid_component <component> <all_components_array_name>
# Return 0 when <component> appears in the named array, 1 otherwise.
valid_component() {
  local candidate="$1"
  local -n known_list=$2
  local entry
  for entry in "${known_list[@]}"; do
    if [[ "$entry" == "$candidate" ]]; then
      return 0
    fi
  done
  return 1
}
# ─────────────────────────────────────────────────────────────
# Component expansion and parallel execution
# ─────────────────────────────────────────────────────────────
# Expand the "all" keyword and validate components, modifying the first
# array in place.
# Usage: expand_components <components_array_name> <all_components_array_name>
# First maps user-facing aliases onto folder names (edge -> cx-edge). A
# sole "all" entry is replaced by the module list from cxconfig; otherwise
# every entry must appear in the known-components array or the script exits.
# Fixes: the validation loop variable is declared local so it no longer
# leaks a global "c" into the caller's scope, and the rename debug trace
# uses an if-statement so a false condition at the end of the loop body
# cannot trip callers running under `set -e`.
expand_components() {
  local -n targetComponents=$1
  local -n validComponents=$2
  _dbg "input components: ${targetComponents[*]}"
  local i
  for i in "${!targetComponents[@]}"; do
    local before="${targetComponents[$i]}"
    targetComponents[$i]="$(map_component_name_to_folder "${targetComponents[$i]}")"
    if [[ "$before" != "${targetComponents[$i]}" ]]; then
      _dbg " renamed: $before -> ${targetComponents[$i]}"
    fi
  done
  if [[ ${#targetComponents[@]} -eq 1 && "${targetComponents[0]}" == "all" ]]; then
    _dbg "expanding 'all' via cxconfig..."
    mapfile -t targetComponents < <("$CXLIB_DIR/cxconfig" all)
    _dbg "expanded to: ${targetComponents[*]}"
  else
    local c
    for c in "${targetComponents[@]}"; do
      if valid_component "$c" validComponents; then
        _dbg " validated: $c"
      else
        _dbg " UNKNOWN component: $c"
        err_msg "Unknown component '$c'"
        exit 1
      fi
    done
  fi
}
# Run a function on components in parallel, collecting failures.
# Usage: run_parallel <func_name> <components_array> [extra_args...]
# Returns: sets global $parallel_failures array with failed components
run_parallel() {
local func="$1"
local -n itemsToProcess=$2
shift 2
local extra_args=("$@")
_dbg "func=$func items=(${itemsToProcess[*]}) extra_args=(${extra_args[*]:-})"
parallel_failures=()
# Map item -> background PID so each result can be attributed to its item.
local -A pids=()
local item
for item in "${itemsToProcess[@]}"; do
(
# Force batch-style output inside each worker: interleaved progressive
# (TTY) output from concurrent jobs would be unreadable.
cxlibIsTty=""
_dbg "subshell start: $func $item ${extra_args[*]:-}"
# Normalize any non-zero status to an exit code of 1 for the subshell.
"$func" "$item" "${extra_args[@]}" || exit 1
) &
pids[$item]=$!
_dbg " launched $func $item -> pid ${pids[$item]}"
done
# Wait on each worker individually; `wait <pid>` propagates that
# subshell's exit status, letting failures be recorded per item.
for item in "${itemsToProcess[@]}"; do
_dbg " waiting on pid ${pids[$item]} ($item)..."
if wait "${pids[$item]}"; then
_dbg " $item -> OK"
else
_dbg " $item -> FAILED"
parallel_failures+=("$item")
fi
done
_dbg "parallel_failures=(${parallel_failures[*]:-})"
}
# Load all known components, validate user selection is non-empty,
# and expand "all" keyword. Takes the caller's components array by
# reference.
# Usage: load_and_expand_components <components_array_name>
# NOTE(review): _all_components is intentionally not declared local — it
# is handed to expand_components by name and therefore leaks into the
# caller's scope; confirm no caller depends on reading it afterwards.
load_and_expand_components() {
local -n _comps=$1
_dbg "loading all known components via cxconfig..."
mapfile -t _all_components < <("$CXLIB_DIR/cxconfig" components)
_dbg "known components: ${_all_components[*]}"
if [[ ${#_comps[@]} -eq 0 ]]; then
_dbg "no components specified -> error"
err_msg "Specify components or use 'all'"
echo "Available: ${_all_components[*]}" >&2
exit 1
fi
_dbg "requested components: ${_comps[*]}"
# Pass the ORIGINAL array name ($1), not the local nameref, so
# expand_components binds its own nameref to the caller's array.
expand_components "$1" _all_components
}
# Report parallel execution results and fire the optional completion hook.
# Usage: report_parallel_result <verb> <components_array_name> [hook_name]
# Prints a summary line with the elapsed time; exits 1 when the global
# parallel_failures array (set by run_parallel) is non-empty.
# (When no hook name is given, the trailing && leaves the function's
# return status non-zero on success; callers appear to ignore the status.)
report_parallel_result() {
  local verb="$1"
  local -n _rp_comps=$2
  local hook_name="${3:-}"
  _dbg "verb=$verb components=(${_rp_comps[*]}) failures=(${parallel_failures[*]:-}) hook=${hook_name:-<none>}"
  echo
  if [[ ${#parallel_failures[@]} -gt 0 ]]; then
    echo "Failed $(format_components "${parallel_failures[@]}") - $(timer_elapsed)"
    [[ -n "$hook_name" ]] && run_completion_hook "$hook_name" failure
    exit 1
  fi
  echo "$verb $(format_components "${_rp_comps[@]}") - $(timer_elapsed)"
  [[ -n "$hook_name" ]] && run_completion_hook "$hook_name" success
}
# ─────────────────────────────────────────────────────────────
# Build helpers
# ─────────────────────────────────────────────────────────────
# Build components with a single Maven invocation from the project root.
# Maven components get -pl docker/X,docker/Y -am. Non-Maven components
# (Dockerfile only) get docker build directly.
# When CONTAINER_ARCH is set and differs from the host architecture,
# a second buildx pass cross-compiles the images after the native
# Maven build assembles the Docker context.
# Usage: build_components <project_root> <component...>
build_components() {
local project_root="$1"
shift
_dbg "project_root=$project_root components=($*)"
# Partition the requested components: Maven-built (pom.xml present) vs
# plain Dockerfile-only builds. maven_pl holds -pl module paths,
# maven_display the human-readable labels, maven_comps the component ids.
local -a maven_pl=()
local -a maven_display=()
local -a maven_comps=()
local -a docker_only=()
for comp in "$@"; do
local docker_dir docker_path
docker_dir=$("$CXLIB_DIR/cxconfig" dir "$comp")
docker_path="$project_root/$docker_dir"
if [[ -f "$docker_path/pom.xml" ]]; then
_dbg " $comp -> maven (docker_dir=$docker_dir has pom.xml)"
maven_pl+=("$docker_dir")
maven_display+=("docker/$(display_component_name "$comp")")
maven_comps+=("$comp")
elif [[ -f "$docker_path/Dockerfile" ]]; then
_dbg " $comp -> docker-only (docker_dir=$docker_dir has Dockerfile, no pom.xml)"
docker_only+=("$comp")
else
_dbg " $comp -> ERROR: docker_dir=$docker_dir has no pom.xml or Dockerfile"
err_msg "Component '$comp' has no pom.xml or Dockerfile"
return 1
fi
done
# One Maven reactor invocation covers every Maven component at once.
if [[ ${#maven_pl[@]} -gt 0 ]]; then
local pl_csv display_csv log_file
pl_csv=$(IFS=,; echo "${maven_pl[*]}")
display_csv=$(IFS=,; echo "${maven_display[*]}")
log_file=$(logfile cxbuild "maven")
_dbg "maven build: pl=$pl_csv log=$log_file"
# Run fmt:format before install so the formatter check built into the install
# lifecycle (fmt-maven-plugin:check) does not fail on formatting drift. Mirrors
# what qb does for local dev builds.
# bash -c argument mapping: "_" becomes $0, project_root is $1 for the
# cd, and after the shift pl_csv becomes the new $1 used by -pl.
run_logged "maven $display_csv" "$log_file" \
bash -c 'cd "$1" && shift && mvn com.spotify.fmt:fmt-maven-plugin:format install -pl "$1" -am -DskipTests -Djacoco.skip=true -T6' \
_ "$project_root" "$pl_csv" \
|| return 1
fi
# Cross-compile pass: rebuild each Maven component's image for the target
# platform from the context the Maven build just assembled.
if needs_cross_build "${CONTAINER_ARCH:-}"; then
_dbg "cross-build pass for CONTAINER_ARCH=$CONTAINER_ARCH"
for comp in "${maven_comps[@]}"; do
_dbg " buildx_rebuild $comp"
buildx_rebuild "$project_root" "$comp" "$CONTAINER_ARCH" || return 1
done
else
_dbg "no cross-build needed"
fi
# Dockerfile-only components: buildx with --platform when cross-compiling,
# plain docker build otherwise.
for comp in "${docker_only[@]}"; do
local log_file
log_file=$(logfile cxbuild "$comp")
local image docker_dir docker_path
docker_dir=$("$CXLIB_DIR/cxconfig" dir "$comp")
docker_path="$project_root/$docker_dir"
image=$("$CXLIB_DIR/cxconfig" image "$comp")
_dbg "docker-only build: $comp image=$image log=$log_file"
if needs_cross_build "${CONTAINER_ARCH:-}"; then
_dbg " using buildx --platform $CONTAINER_ARCH"
run_logged "docker $(display_component_name "$comp")" "$log_file" \
docker buildx build --platform "$CONTAINER_ARCH" --load \
-t "$image" "$docker_path" \
|| return 1
else
run_logged "docker $(display_component_name "$comp")" "$log_file" \
docker build \
-t "$image" "$docker_path" \
|| return 1
fi
done
}
# Rebuild a component image for a non-native platform. Uses the
# Docker build context that the fabric8 plugin assembled during
# the Maven build at target/docker/.../build/. That context
# contains a Dockerfile with Maven properties already resolved
# and a maven/ directory with the files from the assembly
# descriptor, so no Maven property interpolation is needed here.
# Saves the resulting image as a tarball for push_component.
# Usage: buildx_rebuild <project_root> <component> <platform>
buildx_rebuild() {
local project_root="$1"
local component="$2"
local platform="$3"
_dbg "project_root=$project_root component=$component platform=$platform"
local docker_dir docker_path image context_dir
docker_dir=$("$CXLIB_DIR/cxconfig" dir "$component")
docker_path="$project_root/$docker_dir"
image=$("$CXLIB_DIR/cxconfig" image "$component")
_dbg "docker_dir=$docker_dir image=$image"
# Take the first "build" directory found under target/docker.
# NOTE(review): assumes one assembled image per module — confirm no
# module produces multiple build contexts.
context_dir=$(find "$docker_path/target/docker" \
-type d -name build 2>/dev/null | head -1)
_dbg "context_dir=${context_dir:-<not found>}"
if [[ -z "$context_dir" ]]; then
err_msg "No assembled context for $component. Run the Maven build first."
return 1
fi
local log_file
log_file=$(logfile cxbuild "buildx-$component")
_dbg "buildx build: image=$image platform=$platform context=$context_dir log=$log_file"
# --load imports the result into the local Docker image store so it can
# be saved below; the NPM token build-arg is passed through (may be empty).
run_logged "buildx $(display_component_name "$component")" "$log_file" \
docker buildx build --platform "$platform" --load \
--build-arg "GITLAB_NPM_AUTH_TOKEN=${GITLAB_NPM_AUTH_TOKEN:-}" \
-t "$image" "$context_dir" \
|| return 1
local tarball="$docker_path/target/image.tar"
_dbg "saving cross-compiled image to $tarball"
# Best-effort: push_component falls back to a docker-save pipe when the
# tarball is missing, so a failed save is not fatal here.
docker save "$image" -o "$tarball" 2>/dev/null || true
}
# Push a component image to a k8s host. Uses prebuilt tarball if available,
# otherwise falls back to docker save.
# Usage: push_component <component> <host>
# Requires PROJECT_ROOT to be set (see require_project_root).
push_component() {
local component="$1"
local host="$2"
local docker_dir docker_path tarball image
docker_dir=$("$CXLIB_DIR/cxconfig" dir "$component")
docker_path="$PROJECT_ROOT/$docker_dir"
tarball="$docker_path/target/image.tar"
image=$("$CXLIB_DIR/cxconfig" image "$component")
_dbg "component=$component host=$host image=$image docker_dir=$docker_dir"
_dbg "tarball=$tarball exists=$([ -f "$tarball" ] && echo yes || echo no)"
local log_file
log_file=$(logfile cxpush "$component")
if [[ -f "$tarball" ]]; then
# The tarball exists when buildx_rebuild ran (cross-compiled image);
# push it directly so the target-architecture image is transferred.
_dbg "pushing via tarball"
run_logged "push $(display_component_name "$component")" "$log_file" \
"$CXLIB_DIR/kpush" -q -r "$host" -T "$tarball" "$image"
else
# No tarball: kpush takes the image from the local Docker daemon.
_dbg "pushing via docker save pipe (no tarball)"
run_logged "push $(display_component_name "$component")" "$log_file" \
"$CXLIB_DIR/kpush" -q -r "$host" "$image"
fi
}
# Deploy a component image to remote host
# Usage: deploy_component <component> <remote_host> <namespace>
# Looks up the image name, deployment name, and rollout timeout from the
# cxconfig helper, then delegates to kdeploy in quiet mode. Returns
# kdeploy's exit status via run_logged.
deploy_component() {
local component="$1"
local remote_host="$2"
local namespace="$3"
local image deploy_name timeout
image=$("$CXLIB_DIR/cxconfig" image "$component")
deploy_name=$("$CXLIB_DIR/cxconfig" deploy "$component")
timeout=$("$CXLIB_DIR/cxconfig" timeout "$component")
_dbg "component=$component host=$remote_host ns=$namespace"
_dbg "image=$image deploy=$deploy_name timeout=$timeout"
local log_file
log_file=$(logfile cxdeploy "$component")
_dbg "log=$log_file"
run_logged "deploy $(display_component_name "$component")" "$log_file" \
"$CXLIB_DIR/kdeploy" -q -r "$remote_host" -n "$namespace" -t "$timeout" "$image" "$deploy_name"
}
# Run a command with all of its output captured to a log file, reporting
# the step name and elapsed time through cxlibStepProgress/cxlibStepDone.
# On failure, the last 20 log lines are echoed to stderr and the log path
# is shown instead of the time. Returns the command's exit status.
# Usage: run_logged <name> <logfile> <command> [args...]
run_logged() {
  local step_name="$1" log_path="$2"
  shift 2
  _dbg "name='$step_name' logfile=$log_path cmd=($*)"
  local started rc
  started=$(date +%s)
  cxlibStepProgress "$step_name"
  "$@" > "$log_path" 2>&1
  rc=$?
  local duration
  duration=$(cxlibFormatElapsed $(( $(date +%s) - started )))
  _dbg "name='$step_name' status=$rc elapsed=${duration}"
  if (( rc != 0 )); then
    _dbg "FAILED — log contents tail:"
    tail -20 "$log_path" >&2 2>/dev/null || true
  fi
  cxlibStepDone "$step_name" "$duration" "$rc" "$log_path"
  return $rc
}
# ─────────────────────────────────────────────────────────────
# Completion hooks
# ─────────────────────────────────────────────────────────────
# Invoke the user's completion hook, if one is installed.
# Usage: run_completion_hook <script_name> <status>
# The executable at ~/.config/cx/hooks/completion receives two arguments:
# the script name and "success"/"failure". Hook failures and stderr are
# swallowed so a broken hook can never fail the calling script.
run_completion_hook() {
  local script_name="$1" status="$2"
  local hook="$HOME/.config/cx/hooks/completion"
  _dbg "script_name=$script_name status=$status hook=$hook"
  if [[ ! -x "$hook" ]]; then
    _dbg "hook not found or not executable"
    return 0
  fi
  _dbg "running hook: $hook $script_name $status"
  "$hook" "$script_name" "$status" 2>/dev/null || true
}
# ─────────────────────────────────────────────────────────────
# Log file helpers
# ─────────────────────────────────────────────────────────────
# Print a timestamped log file path under /tmp.
# Usage: logfile <prefix> [suffix]
# Example: logfile "cxbuild" "graphql" -> /tmp/cxbuild-graphql-20240102-153045.log
logfile() {
  local prefix="$1" suffix="${2:-}"
  local stamp
  stamp=$(date +%Y%m%d-%H%M%S)
  local stem="$prefix"
  [[ -n "$suffix" ]] && stem="${prefix}-${suffix}"
  local path="/tmp/${stem}-${stamp}.log"
  _dbg "-> $path"
  echo "$path"
}