From c4eb880f21f66acbacd7e8a60bc1807453823361 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Tue, 10 Sep 2024 10:49:47 +0200 Subject: [PATCH 01/69] Enable manually run build documentation in github actions --- .github/workflows/build-documentation.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-documentation.yml b/.github/workflows/build-documentation.yml index a2bd05f..b6d5a8b 100644 --- a/.github/workflows/build-documentation.yml +++ b/.github/workflows/build-documentation.yml @@ -1,5 +1,6 @@ name: Build documentation on: + workflow_dispatch: push: paths: - docs/** From 557657a220b895489ddfca8de93b7fe99be449da Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 4 Jun 2025 08:28:41 +0200 Subject: [PATCH 02/69] Refactored config loading with more dynamic loading of config files --- bin/roll | 1 + commands/config.cmd | 238 ++++++++++++++++++ commands/config.help | 43 ++++ commands/usage.help | 3 + utils/config.sh | 558 +++++++++++++++++++++++++++++++++++++++++++ utils/env.sh | 42 +--- 6 files changed, 854 insertions(+), 31 deletions(-) create mode 100644 commands/config.cmd create mode 100644 commands/config.help create mode 100644 utils/config.sh diff --git a/bin/roll b/bin/roll index f544cb2..2f21eef 100755 --- a/bin/roll +++ b/bin/roll @@ -13,6 +13,7 @@ readonly ROLL_DIR="$( && pwd )" source "${ROLL_DIR}/utils/core.sh" +source "${ROLL_DIR}/utils/config.sh" source "${ROLL_DIR}/utils/env.sh" ## verify docker is installed diff --git a/commands/config.cmd b/commands/config.cmd new file mode 100644 index 0000000..d0b1494 --- /dev/null +++ b/commands/config.cmd @@ -0,0 +1,238 @@ +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +if (( ${#ROLL_PARAMS[@]} == 0 )) || [[ "${ROLL_PARAMS[0]}" == "help" ]]; then + roll config --help || exit $? && exit $? 
+fi + +## Sub-command execution +case "${ROLL_PARAMS[0]}" in + show) + # Try to load configuration if in a project directory + if ROLL_ENV_PATH="$(locateEnvPath 2>/dev/null)"; then + loadRollConfig "${ROLL_ENV_PATH}" >/dev/null 2>&1 || { + error "Failed to load configuration from ${ROLL_ENV_PATH}/.env.roll" + exit 1 + } + else + warning "Not in a Roll project directory" + exit 1 + fi + + # Filter configuration if specified + filter="${ROLL_PARAMS[1]:-}" + showConfig "$filter" + ;; + + validate) + config_file="${ROLL_PARAMS[1]:-}" + + if [[ -z "$config_file" ]]; then + if ROLL_ENV_PATH="$(locateEnvPath 2>/dev/null)"; then + config_file="${ROLL_ENV_PATH}/.env.roll" + else + error "No configuration file specified and not in a Roll project directory" + exit 1 + fi + fi + + if [[ ! -f "$config_file" ]]; then + error "Configuration file not found: $config_file" + exit 1 + fi + + info "Validating configuration: $config_file" + + if validateConfig "$config_file"; then + success "Configuration is valid" + + # Also check for conflicts if we can load the config + if loadRollConfig "$(dirname "$config_file")" >/dev/null 2>&1; then + if checkConfigConflicts >/dev/null 2>&1; then + success "No configuration conflicts detected" + else + warning "Configuration conflicts detected (see above)" + exit 1 + fi + fi + else + error "Configuration validation failed" + exit 1 + fi + ;; + + conflicts) + # Check for configuration conflicts + if ROLL_ENV_PATH="$(locateEnvPath 2>/dev/null)"; then + loadRollConfig "${ROLL_ENV_PATH}" >/dev/null 2>&1 || { + error "Failed to load configuration from ${ROLL_ENV_PATH}/.env.roll" + exit 1 + } + else + error "Not in a Roll project directory" + exit 1 + fi + + info "Checking for configuration conflicts..." 
+ + if checkConfigConflicts; then + success "No configuration conflicts detected" + else + error "Configuration conflicts detected (see above)" + exit 1 + fi + ;; + + schema) + # Display configuration schema + initConfigSchema + + echo -e "\033[33mRoll Configuration Schema:\033[0m" + echo "" + + # Group configurations by category + echo -e "\033[36mCore Configuration:\033[0m" + i=0 + while [[ $i -lt ${#ROLL_CONFIG_SCHEMA_KEYS[@]} ]]; do + key="${ROLL_CONFIG_SCHEMA_KEYS[$i]}" + value="${ROLL_CONFIG_SCHEMA_VALUES[$i]}" + case "$key" in + ROLL_ENV_NAME|ROLL_ENV_TYPE|ROLL_ENV_SUBT) + printf " %-30s %s\n" "$key" "$value" + ;; + esac + i=$((i + 1)) + done + + echo "" + echo -e "\033[36mService Toggles:\033[0m" + i=0 + while [[ $i -lt ${#ROLL_CONFIG_SCHEMA_KEYS[@]} ]]; do + key="${ROLL_CONFIG_SCHEMA_KEYS[$i]}" + value="${ROLL_CONFIG_SCHEMA_VALUES[$i]}" + if [[ "$key" =~ ^ROLL_(NGINX|DB|REDIS|DRAGONFLY|VARNISH|ELASTICSEARCH|OPENSEARCH|ELASTICVUE|RABBITMQ|MONGODB|BROWSERSYNC|SELENIUM|TEST_DB|ALLURE|MAGEPACK|INCLUDE_GIT) ]] && [[ ! 
"$key" =~ _VERSION$ ]]; then + printf " %-30s %s\n" "$key" "$value" + fi + i=$((i + 1)) + done + + echo "" + echo -e "\033[36mPHP/Node/Composer Configuration:\033[0m" + i=0 + while [[ $i -lt ${#ROLL_CONFIG_SCHEMA_KEYS[@]} ]]; do + key="${ROLL_CONFIG_SCHEMA_KEYS[$i]}" + value="${ROLL_CONFIG_SCHEMA_VALUES[$i]}" + if [[ "$key" =~ ^(PHP_|COMPOSER_|NODE_) ]] || [[ "$key" =~ ^XDEBUG ]]; then + printf " %-30s %s\n" "$key" "$value" + fi + i=$((i + 1)) + done + + echo "" + echo -e "\033[36mDatabase Configuration:\033[0m" + i=0 + while [[ $i -lt ${#ROLL_CONFIG_SCHEMA_KEYS[@]} ]]; do + key="${ROLL_CONFIG_SCHEMA_KEYS[$i]}" + value="${ROLL_CONFIG_SCHEMA_VALUES[$i]}" + if [[ "$key" =~ ^(DB_|MYSQL_|MARIADB_) ]]; then + printf " %-30s %s\n" "$key" "$value" + fi + i=$((i + 1)) + done + + echo "" + echo -e "\033[36mService Version Configuration:\033[0m" + i=0 + while [[ $i -lt ${#ROLL_CONFIG_SCHEMA_KEYS[@]} ]]; do + key="${ROLL_CONFIG_SCHEMA_KEYS[$i]}" + value="${ROLL_CONFIG_SCHEMA_VALUES[$i]}" + if [[ "$key" =~ _VERSION$ ]] && [[ ! "$key" =~ ^(PHP_|DB_|MYSQL_|MARIADB_|NODE_|XDEBUG_|COMPOSER_) ]]; then + printf " %-30s %s\n" "$key" "$value" + fi + i=$((i + 1)) + done + + echo "" + echo -e "\033[36mTraefik/Network Configuration:\033[0m" + i=0 + while [[ $i -lt ${#ROLL_CONFIG_SCHEMA_KEYS[@]} ]]; do + key="${ROLL_CONFIG_SCHEMA_KEYS[$i]}" + value="${ROLL_CONFIG_SCHEMA_VALUES[$i]}" + if [[ "$key" =~ ^TRAEFIK_ ]]; then + printf " %-30s %s\n" "$key" "$value" + fi + i=$((i + 1)) + done + ;; + + set) + if [[ ${#ROLL_PARAMS[@]} -lt 3 ]]; then + error "Usage: roll config set " + exit 1 + fi + + key="${ROLL_PARAMS[1]}" + value="${ROLL_PARAMS[2]}" + + # Validate the configuration value + initConfigSchema + if ! 
validateConfigValue "$key" "$value"; then + error "Invalid value for $key: $value" + exit 1 + fi + + # Find configuration file + if ROLL_ENV_PATH="$(locateEnvPath 2>/dev/null)"; then + config_file="${ROLL_ENV_PATH}/.env.roll" + else + error "Not in a Roll project directory" + exit 1 + fi + + # Create backup + cp "$config_file" "${config_file}.backup.$(date +%Y%m%d_%H%M%S)" + + # Update or add the configuration + if grep -q "^${key}=" "$config_file"; then + # Update existing key - use different approach for macOS compatibility + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' "s/^${key}=.*/${key}=${value}/" "$config_file" + else + sed -i "s/^${key}=.*/${key}=${value}/" "$config_file" + fi + else + # Add new key + echo "${key}=${value}" >> "$config_file" + fi + + success "Configuration updated: ${key}=${value}" + info "Backup created: ${config_file}.backup.$(date +%Y%m%d_%H%M%S)" + ;; + + get) + if [[ ${#ROLL_PARAMS[@]} -lt 2 ]]; then + error "Usage: roll config get [default]" + exit 1 + fi + + key="${ROLL_PARAMS[1]}" + default_value="${ROLL_PARAMS[2]:-}" + + # Load configuration if in project directory + if ROLL_ENV_PATH="$(locateEnvPath 2>/dev/null)"; then + loadRollConfig "${ROLL_ENV_PATH}" >/dev/null 2>&1 || { + error "Failed to load configuration" + exit 1 + } + fi + + value="$(getConfig "$key" "$default_value")" + echo "$value" + ;; + + *) + error "Unknown config command: ${ROLL_PARAMS[0]}" + echo "Available commands: show, validate, conflicts, schema, set, get" + exit 1 + ;; +esac \ No newline at end of file diff --git a/commands/config.help b/commands/config.help new file mode 100644 index 0000000..b5e938a --- /dev/null +++ b/commands/config.help @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +[[ ! 
${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +ROLL_USAGE=$(cat < [options] + +\033[33mCommands:\033[0m + show [filter] Display current environment configuration + Optional filter to show only matching keys (regex) + + validate [file] Validate configuration file syntax and values + Uses current environment config if no file specified + + conflicts Check for configuration conflicts and compatibility issues + + schema Display the configuration schema with all available options + + set Set configuration value in current environment + Creates backup before modifying + + get [default] Get configuration value from current environment + Returns default if key is not set + +\033[33mExamples:\033[0m + roll config show # Show all configuration + roll config show ROLL_ # Show only ROLL_* variables + roll config validate # Validate current environment config + roll config conflicts # Check for conflicts + roll config schema # Show configuration schema + roll config set ROLL_REDIS 1 # Enable Redis + roll config get PHP_VERSION 8.1 # Get PHP version (default 8.1) + +\033[33mOptions:\033[0m + -h, --help Display this help menu + +\033[33mNotes:\033[0m + • Configuration files use KEY=value format + • Boolean values must be 0 or 1 + • The 'set' command creates automatic backups + • Use 'schema' to see all available configuration options +EOF +) \ No newline at end of file diff --git a/commands/usage.help b/commands/usage.help index 322536c..9892e7e 100755 --- a/commands/usage.help +++ b/commands/usage.help @@ -49,6 +49,7 @@ RollDev version $(cat ${ROLL_DIR}/version) svc Orchestrates global services such as traefik, portainer and dnsmasq via docker-compose env-init Configure environment by adding \033[31m'.env.roll'\033[0m file to the current working directory env Controls an environment from any point within the root project directory + config Manage and validate Roll configuration (see \033[31m'roll config -h'\033[0m for 
details) db Interacts with the db service on an environment (see \033[31m'roll db -h'\033[0m for details) redis Interacts with the redis service on an environment (see \033[31m'roll redis -h'\033[0m for details) install Initializes or updates roll configuration on host machine @@ -75,3 +76,5 @@ ${ENV_TYPE_USAGE:1} ${ROLL_DARWIN:1} EOF ) + +echo -e "${ROLL_USAGE}" diff --git a/utils/config.sh b/utils/config.sh new file mode 100644 index 0000000..a6f5d40 --- /dev/null +++ b/utils/config.sh @@ -0,0 +1,558 @@ +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +## Configuration Management System +## Compatible with Bash 3.2+ (macOS default) +## Cross-platform: Linux, macOS, WSL + +# Configuration cache using simple arrays instead of associative arrays for Bash 3.2 compatibility +ROLL_CONFIG_CACHE_KEYS=() +ROLL_CONFIG_CACHE_VALUES=() +ROLL_CONFIG_LOADED_FILES=() + +# Configuration schema using indexed arrays +ROLL_CONFIG_SCHEMA_KEYS=() +ROLL_CONFIG_SCHEMA_VALUES=() + +## Helper function to find index of key in array +function findConfigIndex() { + local key="$1" + local i=0 + for cached_key in "${ROLL_CONFIG_CACHE_KEYS[@]}"; do + if [[ "$cached_key" == "$key" ]]; then + echo $i + return 0 + fi + i=$((i + 1)) + done + echo -1 +} + +## Helper function to find schema index +function findSchemaIndex() { + local key="$1" + local i=0 + for schema_key in "${ROLL_CONFIG_SCHEMA_KEYS[@]}"; do + if [[ "$schema_key" == "$key" ]]; then + echo $i + return 0 + fi + i=$((i + 1)) + done + echo -1 +} + +## Helper function to check if file is loaded +function isFileLoaded() { + local file="$1" + local loaded_file + for loaded_file in "${ROLL_CONFIG_LOADED_FILES[@]}"; do + if [[ "$loaded_file" == "$file" ]]; then + return 0 + fi + done + return 1 +} + +# Initialize configuration schema +function initConfigSchema() { + # Skip if already initialized + if [[ ${#ROLL_CONFIG_SCHEMA_KEYS[@]} -gt 0 ]]; then + return 
0 + fi + + # Core Roll configuration + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ENV_NAME); ROLL_CONFIG_SCHEMA_VALUES+=("string:required") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ENV_TYPE); ROLL_CONFIG_SCHEMA_VALUES+=("string:required") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ENV_SUBT); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + + # Service toggles (boolean with defaults) + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_NGINX); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:1") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_DB); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:1") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_REDIS); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:1") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_DRAGONFLY); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_VARNISH); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ELASTICSEARCH); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_OPENSEARCH); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ELASTICVUE); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_RABBITMQ); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_MONGODB); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_BROWSERSYNC); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_SELENIUM); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_SELENIUM_DEBUG); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_TEST_DB); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ALLURE); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_MAGEPACK); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_INCLUDE_GIT); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + + # Traefik configuration + ROLL_CONFIG_SCHEMA_KEYS+=(TRAEFIK_DOMAIN); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + ROLL_CONFIG_SCHEMA_KEYS+=(TRAEFIK_SUBDOMAIN); 
ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + ROLL_CONFIG_SCHEMA_KEYS+=(TRAEFIK_LISTEN); ROLL_CONFIG_SCHEMA_VALUES+=("string:127.0.0.1") + + # PHP configuration + ROLL_CONFIG_SCHEMA_KEYS+=(PHP_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:8.1") + ROLL_CONFIG_SCHEMA_KEYS+=(PHP_XDEBUG_3); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:1") + ROLL_CONFIG_SCHEMA_KEYS+=(PHP_MEMORY_LIMIT); ROLL_CONFIG_SCHEMA_VALUES+=("string:2G") + + # Composer configuration + ROLL_CONFIG_SCHEMA_KEYS+=(COMPOSER_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + + # Database configuration + ROLL_CONFIG_SCHEMA_KEYS+=(DB_DISTRIBUTION); ROLL_CONFIG_SCHEMA_VALUES+=("string:mariadb") + ROLL_CONFIG_SCHEMA_KEYS+=(DB_DISTRIBUTION_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:10.4") + ROLL_CONFIG_SCHEMA_KEYS+=(MYSQL_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:8.0") + ROLL_CONFIG_SCHEMA_KEYS+=(MARIADB_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:10.4") + + # Service version configurations + ROLL_CONFIG_SCHEMA_KEYS+=(ELASTICSEARCH_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:7.17") + ROLL_CONFIG_SCHEMA_KEYS+=(RABBITMQ_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:3.11") + ROLL_CONFIG_SCHEMA_KEYS+=(REDIS_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:7.0") + ROLL_CONFIG_SCHEMA_KEYS+=(DRAGONFLY_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:latest") + ROLL_CONFIG_SCHEMA_KEYS+=(VARNISH_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:7.0") + ROLL_CONFIG_SCHEMA_KEYS+=(OPENSEARCH_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:2.5") + ROLL_CONFIG_SCHEMA_KEYS+=(MONGO_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:6.0") + ROLL_CONFIG_SCHEMA_KEYS+=(NGINX_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:1.24") + ROLL_CONFIG_SCHEMA_KEYS+=(MAGEPACK_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:2.3") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_SELENIUM_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:3.141.59") + + # Node configuration + ROLL_CONFIG_SCHEMA_KEYS+=(NODE_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:18") + + # Nginx 
configuration + ROLL_CONFIG_SCHEMA_KEYS+=(NGINX_TEMPLATE); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + ROLL_CONFIG_SCHEMA_KEYS+=(NGINX_PUBLIC); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + + # Magento specific + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ADMIN_AUTOLOGIN); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_MAGENTO_STATIC_CACHING); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + + # Environment paths and directories + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_WEB_ROOT); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_SYNC_IGNORE); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_CHOWN_DIR_LIST); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + + # Extensions and customizations + ROLL_CONFIG_SCHEMA_KEYS+=(ADD_PHP_EXT); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + + # Container configuration + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ENV_SHELL_CONTAINER); ROLL_CONFIG_SCHEMA_VALUES+=("string:php-fpm") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ENV_SHELL_COMMAND); ROLL_CONFIG_SCHEMA_VALUES+=("string:bash") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ENV_SHELL_DEBUG_CONTAINER); ROLL_CONFIG_SCHEMA_VALUES+=("string:php-debug") + + # XDebug configuration + ROLL_CONFIG_SCHEMA_KEYS+=(XDEBUG_CONNECT_BACK_HOST); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + ROLL_CONFIG_SCHEMA_KEYS+=(XDEBUG_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:debug") + + # System configuration + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_RESTART_POLICY); ROLL_CONFIG_SCHEMA_VALUES+=("string:always") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_IMAGE_REPOSITORY); ROLL_CONFIG_SCHEMA_VALUES+=("string:ghcr.io/dockergiant") +} + +## Get schema for a key +function getSchema() { + local key="$1" + local index=$(findSchemaIndex "$key") + if [[ $index -ge 0 ]]; then + echo "${ROLL_CONFIG_SCHEMA_VALUES[$index]}" + fi +} + +## Validate configuration value against schema +function validateConfigValue() { + local key="$1" + local value="$2" + local schema="$(getSchema "$key")" + + 
if [[ -z "$schema" ]]; then + # Unknown configuration key - allow but warn + warning "Unknown configuration key: $key" + return 0 + fi + + local type="${schema%%:*}" + local constraint="${schema##*:}" + + case "$type" in + boolean) + if [[ "$value" != "0" && "$value" != "1" ]]; then + error "Configuration $key must be 0 or 1, got: $value" + return 1 + fi + ;; + string) + if [[ "$constraint" == "required" && -z "$value" ]]; then + error "Configuration $key is required but empty" + return 1 + fi + ;; + integer) + if ! [[ "$value" =~ ^[0-9]+$ ]]; then + error "Configuration $key must be an integer, got: $value" + return 1 + fi + ;; + esac + + return 0 +} + +## Set default value for configuration if not set +function setConfigDefault() { + local key="$1" + local schema="$(getSchema "$key")" + + if [[ -z "$schema" ]]; then + return 0 + fi + + local constraint="${schema##*:}" + + # Skip if already set or no default available + local index=$(findConfigIndex "$key") + if [[ $index -ge 0 || "$constraint" == "required" || "$constraint" == "optional" ]]; then + return 0 + fi + + # Set default value + ROLL_CONFIG_CACHE_KEYS+=("$key") + ROLL_CONFIG_CACHE_VALUES+=("$constraint") + export "$key"="$constraint" +} + +## Load configuration from file with validation +function loadConfigFromFile() { + local config_file="$1" + local validate_only="${2:-false}" + + if [[ ! 
-f "$config_file" ]]; then + error "Configuration file not found: $config_file" + return 1 + fi + + # Check if already loaded + if isFileLoaded "$config_file" && [[ "$validate_only" == "false" ]]; then + return 0 + fi + + local line_num=0 + local errors=0 + + while IFS= read -r line || [[ -n "$line" ]]; do + line_num=$((line_num + 1)) + + # Skip empty lines and comments + [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && continue + + # Remove Windows line endings + line="${line%$'\r'}" + + # Parse key=value pairs + if [[ "$line" =~ ^[[:space:]]*([A-Z_][A-Z0-9_]*)=(.*)$ ]]; then + local key="${BASH_REMATCH[1]}" + local value="${BASH_REMATCH[2]}" + + # Remove quotes if present + if [[ "$value" =~ ^\"(.*)\"$ ]] || [[ "$value" =~ ^\'(.*)\'$ ]]; then + value="${BASH_REMATCH[1]}" + fi + + # Validate configuration + if ! validateConfigValue "$key" "$value"; then + error "Invalid configuration at $config_file:$line_num" + errors=$((errors + 1)) + continue + fi + + # Store in cache and export if not validation-only + if [[ "$validate_only" == "false" ]]; then + local index=$(findConfigIndex "$key") + if [[ $index -ge 0 ]]; then + # Update existing + ROLL_CONFIG_CACHE_VALUES[$index]="$value" + else + # Add new + ROLL_CONFIG_CACHE_KEYS+=("$key") + ROLL_CONFIG_CACHE_VALUES+=("$value") + fi + export "$key"="$value" + fi + + elif [[ "$line" =~ ^[[:space:]]*[^=]+$ ]]; then + warning "Invalid configuration line at $config_file:$line_num: $line" + fi + + done < "$config_file" + + if [[ $errors -gt 0 ]]; then + return 1 + fi + + # Mark as loaded + if [[ "$validate_only" == "false" ]]; then + ROLL_CONFIG_LOADED_FILES+=("$config_file") + fi + + return 0 +} + +## Load Roll environment configuration +function loadRollConfig() { + local config_path="$1" + + if [[ -z "$config_path" ]]; then + config_path="$(locateEnvPath 2>/dev/null)" || { + error "Could not locate environment configuration" + return 1 + } + fi + + local config_file="$config_path/.env.roll" + + # Initialize schema if not 
done + initConfigSchema + + # Load configuration from file + if ! loadConfigFromFile "$config_file"; then + return 1 + fi + + # Set OS-specific defaults + case "${OSTYPE:-undefined}" in + darwin*) + setConfigValue "ROLL_ENV_SUBT" "darwin" + ;; + linux*) + setConfigValue "ROLL_ENV_SUBT" "linux" + + # Check for WSL + if grep -sqi microsoft /proc/sys/kernel/osrelease 2>/dev/null; then + setConfigValue "ROLL_ENV_SUBT" "wsl" + fi + ;; + *) + error "Unsupported OSTYPE '${OSTYPE:-undefined}'" + return 1 + ;; + esac + + # Set system-specific exports + export USER_ID="$(id -u)" + export GROUP_ID="$(id -g)" + + # Set defaults for unset values + local i=0 + while [[ $i -lt ${#ROLL_CONFIG_SCHEMA_KEYS[@]} ]]; do + setConfigDefault "${ROLL_CONFIG_SCHEMA_KEYS[$i]}" + i=$((i + 1)) + done + + # Validate environment type + if ! assertValidEnvType; then + return 1 + fi + + # Post-processing for specific configurations + postProcessConfig + + return 0 +} + +## Set configuration value +function setConfigValue() { + local key="$1" + local value="$2" + + local index=$(findConfigIndex "$key") + if [[ $index -ge 0 ]]; then + # Update existing + ROLL_CONFIG_CACHE_VALUES[$index]="$value" + else + # Add new + ROLL_CONFIG_CACHE_KEYS+=("$key") + ROLL_CONFIG_CACHE_VALUES+=("$value") + fi + export "$key"="$value" +} + +## Post-process configuration after loading +function postProcessConfig() { + # Set PHP variant based on environment type + if [[ "${ROLL_ENV_TYPE}" =~ ^magento ]] || [[ "${ROLL_ENV_TYPE}" =~ ^wordpress ]]; then + export ROLL_SVC_PHP_VARIANT="-${ROLL_ENV_TYPE}" + fi + + # Set Node.js variant + if [[ "${NODE_VERSION}" != "0" ]]; then + export ROLL_SVC_PHP_NODE="-node${NODE_VERSION}" + fi + + # Database distribution defaults + if [[ -z "${DB_DISTRIBUTION_VERSION}" ]]; then + if [[ "${DB_DISTRIBUTION}" == "mysql" ]]; then + export DB_DISTRIBUTION_VERSION="${MYSQL_VERSION:-8.0}" + else + export DB_DISTRIBUTION_VERSION="${MARIADB_VERSION:-10.4}" + fi + fi + + # XDebug version 
configuration + if [[ "${PHP_XDEBUG_3}" == "1" ]]; then + export XDEBUG_VERSION="xdebug3" + else + export XDEBUG_VERSION="debug" + fi + + # WSL XDebug host configuration + if [[ "${ROLL_ENV_SUBT}" == "wsl" && -z "${XDEBUG_CONNECT_BACK_HOST}" ]]; then + export XDEBUG_CONNECT_BACK_HOST="host.docker.internal" + fi + + # Linux SSH auth sock path + if [[ "${ROLL_ENV_SUBT}" == "linux" && "$(id -u)" == "1000" ]]; then + export SSH_AUTH_SOCK_PATH_ENV="/run/host-services/ssh-auth.sock" + fi + + # Environment-specific defaults + if [[ "${ROLL_ENV_TYPE}" != "local" ]]; then + export ROLL_NGINX="${ROLL_NGINX:-1}" + export ROLL_DB="${ROLL_DB:-1}" + export ROLL_REDIS="${ROLL_REDIS:-1}" + + # Bash history and SSH directories + export CHOWN_DIR_LIST="/bash_history /home/www-data/.ssh ${ROLL_CHOWN_DIR_LIST:-}" + fi + + # Magento 1 specific configuration + if [[ "${ROLL_ENV_TYPE}" == "magento1" ]]; then + if [[ -f "${ROLL_ENV_PATH}/.modman/.basedir" ]]; then + export NGINX_PUBLIC="/$(cat "${ROLL_ENV_PATH}/.modman/.basedir")" + fi + + if [[ "${ROLL_MAGENTO_STATIC_CACHING}" == "1" ]]; then + export NGINX_TEMPLATE="${NGINX_TEMPLATE:-magento1.conf}" + else + export NGINX_TEMPLATE="${NGINX_TEMPLATE:-magento1-dev.conf}" + fi + fi + + # Magento 2 specific configuration + if [[ "${ROLL_ENV_TYPE}" == "magento2" ]]; then + export ROLL_VARNISH="${ROLL_VARNISH:-1}" + export ROLL_ELASTICSEARCH="${ROLL_ELASTICSEARCH:-1}" + export ROLL_RABBITMQ="${ROLL_RABBITMQ:-1}" + + if [[ "${ROLL_MAGENTO_STATIC_CACHING}" == "1" ]]; then + if [[ "${ROLL_ADMIN_AUTOLOGIN}" == "1" ]]; then + export NGINX_TEMPLATE="${NGINX_TEMPLATE:-magento2-autologin.conf}" + else + export NGINX_TEMPLATE="${NGINX_TEMPLATE:-magento2.conf}" + fi + else + if [[ "${ROLL_ADMIN_AUTOLOGIN}" == "1" ]]; then + export NGINX_TEMPLATE="${NGINX_TEMPLATE:-magento2-dev-autologin.conf}" + else + export NGINX_TEMPLATE="${NGINX_TEMPLATE:-magento2-dev.conf}" + fi + fi + fi +} + +## Validate configuration file without loading +function 
validateConfig() { + local config_file="$1" + + if [[ -z "$config_file" ]]; then + config_file="$(locateEnvPath)/.env.roll" + fi + + # Initialize schema if not done + initConfigSchema + + loadConfigFromFile "$config_file" "true" +} + +## Get configuration value +function getConfig() { + local key="$1" + local default_value="$2" + + local index=$(findConfigIndex "$key") + if [[ $index -ge 0 ]]; then + echo "${ROLL_CONFIG_CACHE_VALUES[$index]}" + elif [[ -n "${!key}" ]]; then + echo "${!key}" + else + echo "${default_value}" + fi +} + +## Set configuration value +function setConfig() { + local key="$1" + local value="$2" + + if validateConfigValue "$key" "$value"; then + setConfigValue "$key" "$value" + return 0 + else + return 1 + fi +} + +## Display configuration summary +function showConfig() { + local filter="${1:-}" + + echo -e "\033[33mRoll Configuration:\033[0m" + echo "Environment: ${ROLL_ENV_NAME:-} (${ROLL_ENV_TYPE:-})" + echo "Platform: ${ROLL_ENV_SUBT:-}" + echo "" + + local i=0 + while [[ $i -lt ${#ROLL_CONFIG_CACHE_KEYS[@]} ]]; do + local key="${ROLL_CONFIG_CACHE_KEYS[$i]}" + local value="${ROLL_CONFIG_CACHE_VALUES[$i]}" + + if [[ -n "$filter" && ! 
"$key" =~ $filter ]]; then + i=$((i + 1)) + continue + fi + + printf " %-30s = %s\n" "$key" "$value" + i=$((i + 1)) + done +} + +## Check for configuration conflicts +function checkConfigConflicts() { + local errors=0 + + # Redis vs Dragonfly conflict + if [[ "$(getConfig ROLL_REDIS 0)" == "1" && "$(getConfig ROLL_DRAGONFLY 0)" == "1" ]]; then + error "Configuration conflict: ROLL_REDIS and ROLL_DRAGONFLY cannot both be enabled" + errors=$((errors + 1)) + fi + + # Environment type specific validations + if [[ "${ROLL_ENV_TYPE}" == "magento2" ]]; then + if [[ "$(getConfig ROLL_ELASTICSEARCH 0)" == "1" && "$(getConfig ROLL_OPENSEARCH 0)" == "1" ]]; then + warning "Both Elasticsearch and OpenSearch are enabled - this may cause conflicts" + fi + fi + + # Database distribution validation + local db_dist="$(getConfig DB_DISTRIBUTION mariadb)" + if [[ "$db_dist" != "mysql" && "$db_dist" != "mariadb" ]]; then + error "DB_DISTRIBUTION must be either 'mysql' or 'mariadb', got: $db_dist" + errors=$((errors + 1)) + fi + + return $errors +} + +## Legacy compatibility wrapper - replace old loadEnvConfig calls +function loadEnvConfig() { + local env_path="$1" + loadRollConfig "$env_path" +} \ No newline at end of file diff --git a/utils/env.sh b/utils/env.sh index 88b4895..0d54070 100644 --- a/utils/env.sh +++ b/utils/env.sh @@ -31,39 +31,19 @@ function locateEnvPath () { echo "${ROLL_ENV_PATH}" } +# Legacy function - now uses centralized config system function loadEnvConfig () { local ROLL_ENV_PATH="${1}" - eval "$(cat "${ROLL_ENV_PATH}/.env.roll" | sed 's/\r$//g' | grep "^ROLL_")" - eval "$(cat "${ROLL_ENV_PATH}/.env.roll" | sed 's/\r$//g' | grep "^TRAEFIK_")" - eval "$(cat "${ROLL_ENV_PATH}/.env.roll" | sed 's/\r$//g' | grep "^PHP_")" - eval "$(cat "${ROLL_ENV_PATH}/.env.roll" | sed 's/\r$//g' | grep "^NGINX_")" - eval "$(cat "${ROLL_ENV_PATH}/.env.roll" | sed 's/\r$//g' | grep "_VERSION")" - eval "$(cat "${ROLL_ENV_PATH}/.env.roll" | sed 's/\r$//g' | grep "^DB_")" - eval 
"$(cat "${ROLL_ENV_PATH}/.env.roll" | sed 's/\r$//g' | grep "ADD_PHP_EXT")" - - - ROLL_ENV_NAME="${ROLL_ENV_NAME:-}" - ROLL_ENV_TYPE="${ROLL_ENV_TYPE:-}" - ROLL_ENV_SUBT="" - - case "${OSTYPE:-undefined}" in - darwin*) - ROLL_ENV_SUBT=darwin - ;; - linux*) - ROLL_ENV_SUBT=linux - ;; - *) - fatal "Unsupported OSTYPE '${OSTYPE:-undefined}'" - ;; - esac - - export USER_ID=$(id -u $USER) - export GROUP_ID=$(id -g $USER) - export OSTYPE=$OSTYPE - export ADD_PHP_EXT=$ADD_PHP_EXT - - assertValidEnvType + + # Load new centralized configuration + if ! loadRollConfig "${ROLL_ENV_PATH}"; then + return 1 + fi + + # Set global environment path for backward compatibility + export ROLL_ENV_PATH="${ROLL_ENV_PATH}" + + return 0 } function renderEnvNetworkName() { From fa758df887c4c879b499e5eb0c94e262c0ad86e6 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 4 Jun 2025 09:54:18 +0200 Subject: [PATCH 03/69] Added help files of version and install commands --- commands/install.help | 50 +++++++++++++++++++++++++++++++++++++++++++ commands/version.help | 38 ++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+) create mode 100644 commands/install.help create mode 100644 commands/version.help diff --git a/commands/install.help b/commands/install.help new file mode 100644 index 0000000..8800f12 --- /dev/null +++ b/commands/install.help @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +[[ ! 
${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +ROLL_USAGE=$(cat <&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +ROLL_USAGE=$(cat < Date: Wed, 4 Jun 2025 09:55:02 +0200 Subject: [PATCH 04/69] Added new system for better command registry and auto discovery on many places --- bin/roll | 39 +--- commands/registry.cmd | 202 ++++++++++++++++++++ commands/registry.help | 53 ++++++ utils/core.sh | 5 +- utils/registry.sh | 415 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 682 insertions(+), 32 deletions(-) create mode 100644 commands/registry.cmd create mode 100644 commands/registry.help create mode 100644 utils/registry.sh diff --git a/bin/roll b/bin/roll index 2f21eef..ad6162c 100755 --- a/bin/roll +++ b/bin/roll @@ -14,6 +14,7 @@ readonly ROLL_DIR="$( )" source "${ROLL_DIR}/utils/core.sh" source "${ROLL_DIR}/utils/config.sh" +source "${ROLL_DIR}/utils/registry.sh" source "${ROLL_DIR}/utils/env.sh" ## verify docker is installed @@ -47,37 +48,15 @@ if (( "$#" )); then ROLL_ENV_PATH="$(locateEnvPath 2>/dev/null)" || true ROLL_ENV_TYPE="$(renderEnvType 2>/dev/null)" || true - if [[ -f "${ROLL_HOME_DIR}/reclu/${1}.cmd" ]]; then + # Use registry system for command discovery + COMMAND_RESULT="$(findCommand "$1")" + + if [[ "$COMMAND_RESULT" =~ ^found: ]]; then ROLL_CMD_VERB="$1" - ROLL_CMD_EXEC="${ROLL_HOME_DIR}/reclu/${1}.cmd" - ROLL_CMD_HELP="${ROLL_HOME_DIR}/reclu/${1}.help" - shift - elif [[ -f "${ROLL_HOME_DIR}/reclu/${ROLL_ENV_TYPE}/${1}.cmd" ]]; then - ROLL_CMD_VERB="$1" - ROLL_CMD_EXEC="${ROLL_HOME_DIR}/reclu/${ROLL_ENV_TYPE}/${1}.cmd" - ROLL_CMD_HELP="${ROLL_HOME_DIR}/reclu/${ROLL_ENV_TYPE}/${1}.help" - shift - elif [[ -f "${ROLL_DIR}/commands/${1}.cmd" ]]; then - ROLL_CMD_VERB="$1" - ROLL_CMD_EXEC="${ROLL_DIR}/commands/${1}.cmd" - ROLL_CMD_HELP="${ROLL_DIR}/commands/${1}.help" - shift - elif [[ -f "${ROLL_DIR}/commands/${ROLL_ENV_TYPE}/${1}.cmd" ]]; then 
- ROLL_CMD_VERB="$1" - ROLL_CMD_EXEC="${ROLL_DIR}/commands/${ROLL_ENV_TYPE}/${1}.cmd" - ROLL_CMD_HELP="${ROLL_DIR}/commands/${ROLL_ENV_TYPE}/${1}.help" - shift - elif [[ -f "${ROLL_ENV_PATH}/.roll/commands/${1}.cmd" ]]; then - ROLL_CMD_VERB="$1" - ROLL_CMD_ANYARGS+=("$1") - ROLL_CMD_EXEC="${ROLL_ENV_PATH}/.roll/commands/${1}.cmd" - ROLL_CMD_HELP="${ROLL_ENV_PATH}/.roll/commands/${1}.help" - shift - elif [[ -f "${ROLL_HOME_DIR}/commands/${1}.cmd" ]]; then - ROLL_CMD_VERB="$1" - ROLL_CMD_ANYARGS+=("$1") - ROLL_CMD_EXEC="${ROLL_HOME_DIR}/commands/${1}.cmd" - ROLL_CMD_HELP="${ROLL_HOME_DIR}/commands/${1}.help" + # Extract cmd_path and help_path from "found:cmd_path:help_path" + COMMAND_RESULT="${COMMAND_RESULT#found:}" + ROLL_CMD_EXEC="${COMMAND_RESULT%%:*}" + ROLL_CMD_HELP="${COMMAND_RESULT#*:}" shift else ROLL_HELP=1 diff --git a/commands/registry.cmd b/commands/registry.cmd new file mode 100644 index 0000000..903e7e7 --- /dev/null +++ b/commands/registry.cmd @@ -0,0 +1,202 @@ +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +if (( ${#ROLL_PARAMS[@]} == 0 )) || [[ "${ROLL_PARAMS[0]}" == "help" ]]; then + roll registry --help || exit $? && exit $? 
+fi + +## Sub-command execution +case "${ROLL_PARAMS[0]}" in + list) + # List all available commands + filter="${ROLL_PARAMS[1]:-}" + category="${ROLL_PARAMS[2]:-}" + + initializeRegistry + + if [[ -n "$category" ]]; then + echo -e "\033[33mCommands in '${category}' category:\033[0m" + listRegisteredCommands "$filter" "$category" + elif [[ -n "$filter" ]]; then + echo -e "\033[33mCommands matching '${filter}':\033[0m" + listRegisteredCommands "$filter" + else + echo -e "\033[33mAll registered commands:\033[0m" + listRegisteredCommands + fi + ;; + + categories) + # List commands organized by category + category="${ROLL_PARAMS[1]:-}" + + initializeRegistry + + if [[ -n "$category" ]]; then + echo -e "\033[33m${category^} Commands:\033[0m" + listCommandsByCategory "$category" + else + listCommandsByCategory + fi + ;; + + info) + # Show detailed information about a specific command + if [[ ${#ROLL_PARAMS[@]} -lt 2 ]]; then + error "Usage: roll registry info " + exit 1 + fi + + command="${ROLL_PARAMS[1]}" + + initializeRegistry + + if ! 
isCommandRegistered "$command"; then + error "Command '$command' not found in registry" + exit 1 + fi + + echo -e "\033[33mCommand Information: $command\033[0m" + echo " Path: $(getCommandInfo "$command" "path")" + echo " Help File: $(getCommandInfo "$command" "help")" + echo " Category: $(getCommandInfo "$command" "category")" + echo " Priority: $(getCommandInfo "$command" "priority")" + echo " Description: $(getCommandInfo "$command" "description")" + ;; + + search) + # Search for commands by name or description + if [[ ${#ROLL_PARAMS[@]} -lt 2 ]]; then + error "Usage: roll registry search " + exit 1 + fi + + pattern="${ROLL_PARAMS[1]}" + + initializeRegistry + + echo -e "\033[33mSearching for commands matching: '$pattern'\033[0m" + echo "" + + found=0 + i=0 + while [[ $i -lt ${#ROLL_REGISTRY_COMMANDS[@]} ]]; do + command="${ROLL_REGISTRY_COMMANDS[$i]}" + description="${ROLL_REGISTRY_DESCRIPTIONS[$i]}" + category="${ROLL_REGISTRY_CATEGORIES[$i]}" + + if [[ "$command" =~ $pattern ]] || [[ "$description" =~ $pattern ]]; then + printf " %-20s %-10s %s\n" "$command" "[$category]" "$description" + found=1 + fi + i=$((i + 1)) + done + + if [[ $found -eq 0 ]]; then + info "No commands found matching '$pattern'" + fi + ;; + + stats) + # Display registry statistics + showRegistryStats + ;; + + refresh) + # Refresh the command registry + info "Refreshing command registry..." + refreshRegistry + success "Command registry refreshed" + showRegistryStats + ;; + + export) + # Export command list in various formats + format="${ROLL_PARAMS[1]:-simple}" + + case "$format" in + json|csv|simple) + exportCommands "$format" + ;; + *) + error "Unsupported export format: $format" + echo "Supported formats: simple, json, csv" + exit 1 + ;; + esac + ;; + + validate) + # Validate registry integrity + initializeRegistry + + info "Validating command registry integrity..." 
+ + errors=0 + i=0 + while [[ $i -lt ${#ROLL_REGISTRY_COMMANDS[@]} ]]; do + command="${ROLL_REGISTRY_COMMANDS[$i]}" + cmd_path="${ROLL_REGISTRY_PATHS[$i]}" + help_path="${ROLL_REGISTRY_HELP_PATHS[$i]}" + + # Check if command file exists + if [[ ! -f "$cmd_path" ]]; then + error "Command file missing: $cmd_path (for command: $command)" + errors=$((errors + 1)) + fi + + # Check if help file exists (warning only) + if [[ ! -f "$help_path" ]]; then + warning "Help file missing: $help_path (for command: $command)" + fi + + i=$((i + 1)) + done + + if [[ $errors -eq 0 ]]; then + success "Registry validation passed" + else + error "Registry validation failed with $errors errors" + exit 1 + fi + ;; + + paths) + # Show command search paths and their priorities + echo -e "\033[33mCommand Search Paths (by priority):\033[0m" + echo "" + + echo -e "\033[36mGlobal Command Paths:\033[0m" + for search_path in "${ROLL_COMMAND_SEARCH_PATHS[@]}"; do + priority="${search_path%%:*}" + directory="${search_path##*:}" + status="❌" + [[ -d "$directory" ]] && status="✅" + + printf " %s Priority %s: %s\n" "$status" "$priority" "$directory" + done + + if [[ -n "${ROLL_ENV_TYPE}" ]]; then + echo "" + echo -e "\033[36mEnvironment-Specific Paths (${ROLL_ENV_TYPE}):\033[0m" + while IFS= read -r env_path; do + [[ -z "$env_path" ]] && continue + priority="${env_path%%:*}" + directory="${env_path##*:}" + status="❌" + [[ -d "$directory" ]] && status="✅" + + printf " %s Priority %s: %s\n" "$status" "$priority" "$directory" + done < <(getEnvCommandPaths) + else + echo "" + info "No environment loaded - environment-specific paths not shown" + fi + ;; + + *) + error "Unknown registry command: ${ROLL_PARAMS[0]}" + echo "Available commands: list, categories, info, search, stats, refresh, export, validate, paths" + exit 1 + ;; +esac \ No newline at end of file diff --git a/commands/registry.help b/commands/registry.help new file mode 100644 index 0000000..c3f7702 --- /dev/null +++ b/commands/registry.help @@ 
-0,0 +1,53 @@ +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +echo -e "\033[33mUsage:\033[0m" +echo " registry [options]" +echo "" +echo -e "\033[33mCommands:\033[0m" +echo " list [filter] [category] List all registered commands" +echo " Optional filter to match command names (regex)" +echo " Optional category to filter by category" +echo "" +echo " categories [category] List commands organized by category" +echo " Show specific category if provided" +echo "" +echo " info Show detailed information about a specific command" +echo " Including path, help file, category, and priority" +echo "" +echo " search Search for commands by name or description" +echo " Pattern can be regex for flexible matching" +echo "" +echo " stats Display registry statistics and category counts" +echo "" +echo " refresh Refresh the command registry by rescanning directories" +echo " Useful after adding new commands" +echo "" +echo " export [format] Export command list in specified format" +echo " Formats: simple (default), json, csv" +echo "" +echo " validate Validate registry integrity" +echo " Check if command and help files exist" +echo "" +echo " paths Show command search paths and their priorities" +echo " Displays which directories are scanned" +echo "" +echo -e "\033[33mExamples:\033[0m" +echo " roll registry list # List all commands" +echo " roll registry list config # List commands matching 'config'" +echo " roll registry list \"\" magento2 # List commands in magento2 category" +echo " roll registry categories # Show all categories with commands" +echo " roll registry info config # Show details about config command" +echo " roll registry search database # Search for database-related commands" +echo " roll registry export json # Export commands as JSON" +echo " roll registry validate # Check registry integrity" +echo "" +echo -e "\033[33mOptions:\033[0m" +echo " -h, --help Display this help menu" 
+echo "" +echo -e "\033[33mNotes:\033[0m" +echo " - Commands are discovered from multiple directories with priorities" +echo " - Lower priority numbers have higher precedence" +echo " - Environment-specific commands override global commands" +echo " - The registry caches command information for performance" +echo " - Use 'refresh' if you add new commands and they don't appear" \ No newline at end of file diff --git a/utils/core.sh b/utils/core.sh index 410fca0..fb31a62 100644 --- a/utils/core.sh +++ b/utils/core.sh @@ -121,6 +121,7 @@ function disconnectPeeredServices { done } -function isOnline { - ping -q -c1 google.com &>/dev/null && echo "true" || echo "false" +# Main logic with the timeout function +function isOnline() { + (ping -q -c1 -t 2 8.8.8.8 &>/dev/null && echo "true") || (ping -q -c1 -t 2 1.1.1.1 &>/dev/null && echo "true") || echo "false" } \ No newline at end of file diff --git a/utils/registry.sh b/utils/registry.sh new file mode 100644 index 0000000..96b56d5 --- /dev/null +++ b/utils/registry.sh @@ -0,0 +1,415 @@ +#!/usr/bin/env bash +[[ ! 
${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +## Command Registry System +## Discovers and manages commands across all Roll directories +## Compatible with Bash 3.2+ (macOS default) + +# Command registry cache using indexed arrays for Bash 3.2 compatibility +ROLL_REGISTRY_COMMANDS=() +ROLL_REGISTRY_PATHS=() +ROLL_REGISTRY_HELP_PATHS=() +ROLL_REGISTRY_CATEGORIES=() +ROLL_REGISTRY_DESCRIPTIONS=() +ROLL_REGISTRY_PRIORITIES=() +ROLL_REGISTRY_INITIALIZED=0 + +# Command search paths with priorities (lower number = higher priority) +ROLL_COMMAND_SEARCH_PATHS=( + "2:${ROLL_HOME_DIR}/commands" + "3:${ROLL_HOME_DIR}/reclu" + "4:${ROLL_DIR}/commands" +) + +# Environment-specific command paths (added dynamically if env is available) +function getEnvCommandPaths() { + local env_paths=() + + # Add project-local commands if ROLL_ENV_PATH is available + if [[ -n "${ROLL_ENV_PATH}" && -d "${ROLL_ENV_PATH}/.roll/commands" ]]; then + env_paths+=("1:${ROLL_ENV_PATH}/.roll/commands") + fi + + # Add environment-specific commands if ROLL_ENV_TYPE is available + if [[ -n "${ROLL_ENV_TYPE}" ]]; then + [[ -d "${ROLL_HOME_DIR}/reclu/${ROLL_ENV_TYPE}" ]] && env_paths+=("1:${ROLL_HOME_DIR}/reclu/${ROLL_ENV_TYPE}") + [[ -d "${ROLL_DIR}/commands/${ROLL_ENV_TYPE}" ]] && env_paths+=("2:${ROLL_DIR}/commands/${ROLL_ENV_TYPE}") + fi + + printf '%s\n' "${env_paths[@]}" +} + +## Helper function to find command index in registry +function findCommandIndex() { + local command="$1" + local i=0 + for registered_command in "${ROLL_REGISTRY_COMMANDS[@]}"; do + if [[ "$registered_command" == "$command" ]]; then + echo $i + return 0 + fi + i=$((i + 1)) + done + echo -1 +} + +## Extract command metadata from help file +function extractCommandMetadata() { + local help_file="$1" + local metadata_type="$2" + + if [[ ! 
-f "$help_file" ]]; then + echo "" + return 0 + fi + + case "$metadata_type" in + description) + # Simple approach - just return empty for now + echo "" + ;; + category) + # Simple approach - just return general for now + echo "general" + ;; + *) + echo "" + ;; + esac + + return 0 +} + +## Register a single command in the registry +function registerCommand() { + local command="$1" + local cmd_path="$2" + local help_path="$3" + local priority="$4" + local category="${5:-general}" + + local existing_index=$(findCommandIndex "$command") + + if [[ $existing_index -ge 0 ]]; then + # Command already exists, check priority + local existing_priority="${ROLL_REGISTRY_PRIORITIES[$existing_index]}" + if [[ $priority -lt $existing_priority ]]; then + # New command has higher priority, replace it + ROLL_REGISTRY_PATHS[$existing_index]="$cmd_path" + ROLL_REGISTRY_HELP_PATHS[$existing_index]="$help_path" + ROLL_REGISTRY_PRIORITIES[$existing_index]="$priority" + ROLL_REGISTRY_CATEGORIES[$existing_index]="$category" + ROLL_REGISTRY_DESCRIPTIONS[$existing_index]="$(extractCommandMetadata "$help_path" "description")" + fi + else + # New command, add to registry + ROLL_REGISTRY_COMMANDS+=("$command") + ROLL_REGISTRY_PATHS+=("$cmd_path") + ROLL_REGISTRY_HELP_PATHS+=("$help_path") + ROLL_REGISTRY_PRIORITIES+=("$priority") + ROLL_REGISTRY_CATEGORIES+=("$category") + ROLL_REGISTRY_DESCRIPTIONS+=("$(extractCommandMetadata "$help_path" "description")") + fi +} + +## Scan a directory for commands +function scanCommandDirectory() { + local search_entry="$1" + local priority="${search_entry%%:*}" + local directory="${search_entry##*:}" + local category="${2:-general}" + + # Skip if directory doesn't exist + if [[ ! -d "$directory" ]]; then + return 0 + fi + + local cmd_file help_file command_name + + # Find all .cmd files in directory + for cmd_file in "$directory"/*.cmd; do + # Skip if no .cmd files found (glob didn't match) + [[ ! 
-f "$cmd_file" ]] && continue + + command_name="$(basename "$cmd_file" .cmd)" + help_file="$directory/$command_name.help" + + # Extract category from help file if available + if [[ -f "$help_file" ]]; then + local extracted_category="$(extractCommandMetadata "$help_file" "category")" + [[ -n "$extracted_category" && "$extracted_category" != "general" ]] && category="$extracted_category" + fi + + registerCommand "$command_name" "$cmd_file" "$help_file" "$priority" "$category" + done +} + +## Initialize command registry by scanning all directories +function initializeRegistry() { + # Skip if already initialized + if [[ $ROLL_REGISTRY_INITIALIZED -eq 1 ]]; then + return 0 + fi + + # Clear existing registry + ROLL_REGISTRY_COMMANDS=() + ROLL_REGISTRY_PATHS=() + ROLL_REGISTRY_HELP_PATHS=() + ROLL_REGISTRY_CATEGORIES=() + ROLL_REGISTRY_DESCRIPTIONS=() + ROLL_REGISTRY_PRIORITIES=() + + # Scan environment-specific directories first (highest priority) + while IFS= read -r env_path; do + [[ -n "$env_path" ]] && scanCommandDirectory "$env_path" "environment" + done < <(getEnvCommandPaths) + + # Scan global command directories + local search_path + for search_path in "${ROLL_COMMAND_SEARCH_PATHS[@]}"; do + scanCommandDirectory "$search_path" "global" + done + + ROLL_REGISTRY_INITIALIZED=1 +} + +## Get command information from registry +function getCommandInfo() { + local command="$1" + local info_type="$2" + + local index=$(findCommandIndex "$command") + [[ $index -eq -1 ]] && return 1 + + case "$info_type" in + path) + echo "${ROLL_REGISTRY_PATHS[$index]}" + ;; + help) + echo "${ROLL_REGISTRY_HELP_PATHS[$index]}" + ;; + category) + echo "${ROLL_REGISTRY_CATEGORIES[$index]}" + ;; + description) + echo "${ROLL_REGISTRY_DESCRIPTIONS[$index]}" + ;; + priority) + echo "${ROLL_REGISTRY_PRIORITIES[$index]}" + ;; + *) + return 1 + ;; + esac +} + +## Check if command exists in registry +function isCommandRegistered() { + local command="$1" + local index=$(findCommandIndex "$command") 
+ [[ $index -ge 0 ]] +} + +## List all registered commands +function listRegisteredCommands() { + local filter="${1:-}" + local category_filter="${2:-}" + + local i=0 + while [[ $i -lt ${#ROLL_REGISTRY_COMMANDS[@]} ]]; do + local command="${ROLL_REGISTRY_COMMANDS[$i]}" + local category="${ROLL_REGISTRY_CATEGORIES[$i]}" + + # Apply filters + if [[ -n "$filter" && ! "$command" =~ $filter ]]; then + i=$((i + 1)) + continue + fi + + if [[ -n "$category_filter" && "$category" != "$category_filter" ]]; then + i=$((i + 1)) + continue + fi + + echo "$command" + i=$((i + 1)) + done +} + +## List commands by category +function listCommandsByCategory() { + local target_category="${1:-}" + + # Get unique categories if no specific category requested + if [[ -z "$target_category" ]]; then + local categories=() + local i=0 + while [[ $i -lt ${#ROLL_REGISTRY_CATEGORIES[@]} ]]; do + local category="${ROLL_REGISTRY_CATEGORIES[$i]}" + local found=0 + local existing_category + for existing_category in "${categories[@]}"; do + if [[ "$existing_category" == "$category" ]]; then + found=1 + break + fi + done + [[ $found -eq 0 ]] && categories+=("$category") + i=$((i + 1)) + done + + # Display all categories + for category in "${categories[@]}"; do + echo -e "\033[33m${category^} Commands:\033[0m" + listCommandsByCategory "$category" + echo "" + done + return 0 + fi + + # List commands in specific category + local i=0 + while [[ $i -lt ${#ROLL_REGISTRY_COMMANDS[@]} ]]; do + local command="${ROLL_REGISTRY_COMMANDS[$i]}" + local category="${ROLL_REGISTRY_CATEGORIES[$i]}" + local description="${ROLL_REGISTRY_DESCRIPTIONS[$i]}" + + if [[ "$category" == "$target_category" ]]; then + printf " %-20s %s\n" "$command" "$description" + fi + i=$((i + 1)) + done +} + +## Find command and return its execution details +function findCommand() { + local command="$1" + + # Initialize registry if needed + initializeRegistry + + # Check registry first + if isCommandRegistered "$command"; then + local 
cmd_path="$(getCommandInfo "$command" "path")" + local help_path="$(getCommandInfo "$command" "help")" + + # Return in format: "found:cmd_path:help_path" + echo "found:$cmd_path:$help_path" + return 0 + fi + + # Command not found + echo "notfound" + return 0 +} + +## Refresh registry (useful after adding new commands) +function refreshRegistry() { + ROLL_REGISTRY_INITIALIZED=0 + initializeRegistry +} + +## Display registry statistics +function showRegistryStats() { + initializeRegistry + + echo -e "\033[33mCommand Registry Statistics:\033[0m" + echo " Total commands: ${#ROLL_REGISTRY_COMMANDS[@]}" + + # Count by category + local categories=() + local category_counts=() + local i=0 + + while [[ $i -lt ${#ROLL_REGISTRY_CATEGORIES[@]} ]]; do + local category="${ROLL_REGISTRY_CATEGORIES[$i]}" + local found_index=-1 + local j=0 + + # Find existing category + for existing_category in "${categories[@]}"; do + if [[ "$existing_category" == "$category" ]]; then + found_index=$j + break + fi + j=$((j + 1)) + done + + if [[ $found_index -ge 0 ]]; then + # Increment existing category count + category_counts[$found_index]=$((${category_counts[$found_index]} + 1)) + else + # Add new category + categories+=("$category") + category_counts+=(1) + fi + + i=$((i + 1)) + done + + # Display category counts + i=0 + while [[ $i -lt ${#categories[@]} ]]; do + printf " %-15s: %d commands\n" "${categories[$i]^}" "${category_counts[$i]}" + i=$((i + 1)) + done +} + +## Export command list for external tools +function exportCommands() { + local format="${1:-simple}" + + initializeRegistry + + case "$format" in + json) + echo "[" + local i=0 + while [[ $i -lt ${#ROLL_REGISTRY_COMMANDS[@]} ]]; do + local command="${ROLL_REGISTRY_COMMANDS[$i]}" + local path="${ROLL_REGISTRY_PATHS[$i]}" + local help_path="${ROLL_REGISTRY_HELP_PATHS[$i]}" + local category="${ROLL_REGISTRY_CATEGORIES[$i]}" + local description="${ROLL_REGISTRY_DESCRIPTIONS[$i]}" + local priority="${ROLL_REGISTRY_PRIORITIES[$i]}" + + 
echo " {" + echo " \"command\": \"$command\"," + echo " \"path\": \"$path\"," + echo " \"help_path\": \"$help_path\"," + echo " \"category\": \"$category\"," + echo " \"description\": \"$description\"," + echo " \"priority\": $priority" + if [[ $i -eq $((${#ROLL_REGISTRY_COMMANDS[@]} - 1)) ]]; then + echo " }" + else + echo " }," + fi + i=$((i + 1)) + done + echo "]" + ;; + csv) + echo "command,path,help_path,category,description,priority" + local i=0 + while [[ $i -lt ${#ROLL_REGISTRY_COMMANDS[@]} ]]; do + local command="${ROLL_REGISTRY_COMMANDS[$i]}" + local path="${ROLL_REGISTRY_PATHS[$i]}" + local help_path="${ROLL_REGISTRY_HELP_PATHS[$i]}" + local category="${ROLL_REGISTRY_CATEGORIES[$i]}" + local description="${ROLL_REGISTRY_DESCRIPTIONS[$i]}" + local priority="${ROLL_REGISTRY_PRIORITIES[$i]}" + + echo "$command,$path,$help_path,$category,\"$description\",$priority" + i=$((i + 1)) + done + ;; + simple|*) + local i=0 + while [[ $i -lt ${#ROLL_REGISTRY_COMMANDS[@]} ]]; do + echo "${ROLL_REGISTRY_COMMANDS[$i]}" + i=$((i + 1)) + done + ;; + esac +} \ No newline at end of file From f4cba0dbf9a8f20c9504650bbc183e9eaaf024eb Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 4 Jun 2025 09:55:18 +0200 Subject: [PATCH 05/69] added docs for new registry system --- commands/usage.help | 1 + docs/index.md | 1 + docs/registry.md | 328 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 330 insertions(+) create mode 100644 docs/registry.md diff --git a/commands/usage.help b/commands/usage.help index 9892e7e..0059778 100755 --- a/commands/usage.help +++ b/commands/usage.help @@ -50,6 +50,7 @@ RollDev version $(cat ${ROLL_DIR}/version) env-init Configure environment by adding \033[31m'.env.roll'\033[0m file to the current working directory env Controls an environment from any point within the root project directory config Manage and validate Roll configuration (see \033[31m'roll config -h'\033[0m for details) + registry Manage and inspect command registry (see 
\033[31m'roll registry -h'\033[0m for details) db Interacts with the db service on an environment (see \033[31m'roll db -h'\033[0m for details) redis Interacts with the redis service on an environment (see \033[31m'roll redis -h'\033[0m for details) install Initializes or updates roll configuration on host machine diff --git a/docs/index.md b/docs/index.md index 6482bbd..e85ba90 100644 --- a/docs/index.md +++ b/docs/index.md @@ -29,6 +29,7 @@ caption: Getting Started installing services usage +registry environments configuration ``` diff --git a/docs/registry.md b/docs/registry.md new file mode 100644 index 0000000..cf06b72 --- /dev/null +++ b/docs/registry.md @@ -0,0 +1,328 @@ +# Command Registry System + +The Roll Docker Stack includes a powerful command registry system that provides automatic command discovery, organization, and management capabilities. This system modernizes command handling while maintaining full backward compatibility. + +## Overview + +The registry system automatically discovers and catalogs all Roll commands from multiple directories, providing: + +* **Automatic command discovery** from multiple search paths +* **Priority-based command resolution** for overrides and customization +* **Command categorization and metadata extraction** +* **Environment-specific command loading** +* **Comprehensive command inspection and validation tools** + +## Registry Commands + +All registry operations are accessed through the `roll registry` command: + +```bash +roll registry [options] +``` + +### List Commands + +Display all registered commands: + +```bash +roll registry list +``` + +Filter commands by name pattern: + +```bash +roll registry list config +``` + +Filter commands by category: + +```bash +roll registry list "" environment +``` + +### Browse by Category + +View commands organized by category: + +```bash +roll registry categories +``` + +Show commands in a specific category: + +```bash +roll registry categories environment +``` + +### Command 
Information + +Get detailed information about a specific command: + +```bash +roll registry info config +``` + +This displays: +* Command file path +* Help file path +* Category +* Priority level +* Description + +### Search Commands + +Search commands by name or description: + +```bash +roll registry search database +roll registry search ssl +``` + +### Registry Statistics + +View registry statistics and command counts: + +```bash +roll registry stats +``` + +### Validate Registry + +Check registry integrity and validate all command files: + +```bash +roll registry validate +``` + +This checks for: +* Missing command files +* Missing help files (warnings only) +* Registry consistency + +### Export Commands + +Export command list in various formats: + +```bash +# Simple list (default) +roll registry export simple + +# JSON format +roll registry export json + +# CSV format +roll registry export csv +``` + +### View Search Paths + +Display command search paths and their priorities: + +```bash +roll registry paths +``` + +### Refresh Registry + +Refresh the command registry cache: + +```bash +roll registry refresh +``` + +## Command Discovery + +The registry system searches for commands in multiple directories with priority-based resolution: + +### Search Path Priority + +1. **Priority 1**: Project-local commands (`.roll/commands` in project directory) +2. **Priority 1**: Environment-specific commands (`~/.roll/reclu/{env_type}`) +3. **Priority 2**: User home commands (`~/.roll/commands`) +4. **Priority 3**: User reclu commands (`~/.roll/reclu`) +5. **Priority 4**: System commands (`{roll_install}/commands`) + +Lower priority numbers have higher precedence. This allows for easy command customization and overrides. 
+ +### Environment-Specific Discovery + +The registry automatically includes environment-specific commands when an environment is loaded: + +* Commands from `~/.roll/reclu/{environment_type}` (e.g., `~/.roll/reclu/magento2`) +* Commands from `{roll_install}/commands/{environment_type}` +* Project-local commands from `.roll/commands` + +## Command Categories + +Commands are automatically categorized based on their help file metadata or directory structure: + +* **Environment Setup**: Installation and initialization commands +* **Environment Management**: Start, stop, configuration commands +* **Development Tools**: Database, debugging, shell access +* **Information**: Version, help, status commands +* **General**: Uncategorized commands + +### Setting Command Category + +Add a category comment to your command's help file: + +```bash +#!/usr/bin/env bash +# Category: Development Tools + +ROLL_USAGE=$(cat <` comments in help files +2. `# TYPE: ` comments in help files +3. Directory-based categorization + +## Creating Custom Commands + +### Command File Structure + +Create a command file with the `.cmd` extension: + +```bash +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +# Your command logic here +echo "Hello from custom command!" +``` + +### Help File Structure + +Create a corresponding `.help` file: + +```bash +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +ROLL_USAGE=$(cat < commands.json +``` + +## Troubleshooting + +### Registry Validation Fails + +If `roll registry validate` shows errors: + +1. Check that command files exist and are executable +2. Verify help files follow the correct format +3. Ensure directory permissions are correct + +### Commands Not Found + +If commands aren't being discovered: + +1. Run `roll registry refresh` to clear the cache +2. 
Check `roll registry paths` to verify search directories +3. Verify file extensions are `.cmd` for commands and `.help` for help files + +### Performance Issues + +If command discovery is slow: + +1. Reduce the number of directories in search paths +2. Remove unused command directories +3. Use `roll registry stats` to check command counts + +## Migration from Legacy System + +The registry system is fully backward compatible. No migration is required, but you can: + +1. Run `roll registry validate` to check for missing help files +2. Add category metadata to help files for better organization +3. Use `roll registry stats` to understand your command inventory + +For more information, run `roll registry --help` or `roll registry --help` for specific command details. \ No newline at end of file From 0410ba42ac85595599d54b45e3bf2c5ca629bc65 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 4 Jun 2025 10:01:05 +0200 Subject: [PATCH 06/69] update to new workflows --- .github/workflows/build-documentation.yml | 18 +++++++++++----- .github/workflows/pages.yml | 24 ++++++++++++++-------- .github/workflows/push-release-to-brew.yml | 2 +- .github/workflows/tag-release.yml | 2 +- 4 files changed, 31 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build-documentation.yml b/.github/workflows/build-documentation.yml index b6d5a8b..0410b72 100644 --- a/.github/workflows/build-documentation.yml +++ b/.github/workflows/build-documentation.yml @@ -13,13 +13,21 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@v3 - - name: Run Sphinx documentation build - uses: ammaraskar/sphinx-action@0.4 + uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 with: - docs-folder: "docs/" + python-version: '3.11' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r docs/requirements.txt + - name: Build documentation with Sphinx + run: | + cd docs + sphinx-build -b html . 
_build/html - name: Upload documentation artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: documentation path: docs/_build/html/ diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml index 95e9e28..2d41469 100644 --- a/.github/workflows/pages.yml +++ b/.github/workflows/pages.yml @@ -8,6 +8,7 @@ on: paths: - docs/** - .github/workflows/build-documentation.yml + - .github/workflows/pages.yml - CHANGELOG.md # Allows you to run this workflow manually from the Actions tab @@ -33,18 +34,25 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Pages - uses: actions/configure-pages@v3 - - name: Run Sphinx documentation build - uses: ammaraskar/sphinx-action@0.4 + uses: actions/configure-pages@v5 + - name: Set up Python + uses: actions/setup-python@v5 with: - docs-folder: "docs/" + python-version: '3.11' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r docs/requirements.txt + - name: Build documentation with Sphinx + run: | + cd docs + sphinx-build -b html . 
_build/html - name: Upload artifact - uses: actions/upload-pages-artifact@v1 + uses: actions/upload-pages-artifact@v3 with: - # Upload entire repository path: 'docs/_build/html/' - name: Deploy to GitHub Pages id: deployment - uses: actions/deploy-pages@v1 + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/push-release-to-brew.yml b/.github/workflows/push-release-to-brew.yml index 8c0c64a..e4529a3 100644 --- a/.github/workflows/push-release-to-brew.yml +++ b/.github/workflows/push-release-to-brew.yml @@ -6,7 +6,7 @@ jobs: update-homebrew: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: token: ${{ secrets.TOKEN }} repository: dockergiant/homebrew-roll diff --git a/.github/workflows/tag-release.yml b/.github/workflows/tag-release.yml index c73c148..04fbfd7 100644 --- a/.github/workflows/tag-release.yml +++ b/.github/workflows/tag-release.yml @@ -10,7 +10,7 @@ jobs: create-tag: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Update Files env: VERSION: ${{ inputs.version }} From 0c9a7012453310fb81ce97bc1857af93e5f067c9 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 4 Jun 2025 10:50:27 +0200 Subject: [PATCH 07/69] update backup and restore commands --- bin/roll | 2 +- commands/backup.cmd | 761 +++++++++++++++++++++++++++++++++++++----- commands/backup.help | 60 +++- commands/restore.cmd | 732 ++++++++++++++++++++++++++++++++-------- commands/restore.help | 72 +++- docs/index.md | 2 + docs/usage.md | 33 ++ 7 files changed, 1432 insertions(+), 230 deletions(-) diff --git a/bin/roll b/bin/roll index ad6162c..7392208 100755 --- a/bin/roll +++ b/bin/roll @@ -40,7 +40,7 @@ declare ROLL_PARAMS=() declare ROLL_CMD_VERB= declare ROLL_CMD_EXEC= declare ROLL_CMD_HELP= -declare ROLL_CMD_ANYARGS=(svc env db redis sync shell debug rootnotty rootshell clinotty root node npm cli copyfromcontainer copytocontainer composer grunt magento magerun) +declare 
ROLL_CMD_ANYARGS=(svc env db redis sync shell debug rootnotty rootshell clinotty root node npm cli copyfromcontainer copytocontainer composer grunt magento magerun backup restore) ## parse first argument as command and determine validity if (( "$#" )); then diff --git a/commands/backup.cmd b/commands/backup.cmd index 42b4390..7d6f588 100755 --- a/commands/backup.cmd +++ b/commands/backup.cmd @@ -1,107 +1,696 @@ #!/usr/bin/env bash [[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 +# Load core utilities and configuration ROLL_ENV_PATH="$(locateEnvPath)" || exit $? loadEnvConfig "${ROLL_ENV_PATH}" || exit $? assertDockerRunning -if (( ${#ROLL_PARAMS[@]} == 0 )) || [[ "${ROLL_PARAMS[0]}" == "help" ]]; then - roll backup --help || exit $? && exit $? +# Default configuration values +BACKUP_COMPRESSION="gzip" # Options: gzip, xz, lz4, none +BACKUP_ENCRYPT="" +BACKUP_EXCLUDE_LOGS=1 +BACKUP_INCLUDE_SOURCE=0 +BACKUP_PARALLEL=1 +BACKUP_RETENTION_DAYS=30 +BACKUP_VERIFY=1 +BACKUP_QUIET=0 +BACKUP_NAME="" +BACKUP_DESCRIPTION="" +PROGRESS=1 + +# Parse command line arguments +POSITIONAL_ARGS=() +# Start with any arguments passed from the main roll script +if [[ -n "${ROLL_PARAMS[*]}" ]]; then + POSITIONAL_ARGS+=("${ROLL_PARAMS[@]}") fi -### load connection information for the mysql service -DB_VOLUME="${ROLL_ENV_NAME}_dbdata" -REDIS_VOLUME="${ROLL_ENV_NAME}_redis" -ES_VOLUME="${ROLL_ENV_NAME}_esdata" -CONTAINER_NAME="${ROLL_ENV_NAME}_backup" -ENV_PHP_LOC="$(pwd)/app/etc/env.php" -AUTH_LOC="$(pwd)/auth.json" +while [[ $# -gt 0 ]]; do + case "$1" in + --help|-h) + roll backup --help + exit 0 + ;; + --compression=*) + BACKUP_COMPRESSION="${1#*=}" + shift + ;; + --encrypt=*) + BACKUP_ENCRYPT="${1#*=}" + shift + ;; + --no-compression) + BACKUP_COMPRESSION="none" + shift + ;; + --include-logs) + BACKUP_EXCLUDE_LOGS=0 + shift + ;; + --include-source) + BACKUP_INCLUDE_SOURCE=1 + shift + ;; + --no-parallel) + BACKUP_PARALLEL=0 + 
shift + ;; + --retention=*) + BACKUP_RETENTION_DAYS="${1#*=}" + shift + ;; + --no-verify) + BACKUP_VERIFY=0 + shift + ;; + --quiet|-q) + BACKUP_QUIET=1 + PROGRESS=0 + shift + ;; + --name=*) + BACKUP_NAME="${1#*=}" + shift + ;; + --description=*) + BACKUP_DESCRIPTION="${1#*=}" + shift + ;; + --no-progress) + PROGRESS=0 + shift + ;; + --) + shift + break + ;; + -*) + error "Unknown option: $1" + exit 1 + ;; + *) + # Collect positional arguments (commands) + POSITIONAL_ARGS+=("$1") + shift + ;; + esac +done -"${ROLL_DIR}/bin/roll" env down +# Add any remaining arguments after -- to positional args +POSITIONAL_ARGS+=("$@") -if [ ! -d ".roll/" ]; then - mkdir .roll/ -fi +# Set remaining parameters from positional arguments - use backup-specific variable name +BACKUP_COMMAND_PARAMS=("${POSITIONAL_ARGS[@]}") -if [ ! -d ".roll/backups" ]; then - mkdir .roll/backups/ +if (( ${#BACKUP_COMMAND_PARAMS[@]} == 0 )); then + BACKUP_COMMAND_PARAMS=("all") fi -ID=$(date +%s) -BACKUP_LOC="$(pwd)/.roll/backups/$ID/" -mkdir $BACKUP_LOC - -echo "" -echo "" -echo "------------------ STARTING BACKUP IN: $BACKUP_LOC (no output nor progress) ---------------------" -echo "" - -case "${ROLL_PARAMS[0]}" in - db) - - docker run \ - --rm --name $CONTAINER_NAME \ - --mount source=$DB_VOLUME,target=/data -v \ - $BACKUP_LOC:/backup ubuntu bash \ - -c "tar -czvf /backup/db.tar.gz /data" - - - ;; - redis) - - docker run \ - --rm --name $CONTAINER_NAME \ - --mount source=$REDIS_VOLUME,target=/data -v \ - $BACKUP_LOC:/backup ubuntu bash \ - -c "tar -czvf /backup/redis.tar.gz /data" - - ;; - elasticserach) - - docker run \ - --rm --name $CONTAINER_NAME \ - --mount source=$ES_VOLUME,target=/data -v \ - $BACKUP_LOC:/backup ubuntu bash \ - -c "tar -czvf /backup/es.tar.gz /data" - - ;; - all) - - docker run \ - --rm --name $CONTAINER_NAME \ - --mount source=$DB_VOLUME,target=/data -v \ - $BACKUP_LOC:/backup ubuntu bash \ - -c "tar -czvf /backup/db.tar.gz /data" - - docker run \ - --rm --name $CONTAINER_NAME 
\ - --mount source=$REDIS_VOLUME,target=/data -v \ - $BACKUP_LOC:/backup ubuntu bash \ - -c "tar -czvf /backup/redis.tar.gz /data" - - docker run \ - --rm --name $CONTAINER_NAME \ - --mount source=$ES_VOLUME,target=/data -v \ - $BACKUP_LOC:/backup ubuntu bash \ - -c "tar -czvf /backup/es.tar.gz /data" - - if [ -f "$ENV_PHP_LOC" ]; then - cp $ENV_PHP_LOC $BACKUP_LOC +# Utility functions for backup operations +function showProgress() { + [[ $PROGRESS -eq 0 ]] && return + local current=$1 + local total=$2 + local description="$3" + local percent=$((current * 100 / total)) + local bar_length=30 + local filled_length=$((percent * bar_length / 100)) + + printf "\r[" + printf "%*s" $filled_length | tr ' ' '=' + printf "%*s" $((bar_length - filled_length)) | tr ' ' '-' + printf "] %d%% %s" $percent "$description" + + if [[ $current -eq $total ]]; then + echo "" + fi +} + +function logMessage() { + [[ $BACKUP_QUIET -eq 1 ]] && return + local level="$1" + shift + case "$level" in + INFO) info "$@" ;; + SUCCESS) success "$@" ;; + WARNING) warning "$@" ;; + ERROR) error "$@" ;; + esac +} + +function validateCompression() { + case "$BACKUP_COMPRESSION" in + gzip|xz|lz4|none) return 0 ;; + *) + error "Invalid compression format: $BACKUP_COMPRESSION. 
Supported: gzip, xz, lz4, none" + return 1 + ;; + esac +} + +function getCompressionExtension() { + case "$BACKUP_COMPRESSION" in + gzip) echo ".tar.gz" ;; + xz) echo ".tar.xz" ;; + lz4) echo ".tar.lz4" ;; + none) echo ".tar" ;; + esac +} + +function getCompressionCommand() { + case "$BACKUP_COMPRESSION" in + gzip) echo "gzip" ;; + xz) echo "xz -9" ;; + lz4) echo "lz4 -9" ;; + none) echo "cat" ;; + esac +} + +function detectEnabledServices() { + local services=() + + # Check database services + if [[ ${ROLL_DB:-1} -eq 1 ]]; then + case "${DB_DISTRIBUTION:-mariadb}" in + mysql|mariadb) services+=("db:mysql:dbdata") ;; + postgres) services+=("db:postgres:dbdata") ;; + esac + fi + + # Check Redis/Dragonfly + if [[ ${ROLL_REDIS:-0} -eq 1 ]]; then + services+=("redis:redis:redis") + elif [[ ${ROLL_DRAGONFLY:-0} -eq 1 ]]; then + services+=("dragonfly:dragonfly:dragonfly") + fi + + # Check Elasticsearch/OpenSearch + if [[ ${ROLL_ELASTICSEARCH:-0} -eq 1 ]]; then + services+=("elasticsearch:elasticsearch:esdata") + elif [[ ${ROLL_OPENSEARCH:-0} -eq 1 ]]; then + services+=("opensearch:opensearch:osdata") + fi + + # Check MongoDB + if [[ ${ROLL_MONGODB:-0} -eq 1 ]]; then + services+=("mongodb:mongodb:mongodb") + fi + + # Check RabbitMQ + if [[ ${ROLL_RABBITMQ:-0} -eq 1 ]]; then + services+=("rabbitmq:rabbitmq:rabbitmq") + fi + + # Check Varnish (cache data) + if [[ ${ROLL_VARNISH:-0} -eq 1 ]]; then + services+=("varnish:varnish:varnish") + fi + + echo "${services[@]}" +} + +function createBackupDirectory() { + local timestamp=${1:-$(date +%s)} + local backup_dir="$(pwd)/.roll/backups/$timestamp" + + # Create backup directories + mkdir -p "$backup_dir"/{volumes,config,metadata,logs} + + echo "$backup_dir" +} + +function generateBackupMetadata() { + local backup_dir="$1" + local services=("${@:2}") + + cat > "$backup_dir/metadata/backup.json" </dev/null || echo 'unknown')", + "services": [$(printf '"%s",' "${services[@]}" | sed 's/,$//')], + "compression": 
"${BACKUP_COMPRESSION}", + "encrypted": $([ -n "$BACKUP_ENCRYPT" ] && echo "true" || echo "false"), + "name": "${BACKUP_NAME}", + "description": "${BACKUP_DESCRIPTION}", + "include_source": ${BACKUP_INCLUDE_SOURCE}, + "exclude_logs": ${BACKUP_EXCLUDE_LOGS}, + "docker_compose_version": "$(docker compose version 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+' | head -1)", + "platform": "$(uname -s)", + "architecture": "$(uname -m)" +} +EOF + + # Create checksums file + touch "$backup_dir/metadata/checksums.sha256" +} + +function backupVolume() { + local service_name="$1" + local volume_name="$2" + local backup_dir="$3" + local step="$4" + local total="$5" + + local full_volume_name="${ROLL_ENV_NAME}_${volume_name}" + local output_file="$backup_dir/volumes/${service_name}$(getCompressionExtension)" + local temp_container="${ROLL_ENV_NAME}_backup_${service_name}_$$" + + showProgress $step $total "Backing up $service_name volume" + + # Check if volume exists + if ! docker volume inspect "$full_volume_name" >/dev/null 2>&1; then + logMessage WARNING "Volume $full_volume_name does not exist, skipping" + return 0 + fi + + # Create backup with appropriate user permissions based on service + local user_id="0:0" # Default to root + case "$service_name" in + elasticsearch|opensearch) user_id="1000:1000" ;; + mysql|mariadb|postgres) user_id="999:999" ;; + esac + + local tar_cmd="tar -cf - /data" + if [[ $BACKUP_EXCLUDE_LOGS -eq 1 ]]; then + tar_cmd="tar --exclude='*.log' --exclude='*log*' --exclude='*.tmp' -cf - /data" + fi + + # Execute backup with progress and error handling + if docker run --rm --name "$temp_container" \ + --mount source="$full_volume_name",target=/data,readonly \ + --user "$user_id" \ + alpine:latest \ + sh -c "$tar_cmd" | $(getCompressionCommand) > "$output_file" 2>/dev/null; then + + # Generate checksum + local checksum=$(sha256sum "$output_file" | cut -d' ' -f1) + echo "$checksum volumes/${service_name}$(getCompressionExtension)" >> 
"$backup_dir/metadata/checksums.sha256" + + logMessage SUCCESS "Successfully backed up $service_name volume ($(du -h "$output_file" | cut -f1))" + return 0 + else + logMessage ERROR "Failed to backup $service_name volume" + return 1 + fi +} + +function backupConfigurations() { + local backup_dir="$1" + local step="$2" + local total="$3" + + showProgress $step $total "Backing up configuration files" + + local config_files=() + + # Environment-specific configuration files + if [[ -f "$(pwd)/.env.roll" ]]; then + config_files+=(".env.roll") + fi + + if [[ -f "$(pwd)/app/etc/env.php" ]]; then + config_files+=("app/etc/env.php") + fi + + if [[ -f "$(pwd)/auth.json" ]]; then + config_files+=("auth.json") + fi + + if [[ -f "$(pwd)/composer.json" ]]; then + config_files+=("composer.json") + fi + + if [[ -f "$(pwd)/composer.lock" ]]; then + config_files+=("composer.lock") + fi + + # Additional framework-specific configs + if [[ -f "$(pwd)/.env" ]]; then + config_files+=(".env") + fi + + if [[ -f "$(pwd)/config/database.yml" ]]; then + config_files+=("config/database.yml") + fi + + # Copy configuration files + for config_file in "${config_files[@]}"; do + if [[ -f "$(pwd)/$config_file" ]]; then + local target_dir="$backup_dir/config/$(dirname "$config_file")" + mkdir -p "$target_dir" + cp "$(pwd)/$config_file" "$target_dir/" + logMessage INFO "Backed up $config_file" + fi + done + + # Backup docker-compose overrides if they exist + if [[ -f "$(pwd)/.roll/roll-env.yml" ]]; then + cp "$(pwd)/.roll/roll-env.yml" "$backup_dir/config/" + logMessage INFO "Backed up roll-env.yml" + fi + + logMessage SUCCESS "Configuration backup completed" +} + +function backupSourceCode() { + local backup_dir="$1" + local step="$2" + local total="$3" + + showProgress $step $total "Backing up source code" + + local exclude_patterns=( + "--exclude=.git" + "--exclude=node_modules" + "--exclude=vendor" + "--exclude=var/cache" + "--exclude=var/log" + "--exclude=var/session" + "--exclude=var/tmp" + 
"--exclude=storage/logs" + "--exclude=storage/framework/cache" + "--exclude=storage/framework/sessions" + "--exclude=storage/framework/views" + "--exclude=.roll/backups" + "--exclude=*.log" + ) + + tar "${exclude_patterns[@]}" -cf - . | $(getCompressionCommand) > "$backup_dir/source$(getCompressionExtension)" 2>/dev/null + + if [[ $? -eq 0 ]]; then + local checksum=$(sha256sum "$backup_dir/source$(getCompressionExtension)" | cut -d' ' -f1) + echo "$checksum source$(getCompressionExtension)" >> "$backup_dir/metadata/checksums.sha256" + logMessage SUCCESS "Source code backup completed ($(du -h "$backup_dir/source$(getCompressionExtension)" | cut -f1))" + else + logMessage ERROR "Failed to backup source code" + return 1 + fi +} + +function encryptBackup() { + local backup_dir="$1" + local passphrase="$2" + + if [[ -z "$passphrase" ]]; then + return 0 + fi + + logMessage INFO "Encrypting backup files..." + + find "$backup_dir" -name "*.tar*" -type f | while read -r file; do + if command -v gpg >/dev/null 2>&1; then + gpg --batch --yes --cipher-algo AES256 --compress-algo 1 \ + --symmetric --passphrase "$passphrase" \ + --output "$file.gpg" "$file" && rm "$file" + else + logMessage WARNING "GPG not available, skipping encryption" + return 1 fi + done + + logMessage SUCCESS "Backup encryption completed" +} - if [ -f "$AUTH_LOC" ]; then - cp "$AUTH_LOC" $BACKUP_LOC +function verifyBackup() { + local backup_dir="$1" + + if [[ $BACKUP_VERIFY -eq 0 ]]; then + return 0 + fi + + logMessage INFO "Verifying backup integrity..." 
+ + if [[ -f "$backup_dir/metadata/checksums.sha256" ]]; then + if (cd "$backup_dir" && sha256sum -c metadata/checksums.sha256 >/dev/null 2>&1); then + logMessage SUCCESS "Backup verification passed" + return 0 + else + logMessage ERROR "Backup verification failed" + return 1 fi + else + logMessage WARNING "No checksums found, skipping verification" + return 0 + fi +} + +function cleanupOldBackups() { + local backup_base_dir="$(pwd)/.roll/backups" + + if [[ $BACKUP_RETENTION_DAYS -le 0 ]]; then + return 0 + fi + + logMessage INFO "Cleaning up backups older than $BACKUP_RETENTION_DAYS days..." + + find "$backup_base_dir" -maxdepth 1 -type d -name '[0-9]*' -mtime +$BACKUP_RETENTION_DAYS | while read -r old_backup; do + logMessage INFO "Removing old backup: $(basename "$old_backup")" + rm -rf "$old_backup" + done + + # Also clean up old compressed backups + find "$backup_base_dir" -maxdepth 1 -name "*.tar*" -mtime +$BACKUP_RETENTION_DAYS -delete + + logMessage INFO "Cleanup completed" +} - ;; +function performBackup() { + local backup_type="$1" + + # Validate inputs + validateCompression || exit 1 + + # Detect enabled services + local enabled_services=($(detectEnabledServices)) + if [[ ${#enabled_services[@]} -eq 0 ]]; then + logMessage ERROR "No services enabled for backup" + exit 1 + fi + + # Stop environment to ensure consistent backup + logMessage INFO "Stopping environment for consistent backup..." 
+ "${ROLL_DIR}/bin/roll" env down >/dev/null 2>&1 + + # Create backup directory + local timestamp=$(date +%s) + local backup_dir=$(createBackupDirectory "$timestamp") + + logMessage INFO "Starting backup to: $backup_dir" + logMessage INFO "Backup type: $backup_type, Compression: $BACKUP_COMPRESSION" + + # Calculate total steps + local total_steps=2 # metadata + config + case "$backup_type" in + all) + total_steps=$((${#enabled_services[@]} + 3)) # services + config + source + metadata + if [[ $BACKUP_INCLUDE_SOURCE -eq 1 ]]; then + ((total_steps++)) + fi + ;; + db|database) + total_steps=3 + ;; + *) + total_steps=3 + ;; + esac + + local current_step=0 + + # Generate metadata + ((current_step++)) + generateBackupMetadata "$backup_dir" "${enabled_services[@]}" + showProgress $current_step $total_steps "Generating metadata" + + # Backup based on type + case "$backup_type" in + all) + # Backup all enabled services + for service_info in "${enabled_services[@]}"; do + IFS=':' read -r service_name service_type volume_name <<< "$service_info" + ((current_step++)) + backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps + done + + # Backup configurations + ((current_step++)) + backupConfigurations "$backup_dir" $current_step $total_steps + + # Backup source code if requested + if [[ $BACKUP_INCLUDE_SOURCE -eq 1 ]]; then + ((current_step++)) + backupSourceCode "$backup_dir" $current_step $total_steps + fi + ;; + db|database) + # Find database service + for service_info in "${enabled_services[@]}"; do + IFS=':' read -r service_name service_type volume_name <<< "$service_info" + if [[ "$service_type" =~ ^(mysql|mariadb|postgres)$ ]]; then + ((current_step++)) + backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps + break + fi + done + ;; + redis|dragonfly) + # Find Redis/Dragonfly service + for service_info in "${enabled_services[@]}"; do + IFS=':' read -r service_name service_type volume_name <<< "$service_info" + 
if [[ "$service_type" =~ ^(redis|dragonfly)$ ]]; then + ((current_step++)) + backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps + break + fi + done + ;; + elasticsearch|opensearch) + # Find search service + for service_info in "${enabled_services[@]}"; do + IFS=':' read -r service_name service_type volume_name <<< "$service_info" + if [[ "$service_type" =~ ^(elasticsearch|opensearch)$ ]]; then + ((current_step++)) + backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps + break + fi + done + ;; + mongodb) + # Find MongoDB service + for service_info in "${enabled_services[@]}"; do + IFS=':' read -r service_name service_type volume_name <<< "$service_info" + if [[ "$service_type" == "mongodb" ]]; then + ((current_step++)) + backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps + break + fi + done + ;; + config|configuration) + # Only backup configuration files + ((current_step++)) + backupConfigurations "$backup_dir" $current_step $total_steps + ;; + *) + logMessage ERROR "Unknown backup type: $backup_type" + exit 1 + ;; + esac + + # Encrypt if requested + if [[ -n "$BACKUP_ENCRYPT" ]]; then + encryptBackup "$backup_dir" "$BACKUP_ENCRYPT" + fi + + # Verify backup + verifyBackup "$backup_dir" + + # Create compressed archive for the entire backup + local archive_name="backup_${ROLL_ENV_NAME}_${timestamp}$(getCompressionExtension)" + logMessage INFO "Creating final backup archive: $archive_name" + + (cd "$(pwd)/.roll/backups" && tar -cf - "$timestamp" | $(getCompressionCommand) > "$archive_name") + + if [[ $? -eq 0 ]]; then + # Update latest symlink + (cd "$(pwd)/.roll/backups" && ln -sf "$archive_name" "latest$(getCompressionExtension)") + + logMessage SUCCESS "Backup completed successfully!" 
+ logMessage INFO "Backup ID: $timestamp" + logMessage INFO "Archive: $archive_name ($(du -h "$(pwd)/.roll/backups/$archive_name" | cut -f1))" + logMessage INFO "Location: $(pwd)/.roll/backups/" + + # Clean up directory version (keep archive) + rm -rf "$backup_dir" + else + logMessage ERROR "Failed to create final backup archive" + exit 1 + fi + + # Cleanup old backups + cleanupOldBackups + + logMessage SUCCESS "Backup process completed!" +} + +# Main execution +case "${BACKUP_COMMAND_PARAMS[0]}" in + all|db|database|redis|dragonfly|elasticsearch|opensearch|mongodb|config|configuration) + performBackup "${BACKUP_COMMAND_PARAMS[0]}" + ;; + list|ls) + echo "Available backups:" + if [[ -d "$(pwd)/.roll/backups" ]]; then + ls -la "$(pwd)/.roll/backups/" | grep -E '^d.*[0-9]{10}$|^-.*backup_.*\.tar' + else + echo "No backups found." + fi + ;; + info) + if [[ -n "${BACKUP_COMMAND_PARAMS[1]}" ]]; then + backup_id="${BACKUP_COMMAND_PARAMS[1]}" + + # First check if directory exists (uncompressed backup) + metadata_file="$(pwd)/.roll/backups/$backup_id/metadata/backup.json" + if [[ -f "$metadata_file" ]]; then + cat "$metadata_file" | jq '.' 2>/dev/null || cat "$metadata_file" + else + # Look for compressed archive + archive_file="" + for ext in ".tar.gz" ".tar.xz" ".tar.lz4" ".tar"; do + potential_file="$(pwd)/.roll/backups/backup_${ROLL_ENV_NAME}_${backup_id}${ext}" + if [[ -f "$potential_file" ]]; then + archive_file="$potential_file" + break + fi + done + + if [[ -z "$archive_file" ]]; then + # Also check for generic archive names + archive_file=$(ls "$(pwd)/.roll/backups"/*"$backup_id"*.tar* 2>/dev/null | head -1) + fi + + if [[ -n "$archive_file" ]]; then + # Extract metadata from archive + logMessage INFO "Extracting metadata from $(basename "$archive_file")..." 
+ + # Determine decompression command + case "$archive_file" in + *.tar.gz) decompress_cmd="gzip -dc" ;; + *.tar.xz) decompress_cmd="xz -dc" ;; + *.tar.lz4) decompress_cmd="lz4 -dc" ;; + *.tar) decompress_cmd="cat" ;; + *) decompress_cmd="cat" ;; + esac + + # Extract and display metadata + if metadata_content=$($decompress_cmd "$archive_file" | tar -xOf - "$backup_id/metadata/backup.json" 2>/dev/null); then + echo "$metadata_content" | jq '.' 2>/dev/null || echo "$metadata_content" + else + error "Could not extract metadata from backup archive" + exit 1 + fi + else + error "Backup not found for ID: $backup_id" + exit 1 + fi + fi + else + error "Please specify a backup ID" + exit 1 + fi + ;; + clean) + days=${BACKUP_COMMAND_PARAMS[1]:-30} + BACKUP_RETENTION_DAYS=$days + cleanupOldBackups + ;; *) - fatal "The command \"${ROLL_PARAMS[0]}\" does not exist. Please use --help for usage." + error "Unknown command: ${BACKUP_COMMAND_PARAMS[0]}" + echo "Available commands: all, db, redis, elasticsearch, mongodb, config, list, info, clean" + exit 1 ;; esac - -tar -czvf $(pwd)/.roll/backups/latest.tar.gz $BACKUP_LOC - -echo "" -echo "" -echo "------------------ FNISHED BACKUP WITH ID: $ID ---------------------" -echo "" diff --git a/commands/backup.help b/commands/backup.help index 8e44c1c..5d14c2d 100755 --- a/commands/backup.help +++ b/commands/backup.help @@ -1,16 +1,60 @@ - #!/usr/bin/env bash [[ ! 
${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 ROLL_USAGE=$(cat < Show backup metadata and information + clean [days] Clean up backups older than specified days (default: 30) + +\033[33mOptions:\033[0m + -h, --help Display this help menu + -q, --quiet Suppress output messages + -f, --force Force backup creation (overwrite existing) + --dry-run Show what would be backed up without doing it + --no-progress Disable progress indicators + +\033[33mBackup Options:\033[0m + --compression=FORMAT Compression format: gzip (default), xz, lz4, none + --no-compression Disable compression (same as --compression=none) + --encrypt=PASSPHRASE Encrypt backup with GPG using passphrase + --include-logs Include log files in backup (excluded by default) + --include-source Include source code in backup + --no-parallel Disable parallel backup operations + --retention=DAYS Auto-cleanup backups older than N days (default: 30) + --no-verify Skip backup integrity verification + --name=NAME Give backup a custom name + --description=TEXT Add description to backup metadata + +\033[33mExamples:\033[0m + backup # Backup all enabled services + backup db # Backup database only + backup all --include-source # Full backup including source code + backup --compression=xz --quiet # High compression, minimal output + backup --encrypt=mypassword # Encrypted backup + backup --name="before-upgrade" # Named backup for reference + backup redis --no-verify # Quick Redis backup without verification + backup list # Show available backups + backup info 1672531200 # Show backup details + backup clean 7 # Remove backups older than 7 days + +\033[33mNotes:\033[0m + • Backups are stored in .roll/backups/ directory + • Service detection is automatic based on environment configuration + • All backups include metadata for restoration tracking + • Use 'roll restore' command to restore from backups + • Encrypted backups require GPG for decryption during restore + • 
Source code backup excludes common cache/vendor directories EOF ) diff --git a/commands/restore.cmd b/commands/restore.cmd index 52d65b5..62bdb9f 100755 --- a/commands/restore.cmd +++ b/commands/restore.cmd @@ -1,159 +1,627 @@ #!/usr/bin/env bash [[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 - -CURRENT_DIR=$(pwd) -if [[ ! -f "$CURRENT_DIR/.env.roll" ]]; then - if [[ -f "$CURRENT_DIR/.env" ]]; then - sed -i.warden 's/WARDEN/ROLL/g' "$CURRENT_DIR/.env" - if [[ -d "$CURRENT_DIR/.warden" ]]; then - mv "$CURRENT_DIR/.warden" "$CURRENT_DIR/.roll" - if [[ -f "$CURRENT_DIR/.roll/warden-env.yml" ]]; then - mv "$CURRENT_DIR/.roll/warden-env.yml" "$CURRENT_DIR/.roll/roll-env.yml" - sed -i.warden 's/WARDEN/ROLL/g;s/warden/roll/g' "$CURRENT_DIR/.roll/roll-env.yml" - fi - fi - - if [[ -n "$(grep -r 'ROLL_NO_STATIC_CACHING' "$CURRENT_DIR/.env")" ]]; then - perl -i -pe's/.*ROLL_NO_STATIC_CACHING.*$/ROLL_NO_STATIC_CACHING\=1/g' "$CURRENT_DIR/.env" - else - echo "ROLL_NO_STATIC_CACHING=1" >> "$CURRENT_DIR/.env" - fi - - if [[ -n "$(grep -r 'ROLL_' "$CURRENT_DIR/.env")" ]]; then - mv "$CURRENT_DIR/.env" "$CURRENT_DIR/.env.roll" - fi - fi -fi - +# Load core utilities and configuration ROLL_ENV_PATH="$(locateEnvPath)" || exit $? loadEnvConfig "${ROLL_ENV_PATH}" || exit $? assertDockerRunning -if [[ ${ROLL_DB:-1} -eq 0 ]]; then - fatal "Database environment is not used (ROLL_DB=0)." 
-fi - -### load information -DOCKER_COMPOSER_V=$( docker compose version | grep -Eo '[0-9]\.([0-9][0-9]|[0-9])\.[0-9]+') - -DB_VOLUME_NAME="dbdata" -REDIS_VOLUME_NAME="redis" -ES_VOLUME_NAME="esdata" -DB_VOLUME="${ROLL_ENV_NAME}_${DB_VOLUME_NAME}" -REDIS_VOLUME="${ROLL_ENV_NAME}_${REDIS_VOLUME_NAME}" -ES_VOLUME="${ROLL_ENV_NAME}_${ES_VOLUME_NAME}" -CONTAINER_NAME="${ROLL_ENV_NAME}_restore" -LATEST_TIMESTAMP=$(ls "$(pwd)/.roll/backups/" | sort -n | tail -1) -ENV_PHP_LOC="$(pwd)/app/etc/env.php" -AUTH_LOC="$(pwd)/auth.json" -SKIP_DB=0 -SKIP_REDIS=0 -SKIP_ELASTICSEARCH=0 - - -if [ ! -d ".roll/backups/$LATEST_TIMESTAMP" ]; then - fatal "No backups available in the directory .roll/backups/" -fi - -if [ ! -f ".roll/backups/$LATEST_TIMESTAMP/db.tar.gz" ]; then - SKIP_DB=1 -fi - -if [ ! -f ".roll/backups/$LATEST_TIMESTAMP/redis.tar.gz" ]; then - SKIP_REDIS=1 -fi +# Default configuration values +RESTORE_BACKUP_ID="" +RESTORE_SERVICES=() +RESTORE_CONFIG=1 +RESTORE_VERIFY=1 +RESTORE_FORCE=0 +RESTORE_DRY_RUN=0 +RESTORE_QUIET=0 +RESTORE_DECRYPT="" +PROGRESS=1 -if [ ! 
-f ".roll/backups/$LATEST_TIMESTAMP/es.tar.gz" ]; then - SKIP_ELASTICSEARCH=1 -fi +# Legacy migration support +RESTORE_LEGACY_MIGRATION=1 +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case "$1" in + --help|-h) + roll restore --help + exit 0 + ;; + --backup-id=*|--backup=*) + RESTORE_BACKUP_ID="${1#*=}" + shift + ;; + --services=*) + IFS=',' read -ra RESTORE_SERVICES <<< "${1#*=}" + shift + ;; + --no-config) + RESTORE_CONFIG=0 + shift + ;; + --no-verify) + RESTORE_VERIFY=0 + shift + ;; + --force|-f) + RESTORE_FORCE=1 + shift + ;; + --dry-run) + RESTORE_DRY_RUN=1 + shift + ;; + --quiet|-q) + RESTORE_QUIET=1 + PROGRESS=0 + shift + ;; + --decrypt=*) + RESTORE_DECRYPT="${1#*=}" + shift + ;; + --no-progress) + PROGRESS=0 + shift + ;; + --no-legacy-migration) + RESTORE_LEGACY_MIGRATION=0 + shift + ;; + --) + shift + break + ;; + -*) + error "Unknown option: $1" + exit 1 + ;; + *) + # If no backup ID specified yet, use this as the backup ID + if [[ -z "$RESTORE_BACKUP_ID" ]]; then + RESTORE_BACKUP_ID="$1" + fi + shift + ;; + esac +done -echo "" -echo "" -echo "------------------ STARTING INITIALIZATION ---------------------" -echo "" +# Utility functions for restore operations +function showProgress() { + [[ $PROGRESS -eq 0 ]] && return + local current=$1 + local total=$2 + local description="$3" + local percent=$((current * 100 / total)) + local bar_length=30 + local filled_length=$((percent * bar_length / 100)) + + printf "\r[" + printf "%*s" $filled_length | tr ' ' '=' + printf "%*s" $((bar_length - filled_length)) | tr ' ' '-' + printf "] %d%% %s" $percent "$description" + + if [[ $current -eq $total ]]; then + echo "" + fi +} -RUNNING_CONTAINERS=$(roll env ps --services --filter "status=running" | grep 'php-fpm' | sed 's/ *$//g') -if [[ ! 
-z "$RUNNING_CONTAINERS" ]]; then - "${ROLL_DIR}/bin/roll" env down -fi +function logMessage() { + [[ $RESTORE_QUIET -eq 1 ]] && return + local level="$1" + shift + case "$level" in + INFO) info "$@" ;; + SUCCESS) success "$@" ;; + WARNING) warning "$@" ;; + ERROR) error "$@" ;; + esac +} -echo "" -echo "------ CREATING CONTAINER (if necessary) ---------------------" -echo "" - -if [[ $SKIP_DB -eq 0 ]]; then - if [ ! -z "$(docker volume ls | grep -w $DB_VOLUME)" ]; then - docker volume rm $DB_VOLUME && true # dont fail - fi - docker volume create $DB_VOLUME \ - --label com.docker.compose.project=$ROLL_ENV_NAME \ - --label com.docker.compose.version=$DOCKER_COMPOSER_V \ - --label com.docker.compose.volume=$DB_VOLUME_NAME -fi +function performLegacyMigration() { + if [[ $RESTORE_LEGACY_MIGRATION -eq 0 ]]; then + return 0 + fi + + local current_dir="$(pwd)" + + # Handle Warden to Roll migration + if [[ ! -f "$current_dir/.env.roll" ]]; then + if [[ -f "$current_dir/.env" ]]; then + logMessage INFO "Performing legacy Warden to Roll migration..." 
+ + # Create backup of original .env + cp "$current_dir/.env" "$current_dir/.env.backup.$(date +%s)" + + # Convert WARDEN to ROLL + sed -i.warden 's/WARDEN/ROLL/g' "$current_dir/.env" + + # Migrate .warden directory to .roll + if [[ -d "$current_dir/.warden" ]]; then + mv "$current_dir/.warden" "$current_dir/.roll" + + if [[ -f "$current_dir/.roll/warden-env.yml" ]]; then + mv "$current_dir/.roll/warden-env.yml" "$current_dir/.roll/roll-env.yml" + sed -i.warden 's/WARDEN/ROLL/g;s/warden/roll/g' "$current_dir/.roll/roll-env.yml" + fi + fi + + # Ensure ROLL_NO_STATIC_CACHING is set + if [[ -n "$(grep -r 'ROLL_NO_STATIC_CACHING' "$current_dir/.env")" ]]; then + perl -i -pe's/.*ROLL_NO_STATIC_CACHING.*$/ROLL_NO_STATIC_CACHING\=1/g' "$current_dir/.env" + else + echo "ROLL_NO_STATIC_CACHING=1" >> "$current_dir/.env" + fi + + # Move to .env.roll if it contains ROLL_ variables + if [[ -n "$(grep -r 'ROLL_' "$current_dir/.env")" ]]; then + mv "$current_dir/.env" "$current_dir/.env.roll" + fi + + logMessage SUCCESS "Legacy migration completed" + fi + fi +} -if [[ $SKIP_REDIS -eq 0 ]]; then - if [ ! -z "$(docker volume ls | grep -w $REDIS_VOLUME)" ]; then - docker volume rm $REDIS_VOLUME && true # dont fail +function findLatestBackup() { + local backup_dir="$(pwd)/.roll/backups" + + if [[ ! 
-d "$backup_dir" ]]; then + return 1 fi - docker volume create $REDIS_VOLUME \ - --label com.docker.compose.project=$ROLL_ENV_NAME \ - --label com.docker.compose.version=$DOCKER_COMPOSER_V \ - --label com.docker.compose.volume=$REDIS_VOLUME_NAME -fi + + # Look for timestamped directories first (new format) + local latest_dir=$(ls "$backup_dir" 2>/dev/null | grep '^[0-9]\{10\}$' | sort -n | tail -1) + if [[ -n "$latest_dir" ]]; then + echo "$latest_dir" + return 0 + fi + + # Look for compressed archives + local latest_archive=$(ls "$backup_dir"/backup_*_*.tar* 2>/dev/null | sort | tail -1) + if [[ -n "$latest_archive" ]]; then + # Extract timestamp from filename + local timestamp=$(basename "$latest_archive" | grep -o '[0-9]\{10\}') + echo "$timestamp" + return 0 + fi + + return 1 +} -if [[ $SKIP_ELASTICSEARCH -eq 0 ]]; then - if [ ! -z "$(docker volume ls | grep -w $ES_VOLUME)" ]; then - docker volume rm $ES_VOLUME && true # dont fail +function extractBackupArchive() { + local backup_id="$1" + local backup_dir="$(pwd)/.roll/backups" + local extract_dir="$backup_dir/${backup_id}_extracted" + + # Check if already extracted + if [[ -d "$extract_dir" ]]; then + echo "$extract_dir" + return 0 fi - docker volume create $ES_VOLUME \ - --label com.docker.compose.project=$ROLL_ENV_NAME \ - --label com.docker.compose.version=$DOCKER_COMPOSER_V \ - --label com.docker.compose.volume=$ES_VOLUME_NAME -fi + + # Find the archive file + local archive_file="" + for ext in ".tar.gz" ".tar.xz" ".tar.lz4" ".tar"; do + local potential_file="$backup_dir/backup_${ROLL_ENV_NAME}_${backup_id}${ext}" + if [[ -f "$potential_file" ]]; then + archive_file="$potential_file" + break + fi + done + + # Also check for generic archive names + if [[ -z "$archive_file" ]]; then + archive_file=$(ls "$backup_dir"/*"$backup_id"*.tar* 2>/dev/null | head -1) + fi + + if [[ -z "$archive_file" ]]; then + logMessage ERROR "Backup archive not found for ID: $backup_id" + return 1 + fi + + logMessage INFO 
"Extracting backup archive: $(basename "$archive_file")" + + mkdir -p "$extract_dir" + + # Determine decompression command based on file extension + local decompress_cmd="cat" + case "$archive_file" in + *.tar.gz) decompress_cmd="gzip -d" ;; + *.tar.xz) decompress_cmd="xz -d" ;; + *.tar.lz4) decompress_cmd="lz4 -d" ;; + esac + + if $decompress_cmd < "$archive_file" | tar -xf - -C "$extract_dir" --strip-components=1; then + echo "$extract_dir" + return 0 + else + logMessage ERROR "Failed to extract backup archive" + rm -rf "$extract_dir" + return 1 + fi +} +function validateBackup() { + local backup_path="$1" + + if [[ $RESTORE_VERIFY -eq 0 ]]; then + return 0 + fi + + logMessage INFO "Validating backup integrity..." + + # Check if backup metadata exists + if [[ ! -f "$backup_path/metadata/backup.json" ]]; then + logMessage WARNING "Backup metadata not found, proceeding with legacy format" + return 0 + fi + + # Verify checksums if available + if [[ -f "$backup_path/metadata/checksums.sha256" ]]; then + if (cd "$backup_path" && sha256sum -c metadata/checksums.sha256 >/dev/null 2>&1); then + logMessage SUCCESS "Backup integrity verified" + return 0 + else + logMessage ERROR "Backup integrity check failed" + return 1 + fi + fi + + logMessage SUCCESS "Backup validation completed" + return 0 +} +function getBackupMetadata() { + local backup_path="$1" + local metadata_file="$backup_path/metadata/backup.json" + + if [[ -f "$metadata_file" ]]; then + cat "$metadata_file" + else + # Return empty JSON for legacy backups + echo "{}" + fi +} -echo "" -echo "" -echo "------------------ RESTORING BACKUP FROM: .roll/backups/$LATEST_TIMESTAMP/ (no output nor progress) ---------------------" -echo "" +function detectBackupServices() { + local backup_path="$1" + local services=() + + # Check for volume backups + if [[ -d "$backup_path/volumes" ]]; then + for volume_file in "$backup_path/volumes"/*; do + if [[ -f "$volume_file" ]]; then + local service_name=$(basename "$volume_file" | 
sed 's/\.tar.*//') + services+=("$service_name") + fi + done + else + # Legacy format detection + if [[ -f "$backup_path/db.tar.gz" ]]; then + services+=("db") + fi + if [[ -f "$backup_path/redis.tar.gz" ]]; then + services+=("redis") + fi + if [[ -f "$backup_path/es.tar.gz" ]]; then + services+=("elasticsearch") + fi + fi + + echo "${services[@]}" +} -if [[ $SKIP_DB -eq 0 ]]; then - docker run \ - --rm --name $CONTAINER_NAME \ - --mount source=$DB_VOLUME,target=/data \ - -v $(pwd)/.roll/backups/$LATEST_TIMESTAMP/:/backup ubuntu bash \ - -c "cd /data && tar -xvf /backup/db.tar.gz --strip 1 && chown -R 999:root /data" -fi +function stopEnvironment() { + if [[ $RESTORE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would stop environment" + return 0 + fi + + logMessage INFO "Stopping environment for consistent restore..." + + local running_containers=$(roll env ps --services --filter "status=running" 2>/dev/null | grep 'php-fpm' | sed 's/ *$//g') + if [[ -n "$running_containers" ]]; then + "${ROLL_DIR}/bin/roll" env down >/dev/null 2>&1 + fi +} -if [[ $SKIP_REDIS -eq 0 ]]; then - docker run \ - --rm --name $CONTAINER_NAME \ - --mount source=$REDIS_VOLUME,target=/data \ - -v $(pwd)/.roll/backups/$LATEST_TIMESTAMP/:/backup ubuntu bash \ - -c "cd /data && tar -xvf /backup/redis.tar.gz --strip 1 && chown -R 999:root /data" -fi +function getVolumeMapping() { + local service_name="$1" + + case "$service_name" in + db) + case "${DB_DISTRIBUTION:-mariadb}" in + mysql|mariadb) echo "${ROLL_ENV_NAME}_dbdata:mysql" ;; + postgres) echo "${ROLL_ENV_NAME}_dbdata:postgres" ;; + *) echo "${ROLL_ENV_NAME}_dbdata:mysql" ;; + esac + ;; + redis) echo "${ROLL_ENV_NAME}_redis:redis" ;; + dragonfly) echo "${ROLL_ENV_NAME}_dragonfly:dragonfly" ;; + elasticsearch) echo "${ROLL_ENV_NAME}_esdata:elasticsearch" ;; + opensearch) echo "${ROLL_ENV_NAME}_osdata:opensearch" ;; + mongodb) echo "${ROLL_ENV_NAME}_mongodb:mongodb" ;; + rabbitmq) echo "${ROLL_ENV_NAME}_rabbitmq:rabbitmq" ;; + 
varnish) echo "${ROLL_ENV_NAME}_varnish:varnish" ;; + *) echo "${ROLL_ENV_NAME}_${service_name}:generic" ;; + esac +} -if [[ $SKIP_ELASTICSEARCH -eq 0 ]]; then - docker run \ - --rm --name $CONTAINER_NAME \ - --mount source=$ES_VOLUME,target=/data \ - -v $(pwd)/.roll/backups/$LATEST_TIMESTAMP/:/backup ubuntu bash \ - -c "cd /data && tar -xvf /backup/es.tar.gz --strip 1 && chown -R 1000:root /data" -fi +function restoreVolume() { + local service_name="$1" + local backup_path="$2" + local step="$3" + local total="$4" + + showProgress $step $total "Restoring $service_name volume" + + local volume_mapping=$(getVolumeMapping "$service_name") + IFS=':' read -r volume_name service_type <<< "$volume_mapping" + + # Determine backup file location + local backup_file="" + if [[ -f "$backup_path/volumes/${service_name}.tar.gz" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.gz" + elif [[ -f "$backup_path/volumes/${service_name}.tar.xz" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.xz" + elif [[ -f "$backup_path/volumes/${service_name}.tar.lz4" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.lz4" + elif [[ -f "$backup_path/volumes/${service_name}.tar" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar" + elif [[ -f "$backup_path/${service_name}.tar.gz" ]]; then + # Legacy format + backup_file="$backup_path/${service_name}.tar.gz" + else + logMessage WARNING "Backup file not found for service: $service_name" + return 0 + fi + + if [[ $RESTORE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would restore $service_name from $backup_file to volume $volume_name" + return 0 + fi + + # Get Docker Compose version for proper labeling + local docker_compose_version=$(docker compose version 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+' | head -1) + local volume_base_name=$(echo "$volume_name" | sed "s/${ROLL_ENV_NAME}_//") + + # Remove existing volume if it exists + if docker volume inspect "$volume_name" >/dev/null 
2>&1; then + if [[ $RESTORE_FORCE -eq 1 ]]; then + logMessage INFO "Removing existing volume: $volume_name" + docker volume rm "$volume_name" >/dev/null 2>&1 + else + logMessage ERROR "Volume $volume_name already exists. Use --force to overwrite." + return 1 + fi + fi + + # Create new volume with proper labels + docker volume create "$volume_name" \ + --label com.docker.compose.project="$ROLL_ENV_NAME" \ + --label com.docker.compose.version="$docker_compose_version" \ + --label com.docker.compose.volume="$volume_base_name" >/dev/null 2>&1 + + # Determine decompression command and user permissions + local decompress_cmd="cat" + case "$backup_file" in + *.tar.gz) decompress_cmd="gzip -d" ;; + *.tar.xz) decompress_cmd="xz -d" ;; + *.tar.lz4) decompress_cmd="lz4 -d" ;; + esac + + local user_id="0:0" # Default to root + case "$service_type" in + elasticsearch|opensearch) user_id="1000:1000" ;; + mysql|mariadb|postgres) user_id="999:999" ;; + esac + + # Restore the volume data + local temp_container="${ROLL_ENV_NAME}_restore_${service_name}_$$" + + if $decompress_cmd < "$backup_file" | docker run --rm --name "$temp_container" \ + --mount source="$volume_name",target=/data \ + --user "$user_id" \ + -i alpine:latest \ + sh -c "cd /data && tar -xf - --strip-components=1 && chown -R $user_id /data" 2>/dev/null; then + + logMessage SUCCESS "Successfully restored $service_name volume" + return 0 + else + logMessage ERROR "Failed to restore $service_name volume" + return 1 + fi +} +function restoreConfigurations() { + local backup_path="$1" + local step="$2" + local total="$3" + + if [[ $RESTORE_CONFIG -eq 0 ]]; then + return 0 + fi + + showProgress $step $total "Restoring configuration files" + + local config_source_dir="$backup_path/config" + local current_dir="$(pwd)" + + # Legacy format support + if [[ ! 
-d "$config_source_dir" ]]; then + # Check for legacy files in backup root + local legacy_files=("env.php" "auth.json") + for file in "${legacy_files[@]}"; do + if [[ -f "$backup_path/$file" ]]; then + local target_path="" + case "$file" in + env.php) target_path="$current_dir/app/etc/env.php" ;; + auth.json) target_path="$current_dir/auth.json" ;; + esac + + if [[ -n "$target_path" ]]; then + if [[ $RESTORE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would restore $file to $target_path" + else + mkdir -p "$(dirname "$target_path")" + cp "$backup_path/$file" "$target_path" + logMessage INFO "Restored $file" + fi + fi + fi + done + return 0 + fi + + # New format with structured config directory + if [[ $RESTORE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would restore configuration files from $config_source_dir" + return 0 + fi + + # Restore configuration files + if [[ -d "$config_source_dir" ]]; then + find "$config_source_dir" -type f | while read -r config_file; do + local relative_path="${config_file#$config_source_dir/}" + local target_path="$current_dir/$relative_path" + + # Create target directory if needed + mkdir -p "$(dirname "$target_path")" + + # Backup existing file if it exists and is different + if [[ -f "$target_path" ]] && ! 
cmp -s "$config_file" "$target_path"; then + cp "$target_path" "$target_path.backup.$(date +%s)" + logMessage INFO "Backed up existing $relative_path" + fi + + cp "$config_file" "$target_path" + logMessage INFO "Restored $relative_path" + done + fi + + logMessage SUCCESS "Configuration restore completed" +} -if [ -f ".roll/backups/$LATEST_TIMESTAMP/env.php" ]; then - cp .roll/backups/$LATEST_TIMESTAMP/env.php $ENV_PHP_LOC -fi +function performRestore() { + local backup_id="$1" + + # Perform legacy migration if needed + performLegacyMigration + + # Validate database environment + if [[ ${ROLL_DB:-1} -eq 0 ]]; then + logMessage ERROR "Database environment is not enabled (ROLL_DB=0)" + exit 1 + fi + + # Find backup if not specified + if [[ -z "$backup_id" ]]; then + backup_id=$(findLatestBackup) + if [[ -z "$backup_id" ]]; then + logMessage ERROR "No backups found and no backup ID specified" + exit 1 + fi + logMessage INFO "Using latest backup: $backup_id" + fi + + # Determine backup path + local backup_path="$(pwd)/.roll/backups/$backup_id" + + # Check if backup exists as directory + if [[ ! -d "$backup_path" ]]; then + # Try to extract from archive + backup_path=$(extractBackupArchive "$backup_id") + if [[ $? 
-ne 0 ]]; then + logMessage ERROR "Backup not found: $backup_id" + exit 1 + fi + fi + + # Validate backup + validateBackup "$backup_path" || exit 1 + + # Get backup metadata + local metadata=$(getBackupMetadata "$backup_path") + logMessage INFO "Restoring backup: $backup_id" + + # Detect available services in backup + local available_services=($(detectBackupServices "$backup_path")) + if [[ ${#available_services[@]} -eq 0 ]]; then + logMessage ERROR "No services found in backup" + exit 1 + fi + + logMessage INFO "Available services in backup: ${available_services[*]}" + + # Determine which services to restore + local services_to_restore=() + if [[ ${#RESTORE_SERVICES[@]} -gt 0 ]]; then + # Use specified services + for service in "${RESTORE_SERVICES[@]}"; do + if containsElement "$service" "${available_services[@]}"; then + services_to_restore+=("$service") + else + logMessage WARNING "Service $service not found in backup, skipping" + fi + done + else + # Restore all available services + services_to_restore=("${available_services[@]}") + fi + + if [[ ${#services_to_restore[@]} -eq 0 ]]; then + logMessage ERROR "No services to restore" + exit 1 + fi + + logMessage INFO "Restoring services: ${services_to_restore[*]}" + + # Stop environment + stopEnvironment + + # Calculate total steps + local total_steps=${#services_to_restore[@]} + if [[ $RESTORE_CONFIG -eq 1 ]]; then + ((total_steps++)) + fi + + local current_step=0 + + # Restore volumes + for service in "${services_to_restore[@]}"; do + ((current_step++)) + restoreVolume "$service" "$backup_path" $current_step $total_steps + done + + # Restore configurations + if [[ $RESTORE_CONFIG -eq 1 ]]; then + ((current_step++)) + restoreConfigurations "$backup_path" $current_step $total_steps + fi + + # Clean up extracted backup if it was temporary + if [[ "$backup_path" =~ _extracted$ ]]; then + rm -rf "$backup_path" + fi + + if [[ $RESTORE_DRY_RUN -eq 1 ]]; then + logMessage SUCCESS "Dry run completed successfully!" 
+ else + logMessage SUCCESS "Restore completed successfully!" + logMessage INFO "You can now start your environment with: roll env up" + fi +} -if [ -f ".roll/backups/$LATEST_TIMESTAMP/auth.json" ]; then - cp .roll/backups/$LATEST_TIMESTAMP/auth.json $AUTH_LOC +# Main execution +if [[ -z "$RESTORE_BACKUP_ID" ]]; then + # If no backup ID provided, use the latest + RESTORE_BACKUP_ID=$(findLatestBackup) + if [[ -z "$RESTORE_BACKUP_ID" ]]; then + error "No backups found. Please create a backup first with: roll backup" + exit 1 + fi + logMessage INFO "No backup ID specified, using latest: $RESTORE_BACKUP_ID" fi -echo "" -echo "" -echo "------------------ FNISHED BACKUP FROM: .roll/backups/$LATEST_TIMESTAMP/ ---------------------" -echo "" +performRestore "$RESTORE_BACKUP_ID" diff --git a/commands/restore.help b/commands/restore.help index 13d3c12..472ba96 100755 --- a/commands/restore.help +++ b/commands/restore.help @@ -1,11 +1,77 @@ - #!/usr/bin/env bash [[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 ROLL_USAGE=$(cat <' + • Use --force to overwrite existing volumes if they conflict + • Use --no-verify to skip checksums if backup is known to be good + • Check .roll/backups/ directory for available backups + • Legacy backups (old format) are automatically detected and supported EOF ) diff --git a/docs/index.md b/docs/index.md index e85ba90..1555338 100644 --- a/docs/index.md +++ b/docs/index.md @@ -29,6 +29,8 @@ caption: Getting Started installing services usage +backup-restore +backup-restore-quick-reference registry environments configuration diff --git a/docs/usage.md b/docs/usage.md index f123d1a..eb289e1 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -53,6 +53,39 @@ Remove volumes completely: roll env down -v +## Backup and Restore Commands + +Create a backup of all enabled services: + + roll backup + +Create a backup of specific services: + + roll backup db + roll backup redis + +List available 
backups: + + roll backup list + +Show backup information: + + roll backup info 1672531200 + +Restore the latest backup: + + roll restore + +Restore a specific backup: + + roll restore 1672531200 + +Preview what would be restored: + + roll restore --dry-run + +For detailed backup and restore documentation, see the [Backup and Restore](backup-restore.md) page. + ## Further Information Run `roll help` and `roll env -h` for more details and useful command information. From 31846370fae26691ee95c1947351b76e582b81a7 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 4 Jun 2025 10:50:48 +0200 Subject: [PATCH 08/69] update backup and restore commands docs --- docs/backup-restore-quick-reference.md | 122 +++++++++ docs/backup-restore.md | 335 +++++++++++++++++++++++++ 2 files changed, 457 insertions(+) create mode 100644 docs/backup-restore-quick-reference.md create mode 100644 docs/backup-restore.md diff --git a/docs/backup-restore-quick-reference.md b/docs/backup-restore-quick-reference.md new file mode 100644 index 0000000..00a4ad2 --- /dev/null +++ b/docs/backup-restore-quick-reference.md @@ -0,0 +1,122 @@ +# Backup & Restore Quick Reference + +## Quick Commands Cheat Sheet + +### Backup Commands +```bash +# Basic backups +roll backup # Backup all enabled services +roll backup db # Database only +roll backup redis # Redis only +roll backup config # Configuration files only + +# Advanced backups +roll backup --quiet # Silent operation +roll backup --compression=xz # High compression +roll backup --encrypt=password # Encrypted backup +roll backup --include-source # Include source code +roll backup --name="pre-upgrade" # Named backup +roll backup --retention=7 # Auto-cleanup after 7 days + +# Management +roll backup list # List all backups +roll backup info 1672531200 # Show backup details +roll backup clean 30 # Remove backups older than 30 days +``` + +### Restore Commands +```bash +# Basic restore +roll restore # Restore latest backup +roll restore 1672531200 # Restore 
specific backup + +# Advanced restore +roll restore --dry-run # Preview only +roll restore --force # Overwrite existing +roll restore --services=db,redis # Selective restore +roll restore --no-config # Skip configuration +roll restore --decrypt=password # Decrypt backup +roll restore --quiet # Silent operation +``` + +## Common Use Cases + +### Daily Development +```bash +# Quick backup before major changes +roll backup --name="pre-refactor" --quiet + +# Database backup before migration +roll backup db --compression=xz +``` + +### Emergency Recovery +```bash +# Quick restore with force +roll restore --force + +# Preview what would be restored +roll restore --dry-run + +# Restore only database +roll restore --services=db --force +``` + +### Production Data +```bash +# Encrypted backup +roll backup --encrypt=strongpassword --compression=xz + +# Restore encrypted backup +roll restore --decrypt=strongpassword +``` + +## Backup Information + +### Service Types +- `db` - Database (MySQL/MariaDB/PostgreSQL) +- `redis` - Redis cache +- `dragonfly` - Dragonfly cache +- `elasticsearch` - Elasticsearch +- `opensearch` - OpenSearch +- `mongodb` - MongoDB +- `rabbitmq` - RabbitMQ +- `varnish` - Varnish cache +- `config` - Configuration files + +### Compression Options +- `gzip` - Default, good balance +- `xz` - Best compression, slower +- `lz4` - Fastest, larger files +- `none` - No compression + +### File Locations +- Backups: `.roll/backups/` +- Latest: `.roll/backups/latest.tar.gz` +- Archives: `backup__.tar.gz` + +## Tips & Tricks + +### Automation +```bash +# Add to crontab for daily backups +0 2 * * * cd /path/to/project && roll backup --quiet --retention=7 +``` + +### Size Optimization +```bash +# Small backup (no logs, high compression) +roll backup --compression=xz + +# Minimal backup (config only) +roll backup config +``` + +### Safety Checks +```bash +# Always test restore first +roll restore --dry-run + +# Verify backup integrity +roll backup info +``` \ No newline 
at end of file diff --git a/docs/backup-restore.md b/docs/backup-restore.md new file mode 100644 index 0000000..e445936 --- /dev/null +++ b/docs/backup-restore.md @@ -0,0 +1,335 @@ +# Backup and Restore + +RollDev provides powerful backup and restore capabilities for your development environments. The system automatically detects enabled services and creates comprehensive backups with integrity verification, metadata tracking, and flexible restoration options. + +> **Quick Reference**: See the [Backup & Restore Quick Reference](backup-restore-quick-reference.md) for a command cheat sheet. + +## Overview + +The backup and restore system supports: +- **Automatic Service Detection**: Detects all enabled services in your environment +- **Multiple Compression Formats**: gzip, xz, lz4, or no compression +- **Encryption Support**: GPG encryption with passphrase protection +- **Progress Tracking**: Real-time progress indicators +- **Integrity Verification**: Automatic checksum generation and verification +- **Rich Metadata**: JSON metadata with environment and service information +- **Selective Operations**: Backup/restore specific services only +- **Legacy Compatibility**: Works with old backup formats + +## Supported Services + +The backup system automatically detects and supports: + +| Service | Description | Volume Name | +|---------|-------------|-------------| +| Database | MySQL, MariaDB, PostgreSQL | `dbdata` | +| Redis | Redis cache | `redis` | +| Dragonfly | Dragonfly cache | `dragonfly` | +| Elasticsearch | Search engine | `esdata` | +| OpenSearch | Search engine | `osdata` | +| MongoDB | Document database | `mongodb` | +| RabbitMQ | Message queue | `rabbitmq` | +| Varnish | HTTP cache | `varnish` | + +## Backup Commands + +### Basic Usage + +```bash +# Backup all enabled services (default) +roll backup + +# Backup all services explicitly +roll backup all + +# Backup specific services +roll backup db # Database only +roll backup redis # Redis only +roll backup 
elasticsearch # Elasticsearch only +roll backup mongodb # MongoDB only +roll backup config # Configuration files only +``` + +### Advanced Options + +```bash +# Compression options +roll backup --compression=gzip # Default compression +roll backup --compression=xz # High compression +roll backup --compression=lz4 # Fast compression +roll backup --no-compression # No compression + +# Include additional data +roll backup --include-source # Include source code +roll backup --include-logs # Include log files (excluded by default) + +# Encryption +roll backup --encrypt=mypassword # Encrypt with GPG + +# Named backups +roll backup --name="pre-upgrade" --description="Before major update" + +# Quiet operation +roll backup --quiet + +# Retention management +roll backup --retention=7 # Auto-cleanup after 7 days +``` + +### Management Commands + +```bash +# List available backups +roll backup list + +# Show backup information +roll backup info 1672531200 + +# Clean up old backups +roll backup clean 30 # Remove backups older than 30 days +``` + +## Restore Commands + +### Basic Usage + +```bash +# Restore latest backup (all services) +roll restore + +# Restore specific backup by timestamp +roll restore 1672531200 + +# Restore with explicit backup ID +roll restore --backup-id=1672531200 +``` + +### Selective Restoration + +```bash +# Restore specific services only +roll restore --services=db,redis + +# Restore without configuration files +roll restore --no-config + +# Restore specific backup with service selection +roll restore 1672531200 --services=db +``` + +### Advanced Options + +```bash +# Preview what would be restored +roll restore --dry-run + +# Force overwrite existing volumes +roll restore --force + +# Decrypt encrypted backup +roll restore --decrypt=mypassword + +# Quiet operation +roll restore --quiet + +# Skip integrity verification +roll restore --no-verify + +# Skip legacy migration +roll restore --no-legacy-migration +``` + +## Backup Structure + +RollDev 
creates organized backup archives with the following structure: + +``` +.roll/backups/ +├── 1672531200/ # Timestamp-based directory +│ ├── volumes/ # Service volume backups +│ │ ├── db.tar.gz +│ │ ├── redis.tar.gz +│ │ └── elasticsearch.tar.gz +│ ├── config/ # Configuration files +│ │ ├── .env.roll +│ │ ├── app/etc/env.php +│ │ └── auth.json +│ └── metadata/ # Backup metadata +│ ├── backup.json # Rich metadata +│ └── checksums.sha256 # Integrity checksums +├── backup_envname_1672531200.tar.gz # Final compressed archive +└── latest.tar.gz -> backup_envname_1672531200.tar.gz +``` + +## Configuration Files + +The system automatically backs up relevant configuration files: + +### Framework-Agnostic Files +- `.env.roll` - RollDev environment configuration +- `.env` - Application environment file +- `composer.json` and `composer.lock` - PHP dependencies +- `auth.json` - Composer authentication + +### Magento-Specific Files +- `app/etc/env.php` - Magento configuration +- `.roll/roll-env.yml` - Docker Compose overrides + +### Framework-Specific Files +- `config/database.yml` - Rails database configuration +- Other framework-specific configuration files + +## Metadata and Integrity + +Each backup includes comprehensive metadata: + +```json +{ + "timestamp": 1672531200, + "date": "2023-01-01T12:00:00+00:00", + "environment": "myproject", + "version": "0.2.6.5", + "services": ["db:mysql:dbdata", "redis:redis:redis"], + "compression": "gzip", + "encrypted": false, + "name": "pre-upgrade", + "description": "Before major update", + "include_source": false, + "exclude_logs": true, + "docker_compose_version": "2.36.2", + "platform": "Darwin", + "architecture": "arm64" +} +``` + +## Examples + +### Daily Development Backup + +```bash +# Quick backup of current state +roll backup --quiet --name="daily-$(date +%Y%m%d)" +``` + +### Pre-Deployment Backup + +```bash +# Comprehensive backup before deployment +roll backup all --include-source --name="pre-deploy-v2.1" \ + 
--description="Full backup before version 2.1 deployment" +``` + +### Database Migration Backup + +```bash +# Database-only backup before migration +roll backup db --name="pre-migration" --compression=xz +``` + +### Emergency Restore + +```bash +# Quick restore of latest backup +roll restore --force + +# Restore specific service only +roll restore --services=db --force + +# Preview restore without changes +roll restore --dry-run +``` + +### Encrypted Backup for Production Data + +```bash +# Create encrypted backup +roll backup --encrypt=secretpassword --compression=xz \ + --name="production-data" + +# Restore encrypted backup +roll restore --decrypt=secretpassword --backup-id=1672531200 +``` + +## Automation + +### Scheduled Backups + +Add to your crontab for automated backups: + +```bash +# Daily backup at 2 AM with 7-day retention +0 2 * * * cd /path/to/project && roll backup --quiet --retention=7 + +# Weekly full backup with source code +0 2 * * 0 cd /path/to/project && roll backup --include-source --quiet \ + --name="weekly-$(date +%Y%W)" --retention=30 +``` + +### CI/CD Integration + +```bash +# Pre-deployment backup in CI/CD +roll backup --name="pre-deploy-${CI_COMMIT_SHA:0:8}" --quiet + +# Post-deployment verification +roll backup info $(roll backup list | tail -1 | awk '{print $9}' | grep -o '[0-9]\{10\}') +``` + +## Troubleshooting + +### Common Issues + +**Backup fails with permission errors:** +```bash +# Ensure Docker is running and accessible +docker system info + +# Check volume permissions +docker volume inspect ${ROLL_ENV_NAME}_dbdata +``` + +**Restore fails with existing volumes:** +```bash +# Use force flag to overwrite +roll restore --force + +# Or remove volumes manually +docker volume rm ${ROLL_ENV_NAME}_dbdata +``` + +**Encrypted backup won't decrypt:** +```bash +# Ensure GPG is installed +which gpg + +# Verify passphrase +roll restore --decrypt=yourpassword --dry-run +``` + +### Best Practices + +1. 
**Regular Backups**: Create automated daily backups with retention policies +2. **Test Restores**: Periodically test restore procedures in development +3. **Use Descriptive Names**: Name backups with meaningful descriptions +4. **Verify Integrity**: Always verify backup integrity before critical operations +5. **Secure Passwords**: Use strong passphrases for encrypted backups +6. **Monitor Storage**: Keep an eye on backup storage usage +7. **Document Procedures**: Document your backup and restore procedures for your team + +### Performance Tips + +1. **Exclude Logs**: Use `--exclude-logs` (default) to reduce backup size +2. **Choose Compression**: Use `lz4` for speed, `xz` for size, `gzip` for balance +3. **Selective Backups**: Backup only what you need with service selection +4. **Parallel Operations**: Enable parallel operations for faster backups (default) +5. **Local Storage**: Keep backups on fast local storage for quick access + +## Legacy Migration + +The restore command automatically handles migration from legacy formats: + +- **Warden to Roll**: Automatically converts Warden environments to Roll format +- **Old Backup Format**: Supports backups created with the previous backup system +- **Configuration Migration**: Updates configuration files during restoration + +This ensures seamless upgrades and backward compatibility with existing backup archives. 
\ No newline at end of file From e2179765c24aae9e91a39d2f83b82364e7836396 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 4 Jun 2025 11:20:25 +0200 Subject: [PATCH 09/69] fix issue with user permissions --- utils/config.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/config.sh b/utils/config.sh index a6f5d40..dcbb70a 100644 --- a/utils/config.sh +++ b/utils/config.sh @@ -339,6 +339,7 @@ function loadRollConfig() { # Set system-specific exports export USER_ID="$(id -u)" export GROUP_ID="$(id -g)" + export OSTYPE="${OSTYPE}" # Set defaults for unset values local i=0 From a7dab3d1c5f5d86d9da2ed7b4d08bca6350ec4e4 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 4 Jun 2025 12:39:47 +0200 Subject: [PATCH 10/69] fix encryption for backups --- commands/backup.cmd | 60 ++++++++++++++++++++++++++++++++++++++---- docs/backup-restore.md | 40 +++++++++++++++++++++++++++- 2 files changed, 94 insertions(+), 6 deletions(-) diff --git a/commands/backup.cmd b/commands/backup.cmd index 7d6f588..4b661d5 100755 --- a/commands/backup.cmd +++ b/commands/backup.cmd @@ -400,16 +400,49 @@ function encryptBackup() { logMessage INFO "Encrypting backup files..." 
- find "$backup_dir" -name "*.tar*" -type f | while read -r file; do + # Create temporary file for new checksums + local temp_checksums="$backup_dir/metadata/checksums.sha256.new" + [[ -f "$backup_dir/metadata/checksums.sha256" ]] && cp "$backup_dir/metadata/checksums.sha256" "$temp_checksums" + + # Process each tar file for encryption + while IFS= read -r -d '' file; do if command -v gpg >/dev/null 2>&1; then - gpg --batch --yes --cipher-algo AES256 --compress-algo 1 \ + local encrypted_file="${file}.gpg" + if gpg --batch --yes --cipher-algo AES256 --compress-algo 1 \ --symmetric --passphrase "$passphrase" \ - --output "$file.gpg" "$file" && rm "$file" + --output "$encrypted_file" "$file"; then + + # Generate new checksum for encrypted file + local relative_path="${file#$backup_dir/}" + local encrypted_relative_path="${relative_path}.gpg" + local checksum=$(sha256sum "$encrypted_file" | cut -d' ' -f1) + + # Update checksums file to replace original with encrypted version + if [[ -f "$temp_checksums" ]]; then + sed -i.bak "s|^[a-f0-9]* ${relative_path}$|${checksum} ${encrypted_relative_path}|" "$temp_checksums" + rm -f "$temp_checksums.bak" + fi + + # Remove original file + rm "$file" + + logMessage INFO "Encrypted $(basename "$file")" + else + logMessage ERROR "Failed to encrypt $file" + rm -f "$temp_checksums" + return 1 + fi else logMessage WARNING "GPG not available, skipping encryption" + rm -f "$temp_checksums" return 1 fi - done + done < <(find "$backup_dir" -name "*.tar*" -type f -print0) + + # Replace original checksums file with updated one + if [[ -f "$temp_checksums" ]]; then + mv "$temp_checksums" "$backup_dir/metadata/checksums.sha256" + fi logMessage SUCCESS "Backup encryption completed" } @@ -424,11 +457,28 @@ function verifyBackup() { logMessage INFO "Verifying backup integrity..." 
if [[ -f "$backup_dir/metadata/checksums.sha256" ]]; then - if (cd "$backup_dir" && sha256sum -c metadata/checksums.sha256 >/dev/null 2>&1); then + # Verify checksums with detailed output + local verify_output + if verify_output=$(cd "$backup_dir" && sha256sum -c metadata/checksums.sha256 2>&1); then logMessage SUCCESS "Backup verification passed" return 0 else logMessage ERROR "Backup verification failed" + + # Show which files failed verification + local failed_files=$(echo "$verify_output" | grep -E "(No such file|FAILED)" | head -5) + if [[ -n "$failed_files" ]]; then + logMessage ERROR "Failed files:" + echo "$failed_files" | while read -r line; do + logMessage ERROR " $line" + done + + # Check if this might be an encryption issue + if echo "$verify_output" | grep -q "No such file or directory"; then + logMessage INFO "Files may be encrypted. Use --no-verify to skip verification for encrypted backups." + fi + fi + return 1 fi else diff --git a/docs/backup-restore.md b/docs/backup-restore.md index e445936..ae82bcb 100644 --- a/docs/backup-restore.md +++ b/docs/backup-restore.md @@ -332,4 +332,42 @@ The restore command automatically handles migration from legacy formats: - **Old Backup Format**: Supports backups created with the previous backup system - **Configuration Migration**: Updates configuration files during restoration -This ensures seamless upgrades and backward compatibility with existing backup archives. \ No newline at end of file +This ensures seamless upgrades and backward compatibility with existing backup archives. 
+
+# Encryption Support
+
+RollDev supports GPG encryption for backup files using AES256 cipher with passphrase protection:
+
+```bash
+# Create encrypted backup
+roll backup --encrypt=mypassword
+
+# Restore encrypted backup
+roll restore --decrypt=mypassword
+```
+
+## Encryption Behavior
+
+- **File Encryption**: All backup archives (`.tar`, `.tar.gz`, `.tar.xz`, `.tar.lz4`) are encrypted to a matching `.gpg` file
+- **Checksum Updates**: Checksums are automatically recalculated for encrypted files
+- **Verification**: Integrity verification works seamlessly with encrypted backups
+- **Security**: Uses GPG with AES256 cipher and compression
+
+## Troubleshooting Encryption
+
+If you encounter issues with encrypted backups:
+
+```bash
+# Skip verification for problematic encrypted backups
+roll backup --encrypt=password --no-verify
+
+# Check GPG availability
+command -v gpg
+
+# Restore with explicit decryption
+roll restore --decrypt=password --backup-id=1672531200
+```
+
+**Note**: Encrypted backups require the same passphrase for restoration. Store your passphrase securely!
+ +## Backup Structure \ No newline at end of file From f42e9960ca14bc6eb921e36799b268478784ff94 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 4 Jun 2025 17:24:44 +0200 Subject: [PATCH 11/69] fixes in backup / restore commands --- commands/backup.cmd | 187 +++++++++++++++++++++++++++----- commands/backup.help | 9 +- commands/restore.cmd | 244 +++++++++++++++++++++++++++++++++++++----- commands/restore.help | 6 +- 4 files changed, 389 insertions(+), 57 deletions(-) diff --git a/commands/backup.cmd b/commands/backup.cmd index 4b661d5..0734d5d 100755 --- a/commands/backup.cmd +++ b/commands/backup.cmd @@ -15,8 +15,11 @@ BACKUP_PARALLEL=1 BACKUP_RETENTION_DAYS=30 BACKUP_VERIFY=1 BACKUP_QUIET=0 +BACKUP_OUTPUT_ID=0 BACKUP_NAME="" BACKUP_DESCRIPTION="" +BACKUP_DUPLICATE_NAME="" # New environment name for duplication +BACKUP_DUPLICATE_DOMAIN="" # New domain for duplication PROGRESS=1 # Parse command line arguments @@ -40,6 +43,11 @@ while [[ $# -gt 0 ]]; do BACKUP_ENCRYPT="${1#*=}" shift ;; + --encrypt) + # Flag without value - will prompt for password later + BACKUP_ENCRYPT="PROMPT" + shift + ;; --no-compression) BACKUP_COMPRESSION="none" shift @@ -69,6 +77,12 @@ while [[ $# -gt 0 ]]; do PROGRESS=0 shift ;; + --output-id) + BACKUP_OUTPUT_ID=1 + BACKUP_QUIET=1 + PROGRESS=0 + shift + ;; --name=*) BACKUP_NAME="${1#*=}" shift @@ -77,6 +91,14 @@ while [[ $# -gt 0 ]]; do BACKUP_DESCRIPTION="${1#*=}" shift ;; + --duplicate-name=*) + BACKUP_DUPLICATE_NAME="${1#*=}" + shift + ;; + --duplicate-domain=*) + BACKUP_DUPLICATE_DOMAIN="${1#*=}" + shift + ;; --no-progress) PROGRESS=0 shift @@ -108,6 +130,39 @@ if (( ${#BACKUP_COMMAND_PARAMS[@]} == 0 )); then fi # Utility functions for backup operations +function promptPassword() { + local prompt="$1" + local password="" + local confirm="" + + # Don't prompt in quiet mode or non-interactive shells + if [[ $BACKUP_QUIET -eq 1 ]] || [[ ! -t 0 ]]; then + error "Password required but running in non-interactive mode. 
Use --encrypt=password instead." + exit 1 + fi + + echo -n "$prompt: " >&2 + read -s password + echo >&2 + + if [[ -z "$password" ]]; then + error "Password cannot be empty" + exit 1 + fi + + # Confirm password for security + echo -n "Confirm password: " >&2 + read -s confirm + echo >&2 + + if [[ "$password" != "$confirm" ]]; then + error "Passwords do not match" + exit 1 + fi + + echo "$password" +} + function showProgress() { [[ $PROGRESS -eq 0 ]] && return local current=$1 @@ -122,9 +177,8 @@ function showProgress() { printf "%*s" $((bar_length - filled_length)) | tr ' ' '-' printf "] %d%% %s" $percent "$description" - if [[ $current -eq $total ]]; then - echo "" - fi + # Always end with a newline for clean output + echo "" } function logMessage() { @@ -263,7 +317,7 @@ function backupVolume() { # Check if volume exists if ! docker volume inspect "$full_volume_name" >/dev/null 2>&1; then logMessage WARNING "Volume $full_volume_name does not exist, skipping" - return 0 + return 1 # Return failure so this service won't be included in successful_services fi # Create backup with appropriate user permissions based on service @@ -341,8 +395,27 @@ function backupConfigurations() { if [[ -f "$(pwd)/$config_file" ]]; then local target_dir="$backup_dir/config/$(dirname "$config_file")" mkdir -p "$target_dir" - cp "$(pwd)/$config_file" "$target_dir/" - logMessage INFO "Backed up $config_file" + + # Check if this is a duplication backup and needs environment name replacement + if [[ -n "$BACKUP_DUPLICATE_NAME" && "$config_file" == ".env.roll" ]]; then + # Create a modified version of .env.roll with new environment names + local temp_env_file="$target_dir/$(basename "$config_file")" + cp "$(pwd)/$config_file" "$temp_env_file" + + # Replace ROLL_ENV_NAME and TRAEFIK_DOMAIN for duplication + sed_inplace "s/^ROLL_ENV_NAME=.*/ROLL_ENV_NAME=${BACKUP_DUPLICATE_NAME}/" "$temp_env_file" + if [[ -n "$BACKUP_DUPLICATE_DOMAIN" ]]; then + sed_inplace 
"s/^TRAEFIK_DOMAIN=.*/TRAEFIK_DOMAIN=${BACKUP_DUPLICATE_DOMAIN}/" "$temp_env_file" + else + sed_inplace "s/^TRAEFIK_DOMAIN=.*/TRAEFIK_DOMAIN=${BACKUP_DUPLICATE_NAME}.test/" "$temp_env_file" + fi + + logMessage INFO "Backed up $config_file (modified for duplication: ${BACKUP_DUPLICATE_NAME})" + else + # Copy file as-is for non-duplication backups or other files + cp "$(pwd)/$config_file" "$target_dir/" + logMessage INFO "Backed up $config_file" + fi fi done @@ -404,7 +477,7 @@ function encryptBackup() { local temp_checksums="$backup_dir/metadata/checksums.sha256.new" [[ -f "$backup_dir/metadata/checksums.sha256" ]] && cp "$backup_dir/metadata/checksums.sha256" "$temp_checksums" - # Process each tar file for encryption + # Process tar files for encryption while IFS= read -r -d '' file; do if command -v gpg >/dev/null 2>&1; then local encrypted_file="${file}.gpg" @@ -419,8 +492,7 @@ function encryptBackup() { # Update checksums file to replace original with encrypted version if [[ -f "$temp_checksums" ]]; then - sed -i.bak "s|^[a-f0-9]* ${relative_path}$|${checksum} ${encrypted_relative_path}|" "$temp_checksums" - rm -f "$temp_checksums.bak" + sed_inplace "s|^[a-f0-9]* ${relative_path}$|${checksum} ${encrypted_relative_path}|" "$temp_checksums" fi # Remove original file @@ -439,6 +511,42 @@ function encryptBackup() { fi done < <(find "$backup_dir" -name "*.tar*" -type f -print0) + # Process configuration files for encryption + if [[ -d "$backup_dir/config" ]]; then + while IFS= read -r -d '' file; do + if command -v gpg >/dev/null 2>&1; then + local encrypted_file="${file}.gpg" + if gpg --batch --yes --cipher-algo AES256 --compress-algo 1 \ + --symmetric --passphrase "$passphrase" \ + --output "$encrypted_file" "$file"; then + + # Generate checksum for encrypted config file + local relative_path="${file#$backup_dir/}" + local encrypted_relative_path="${relative_path}.gpg" + local checksum=$(sha256sum "$encrypted_file" | cut -d' ' -f1) + + # Add checksum entry for the 
encrypted config file + if [[ -f "$temp_checksums" ]]; then + echo "$checksum $encrypted_relative_path" >> "$temp_checksums" + fi + + # Remove original file + rm "$file" + + logMessage INFO "Encrypted config file $(basename "$file")" + else + logMessage ERROR "Failed to encrypt config file $file" + rm -f "$temp_checksums" + return 1 + fi + else + logMessage WARNING "GPG not available, skipping encryption" + rm -f "$temp_checksums" + return 1 + fi + done < <(find "$backup_dir/config" -type f -print0) + fi + # Replace original checksums file with updated one if [[ -f "$temp_checksums" ]]; then mv "$temp_checksums" "$backup_dir/metadata/checksums.sha256" @@ -513,6 +621,11 @@ function performBackup() { # Validate inputs validateCompression || exit 1 + # Handle interactive password prompt if needed + if [[ "$BACKUP_ENCRYPT" == "PROMPT" ]]; then + BACKUP_ENCRYPT=$(promptPassword "Enter encryption password") + fi + # Detect enabled services local enabled_services=($(detectEnabledServices)) if [[ ${#enabled_services[@]} -eq 0 ]]; then @@ -531,6 +644,9 @@ function performBackup() { logMessage INFO "Starting backup to: $backup_dir" logMessage INFO "Backup type: $backup_type, Compression: $BACKUP_COMPRESSION" + # Track successfully backed up services + local successful_services=() + # Calculate total steps local total_steps=2 # metadata + config case "$backup_type" in @@ -550,11 +666,6 @@ function performBackup() { local current_step=0 - # Generate metadata - ((current_step++)) - generateBackupMetadata "$backup_dir" "${enabled_services[@]}" - showProgress $current_step $total_steps "Generating metadata" - # Backup based on type case "$backup_type" in all) @@ -562,7 +673,9 @@ function performBackup() { for service_info in "${enabled_services[@]}"; do IFS=':' read -r service_name service_type volume_name <<< "$service_info" ((current_step++)) - backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps + if backupVolume "$service_name" "$volume_name" 
"$backup_dir" $current_step $total_steps; then + successful_services+=("$service_info") + fi done # Backup configurations @@ -581,7 +694,9 @@ function performBackup() { IFS=':' read -r service_name service_type volume_name <<< "$service_info" if [[ "$service_type" =~ ^(mysql|mariadb|postgres)$ ]]; then ((current_step++)) - backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps + if backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps; then + successful_services+=("$service_info") + fi break fi done @@ -592,7 +707,9 @@ function performBackup() { IFS=':' read -r service_name service_type volume_name <<< "$service_info" if [[ "$service_type" =~ ^(redis|dragonfly)$ ]]; then ((current_step++)) - backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps + if backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps; then + successful_services+=("$service_info") + fi break fi done @@ -603,7 +720,9 @@ function performBackup() { IFS=':' read -r service_name service_type volume_name <<< "$service_info" if [[ "$service_type" =~ ^(elasticsearch|opensearch)$ ]]; then ((current_step++)) - backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps + if backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps; then + successful_services+=("$service_info") + fi break fi done @@ -614,7 +733,9 @@ function performBackup() { IFS=':' read -r service_name service_type volume_name <<< "$service_info" if [[ "$service_type" == "mongodb" ]]; then ((current_step++)) - backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps + if backupVolume "$service_name" "$volume_name" "$backup_dir" $current_step $total_steps; then + successful_services+=("$service_info") + fi break fi done @@ -630,6 +751,11 @@ function performBackup() { ;; esac + # Generate metadata with successfully backed up services + 
((current_step++)) + generateBackupMetadata "$backup_dir" "${successful_services[@]}" + showProgress $current_step $total_steps "Generating metadata" + # Encrypt if requested if [[ -n "$BACKUP_ENCRYPT" ]]; then encryptBackup "$backup_dir" "$BACKUP_ENCRYPT" @@ -648,10 +774,15 @@ function performBackup() { # Update latest symlink (cd "$(pwd)/.roll/backups" && ln -sf "$archive_name" "latest$(getCompressionExtension)") - logMessage SUCCESS "Backup completed successfully!" - logMessage INFO "Backup ID: $timestamp" - logMessage INFO "Archive: $archive_name ($(du -h "$(pwd)/.roll/backups/$archive_name" | cut -f1))" - logMessage INFO "Location: $(pwd)/.roll/backups/" + if [[ $BACKUP_OUTPUT_ID -eq 1 ]]; then + # Only output the backup ID for programmatic use + echo "$timestamp" + else + logMessage SUCCESS "Backup completed successfully!" + logMessage INFO "Backup ID: $timestamp" + logMessage INFO "Archive: $archive_name ($(du -h "$(pwd)/.roll/backups/$archive_name" | cut -f1))" + logMessage INFO "Location: $(pwd)/.roll/backups/" + fi # Clean up directory version (keep archive) rm -rf "$backup_dir" @@ -661,9 +792,13 @@ function performBackup() { fi # Cleanup old backups - cleanupOldBackups - - logMessage SUCCESS "Backup process completed!" + if [[ $BACKUP_OUTPUT_ID -eq 0 ]]; then + cleanupOldBackups + logMessage SUCCESS "Backup process completed!" 
+ else + # Silent cleanup for output-id mode + cleanupOldBackups >/dev/null 2>&1 + fi } # Main execution diff --git a/commands/backup.help b/commands/backup.help index 5d14c2d..9ae2bf9 100755 --- a/commands/backup.help +++ b/commands/backup.help @@ -29,6 +29,7 @@ ROLL_USAGE=$(cat <&2 + read -s password + echo >&2 + + if [[ -z "$password" ]]; then + error "Password cannot be empty" + exit 1 + fi + + echo "$password" +} + +function detectEncryptedBackup() { + local backup_path="$1" + + # Check if backup contains .gpg files + if [[ -d "$backup_path" ]]; then + # Directory format - check for .gpg files + if find "$backup_path" -name "*.gpg" -type f | head -1 | grep -q .; then + return 0 # Encrypted + fi + else + # Archive format - check if archive contains .gpg files + local archive_file="$backup_path" + if [[ -f "$archive_file" ]]; then + # Determine decompression command + local decompress_cmd="cat" + case "$archive_file" in + *.tar.gz) decompress_cmd="gzip -dc" ;; + *.tar.xz) decompress_cmd="xz -dc" ;; + *.tar.lz4) decompress_cmd="lz4 -dc" ;; + esac + + # Check if archive contains .gpg files + if $decompress_cmd "$archive_file" | tar -tf - 2>/dev/null | grep -q "\.gpg$"; then + return 0 # Encrypted + fi + fi + fi + + return 1 # Not encrypted +} + function showProgress() { [[ $PROGRESS -eq 0 ]] && return local current=$1 @@ -101,9 +159,8 @@ function showProgress() { printf "%*s" $((bar_length - filled_length)) | tr ' ' '-' printf "] %d%% %s" $percent "$description" - if [[ $current -eq $total ]]; then - echo "" - fi + # Always end with a newline for clean output + echo "" } function logMessage() { @@ -359,9 +416,25 @@ function restoreVolume() { local volume_mapping=$(getVolumeMapping "$service_name") IFS=':' read -r volume_name service_type <<< "$volume_mapping" - # Determine backup file location + # Determine backup file location (check for both encrypted and unencrypted) local backup_file="" - if [[ -f "$backup_path/volumes/${service_name}.tar.gz" ]]; then + local 
is_encrypted=false + + # Check for encrypted files first (.gpg extension) + if [[ -f "$backup_path/volumes/${service_name}.tar.gz.gpg" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.gz.gpg" + is_encrypted=true + elif [[ -f "$backup_path/volumes/${service_name}.tar.xz.gpg" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.xz.gpg" + is_encrypted=true + elif [[ -f "$backup_path/volumes/${service_name}.tar.lz4.gpg" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.lz4.gpg" + is_encrypted=true + elif [[ -f "$backup_path/volumes/${service_name}.tar.gpg" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.gpg" + is_encrypted=true + # Check for unencrypted files + elif [[ -f "$backup_path/volumes/${service_name}.tar.gz" ]]; then backup_file="$backup_path/volumes/${service_name}.tar.gz" elif [[ -f "$backup_path/volumes/${service_name}.tar.xz" ]]; then backup_file="$backup_path/volumes/${service_name}.tar.xz" @@ -378,10 +451,22 @@ function restoreVolume() { fi if [[ $RESTORE_DRY_RUN -eq 1 ]]; then - logMessage INFO "[DRY RUN] Would restore $service_name from $backup_file to volume $volume_name" + if [[ $is_encrypted == true ]]; then + logMessage INFO "[DRY RUN] Would decrypt and restore $service_name from $backup_file to volume $volume_name" + else + logMessage INFO "[DRY RUN] Would restore $service_name from $backup_file to volume $volume_name" + fi return 0 fi + # Validate decryption password if file is encrypted + if [[ $is_encrypted == true ]]; then + if [[ -z "$RESTORE_DECRYPT" ]]; then + logMessage ERROR "Encrypted backup file found but no decryption password provided" + return 1 + fi + fi + # Get Docker Compose version for proper labeling local docker_compose_version=$(docker compose version 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+' | head -1) local volume_base_name=$(echo "$volume_name" | sed "s/${ROLL_ENV_NAME}_//") @@ -405,7 +490,14 @@ function restoreVolume() { # Determine decompression command and 
user permissions local decompress_cmd="cat" - case "$backup_file" in + local original_file="$backup_file" + + # Remove .gpg extension to determine original compression + if [[ $is_encrypted == true ]]; then + original_file="${backup_file%.gpg}" + fi + + case "$original_file" in *.tar.gz) decompress_cmd="gzip -d" ;; *.tar.xz) decompress_cmd="xz -d" ;; *.tar.lz4) decompress_cmd="lz4 -d" ;; @@ -417,20 +509,33 @@ function restoreVolume() { mysql|mariadb|postgres) user_id="999:999" ;; esac - # Restore the volume data + # Restore the volume data with decryption if needed local temp_container="${ROLL_ENV_NAME}_restore_${service_name}_$$" - if $decompress_cmd < "$backup_file" | docker run --rm --name "$temp_container" \ - --mount source="$volume_name",target=/data \ - --user "$user_id" \ - -i alpine:latest \ - sh -c "cd /data && tar -xf - --strip-components=1 && chown -R $user_id /data" 2>/dev/null; then + if [[ $is_encrypted == true ]]; then + # Decrypt and decompress pipeline - extract as root first, then fix permissions + local restore_cmd="gpg --batch --yes --quiet --passphrase \"$RESTORE_DECRYPT\" --decrypt \"$backup_file\" | $decompress_cmd | docker run --rm --name \"$temp_container\" --mount source=\"$volume_name\",target=/data -i alpine:latest sh -c \"cd /data && tar -xf - --strip-components=1 && chown -R $user_id /data\"" - logMessage SUCCESS "Successfully restored $service_name volume" - return 0 + if eval "$restore_cmd" 2>/dev/null; then + logMessage SUCCESS "Successfully restored and decrypted $service_name volume" + return 0 + else + logMessage ERROR "Failed to decrypt and restore $service_name volume" + return 1 + fi else - logMessage ERROR "Failed to restore $service_name volume" - return 1 + # Regular restore without decryption + if $decompress_cmd < "$backup_file" | docker run --rm --name "$temp_container" \ + --mount source="$volume_name",target=/data \ + -i alpine:latest \ + sh -c "cd /data && tar -xf - --strip-components=1 && chown -R $user_id /data" 
2>/dev/null; then + + logMessage SUCCESS "Successfully restored $service_name volume" + return 0 + else + logMessage ERROR "Failed to restore $service_name volume" + return 1 + fi fi } @@ -450,10 +555,21 @@ function restoreConfigurations() { # Legacy format support if [[ ! -d "$config_source_dir" ]]; then - # Check for legacy files in backup root + # Check for legacy files in backup root (both encrypted and unencrypted) local legacy_files=("env.php" "auth.json") for file in "${legacy_files[@]}"; do - if [[ -f "$backup_path/$file" ]]; then + local source_file="" + local is_encrypted=false + + # Check for encrypted version first + if [[ -f "$backup_path/${file}.gpg" ]]; then + source_file="$backup_path/${file}.gpg" + is_encrypted=true + elif [[ -f "$backup_path/$file" ]]; then + source_file="$backup_path/$file" + fi + + if [[ -n "$source_file" ]]; then local target_path="" case "$file" in env.php) target_path="$current_dir/app/etc/env.php" ;; @@ -462,11 +578,31 @@ function restoreConfigurations() { if [[ -n "$target_path" ]]; then if [[ $RESTORE_DRY_RUN -eq 1 ]]; then - logMessage INFO "[DRY RUN] Would restore $file to $target_path" + if [[ $is_encrypted == true ]]; then + logMessage INFO "[DRY RUN] Would decrypt and restore $file to $target_path" + else + logMessage INFO "[DRY RUN] Would restore $file to $target_path" + fi else mkdir -p "$(dirname "$target_path")" - cp "$backup_path/$file" "$target_path" - logMessage INFO "Restored $file" + + if [[ $is_encrypted == true ]]; then + # Decrypt the file directly to target location + if [[ -n "$RESTORE_DECRYPT" ]]; then + if gpg --batch --yes --quiet --passphrase "$RESTORE_DECRYPT" --decrypt "$source_file" > "$target_path"; then + logMessage INFO "Decrypted and restored $file" + else + logMessage ERROR "Failed to decrypt $file" + return 1 + fi + else + logMessage ERROR "Encrypted config file found but no decryption password provided" + return 1 + fi + else + cp "$source_file" "$target_path" + logMessage INFO "Restored 
$file" + fi fi fi fi @@ -480,23 +616,55 @@ function restoreConfigurations() { return 0 fi - # Restore configuration files + # Restore configuration files (both encrypted and unencrypted) if [[ -d "$config_source_dir" ]]; then + # Process all files including .gpg files find "$config_source_dir" -type f | while read -r config_file; do local relative_path="${config_file#$config_source_dir/}" + local is_encrypted=false + + # Check if file is encrypted + if [[ "$config_file" == *.gpg ]]; then + is_encrypted=true + # Remove .gpg extension for target path + relative_path="${relative_path%.gpg}" + fi + local target_path="$current_dir/$relative_path" # Create target directory if needed mkdir -p "$(dirname "$target_path")" - # Backup existing file if it exists and is different - if [[ -f "$target_path" ]] && ! cmp -s "$config_file" "$target_path"; then - cp "$target_path" "$target_path.backup.$(date +%s)" - logMessage INFO "Backed up existing $relative_path" + # Backup existing file if it exists + if [[ -f "$target_path" ]]; then + if [[ $is_encrypted == true ]]; then + # For encrypted files, we can't easily compare so always backup + cp "$target_path" "$target_path.backup.$(date +%s)" + logMessage INFO "Backed up existing $relative_path" + elif ! 
cmp -s "$config_file" "$target_path"; then + cp "$target_path" "$target_path.backup.$(date +%s)" + logMessage INFO "Backed up existing $relative_path" + fi fi - cp "$config_file" "$target_path" - logMessage INFO "Restored $relative_path" + if [[ $is_encrypted == true ]]; then + # Decrypt the file + if [[ -n "$RESTORE_DECRYPT" ]]; then + if gpg --batch --yes --quiet --passphrase "$RESTORE_DECRYPT" --decrypt "$config_file" > "$target_path"; then + logMessage INFO "Decrypted and restored $relative_path" + else + logMessage ERROR "Failed to decrypt $relative_path" + return 1 + fi + else + logMessage ERROR "Encrypted config file found but no decryption password provided" + return 1 + fi + else + # Copy unencrypted file + cp "$config_file" "$target_path" + logMessage INFO "Restored $relative_path" + fi done fi @@ -538,6 +706,24 @@ function performRestore() { fi fi + # Detect if backup is encrypted and handle password prompting + if detectEncryptedBackup "$backup_path"; then + if [[ -z "$RESTORE_DECRYPT" ]]; then + # No password provided, prompt for it + RESTORE_DECRYPT=$(promptPassword "Encrypted backup detected. Enter decryption password") + elif [[ "$RESTORE_DECRYPT" == "PROMPT" ]]; then + # Explicit prompt requested + RESTORE_DECRYPT=$(promptPassword "Enter decryption password") + fi + + if [[ -z "$RESTORE_DECRYPT" ]]; then + logMessage ERROR "Encrypted backup requires a password. Use --decrypt=password or --decrypt to prompt." 
+ exit 1 + fi + + logMessage INFO "Encrypted backup detected, will decrypt during restoration" + fi + # Validate backup validateBackup "$backup_path" || exit 1 diff --git a/commands/restore.help b/commands/restore.help index 472ba96..36b6b75 100755 --- a/commands/restore.help +++ b/commands/restore.help @@ -28,6 +28,7 @@ ROLL_USAGE=$(cat < Date: Wed, 4 Jun 2025 17:25:00 +0200 Subject: [PATCH 12/69] fix in parameters in printf --- utils/core.sh | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/utils/core.sh b/utils/core.sh index fb31a62..49b564e 100644 --- a/utils/core.sh +++ b/utils/core.sh @@ -6,19 +6,19 @@ DOCKER_PEERED_SERVICES=("traefik" "tunnel" "mailhog") ## messaging functions function success { - >&2 printf "\033[32mSUCCESS\033[0m: $@\n" + >&2 printf "\033[32mSUCCESS\033[0m: %s\n" "$*" } function info { - >&2 printf "\033[33mINFO\033[0m: $@\n" + >&2 printf "\033[33mINFO\033[0m: %s\n" "$*" } function warning { - >&2 printf "\033[33mWARNING\033[0m: $@\n" + >&2 printf "\033[33mWARNING\033[0m: %s\n" "$*" } function error { - >&2 printf "\033[31mERROR\033[0m: $@\n" + >&2 printf "\033[31mERROR\033[0m: %s\n" "$*" } function fatal { @@ -124,4 +124,25 @@ function disconnectPeeredServices { # Main logic with the timeout function function isOnline() { (ping -q -c1 -t 2 8.8.8.8 &>/dev/null && echo "true") || (ping -q -c1 -t 2 1.1.1.1 &>/dev/null && echo "true") || echo "false" +} + +## cross-platform sed in-place editing function +## works on both macOS (BSD sed) and Linux (GNU sed) +function sed_inplace() { + local pattern="$1" + local file="$2" + local backup_ext="${3:-.bak}" + + if [[ "$OSTYPE" == "darwin"* ]]; then + # macOS (BSD sed) - requires backup extension + sed -i "$backup_ext" "$pattern" "$file" + else + # Linux (GNU sed) - backup extension is optional + sed -i"$backup_ext" "$pattern" "$file" + fi + + # Remove backup file if it exists and we used .bak extension + if [[ "$backup_ext" == ".bak" && -f 
"${file}${backup_ext}" ]]; then + rm -f "${file}${backup_ext}" + fi } \ No newline at end of file From ef85c2878d6ca0e1995fceab9c228587f73a82f1 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 4 Jun 2025 18:45:09 +0200 Subject: [PATCH 13/69] Add environment duplication command --- bin/roll | 2 +- commands/duplicate.cmd | 599 ++++++++++++++++++++++++++++++++++++++++ commands/duplicate.help | 33 +++ docs/backup-restore.md | 31 ++- 4 files changed, 661 insertions(+), 4 deletions(-) create mode 100644 commands/duplicate.cmd create mode 100644 commands/duplicate.help diff --git a/bin/roll b/bin/roll index 7392208..ef26c45 100755 --- a/bin/roll +++ b/bin/roll @@ -40,7 +40,7 @@ declare ROLL_PARAMS=() declare ROLL_CMD_VERB= declare ROLL_CMD_EXEC= declare ROLL_CMD_HELP= -declare ROLL_CMD_ANYARGS=(svc env db redis sync shell debug rootnotty rootshell clinotty root node npm cli copyfromcontainer copytocontainer composer grunt magento magerun backup restore) +declare ROLL_CMD_ANYARGS=(svc env db redis sync shell debug rootnotty rootshell clinotty root node npm cli copyfromcontainer copytocontainer composer grunt magento magerun backup restore duplicate) ## parse first argument as command and determine validity if (( "$#" )); then diff --git a/commands/duplicate.cmd b/commands/duplicate.cmd new file mode 100644 index 0000000..963c342 --- /dev/null +++ b/commands/duplicate.cmd @@ -0,0 +1,599 @@ +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +# Load core utilities and configuration +ROLL_ENV_PATH="$(locateEnvPath)" || exit $? +loadEnvConfig "${ROLL_ENV_PATH}" || exit $? 
+assertDockerRunning + +# Default configuration values +DUPLICATE_NAME="" +DUPLICATE_INCLUDE_SOURCE=1 +DUPLICATE_ENCRYPT="" +DUPLICATE_START_ENV=1 +DUPLICATE_UPDATE_URLS=1 +DUPLICATE_DRY_RUN=0 +DUPLICATE_QUIET=0 +DUPLICATE_FORCE=0 +PROGRESS=1 + +# Parse command line arguments +POSITIONAL_ARGS=() +# Start with any arguments passed from the main roll script +if [[ -n "${ROLL_PARAMS[*]}" ]]; then + POSITIONAL_ARGS+=("${ROLL_PARAMS[@]}") +fi + +while [[ $# -gt 0 ]]; do + case "$1" in + --help|-h) + roll duplicate --help + exit 0 + ;; + --encrypt=*) + DUPLICATE_ENCRYPT="${1#*=}" + shift + ;; + --encrypt) + # Flag without value - will prompt for password later + DUPLICATE_ENCRYPT="PROMPT" + shift + ;; + --no-source) + DUPLICATE_INCLUDE_SOURCE=0 + shift + ;; + --no-start) + DUPLICATE_START_ENV=0 + shift + ;; + --no-urls) + DUPLICATE_UPDATE_URLS=0 + shift + ;; + --dry-run) + DUPLICATE_DRY_RUN=1 + shift + ;; + --quiet|-q) + DUPLICATE_QUIET=1 + PROGRESS=0 + shift + ;; + --force|-f) + DUPLICATE_FORCE=1 + shift + ;; + --no-progress) + PROGRESS=0 + shift + ;; + --) + shift + break + ;; + -*) + error "Unknown option: $1" + exit 1 + ;; + *) + # Collect positional arguments + POSITIONAL_ARGS+=("$1") + shift + ;; + esac +done + +# Add any remaining arguments after -- to positional args +POSITIONAL_ARGS+=("$@") + +# Extract environment name from positional arguments +if [[ ${#POSITIONAL_ARGS[@]} -gt 0 ]]; then + DUPLICATE_NAME="${POSITIONAL_ARGS[0]}" +fi + +# Utility functions for duplicate operations +function promptPassword() { + local prompt="$1" + local password="" + local confirm="" + + # Don't prompt in quiet mode or non-interactive shells + if [[ $DUPLICATE_QUIET -eq 1 ]] || [[ ! -t 0 ]]; then + error "Password required but running in non-interactive mode. Use --encrypt=password instead." 
+ exit 1 + fi + + echo -n "$prompt: " >&2 + read -s password + echo >&2 + + if [[ -z "$password" ]]; then + error "Password cannot be empty" + exit 1 + fi + + # Confirm password for security + echo -n "Confirm password: " >&2 + read -s confirm + echo >&2 + + if [[ "$password" != "$confirm" ]]; then + error "Passwords do not match" + exit 1 + fi + + echo "$password" +} + +function showProgress() { + [[ $PROGRESS -eq 0 ]] && return + local current=$1 + local total=$2 + local description="$3" + local percent=$((current * 100 / total)) + local bar_length=30 + local filled_length=$((percent * bar_length / 100)) + + printf "\r[" + printf "%*s" $filled_length | tr ' ' '=' + printf "%*s" $((bar_length - filled_length)) | tr ' ' '-' + printf "] %d%% %s" $percent "$description" + + # Always end with a newline for clean output + echo "" +} + +function logMessage() { + [[ $DUPLICATE_QUIET -eq 1 ]] && return + local level="$1" + shift + case "$level" in + INFO) info "$@" ;; + SUCCESS) success "$@" ;; + WARNING) warning "$@" ;; + ERROR) error "$@" ;; + esac +} + +function validateDuplicateName() { + local name="$1" + + if [[ -z "$name" ]]; then + error "Duplicate environment name is required" + return 1 + fi + + # Check if name is valid (alphanumeric, hyphens, underscores) + if [[ ! "$name" =~ ^[a-zA-Z0-9_-]+$ ]]; then + error "Environment name must contain only letters, numbers, hyphens, and underscores" + return 1 + fi + + # Check if directory already exists + local target_dir="$(dirname "$(pwd)")/${name}" + if [[ -d "$target_dir" ]] && [[ $DUPLICATE_FORCE -eq 0 ]]; then + error "Directory $target_dir already exists. Use --force to overwrite." 
+ return 1 + fi + + return 0 +} + +function createBackupForDuplicate() { + local step="$1" + local total="$2" + + showProgress $step $total "Creating backup of current environment" + + local backup_args=("all") + + # Note: We don't include source code in backup since we copy it directly + # This makes backup/restore faster and more reliable for large codebases + if [[ $DUPLICATE_INCLUDE_SOURCE -eq 1 ]]; then + logMessage INFO "Source code will be copied directly (not via backup)" + fi + + if [[ -n "$DUPLICATE_ENCRYPT" ]]; then + if [[ "$DUPLICATE_ENCRYPT" == "PROMPT" ]]; then + backup_args+=("--encrypt") + else + backup_args+=("--encrypt=${DUPLICATE_ENCRYPT}") + fi + fi + + # Add duplication parameters to modify environment names in backup + backup_args+=("--duplicate-name=${DUPLICATE_NAME}") + backup_args+=("--duplicate-domain=${DUPLICATE_NAME}.test") + + backup_args+=("--output-id") + backup_args+=("--name=duplicate-data") + backup_args+=("--description=Data backup created for duplicating to ${DUPLICATE_NAME}") + + if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would create backup with: roll backup ${backup_args[*]}" + echo "latest-backup-id" + return 0 + fi + + # Create backup and get the backup ID directly + local backup_id + if backup_id=$("${ROLL_DIR}/bin/roll" backup "${backup_args[@]}" 2>/dev/null); then + # Remove any whitespace + backup_id=$(echo "$backup_id" | tr -d ' \n') + + if [[ -n "$backup_id" ]]; then + logMessage SUCCESS "Data backup created with ID: $backup_id" + echo "$backup_id" + return 0 + else + logMessage ERROR "Failed to get backup ID from backup command" + return 1 + fi + else + logMessage ERROR "Failed to create backup" + return 1 + fi +} + +function setupNewEnvironment() { + local new_name="$1" + local step="$2" + local total="$3" + + showProgress $step $total "Setting up new environment directory" + + local current_dir="$(pwd)" + local target_dir="$(dirname "$current_dir")/${new_name}" + + if [[ $DUPLICATE_DRY_RUN 
-eq 1 ]]; then + logMessage INFO "[DRY RUN] Would create directory: $target_dir" + logMessage INFO "[DRY RUN] Would copy environment files and source code" + return 0 + fi + + # Remove target directory if it exists and force is enabled + if [[ -d "$target_dir" ]] && [[ $DUPLICATE_FORCE -eq 1 ]]; then + logMessage INFO "Removing existing directory: $target_dir" + rm -rf "$target_dir" + fi + + # Create new directory + mkdir -p "$target_dir" + + # Copy all source code files and directories, excluding certain paths + logMessage INFO "Copying source code files..." + local exclude_patterns=( + "--exclude=.roll/backups" + "--exclude=var/cache" + "--exclude=var/log" + "--exclude=var/session" + "--exclude=var/tmp" + "--exclude=storage/logs" + "--exclude=storage/framework/cache" + "--exclude=storage/framework/sessions" + "--exclude=storage/framework/views" + "--exclude=node_modules" + "--exclude=vendor/bin" + "--exclude=*.log" + ) + + # Use rsync for efficient copying with exclusions + if command -v rsync >/dev/null 2>&1; then + rsync -a "${exclude_patterns[@]}" "$current_dir/" "$target_dir/" + logMessage SUCCESS "Source code copied using rsync" + else + # Fallback to cp if rsync is not available + cp -r "$current_dir"/* "$target_dir/" 2>/dev/null || true + cp -r "$current_dir"/.* "$target_dir/" 2>/dev/null || true + + # Remove excluded directories if they were copied + rm -rf "$target_dir/.roll/backups" 2>/dev/null || true + rm -rf "$target_dir/var/cache" 2>/dev/null || true + rm -rf "$target_dir/var/log" 2>/dev/null || true + rm -rf "$target_dir/node_modules" 2>/dev/null || true + + logMessage SUCCESS "Source code copied using cp" + fi + + # Ensure .roll/backups directory exists but is empty + mkdir -p "$target_dir/.roll/backups" + + # Update environment name in .env.roll + if [[ -f "$target_dir/.env.roll" ]]; then + sed_inplace "s/^ROLL_ENV_NAME=.*/ROLL_ENV_NAME=${new_name}/" "$target_dir/.env.roll" + sed_inplace "s/^TRAEFIK_DOMAIN=.*/TRAEFIK_DOMAIN=${new_name}.test/" 
"$target_dir/.env.roll" + logMessage INFO "Updated ROLL_ENV_NAME to: $new_name" + logMessage INFO "Updated TRAEFIK_DOMAIN to: ${new_name}.test" + fi + + logMessage SUCCESS "New environment directory created: $target_dir" + echo "$target_dir" +} + +function restoreBackupToNewEnvironment() { + local backup_id="$1" + local target_dir="$2" + local step="$3" + local total="$4" + + showProgress $step $total "Restoring backup to new environment" + + if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would restore backup $backup_id to $target_dir" + return 0 + fi + + # Verify target directory exists before changing to it + if [[ ! -d "$target_dir" ]]; then + logMessage ERROR "Target directory does not exist: $target_dir" + return 1 + fi + + # Copy backup archive from original environment to new environment + local source_backup_dir="$(pwd)/.roll/backups" + local target_backup_dir="$target_dir/.roll/backups" + + # Find the backup archive in the source directory + local backup_archive="" + for ext in ".tar.gz" ".tar.xz" ".tar.lz4" ".tar"; do + local potential_file="$source_backup_dir/backup_${ROLL_ENV_NAME}_${backup_id}${ext}" + if [[ -f "$potential_file" ]]; then + backup_archive="$potential_file" + break + fi + done + + if [[ -z "$backup_archive" ]]; then + logMessage ERROR "Backup archive not found for ID: $backup_id in $source_backup_dir" + return 1 + fi + + # Ensure target backup directory exists + mkdir -p "$target_backup_dir" + + # Copy backup archive to new environment + local archive_name="$(basename "$backup_archive")" + logMessage INFO "Copying backup archive: $archive_name" + if ! cp "$backup_archive" "$target_backup_dir/"; then + logMessage ERROR "Failed to copy backup archive to new environment" + return 1 + fi + + # Change to target directory for restore + if ! 
cd "$target_dir"; then + logMessage ERROR "Failed to change to directory: $target_dir" + return 1 + fi + + local restore_args=("--backup-id=${backup_id}") + + if [[ -n "$DUPLICATE_ENCRYPT" ]]; then + if [[ "$DUPLICATE_ENCRYPT" == "PROMPT" ]]; then + restore_args+=("--decrypt") + else + restore_args+=("--decrypt=${DUPLICATE_ENCRYPT}") + fi + fi + + restore_args+=("--force") + restore_args+=("--quiet") + + if "${ROLL_DIR}/bin/roll" restore "${restore_args[@]}"; then + logMessage SUCCESS "Backup restored to new environment" + return 0 + else + logMessage ERROR "Failed to restore backup to new environment" + return 1 + fi +} + +function generateNewCertificates() { + local new_name="$1" + local target_dir="$2" + local step="$3" + local total="$4" + + showProgress $step $total "Generating new SSL certificates" + + if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would generate new certificates for *.${new_name}.test" + return 0 + fi + + # Change to target directory + cd "$target_dir" + + # Generate wildcard certificates for the new domain + if "${ROLL_DIR}/bin/roll" sign-certificate "*.${new_name}.test" >/dev/null 2>&1; then + logMessage SUCCESS "New SSL certificates generated for *.${new_name}.test" + return 0 + else + logMessage WARNING "Failed to generate SSL certificates (you may need to do this manually)" + return 0 # Don't fail the whole process for certificate issues + fi +} + +function updateDatabaseUrls() { + local new_name="$1" + local target_dir="$2" + local step="$3" + local total="$4" + + if [[ $DUPLICATE_UPDATE_URLS -eq 0 ]]; then + return 0 + fi + + showProgress $step $total "Updating database URLs" + + if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would update URLs in database to use ${new_name}.test" + return 0 + fi + + # Change to target directory + cd "$target_dir" + + # Start the environment to update URLs + "${ROLL_DIR}/bin/roll" env up -d >/dev/null 2>&1 + + # Wait for services to be ready + sleep 5 + + # 
Update URLs based on environment type + local env_type="" + if [[ -f ".env.roll" ]]; then + env_type=$(grep "^ROLL_ENV_TYPE=" .env.roll | cut -d= -f2) + fi + + case "$env_type" in + magento2) + updateMagento2Urls "$new_name" + ;; + magento1) + updateMagento1Urls "$new_name" + ;; + wordpress) + updateWordPressUrls "$new_name" + ;; + *) + logMessage INFO "Unknown environment type, skipping URL updates" + ;; + esac + + logMessage SUCCESS "Database URLs updated" +} + +function updateMagento2Urls() { + local new_name="$1" + local new_url="https://app.${new_name}.test/" + + # Update core_config_data + echo "UPDATE core_config_data SET value = '${new_url}' WHERE path IN ('web/unsecure/base_url', 'web/secure/base_url');" | \ + "${ROLL_DIR}/bin/roll" db import >/dev/null 2>&1 + + logMessage INFO "Updated Magento 2 URLs to: $new_url" +} + +function updateMagento1Urls() { + local new_name="$1" + local new_url="https://app.${new_name}.test/" + + # Update core_config_data + echo "UPDATE core_config_data SET value = '${new_url}' WHERE path IN ('web/unsecure/base_url', 'web/secure/base_url');" | \ + "${ROLL_DIR}/bin/roll" db import >/dev/null 2>&1 + + logMessage INFO "Updated Magento 1 URLs to: $new_url" +} + +function updateWordPressUrls() { + local new_name="$1" + local new_url="https://${new_name}.test" + + # Update WordPress options + echo "UPDATE wp_options SET option_value = '${new_url}' WHERE option_name IN ('home', 'siteurl');" | \ + "${ROLL_DIR}/bin/roll" db import >/dev/null 2>&1 + + logMessage INFO "Updated WordPress URLs to: $new_url" +} + +function startNewEnvironment() { + local target_dir="$1" + local step="$2" + local total="$3" + + if [[ $DUPLICATE_START_ENV -eq 0 ]]; then + return 0 + fi + + showProgress $step $total "Starting new environment" + + if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would start new environment" + return 0 + fi + + # Change to target directory + cd "$target_dir" + + # Start the environment + if 
"${ROLL_DIR}/bin/roll" env up -d >/dev/null 2>&1; then + logMessage SUCCESS "New environment started successfully" + return 0 + else + logMessage ERROR "Failed to start new environment" + return 1 + fi +} + +function performDuplicate() { + local new_name="$1" + + # Validate inputs + validateDuplicateName "$new_name" || exit 1 + + # Handle interactive password prompt if needed + if [[ "$DUPLICATE_ENCRYPT" == "PROMPT" ]]; then + DUPLICATE_ENCRYPT=$(promptPassword "Enter encryption password for backup") + fi + + local current_env_name="$ROLL_ENV_NAME" + local total_steps=6 + local current_step=0 + + logMessage INFO "Duplicating environment '$current_env_name' to '$new_name'" + + # Step 1: Create backup + ((current_step++)) + local backup_id + if ! backup_id=$(createBackupForDuplicate $current_step $total_steps); then + logMessage ERROR "Failed to create backup" + exit 1 + fi + + # Step 2: Setup new environment directory + ((current_step++)) + local target_dir + if ! target_dir=$(setupNewEnvironment "$new_name" $current_step $total_steps); then + logMessage ERROR "Failed to setup new environment directory" + exit 1 + fi + + # Step 3: Restore backup to new environment + ((current_step++)) + if ! restoreBackupToNewEnvironment "$backup_id" "$target_dir" $current_step $total_steps; then + logMessage ERROR "Failed to restore backup to new environment" + exit 1 + fi + + # Step 4: Generate new certificates + ((current_step++)) + generateNewCertificates "$new_name" "$target_dir" $current_step $total_steps + + # Step 5: Update database URLs + ((current_step++)) + updateDatabaseUrls "$new_name" "$target_dir" $current_step $total_steps + + # Step 6: Start new environment + ((current_step++)) + startNewEnvironment "$target_dir" $current_step $total_steps + + if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then + logMessage SUCCESS "Dry run completed successfully!" 
+ logMessage INFO "Target directory would be: $target_dir" + else + logMessage SUCCESS "Environment duplication completed successfully!" + logMessage SUCCESS "New environment '$new_name' is ready at: $target_dir" + logMessage INFO "You can access it at: https://app.${new_name}.test" + logMessage INFO "To switch to the new environment: cd $target_dir" + fi +} + +# Main execution +if [[ -z "$DUPLICATE_NAME" ]]; then + error "Environment name is required. Usage: roll duplicate " + echo "Example: roll duplicate moduleshop-upgrade" + exit 1 +fi + +performDuplicate "$DUPLICATE_NAME" \ No newline at end of file diff --git a/commands/duplicate.help b/commands/duplicate.help new file mode 100644 index 0000000..9a4aca6 --- /dev/null +++ b/commands/duplicate.help @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +ROLL_USAGE=$(cat < [options] + +\033[33mDescription:\033[0m + Duplicates the current Roll environment to a new environment with a different name. 
+ +\033[33mArguments:\033[0m + Name for the new environment (required) + +\033[33mOptions:\033[0m + -h, --help Display this help menu + -q, --quiet Suppress output messages + -f, --force Overwrite existing target directory + --dry-run Show what would be done without executing + --encrypt Encrypt backup with interactive password prompt + --no-source Don't include source code in duplication + --no-start Don't start the new environment automatically + +\033[33mExamples:\033[0m + duplicate moduleshop-upgrade # Basic duplication + duplicate moduleshop-staging --encrypt # With encryption + duplicate moduleshop-upgrade --dry-run # Preview what would happen + +\033[33mNotes:\033[0m + • Creates new environment in ../new-environment-name/ + • Generates new SSL certificates automatically + • Updates database URLs for new environment +EOF +) \ No newline at end of file diff --git a/docs/backup-restore.md b/docs/backup-restore.md index ae82bcb..62fff64 100644 --- a/docs/backup-restore.md +++ b/docs/backup-restore.md @@ -339,11 +339,20 @@ This ensures seamless upgrades and backward compatibility with existing backup a RollDev supports GPG encryption for backup files using AES256 cipher with passphrase protection: ```bash -# Create encrypted backup +# Create encrypted backup with explicit password roll backup --encrypt=mypassword -# Restore encrypted backup +# Create encrypted backup with interactive prompt (recommended) +roll backup --encrypt + +# Restore encrypted backup with explicit password roll restore --decrypt=mypassword + +# Restore encrypted backup with interactive prompt (recommended) +roll restore --decrypt + +# Automatic detection - encrypted backups prompt for password automatically +roll restore 1672531200 ``` ## Encryption Behavior @@ -352,6 +361,19 @@ roll restore --decrypt=mypassword - **Checksum Updates**: Checksums are automatically recalculated for encrypted files - **Verification**: Integrity verification works seamlessly with encrypted backups - 
**Security**: Uses GPG with AES256 cipher and compression +- **Auto-Detection**: Restore automatically detects encrypted backups and prompts for password +- **Interactive Prompts**: Use `--encrypt` or `--decrypt` without password to avoid command history + +## Security Best Practices + +```bash +# Recommended: Use interactive prompts to avoid passwords in command history +roll backup --encrypt # Will prompt securely for password +roll restore --decrypt # Will prompt securely for password + +# Avoid: Passwords visible in command history and process lists +roll backup --encrypt=mysecretpassword # NOT recommended for production +``` ## Troubleshooting Encryption @@ -359,13 +381,16 @@ If you encounter issues with encrypted backups: ```bash # Skip verification for problematic encrypted backups -roll backup --encrypt=password --no-verify +roll backup --encrypt --no-verify # Check GPG availability which gpg # Restore with explicit decryption roll restore --decrypt=password --backup-id=1672531200 + +# Test decryption in dry-run mode +roll restore --decrypt --dry-run ``` **Note**: Encrypted backups require the same passphrase for restoration. Store your passphrase securely! 
From e1cc791303f89534d9b92573dff0117170a74e5b Mon Sep 17 00:00:00 2001 From: Rick Date: Thu, 5 Jun 2025 11:58:37 +0200 Subject: [PATCH 14/69] Fix parsing newline in status and correct install doc url --- commands/install.cmd | 2 +- commands/status.cmd | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/commands/install.cmd b/commands/install.cmd index e8588b9..27f22cc 100644 --- a/commands/install.cmd +++ b/commands/install.cmd @@ -62,7 +62,7 @@ if [[ "$OSTYPE" == "darwin"* ]]; then echo "nameserver 127.0.0.1" | sudo tee /etc/resolver/test >/dev/null fi else - warning "Manual configuration required for Automatic DNS resolution: ttps://dockergiant.github.io/rolldev/configuration/dns-resolver.html" + warning "Manual configuration required for Automatic DNS resolution: https://dockergiant.github.io/rolldev/configuration/dns-resolver.html" fi ## generate rsa keypair for authenticating to roll sshd service diff --git a/commands/status.cmd b/commands/status.cmd index 45c3654..4e005e2 100644 --- a/commands/status.cmd +++ b/commands/status.cmd @@ -28,10 +28,10 @@ for projectNetwork in "${projectNetworkList[@]}"; do [[ -z "${container}" ]] && continue # Project is not running, skip it projectDir=$(docker container inspect --format '{{ index .Config.Labels "com.docker.compose.project.working_dir"}}' "$container") - projectName=$(cat "${projectDir}/.env.roll" | grep '^ROLL_ENV_NAME=' | sed -e 's/ROLL_ENV_NAME=[[:space:]]*//g' | tr -d -) - projectType=$(cat "${projectDir}/.env.roll" | grep '^ROLL_ENV_TYPE=' | sed -e 's/ROLL_ENV_TYPE=[[:space:]]*//g' | tr -d -) - traefikDomain=$(cat "${projectDir}/.env.roll" | grep '^TRAEFIK_DOMAIN=' | sed -e 's/TRAEFIK_DOMAIN=[[:space:]]*//g' | tr -d -) - traefikSubDomain=$(cat "${projectDir}/.env.roll" | grep '^TRAEFIK_SUBDOMAIN=' | sed -e 's/TRAEFIK_SUBDOMAIN=[[:space:]]*//g' | tr -d -) + projectName=$(cat "${projectDir}/.env.roll" | grep '^ROLL_ENV_NAME=' | sed -e 's/ROLL_ENV_NAME=[[:space:]]*//g' | tr -d '\r') + 
projectType=$(cat "${projectDir}/.env.roll" | grep '^ROLL_ENV_TYPE=' | sed -e 's/ROLL_ENV_TYPE=[[:space:]]*//g' | tr -d '\r') + traefikDomain=$(cat "${projectDir}/.env.roll" | grep '^TRAEFIK_DOMAIN=' | sed -e 's/TRAEFIK_DOMAIN=[[:space:]]*//g' | tr -d '\r') + traefikSubDomain=$(cat "${projectDir}/.env.roll" | grep '^TRAEFIK_SUBDOMAIN=' | sed -e 's/TRAEFIK_SUBDOMAIN=[[:space:]]*//g' | tr -d '\r') messageList+=(" \033[1;35m${projectName}\033[0m a \033[36m${projectType}\033[0m project") messageList+=(" Project Directory: \033[33m${projectDir}\033[0m") From d05afb57cf21033c758a61da2ef7b81430423377 Mon Sep 17 00:00:00 2001 From: Rick Date: Thu, 5 Jun 2025 12:04:26 +0200 Subject: [PATCH 15/69] docs: fix typo in redis usage --- docs/usage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/usage.md b/docs/usage.md index f123d1a..09b48b0 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -45,7 +45,7 @@ Flush redis completely: roll redis flushall -Run redis continous stat mode +Run redis continuous stat mode roll redis --stat From 1bfee0cd78b3b7ef42b354be33af034002d20230 Mon Sep 17 00:00:00 2001 From: Rick Date: Thu, 5 Jun 2025 12:04:35 +0200 Subject: [PATCH 16/69] Fix mongodb env vars --- environments/includes/mongodb.base.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/environments/includes/mongodb.base.yml b/environments/includes/mongodb.base.yml index 7ecad5e..f6342f0 100644 --- a/environments/includes/mongodb.base.yml +++ b/environments/includes/mongodb.base.yml @@ -7,8 +7,8 @@ services: hostname: "${ROLL_ENV_NAME}-mongodb" image: ${ROLL_IMAGE_REPOSITORY}/mongo:${MONGODB_VERSION:-7} environment: - - MONGO_INITDB_ROOT_USERNAME=${MONGODB_ROOT_PASSWORD:-app} - - MONGO_INITDB_ROOT_PASSWORD=${MONGODB_ROOT_USER:-app} + - MONGO_INITDB_ROOT_USERNAME=${MONGODB_ROOT_USER:-app} + - MONGO_INITDB_ROOT_PASSWORD=${MONGODB_ROOT_PASSWORD:-app} - MONGO_INITDB_DATABASE=${MONGODB_DATABASE:-app} volumes: - mongodb:/data/db From 
0e5544bcec26c119fed555a00a361e06555cf80e Mon Sep 17 00:00:00 2001 From: Rick Date: Thu, 5 Jun 2025 12:04:43 +0200 Subject: [PATCH 17/69] Fix comment typo --- commands/env.cmd | 2 +- commands/svc.cmd | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/env.cmd b/commands/env.cmd index cf05ab7..30e063c 100644 --- a/commands/env.cmd +++ b/commands/env.cmd @@ -234,7 +234,7 @@ then roll sync pause fi -## pass ochestration through to docker-compose +## pass orchestration through to docker-compose docker compose \ --env-file "${ROLL_ENV_PATH}/.env.roll" --project-directory "${ROLL_ENV_PATH}" -p "${ROLL_ENV_NAME}" \ "${DOCKER_COMPOSE_ARGS[@]}" "${ROLL_PARAMS[@]}" "$@" diff --git a/commands/svc.cmd b/commands/svc.cmd index 1477b7c..2f4b752 100644 --- a/commands/svc.cmd +++ b/commands/svc.cmd @@ -92,7 +92,7 @@ fi ROLL_VERSION=$(cat ${ROLL_DIR}/version) -## pass ochestration through to docker-compose +## pass orchestration through to docker-compose ROLL_VERSION=${ROLL_VERSION:-"in-dev"} docker compose \ --project-directory "${ROLL_HOME_DIR}" -p roll \ "${DOCKER_COMPOSE_ARGS[@]}" "${ROLL_PARAMS[@]}" "$@" From 446e2f49c23c51b876d9f524322f5d8ca737f05a Mon Sep 17 00:00:00 2001 From: Rick Date: Thu, 5 Jun 2025 12:04:58 +0200 Subject: [PATCH 18/69] Add shellcheck workflow --- .github/workflows/shellcheck.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 .github/workflows/shellcheck.yml diff --git a/.github/workflows/shellcheck.yml b/.github/workflows/shellcheck.yml new file mode 100644 index 0000000..5c10f86 --- /dev/null +++ b/.github/workflows/shellcheck.yml @@ -0,0 +1,24 @@ +name: ShellCheck +on: + push: + paths: + - 'commands/*.cmd' + - 'utils/*.sh' + - '.github/workflows/shellcheck.yml' + pull_request: + paths: + - 'commands/*.cmd' + - 'utils/*.sh' + - '.github/workflows/shellcheck.yml' + workflow_dispatch: + +jobs: + shellcheck: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: 
actions/checkout@v3 + - name: Install shellcheck + run: sudo apt-get update && sudo apt-get install -y shellcheck + - name: Run shellcheck + run: shellcheck commands/*.cmd utils/*.sh From 196fe194030ff6acb3dd42d075d040ef55dc6e65 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Thu, 5 Jun 2025 12:29:15 +0200 Subject: [PATCH 19/69] fixed issues with backup and restoring + full duplication command working --- commands/backup.cmd | 82 +++++-- commands/duplicate.cmd | 528 +++++++++++++++++++++++++++-------------- commands/restore.cmd | 101 +++++--- 3 files changed, 480 insertions(+), 231 deletions(-) diff --git a/commands/backup.cmd b/commands/backup.cmd index 0734d5d..1165d24 100755 --- a/commands/backup.cmd +++ b/commands/backup.cmd @@ -320,33 +320,72 @@ function backupVolume() { return 1 # Return failure so this service won't be included in successful_services fi - # Create backup with appropriate user permissions based on service - local user_id="0:0" # Default to root - case "$service_name" in - elasticsearch|opensearch) user_id="1000:1000" ;; - mysql|mariadb|postgres) user_id="999:999" ;; + # Use the same approach as the original working backup script + # Switch back to ubuntu and use the original tar command structure + local tar_compression_flag="" + case "$BACKUP_COMPRESSION" in + gzip) tar_compression_flag="z" ;; + xz) tar_compression_flag="J" ;; + lz4) tar_compression_flag="" ;; # lz4 doesn't have direct tar support, fallback to pipe + none) tar_compression_flag="" ;; esac - local tar_cmd="tar -cf - /data" - if [[ $BACKUP_EXCLUDE_LOGS -eq 1 ]]; then - tar_cmd="tar --exclude='*.log' --exclude='*log*' --exclude='*.tmp' -cf - /data" - fi + # Create backup directory for volume if it doesn't exist + mkdir -p "$backup_dir/volumes" - # Execute backup with progress and error handling - if docker run --rm --name "$temp_container" \ - --mount source="$full_volume_name",target=/data,readonly \ - --user "$user_id" \ - alpine:latest \ - sh -c "$tar_cmd" | 
$(getCompressionCommand) > "$output_file" 2>/dev/null; then + # Execute backup with the original working approach - use ubuntu and direct tar compression + if [[ "$BACKUP_COMPRESSION" == "lz4" ]]; then + # Handle lz4 separately since tar doesn't support it directly + if [[ $BACKUP_OUTPUT_ID -eq 1 ]]; then + docker run --rm --name "$temp_container" \ + --mount source="$full_volume_name",target=/data \ + -v "$backup_dir/volumes":/backup \ + ubuntu bash \ + -c "tar -cf - /data | lz4 -9 > /backup/${service_name}.tar.lz4" >/dev/null 2>&1 + else + docker run --rm --name "$temp_container" \ + --mount source="$full_volume_name",target=/data \ + -v "$backup_dir/volumes":/backup \ + ubuntu bash \ + -c "tar -cf - /data | lz4 -9 > /backup/${service_name}.tar.lz4" + fi + else + # Use original working approach for gzip, xz, and none + local tar_cmd="tar -c${tar_compression_flag}vf /backup/${service_name}$(getCompressionExtension) /data" + if [[ $BACKUP_EXCLUDE_LOGS -eq 1 ]]; then + tar_cmd="tar -c${tar_compression_flag}vf /backup/${service_name}$(getCompressionExtension) --exclude='*.log' --exclude='*_log' --exclude='log_*' --exclude='*.tmp' /data" + fi + if [[ $BACKUP_OUTPUT_ID -eq 1 ]]; then + # Suppress all output when using --output-id + docker run --rm --name "$temp_container" \ + --mount source="$full_volume_name",target=/data \ + -v "$backup_dir/volumes":/backup \ + ubuntu bash \ + -c "$tar_cmd" >/dev/null 2>&1 + else + docker run --rm --name "$temp_container" \ + --mount source="$full_volume_name",target=/data \ + -v "$backup_dir/volumes":/backup \ + ubuntu bash \ + -c "$tar_cmd" + fi + fi + + # Check if backup was successful + if [[ $? 
-eq 0 && -f "$backup_dir/volumes/${service_name}$(getCompressionExtension)" ]]; then # Generate checksum - local checksum=$(sha256sum "$output_file" | cut -d' ' -f1) + local checksum=$(sha256sum "$backup_dir/volumes/${service_name}$(getCompressionExtension)" | cut -d' ' -f1) echo "$checksum volumes/${service_name}$(getCompressionExtension)" >> "$backup_dir/metadata/checksums.sha256" - logMessage SUCCESS "Successfully backed up $service_name volume ($(du -h "$output_file" | cut -f1))" + if [[ $BACKUP_OUTPUT_ID -eq 0 ]]; then + logMessage SUCCESS "Successfully backed up $service_name volume ($(du -h "$backup_dir/volumes/${service_name}$(getCompressionExtension)" | cut -f1))" + fi return 0 else - logMessage ERROR "Failed to backup $service_name volume" + if [[ $BACKUP_OUTPUT_ID -eq 0 ]]; then + logMessage ERROR "Failed to backup $service_name volume" + fi return 1 fi } @@ -768,7 +807,12 @@ function performBackup() { local archive_name="backup_${ROLL_ENV_NAME}_${timestamp}$(getCompressionExtension)" logMessage INFO "Creating final backup archive: $archive_name" - (cd "$(pwd)/.roll/backups" && tar -cf - "$timestamp" | $(getCompressionCommand) > "$archive_name") + # Suppress tar warnings when using --output-id + if [[ $BACKUP_OUTPUT_ID -eq 1 ]]; then + (cd "$(pwd)/.roll/backups" && tar -cf - "$timestamp" 2>/dev/null | $(getCompressionCommand) > "$archive_name") + else + (cd "$(pwd)/.roll/backups" && tar -cf - "$timestamp" | $(getCompressionCommand) > "$archive_name") + fi if [[ $? 
-eq 0 ]]; then # Update latest symlink diff --git a/commands/duplicate.cmd b/commands/duplicate.cmd index 963c342..81400d6 100644 --- a/commands/duplicate.cmd +++ b/commands/duplicate.cmd @@ -12,10 +12,11 @@ DUPLICATE_INCLUDE_SOURCE=1 DUPLICATE_ENCRYPT="" DUPLICATE_START_ENV=1 DUPLICATE_UPDATE_URLS=1 +DUPLICATE_RUN_MAGENTO_COMMANDS=1 DUPLICATE_DRY_RUN=0 DUPLICATE_QUIET=0 DUPLICATE_FORCE=0 -PROGRESS=1 +DUPLICATE_VERBOSE=0 # Parse command line arguments POSITIONAL_ARGS=() @@ -57,15 +58,18 @@ while [[ $# -gt 0 ]]; do ;; --quiet|-q) DUPLICATE_QUIET=1 - PROGRESS=0 shift ;; --force|-f) DUPLICATE_FORCE=1 shift ;; - --no-progress) - PROGRESS=0 + --no-magento-commands) + DUPLICATE_RUN_MAGENTO_COMMANDS=0 + shift + ;; + --verbose) + DUPLICATE_VERBOSE=1 shift ;; --) @@ -126,24 +130,6 @@ function promptPassword() { echo "$password" } -function showProgress() { - [[ $PROGRESS -eq 0 ]] && return - local current=$1 - local total=$2 - local description="$3" - local percent=$((current * 100 / total)) - local bar_length=30 - local filled_length=$((percent * bar_length / 100)) - - printf "\r[" - printf "%*s" $filled_length | tr ' ' '=' - printf "%*s" $((bar_length - filled_length)) | tr ' ' '-' - printf "] %d%% %s" $percent "$description" - - # Always end with a newline for clean output - echo "" -} - function logMessage() { [[ $DUPLICATE_QUIET -eq 1 ]] && return local level="$1" @@ -180,16 +166,40 @@ function validateDuplicateName() { return 0 } -function createBackupForDuplicate() { +function executeCommand() { + local description="$1" + shift + local cmd=("$@") + + if [[ $DUPLICATE_VERBOSE -eq 1 ]]; then + # In verbose mode, show what we're doing and don't trap output + logMessage INFO "$description" + "${cmd[@]}" + local exit_code=$? 
+ if [[ $exit_code -ne 0 ]]; then + logMessage ERROR "$description failed with exit code: $exit_code" + fi + return $exit_code + else + # In non-verbose mode, run silently unless there's an error + local output + if output=$("${cmd[@]}" 2>&1); then + return 0 + else + local exit_code=$? + logMessage ERROR "$description failed with exit code: $exit_code" + logMessage ERROR "Output: $output" + return $exit_code + fi + fi +} + +function createBackup() { local step="$1" local total="$2" - showProgress $step $total "Creating backup of current environment" - local backup_args=("all") - # Note: We don't include source code in backup since we copy it directly - # This makes backup/restore faster and more reliable for large codebases if [[ $DUPLICATE_INCLUDE_SOURCE -eq 1 ]]; then logMessage INFO "Source code will be copied directly (not via backup)" fi @@ -205,7 +215,6 @@ function createBackupForDuplicate() { # Add duplication parameters to modify environment names in backup backup_args+=("--duplicate-name=${DUPLICATE_NAME}") backup_args+=("--duplicate-domain=${DUPLICATE_NAME}.test") - backup_args+=("--output-id") backup_args+=("--name=duplicate-data") backup_args+=("--description=Data backup created for duplicating to ${DUPLICATE_NAME}") @@ -218,147 +227,77 @@ function createBackupForDuplicate() { # Create backup and get the backup ID directly local backup_id - if backup_id=$("${ROLL_DIR}/bin/roll" backup "${backup_args[@]}" 2>/dev/null); then - # Remove any whitespace - backup_id=$(echo "$backup_id" | tr -d ' \n') - - if [[ -n "$backup_id" ]]; then - logMessage SUCCESS "Data backup created with ID: $backup_id" - echo "$backup_id" - return 0 - else - logMessage ERROR "Failed to get backup ID from backup command" - return 1 - fi - else - logMessage ERROR "Failed to create backup" - return 1 - fi -} - -function setupNewEnvironment() { - local new_name="$1" - local step="$2" - local total="$3" - - showProgress $step $total "Setting up new environment directory" + local 
backup_exit_code - local current_dir="$(pwd)" - local target_dir="$(dirname "$current_dir")/${new_name}" + logMessage INFO "Creating backup..." - if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then - logMessage INFO "[DRY RUN] Would create directory: $target_dir" - logMessage INFO "[DRY RUN] Would copy environment files and source code" - return 0 - fi - - # Remove target directory if it exists and force is enabled - if [[ -d "$target_dir" ]] && [[ $DUPLICATE_FORCE -eq 1 ]]; then - logMessage INFO "Removing existing directory: $target_dir" - rm -rf "$target_dir" - fi - - # Create new directory - mkdir -p "$target_dir" - - # Copy all source code files and directories, excluding certain paths - logMessage INFO "Copying source code files..." - local exclude_patterns=( - "--exclude=.roll/backups" - "--exclude=var/cache" - "--exclude=var/log" - "--exclude=var/session" - "--exclude=var/tmp" - "--exclude=storage/logs" - "--exclude=storage/framework/cache" - "--exclude=storage/framework/sessions" - "--exclude=storage/framework/views" - "--exclude=node_modules" - "--exclude=vendor/bin" - "--exclude=*.log" - ) - - # Use rsync for efficient copying with exclusions - if command -v rsync >/dev/null 2>&1; then - rsync -a "${exclude_patterns[@]}" "$current_dir/" "$target_dir/" - logMessage SUCCESS "Source code copied using rsync" + # The --output-id flag outputs ONLY the backup ID (no warnings) + if backup_id=$("${ROLL_DIR}/bin/roll" backup "${backup_args[@]}" 2>&1); then + backup_exit_code=0 + # Remove any whitespace (should just be a number) + backup_id=$(echo "$backup_id" | tr -d ' \n\r\t') else - # Fallback to cp if rsync is not available - cp -r "$current_dir"/* "$target_dir/" 2>/dev/null || true - cp -r "$current_dir"/.* "$target_dir/" 2>/dev/null || true - - # Remove excluded directories if they were copied - rm -rf "$target_dir/.roll/backups" 2>/dev/null || true - rm -rf "$target_dir/var/cache" 2>/dev/null || true - rm -rf "$target_dir/var/log" 2>/dev/null || true - rm -rf 
"$target_dir/node_modules" 2>/dev/null || true - - logMessage SUCCESS "Source code copied using cp" + backup_exit_code=$? + backup_id="" fi - # Ensure .roll/backups directory exists but is empty - mkdir -p "$target_dir/.roll/backups" - - # Update environment name in .env.roll - if [[ -f "$target_dir/.env.roll" ]]; then - sed_inplace "s/^ROLL_ENV_NAME=.*/ROLL_ENV_NAME=${new_name}/" "$target_dir/.env.roll" - sed_inplace "s/^TRAEFIK_DOMAIN=.*/TRAEFIK_DOMAIN=${new_name}.test/" "$target_dir/.env.roll" - logMessage INFO "Updated ROLL_ENV_NAME to: $new_name" - logMessage INFO "Updated TRAEFIK_DOMAIN to: ${new_name}.test" + if [[ $backup_exit_code -eq 0 ]] && [[ -n "$backup_id" ]] && [[ "$backup_id" =~ ^[0-9]+$ ]]; then + logMessage SUCCESS "Data backup created with ID: $backup_id" + echo "$backup_id" + return 0 + else + logMessage ERROR "Failed to create backup or get valid backup ID" + logMessage ERROR "Backup command output: '$backup_id'" + logMessage ERROR "Exit code: $backup_exit_code" + return 1 fi - - logMessage SUCCESS "New environment directory created: $target_dir" - echo "$target_dir" } -function restoreBackupToNewEnvironment() { +function restoreBackup() { local backup_id="$1" local target_dir="$2" - local step="$3" - local total="$4" - - showProgress $step $total "Restoring backup to new environment" + local original_dir="$3" + local step="$4" + local total="$5" if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then logMessage INFO "[DRY RUN] Would restore backup $backup_id to $target_dir" return 0 fi - # Verify target directory exists before changing to it + # Verify target directory exists if [[ ! 
-d "$target_dir" ]]; then logMessage ERROR "Target directory does not exist: $target_dir" return 1 fi - # Copy backup archive from original environment to new environment - local source_backup_dir="$(pwd)/.roll/backups" + # Verify backup file exists (should have been copied by previous step) local target_backup_dir="$target_dir/.roll/backups" - - # Find the backup archive in the source directory - local backup_archive="" - for ext in ".tar.gz" ".tar.xz" ".tar.lz4" ".tar"; do - local potential_file="$source_backup_dir/backup_${ROLL_ENV_NAME}_${backup_id}${ext}" - if [[ -f "$potential_file" ]]; then - backup_archive="$potential_file" - break - fi - done - - if [[ -z "$backup_archive" ]]; then - logMessage ERROR "Backup archive not found for ID: $backup_id in $source_backup_dir" + local backup_file="" + + # Just check the most common .tar.gz format directly + backup_file="${target_backup_dir}/backup_${ROLL_ENV_NAME}_${backup_id}.tar.gz" + + if [[ ! -f "$backup_file" ]]; then + # Try other extensions if .tar.gz doesn't exist + for ext in ".tar.xz" ".tar.lz4" ".tar"; do + local test_file="${target_backup_dir}/backup_${ROLL_ENV_NAME}_${backup_id}${ext}" + if [[ -f "$test_file" ]]; then + backup_file="$test_file" + break + fi + done + fi + + if [[ -z "$backup_file" ]]; then + logMessage ERROR "Backup file not found in target directory for ID: $backup_id" + logMessage ERROR "Expected location: $target_backup_dir" + logMessage ERROR "Target backup directory contents:" + ls -la "$target_backup_dir/" || logMessage ERROR "Failed to list target backup directory" return 1 fi - # Ensure target backup directory exists - mkdir -p "$target_backup_dir" - - # Copy backup archive to new environment - local archive_name="$(basename "$backup_archive")" - logMessage INFO "Copying backup archive: $archive_name" - if ! 
cp "$backup_archive" "$target_backup_dir/"; then - logMessage ERROR "Failed to copy backup archive to new environment" - return 1 - fi + logMessage SUCCESS "Backup file found in target directory: $(basename "$backup_file")" # Change to target directory for restore if ! cd "$target_dir"; then @@ -377,25 +316,25 @@ function restoreBackupToNewEnvironment() { fi restore_args+=("--force") - restore_args+=("--quiet") - if "${ROLL_DIR}/bin/roll" restore "${restore_args[@]}"; then + # Execute restore command + if executeCommand "Restoring backup" "${ROLL_DIR}/bin/roll" restore "${restore_args[@]}"; then logMessage SUCCESS "Backup restored to new environment" return 0 else + local restore_exit_code=$? logMessage ERROR "Failed to restore backup to new environment" + logMessage ERROR "Restore command exit code: $restore_exit_code" return 1 fi } -function generateNewCertificates() { +function generateCertificates() { local new_name="$1" local target_dir="$2" local step="$3" local total="$4" - showProgress $step $total "Generating new SSL certificates" - if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then logMessage INFO "[DRY RUN] Would generate new certificates for *.${new_name}.test" return 0 @@ -405,7 +344,7 @@ function generateNewCertificates() { cd "$target_dir" # Generate wildcard certificates for the new domain - if "${ROLL_DIR}/bin/roll" sign-certificate "*.${new_name}.test" >/dev/null 2>&1; then + if executeCommand "Generating SSL certificates" "${ROLL_DIR}/bin/roll" sign-certificate "*.${new_name}.test"; then logMessage SUCCESS "New SSL certificates generated for *.${new_name}.test" return 0 else @@ -424,8 +363,6 @@ function updateDatabaseUrls() { return 0 fi - showProgress $step $total "Updating database URLs" - if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then logMessage INFO "[DRY RUN] Would update URLs in database to use ${new_name}.test" return 0 @@ -435,7 +372,10 @@ function updateDatabaseUrls() { cd "$target_dir" # Start the environment to update URLs - "${ROLL_DIR}/bin/roll" env 
up -d >/dev/null 2>&1 + if ! executeCommand "Starting environment for URL updates" "${ROLL_DIR}/bin/roll" env up -d; then + logMessage ERROR "Failed to start environment for URL updates" + return 1 + fi # Wait for services to be ready sleep 5 @@ -468,20 +408,96 @@ function updateMagento2Urls() { local new_name="$1" local new_url="https://app.${new_name}.test/" - # Update core_config_data - echo "UPDATE core_config_data SET value = '${new_url}' WHERE path IN ('web/unsecure/base_url', 'web/secure/base_url');" | \ - "${ROLL_DIR}/bin/roll" db import >/dev/null 2>&1 + # Update core_config_data for base URLs + logMessage INFO "Updating Magento 2 URLs to: $new_url" + + executeCommand "Updating base URLs in database" \ + bash -c "echo \"UPDATE core_config_data SET value = '${new_url}' WHERE path IN ('web/unsecure/base_url', 'web/secure/base_url');\" | ${ROLL_DIR}/bin/roll db import" - logMessage INFO "Updated Magento 2 URLs to: $new_url" + executeCommand "Updating base link URLs in database" \ + bash -c "echo \"UPDATE core_config_data SET value = '${new_url}' WHERE path IN ('web/unsecure/base_link_url', 'web/secure/base_link_url');\" | ${ROLL_DIR}/bin/roll db import" + + # Update app/etc/env.php configuration + updateMagentoEnvPhp "$new_url" + + logMessage INFO "Updated both database and env.php configurations" + + # Wait for services to be fully ready + logMessage INFO "Waiting for Magento services to be ready..." 
+ sleep 10 + + # Check if PHP container is responding + local retry_count=0 + local max_retries=30 + while [ $retry_count -lt $max_retries ]; do + if executeCommand "Testing PHP container readiness" "${ROLL_DIR}/bin/roll" clinotty php -v; then + logMessage INFO "PHP container is ready" + break + fi + sleep 2 + ((retry_count++)) + done + + if [ $retry_count -eq $max_retries ]; then + logMessage WARNING "PHP container not ready after ${max_retries} attempts, skipping Magento commands" + return 0 + fi + + # Run post-duplication Magento commands + if [[ $DUPLICATE_RUN_MAGENTO_COMMANDS -eq 1 ]]; then + runMagentoCommands + else + logMessage INFO "Skipping Magento post-duplication commands (--no-magento-commands)" + fi +} + +function updateMagentoEnvPhp() { + local new_url="$1" + local env_php_file="app/etc/env.php" + + if [[ ! -f "$env_php_file" ]]; then + logMessage WARNING "app/etc/env.php not found, skipping env.php URL update" + return 0 + fi + + logMessage INFO "Updating URLs in app/etc/env.php" + + # Create backup + cp "$env_php_file" "${env_php_file}.backup.$(date +%s)" + + # Update base URLs in env.php using sed + sed_inplace "s|'base_url' => 'https://[^']*'|'base_url' => '${new_url}'|g" "$env_php_file" + + logMessage SUCCESS "Updated URLs in app/etc/env.php" +} + +function runMagentoCommands() { + logMessage INFO "Running Magento post-duplication commands..." 
+ + # Import app configuration + executeCommand "Executing app:config:import" "${ROLL_DIR}/bin/roll" magento app:config:import + + # Setup upgrade + executeCommand "Executing setup:upgrade" "${ROLL_DIR}/bin/roll" magento setup:upgrade + + # DI compilation + executeCommand "Executing setup:di:compile" "${ROLL_DIR}/bin/roll" magento setup:di:compile + + # Clean cache + executeCommand "Executing cache:clean" "${ROLL_DIR}/bin/roll" magento cache:clean + + # Flush cache + executeCommand "Executing cache:flush" "${ROLL_DIR}/bin/roll" magento cache:flush + + logMessage SUCCESS "Magento post-duplication commands completed" } function updateMagento1Urls() { local new_name="$1" local new_url="https://app.${new_name}.test/" - # Update core_config_data - echo "UPDATE core_config_data SET value = '${new_url}' WHERE path IN ('web/unsecure/base_url', 'web/secure/base_url');" | \ - "${ROLL_DIR}/bin/roll" db import >/dev/null 2>&1 + executeCommand "Updating Magento 1 URLs" \ + bash -c "echo \"UPDATE core_config_data SET value = '${new_url}' WHERE path IN ('web/unsecure/base_url', 'web/secure/base_url');\" | ${ROLL_DIR}/bin/roll db import" logMessage INFO "Updated Magento 1 URLs to: $new_url" } @@ -490,9 +506,8 @@ function updateWordPressUrls() { local new_name="$1" local new_url="https://${new_name}.test" - # Update WordPress options - echo "UPDATE wp_options SET option_value = '${new_url}' WHERE option_name IN ('home', 'siteurl');" | \ - "${ROLL_DIR}/bin/roll" db import >/dev/null 2>&1 + executeCommand "Updating WordPress URLs" \ + bash -c "echo \"UPDATE wp_options SET option_value = '${new_url}' WHERE option_name IN ('home', 'siteurl');\" | ${ROLL_DIR}/bin/roll db import" logMessage INFO "Updated WordPress URLs to: $new_url" } @@ -506,8 +521,6 @@ function startNewEnvironment() { return 0 fi - showProgress $step $total "Starting new environment" - if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then logMessage INFO "[DRY RUN] Would start new environment" return 0 @@ -517,7 +530,7 @@ function 
startNewEnvironment() { cd "$target_dir" # Start the environment - if "${ROLL_DIR}/bin/roll" env up -d >/dev/null 2>&1; then + if executeCommand "Starting new environment" "${ROLL_DIR}/bin/roll" env up -d; then logMessage SUCCESS "New environment started successfully" return 0 else @@ -526,6 +539,159 @@ function startNewEnvironment() { fi } +function setupNewEnvironment() { + local new_name="$1" + local step="$2" + local total="$3" + + local current_dir="$(pwd)" + local target_dir="$(dirname "$current_dir")/${new_name}" + + if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would create directory: $target_dir" + logMessage INFO "[DRY RUN] Would copy environment files and source code" + echo "$target_dir" + return 0 + fi + + # Remove target directory if it exists and force is enabled + if [[ -d "$target_dir" ]] && [[ $DUPLICATE_FORCE -eq 1 ]]; then + logMessage INFO "Removing existing directory: $target_dir" + rm -rf "$target_dir" + fi + + # Create new directory + mkdir -p "$target_dir" + + # Copy all source code files and directories, excluding certain paths + logMessage INFO "Copying source code files..." 
+    local exclude_patterns=(
+        "--exclude=.roll/backups"
+        "--exclude=var/cache"
+        "--exclude=var/log"
+        "--exclude=var/session"
+        "--exclude=var/tmp"
+        "--exclude=storage/logs"
+        "--exclude=storage/framework/cache"
+        "--exclude=storage/framework/sessions"
+        "--exclude=storage/framework/views"
+        "--exclude=node_modules"
+        "--exclude=vendor/bin"
+        "--exclude=*.log"
+    )
+
+    # Use rsync for efficient copying with exclusions
+    if command -v rsync >/dev/null 2>&1; then
+        if [[ $DUPLICATE_VERBOSE -eq 1 ]]; then
+            rsync -av "${exclude_patterns[@]}" "$current_dir/" "$target_dir/" >&2
+        else
+            rsync -a "${exclude_patterns[@]}" "$current_dir/" "$target_dir/" >/dev/null 2>&1
+        fi
+        logMessage SUCCESS "Source code copied using rsync"
+    else
+        # Fallback to cp if rsync is not available
+        cp -r "$current_dir"/* "$target_dir/" 2>/dev/null || true
+        cp -r "$current_dir"/.[!.]* "$current_dir"/..?* "$target_dir/" 2>/dev/null || true
+
+        # Remove excluded directories if they were copied
+        rm -rf "$target_dir/.roll/backups" 2>/dev/null || true
+        rm -rf "$target_dir/var/cache" 2>/dev/null || true
+        rm -rf "$target_dir/var/log" 2>/dev/null || true
+        rm -rf "$target_dir/node_modules" 2>/dev/null || true
+
+        logMessage SUCCESS "Source code copied using cp"
+    fi
+
+    # Ensure .roll/backups directory exists but is empty
+    mkdir -p "$target_dir/.roll/backups"
+
+    # Update environment name in .env.roll
+    if [[ -f "$target_dir/.env.roll" ]]; then
+        sed_inplace "s/^ROLL_ENV_NAME=.*/ROLL_ENV_NAME=${new_name}/" "$target_dir/.env.roll"
+        sed_inplace "s/^TRAEFIK_DOMAIN=.*/TRAEFIK_DOMAIN=${new_name}.test/" "$target_dir/.env.roll"
+        logMessage INFO "Updated ROLL_ENV_NAME to: $new_name"
+        logMessage INFO "Updated TRAEFIK_DOMAIN to: ${new_name}.test"
+    fi
+
+    logMessage SUCCESS "New environment directory created: $target_dir"
+    echo "$target_dir"
+}
+
+function copyBackupToNewEnvironment() {
+    local backup_id="$1"
+    local new_name="$2"
+    local current_dir="$3"
+    local step="$4"
+    local total="$5"
+
+    local target_dir="$(dirname 
"$current_dir")/${new_name}" + local source_backup_dir="$current_dir/.roll/backups" + local target_backup_dir="$target_dir/.roll/backups" + + if [[ $DUPLICATE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would copy backup $backup_id to $target_backup_dir" + return 0 + fi + + # Create parent target directory first, then backup subdirectory + logMessage INFO "Creating target directory structure: $target_dir" + mkdir -p "$target_dir" + mkdir -p "$target_backup_dir" + + # Find the backup archive directly (should exist immediately) + local backup_archive="" + + # Just check the most common .tar.gz format directly + backup_archive="${source_backup_dir}/backup_${ROLL_ENV_NAME}_${backup_id}.tar.gz" + + if [[ ! -f "$backup_archive" ]]; then + # Try other extensions if .tar.gz doesn't exist + for ext in ".tar.xz" ".tar.lz4" ".tar"; do + local test_file="${source_backup_dir}/backup_${ROLL_ENV_NAME}_${backup_id}${ext}" + if [[ -f "$test_file" ]]; then + backup_archive="$test_file" + break + fi + done + fi + + if [[ -z "$backup_archive" ]]; then + logMessage ERROR "Backup archive not found for ID: $backup_id in $source_backup_dir" + logMessage ERROR "Expected pattern: backup_${ROLL_ENV_NAME}_${backup_id}.*" + logMessage ERROR "Source backup directory contents:" + ls -la "$source_backup_dir/" || logMessage ERROR "Failed to list source backup directory" + return 1 + fi + + # Copy backup archive to new environment + local archive_name="$(basename "$backup_archive")" + logMessage INFO "Copying backup archive: $archive_name" + logMessage INFO "From: $backup_archive" + logMessage INFO "To: $target_backup_dir/" + + if ! 
cp "$backup_archive" "$target_backup_dir/"; then + logMessage ERROR "Failed to copy backup archive to new environment" + logMessage ERROR "Source exists: $(test -f "$backup_archive" && echo "YES" || echo "NO")" + logMessage ERROR "Target dir exists: $(test -d "$target_backup_dir" && echo "YES" || echo "NO")" + logMessage ERROR "Target dir writable: $(test -w "$target_backup_dir" && echo "YES" || echo "NO")" + return 1 + fi + + # Verify the backup file was copied successfully + local copied_backup_file="$target_backup_dir/$archive_name" + if [[ ! -f "$copied_backup_file" ]]; then + logMessage ERROR "Backup file was not successfully copied to: $copied_backup_file" + logMessage ERROR "Target directory contents:" + ls -la "$target_backup_dir/" || logMessage ERROR "Failed to list target backup directory" + return 1 + fi + + logMessage SUCCESS "Backup archive copied successfully" + logMessage INFO "Copied file: $copied_backup_file ($(du -h "$copied_backup_file" | cut -f1))" + + return 0 +} + function performDuplicate() { local new_name="$1" @@ -538,7 +704,8 @@ function performDuplicate() { fi local current_env_name="$ROLL_ENV_NAME" - local total_steps=6 + local current_dir="$(pwd)" + local total_steps=7 local current_step=0 logMessage INFO "Duplicating environment '$current_env_name' to '$new_name'" @@ -546,35 +713,42 @@ function performDuplicate() { # Step 1: Create backup ((current_step++)) local backup_id - if ! backup_id=$(createBackupForDuplicate $current_step $total_steps); then + if ! backup_id=$(createBackup $current_step $total_steps); then logMessage ERROR "Failed to create backup" exit 1 fi - # Step 2: Setup new environment directory + # Step 2: Setup new environment directory (rsync source code) ((current_step++)) local target_dir - if ! target_dir=$(setupNewEnvironment "$new_name" $current_step $total_steps); then + if ! 
target_dir=$(setupNewEnvironment "$new_name" $current_step $total_steps 2>/dev/null); then logMessage ERROR "Failed to setup new environment directory" exit 1 fi - # Step 3: Restore backup to new environment + # Step 3: Copy backup file to target location AFTER rsync ((current_step++)) - if ! restoreBackupToNewEnvironment "$backup_id" "$target_dir" $current_step $total_steps; then - logMessage ERROR "Failed to restore backup to new environment" + if ! copyBackupToNewEnvironment "$backup_id" "$new_name" "$current_dir" $current_step $total_steps; then + logMessage ERROR "Failed to copy backup file to new environment" + exit 1 + fi + + # Step 4: Restore backup from already-copied file + ((current_step++)) + if ! restoreBackup "$backup_id" "$target_dir" "$current_dir" $current_step $total_steps; then + logMessage ERROR "Backup restoration step failed - stopping duplication process" exit 1 fi - # Step 4: Generate new certificates + # Step 5: Generate new certificates ((current_step++)) - generateNewCertificates "$new_name" "$target_dir" $current_step $total_steps + generateCertificates "$new_name" "$target_dir" $current_step $total_steps - # Step 5: Update database URLs + # Step 6: Update database URLs ((current_step++)) updateDatabaseUrls "$new_name" "$target_dir" $current_step $total_steps - # Step 6: Start new environment + # Step 7: Start new environment ((current_step++)) startNewEnvironment "$target_dir" $current_step $total_steps diff --git a/commands/restore.cmd b/commands/restore.cmd index 70e4ce8..f08854e 100755 --- a/commands/restore.cmd +++ b/commands/restore.cmd @@ -488,33 +488,12 @@ function restoreVolume() { --label com.docker.compose.version="$docker_compose_version" \ --label com.docker.compose.volume="$volume_base_name" >/dev/null 2>&1 - # Determine decompression command and user permissions - local decompress_cmd="cat" - local original_file="$backup_file" - - # Remove .gpg extension to determine original compression - if [[ $is_encrypted == true ]]; 
then - original_file="${backup_file%.gpg}" - fi - - case "$original_file" in - *.tar.gz) decompress_cmd="gzip -d" ;; - *.tar.xz) decompress_cmd="xz -d" ;; - *.tar.lz4) decompress_cmd="lz4 -d" ;; - esac - - local user_id="0:0" # Default to root - case "$service_type" in - elasticsearch|opensearch) user_id="1000:1000" ;; - mysql|mariadb|postgres) user_id="999:999" ;; - esac - # Restore the volume data with decryption if needed local temp_container="${ROLL_ENV_NAME}_restore_${service_name}_$$" if [[ $is_encrypted == true ]]; then - # Decrypt and decompress pipeline - extract as root first, then fix permissions - local restore_cmd="gpg --batch --yes --quiet --passphrase \"$RESTORE_DECRYPT\" --decrypt \"$backup_file\" | $decompress_cmd | docker run --rm --name \"$temp_container\" --mount source=\"$volume_name\",target=/data -i alpine:latest sh -c \"cd /data && tar -xf - --strip-components=1 && chown -R $user_id /data\"" + # Decrypt and decompress pipeline - use ubuntu and original tar approach with strip components + local restore_cmd="gpg --batch --yes --quiet --passphrase \"$RESTORE_DECRYPT\" --decrypt \"$backup_file\" | docker run --rm --name \"$temp_container\" --mount source=\"$volume_name\",target=/data -i ubuntu bash -c \"cd /data && tar -xf - --strip-components=1\"" if eval "$restore_cmd" 2>/dev/null; then logMessage SUCCESS "Successfully restored and decrypted $service_name volume" @@ -524,18 +503,70 @@ function restoreVolume() { return 1 fi else - # Regular restore without decryption - if $decompress_cmd < "$backup_file" | docker run --rm --name "$temp_container" \ - --mount source="$volume_name",target=/data \ - -i alpine:latest \ - sh -c "cd /data && tar -xf - --strip-components=1 && chown -R $user_id /data" 2>/dev/null; then - - logMessage SUCCESS "Successfully restored $service_name volume" - return 0 - else - logMessage ERROR "Failed to restore $service_name volume" - return 1 - fi + # Regular restore without decryption - use ubuntu and original tar 
approach with strip components
+        # For compressed files, we need to handle decompression properly
+        case "$backup_file" in
+            *.tar.gz)
+                if docker run --rm --name "$temp_container" \
+                    --mount source="$volume_name",target=/data \
+                    -v "$(dirname "$backup_file")":/backup \
+                    ubuntu bash \
+                    -c "cd /data && tar -xzf /backup/$(basename "$backup_file") --strip-components=1" 2>/dev/null; then
+
+                    logMessage SUCCESS "Successfully restored $service_name volume"
+                    return 0
+                else
+                    logMessage ERROR "Failed to restore $service_name volume"
+                    return 1
+                fi
+                ;;
+            *.tar.xz)
+                if docker run --rm --name "$temp_container" \
+                    --mount source="$volume_name",target=/data \
+                    -v "$(dirname "$backup_file")":/backup \
+                    ubuntu bash \
+                    -c "cd /data && tar -xJf /backup/$(basename "$backup_file") --strip-components=1" 2>/dev/null; then
+
+                    logMessage SUCCESS "Successfully restored $service_name volume"
+                    return 0
+                else
+                    logMessage ERROR "Failed to restore $service_name volume"
+                    return 1
+                fi
+                ;;
+            *.tar.lz4)
+                # lz4 is not preinstalled in the stock ubuntu image; decompress on the host and stream into the container
+                if lz4 -dc "$backup_file" | docker run --rm --name "$temp_container" \
+                    --mount source="$volume_name",target=/data \
+                    -i ubuntu bash \
+                    -c "cd /data && tar -xf - --strip-components=1" 2>/dev/null; then
+
+                    logMessage SUCCESS "Successfully restored $service_name volume"
+                    return 0
+                else
+                    logMessage ERROR "Failed to restore $service_name volume"
+                    return 1
+                fi
+                ;;
+            *.tar)
+                if docker run --rm --name "$temp_container" \
+                    --mount source="$volume_name",target=/data \
+                    -v "$(dirname "$backup_file")":/backup \
+                    ubuntu bash \
+                    -c "cd /data && tar -xf /backup/$(basename "$backup_file") --strip-components=1" 2>/dev/null; then
+
+                    logMessage SUCCESS "Successfully restored $service_name volume"
+                    return 0
+                else
+                    logMessage ERROR "Failed to restore $service_name volume"
+                    return 1
+                fi
+                ;;
+            *)
+                logMessage ERROR "Unsupported backup file format: $backup_file"
+                return 1
+                ;;
+        esac
     fi
 }

From 
9698a092d95f1532834d32419381d15c48959a85 Mon Sep 17 00:00:00 2001 From: github-actions Date: Thu, 5 Jun 2025 10:40:31 +0000 Subject: [PATCH 20/69] Tagged 0.3 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index a54000c..be58634 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.2.6.5 +0.3 From f75e8883b0d815e93fca2863340a6237fde9229f Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Thu, 5 Jun 2025 14:50:11 +0200 Subject: [PATCH 21/69] add duplication docs --- docs/duplicate.md | 236 ++++++++++++++++++++++++++++++++++++++++++++++ docs/index.md | 1 + docs/usage.md | 16 ++++ 3 files changed, 253 insertions(+) create mode 100644 docs/duplicate.md diff --git a/docs/duplicate.md b/docs/duplicate.md new file mode 100644 index 0000000..4f92965 --- /dev/null +++ b/docs/duplicate.md @@ -0,0 +1,236 @@ +# Environment Duplication + +The `duplicate` command allows you to create a complete copy of your current Roll environment with a new name. This is useful for creating staging environments, testing upgrades, or setting up multiple development branches. + +## Basic Usage + +```bash +roll duplicate +``` + +**Example:** +```bash +roll duplicate moduleshop-staging +``` + +This creates a new environment called `moduleshop-staging` in a sibling directory with all data, source code, and configuration copied from the current environment. + +## What Gets Duplicated + +The duplication process includes: + +- **Source Code**: All application files and directories +- **Database**: Complete database backup and restore +- **Configuration**: Environment configuration files (`.env.roll`, etc.) 
+- **SSL Certificates**: New wildcard certificates for the new domain +- **Container Volumes**: All persistent data volumes + +## Command Options + +### Basic Options + +| Option | Description | +|--------|-------------| +| `-h, --help` | Display help information | +| `-q, --quiet` | Suppress output messages | +| `-f, --force` | Overwrite existing target directory | +| `--dry-run` | Preview what would be done without executing | +| `--verbose` | Show detailed progress information | + +### Duplication Control + +| Option | Description | +|--------|-------------| +| `--no-source` | Don't copy source code (data-only duplication) | +| `--no-start` | Don't start the new environment automatically | +| `--no-urls` | Skip updating database URLs | +| `--no-magento-commands` | Skip running Magento post-duplication commands | + +### Security Options + +| Option | Description | +|--------|-------------| +| `--encrypt` | Encrypt backup with interactive password prompt | +| `--encrypt=password` | Encrypt backup with specified password | + +## Examples + +### Basic Duplication +```bash +roll duplicate my-project-staging +``` +Creates a complete copy with automatic SSL certificate generation and URL updates. + +### Encrypted Duplication +```bash +roll duplicate my-project-backup --encrypt +``` +Creates an encrypted backup during duplication (prompts for password). + +### Preview Mode +```bash +roll duplicate my-project-test --dry-run +``` +Shows what would be done without actually performing the duplication. + +### Data-Only Duplication +```bash +roll duplicate my-project-dataonly --no-source +``` +Duplicates only the data (database, volumes) without copying source code. + +### Force Overwrite +```bash +roll duplicate existing-project --force +``` +Overwrites an existing environment directory. + +## Duplication Process + +The duplication process follows these steps: + +1. **Create Backup**: Creates a backup of the current environment's data +2. 
**Setup Directory**: Creates new environment directory and copies source code +3. **Copy Backup**: Transfers backup files to the new environment +4. **Restore Data**: Restores database and volume data in the new environment +5. **Generate Certificates**: Creates new SSL certificates for the new domain +6. **Update URLs**: Updates database URLs to match the new environment +7. **Start Environment**: Starts the new environment (unless `--no-start` is used) + +## Environment-Specific Behavior + +### Magento 2 + +For Magento 2 environments, the duplication process includes: + +- Updates `core_config_data` table for base URLs +- Updates `app/etc/env.php` configuration +- Runs post-duplication commands: + - `app:config:import` + - `setup:upgrade` + - `setup:di:compile` + - `cache:clean` + - `cache:flush` + +### Magento 1 + +Updates `core_config_data` table for base and secure URLs. + +### WordPress + +Updates `wp_options` table for `home` and `siteurl` options. + +## Directory Structure + +When you duplicate an environment, the new environment is created as a sibling directory: + +``` +parent-directory/ +├── original-project/ # Current environment +└── new-environment-name/ # Duplicated environment +``` + +## URL Pattern + +The new environment will be accessible at: +- **Default**: `https://app.new-environment-name.test` +- **Custom Domain**: Based on `TRAEFIK_DOMAIN` in `.env.roll` + +## Excluded Files and Directories + +The following are excluded from source code duplication: + +- `.roll/backups/` (backup files) +- `var/cache/`, `var/log/`, `var/session/`, `var/tmp/` (Magento cache/logs) +- `storage/logs/`, `storage/framework/cache/` (Laravel cache/logs) +- `node_modules/` (Node.js dependencies) +- `vendor/bin/` (Composer binaries) +- `*.log` (Log files) + +## Error Handling + +The duplication process includes robust error handling: + +- **Validation**: Checks environment name format and directory conflicts +- **Backup Verification**: Ensures backup creation succeeds 
before proceeding +- **Step-by-Step**: Each step is validated before continuing +- **Rollback**: Failed duplications can be cleaned up manually + +## Best Practices + +### Naming Conventions +Use descriptive names that indicate the purpose: +```bash +roll duplicate myproject-staging +roll duplicate myproject-upgrade-test +roll duplicate myproject-feature-branch +``` + +### Pre-Duplication Checks +1. Ensure your current environment is in a stable state +2. Stop any running processes that might interfere +3. Consider the disk space requirements (duplication roughly doubles space usage) + +### Post-Duplication Tasks +1. Verify the new environment starts correctly +2. Test critical functionality +3. Update any hardcoded URLs or paths specific to your use case +4. Configure any additional services or integrations + +## Troubleshooting + +### Common Issues + +**"Directory already exists" error:** +```bash +roll duplicate myproject-copy --force +``` + +**Backup creation fails:** +- Check disk space availability +- Ensure database is accessible +- Verify environment is properly started + +**URL updates don't work:** +```bash +# Manually update URLs after duplication +cd ../new-environment-name +roll env up -d +roll db connect +# Run SQL updates manually +``` + +**SSL certificate issues:** +```bash +cd ../new-environment-name +roll sign-certificate "*.new-environment-name.test" +``` + +## Performance Considerations + +- **Disk Space**: Duplication requires approximately 2x the original environment size +- **Duration**: Process time depends on database size and number of files +- **Memory**: Backup creation and restoration are memory-intensive operations +- **I/O**: Intensive disk operations during file copying and database operations + +## Security Notes + +- Use `--encrypt` for sensitive production data +- Review database contents before duplication (remove sensitive data if needed) +- Ensure proper file permissions on the duplicated environment +- Consider using 
`--no-source` if you only need data duplication + +## Integration with Other Commands + +The duplicate command works seamlessly with other Roll commands: + +```bash +# After duplication, switch to new environment +cd ../new-environment-name + +# Use all normal Roll commands +roll shell +roll env logs +roll magento cache:flush +roll backup create +``` \ No newline at end of file diff --git a/docs/index.md b/docs/index.md index 1555338..7ca8292 100644 --- a/docs/index.md +++ b/docs/index.md @@ -29,6 +29,7 @@ caption: Getting Started installing services usage +duplicate backup-restore backup-restore-quick-reference registry diff --git a/docs/usage.md b/docs/usage.md index eb1bd23..7084c84 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -53,6 +53,22 @@ Remove volumes completely: roll env down -v +## Environment Duplication + +Duplicate the current environment to create a new environment with a different name: + + roll duplicate new-environment-name + +Create an encrypted duplicate: + + roll duplicate staging-env --encrypt + +Preview what would be duplicated without executing: + + roll duplicate test-env --dry-run + +For detailed duplication documentation, see the [Environment Duplication](duplicate.md) page. 
+ ## Backup and Restore Commands Create a backup of all enabled services: From 96bbb30e62924f2f4e7612be86c60cf4187253c7 Mon Sep 17 00:00:00 2001 From: Rick Date: Fri, 6 Jun 2025 22:28:41 +0200 Subject: [PATCH 22/69] docs: add decrypt examples for restore-full --- commands/restore-full.cmd | 1022 ++++++++++++++++++++++++ commands/restore-full.help | 83 ++ commands/usage.help | 3 + docs/backup-restore-quick-reference.md | 17 +- docs/backup-restore.md | 23 + 5 files changed, 1147 insertions(+), 1 deletion(-) create mode 100644 commands/restore-full.cmd create mode 100644 commands/restore-full.help diff --git a/commands/restore-full.cmd b/commands/restore-full.cmd new file mode 100644 index 0000000..60db2c6 --- /dev/null +++ b/commands/restore-full.cmd @@ -0,0 +1,1022 @@ +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +# Load core utilities (environment config loaded later if needed) +assertDockerRunning + +# Default configuration values +RESTORE_BACKUP_ID="" +RESTORE_SERVICES=() +RESTORE_CONFIG=1 +RESTORE_VERIFY=1 +RESTORE_FORCE=0 +RESTORE_DRY_RUN=0 +RESTORE_QUIET=0 +RESTORE_DECRYPT="" +RESTORE_BACKUP_FILE="" +RESTORE_OUTPUT_DIR="" +PROGRESS=1 +ROLL_ENV_LOADED=0 + +# Legacy migration support +RESTORE_LEGACY_MIGRATION=1 + +# Parse command line arguments +EXTRA_ARGS=() +while [[ $# -gt 0 ]]; do + case "$1" in + --help|-h) + roll restore-full --help + exit 0 + ;; + --backup-id=*|--backup=*) + RESTORE_BACKUP_ID="${1#*=}" + shift + ;; + --services=*) + IFS=',' read -ra RESTORE_SERVICES <<< "${1#*=}" + shift + ;; + --no-config) + RESTORE_CONFIG=0 + shift + ;; + --no-verify) + RESTORE_VERIFY=0 + shift + ;; + --force|-f) + RESTORE_FORCE=1 + shift + ;; + --dry-run) + RESTORE_DRY_RUN=1 + shift + ;; + --quiet|-q) + RESTORE_QUIET=1 + PROGRESS=0 + shift + ;; + --input|--archive|--file) + RESTORE_BACKUP_FILE="$2" + shift 2 + ;; + --input=*|--archive=*|--file=*) + RESTORE_BACKUP_FILE="${1#*=}" + 
shift + ;; + --output-dir|-o) + RESTORE_OUTPUT_DIR="$2" + shift 2 + ;; + --output-dir=*|-o=*) + RESTORE_OUTPUT_DIR="${1#*=}" + shift + ;; + --decrypt=*) + RESTORE_DECRYPT="${1#*=}" + shift + ;; + --decrypt) + # Flag without value - will prompt for password later + RESTORE_DECRYPT="PROMPT" + shift + ;; + --no-progress) + PROGRESS=0 + shift + ;; + --no-legacy-migration) + RESTORE_LEGACY_MIGRATION=0 + shift + ;; + --) + shift + break + ;; + -*) + error "Unknown option: $1" + exit 1 + ;; + *) + EXTRA_ARGS+=("$1") + shift + ;; + esac +done + +# Handle positional arguments for simplified full restore syntax +if [[ ${#EXTRA_ARGS[@]} -gt 0 ]]; then + candidate="${EXTRA_ARGS[0]}" + if [[ -z "$RESTORE_BACKUP_FILE" && ( -f "$candidate" || -d "$candidate" || "$candidate" =~ \.tar(\..*)?$ ) ]]; then + RESTORE_BACKUP_FILE="$candidate" + if [[ ${#EXTRA_ARGS[@]} -gt 1 && -z "$RESTORE_OUTPUT_DIR" ]]; then + RESTORE_OUTPUT_DIR="${EXTRA_ARGS[1]}" + if [[ ${#EXTRA_ARGS[@]} -gt 2 ]]; then + error "Too many positional arguments: ${EXTRA_ARGS[2]}" + exit 1 + fi + fi + else + if [[ -z "$RESTORE_BACKUP_ID" ]]; then + RESTORE_BACKUP_ID="$candidate" + else + error "Unexpected argument: $candidate" + exit 1 + fi + if [[ ${#EXTRA_ARGS[@]} -gt 1 ]]; then + error "Unexpected argument: ${EXTRA_ARGS[1]}" + exit 1 + fi + fi +fi + +# Set environment path +RESTORE_OUTPUT_DIR="${RESTORE_OUTPUT_DIR:-$(pwd)}" +mkdir -p "$RESTORE_OUTPUT_DIR" +cd "$RESTORE_OUTPUT_DIR" +ROLL_ENV_PATH="$(pwd)" + +# Utility functions for restore operations +function promptPassword() { + local prompt="$1" + local password="" + + # Don't prompt in quiet mode or non-interactive shells + if [[ $RESTORE_QUIET -eq 1 ]] || [[ ! -t 0 ]]; then + error "Password required but running in non-interactive mode. Use --decrypt=password instead." 
+ exit 1 + fi + + echo -n "$prompt: " >&2 + read -s password + echo >&2 + + if [[ -z "$password" ]]; then + error "Password cannot be empty" + exit 1 + fi + + echo "$password" +} + +function detectEncryptedBackup() { + local backup_path="$1" + + # Check if backup contains .gpg files + if [[ -d "$backup_path" ]]; then + # Directory format - check for .gpg files + if find "$backup_path" -name "*.gpg" -type f | head -1 | grep -q .; then + return 0 # Encrypted + fi + else + # Archive format - check if archive contains .gpg files + local archive_file="$backup_path" + if [[ -f "$archive_file" ]]; then + # Determine decompression command + local decompress_cmd="cat" + case "$archive_file" in + *.tar.gz) decompress_cmd="gzip -dc" ;; + *.tar.xz) decompress_cmd="xz -dc" ;; + *.tar.lz4) decompress_cmd="lz4 -dc" ;; + esac + + # Check if archive contains .gpg files + if $decompress_cmd "$archive_file" | tar -tf - 2>/dev/null | grep -q "\.gpg$"; then + return 0 # Encrypted + fi + fi + fi + + return 1 # Not encrypted +} + +function showProgress() { + [[ $PROGRESS -eq 0 ]] && return + local current=$1 + local total=$2 + local description="$3" + local percent=$((current * 100 / total)) + local bar_length=30 + local filled_length=$((percent * bar_length / 100)) + + printf "\r[" + printf "%*s" $filled_length | tr ' ' '=' + printf "%*s" $((bar_length - filled_length)) | tr ' ' '-' + printf "] %d%% %s" $percent "$description" + + # Always end with a newline for clean output + echo "" +} + +function logMessage() { + [[ $RESTORE_QUIET -eq 1 ]] && return + local level="$1" + shift + case "$level" in + INFO) info "$@" ;; + SUCCESS) success "$@" ;; + WARNING) warning "$@" ;; + ERROR) error "$@" ;; + esac +} + +function performLegacyMigration() { + if [[ $RESTORE_LEGACY_MIGRATION -eq 0 ]]; then + return 0 + fi + + local current_dir="$(pwd)" + + # Handle Warden to Roll migration + if [[ ! 
-f "$current_dir/.env.roll" ]]; then + if [[ -f "$current_dir/.env" ]]; then + logMessage INFO "Performing legacy Warden to Roll migration..." + + # Create backup of original .env + cp "$current_dir/.env" "$current_dir/.env.backup.$(date +%s)" + + # Convert WARDEN to ROLL + sed -i.warden 's/WARDEN/ROLL/g' "$current_dir/.env" + + # Migrate .warden directory to .roll + if [[ -d "$current_dir/.warden" ]]; then + mv "$current_dir/.warden" "$current_dir/.roll" + + if [[ -f "$current_dir/.roll/warden-env.yml" ]]; then + mv "$current_dir/.roll/warden-env.yml" "$current_dir/.roll/roll-env.yml" + sed -i.warden 's/WARDEN/ROLL/g;s/warden/roll/g' "$current_dir/.roll/roll-env.yml" + fi + fi + + # Ensure ROLL_NO_STATIC_CACHING is set + if [[ -n "$(grep -r 'ROLL_NO_STATIC_CACHING' "$current_dir/.env")" ]]; then + perl -i -pe's/.*ROLL_NO_STATIC_CACHING.*$/ROLL_NO_STATIC_CACHING\=1/g' "$current_dir/.env" + else + echo "ROLL_NO_STATIC_CACHING=1" >> "$current_dir/.env" + fi + + # Move to .env.roll if it contains ROLL_ variables + if [[ -n "$(grep -r 'ROLL_' "$current_dir/.env")" ]]; then + mv "$current_dir/.env" "$current_dir/.env.roll" + fi + + logMessage SUCCESS "Legacy migration completed" + fi + fi +} + +function findLatestBackup() { + local backup_dir="$(pwd)/.roll/backups" + + if [[ ! 
-d "$backup_dir" ]]; then + return 1 + fi + + # Look for timestamped directories first (new format) + local latest_dir=$(ls "$backup_dir" 2>/dev/null | grep '^[0-9]\{10\}$' | sort -n | tail -1) + if [[ -n "$latest_dir" ]]; then + echo "$latest_dir" + return 0 + fi + + # Look for compressed archives + local latest_archive=$(ls "$backup_dir"/backup_*_*.tar* 2>/dev/null | sort | tail -1) + if [[ -n "$latest_archive" ]]; then + # Extract timestamp from filename + local timestamp=$(basename "$latest_archive" | grep -o '[0-9]\{10\}') + echo "$timestamp" + return 0 + fi + + return 1 +} + +function extractBackupArchive() { + local backup_id="$1" + local backup_dir="$(pwd)/.roll/backups" + local extract_dir="$backup_dir/${backup_id}_extracted" + + # Check if already extracted + if [[ -d "$extract_dir" ]]; then + echo "$extract_dir" + return 0 + fi + + # Find the archive file + local archive_file="" + for ext in ".tar.gz" ".tar.xz" ".tar.lz4" ".tar"; do + local potential_file="$backup_dir/backup_${ROLL_ENV_NAME}_${backup_id}${ext}" + if [[ -f "$potential_file" ]]; then + archive_file="$potential_file" + break + fi + done + + # Also check for generic archive names + if [[ -z "$archive_file" ]]; then + archive_file=$(ls "$backup_dir"/*"$backup_id"*.tar* 2>/dev/null | head -1) + fi + + if [[ -z "$archive_file" ]]; then + logMessage ERROR "Backup archive not found for ID: $backup_id" + return 1 + fi + + logMessage INFO "Extracting backup archive: $(basename "$archive_file")" + + mkdir -p "$extract_dir" + + # Determine decompression command based on file extension + local decompress_cmd="cat" + case "$archive_file" in + *.tar.gz) decompress_cmd="gzip -d" ;; + *.tar.xz) decompress_cmd="xz -d" ;; + *.tar.lz4) decompress_cmd="lz4 -d" ;; + esac + + if $decompress_cmd < "$archive_file" | tar -xf - -C "$extract_dir" --strip-components=1; then + echo "$extract_dir" + return 0 + else + logMessage ERROR "Failed to extract backup archive" + rm -rf "$extract_dir" + return 1 + fi +} + 
+function extractBackupArchiveFile() { + local archive_file="$1" + local backup_dir="$(pwd)/.roll/backups" + local base_name="$(basename "$archive_file")" + base_name="${base_name%%.tar*}" + local extract_dir="$backup_dir/${base_name}_extracted" + + if [[ -d "$extract_dir" ]]; then + echo "$extract_dir" + return 0 + fi + + mkdir -p "$extract_dir" + + local decompress_cmd="cat" + case "$archive_file" in + *.tar.gz) decompress_cmd="gzip -d" ;; + *.tar.xz) decompress_cmd="xz -d" ;; + *.tar.lz4) decompress_cmd="lz4 -d" ;; + esac + + if $decompress_cmd < "$archive_file" | tar -xf - -C "$extract_dir" --strip-components=1; then + echo "$extract_dir" + return 0 + else + logMessage ERROR "Failed to extract backup archive" + rm -rf "$extract_dir" + return 1 + fi +} + +function validateBackup() { + local backup_path="$1" + + if [[ $RESTORE_VERIFY -eq 0 ]]; then + return 0 + fi + + logMessage INFO "Validating backup integrity..." + + # Check if backup metadata exists + if [[ ! -f "$backup_path/metadata/backup.json" ]]; then + logMessage WARNING "Backup metadata not found, proceeding with legacy format" + return 0 + fi + + # Verify checksums if available + if [[ -f "$backup_path/metadata/checksums.sha256" ]]; then + if (cd "$backup_path" && sha256sum -c metadata/checksums.sha256 >/dev/null 2>&1); then + logMessage SUCCESS "Backup integrity verified" + return 0 + else + logMessage ERROR "Backup integrity check failed" + return 1 + fi + fi + + logMessage SUCCESS "Backup validation completed" + return 0 +} + +function getBackupMetadata() { + local backup_path="$1" + local metadata_file="$backup_path/metadata/backup.json" + + if [[ -f "$metadata_file" ]]; then + cat "$metadata_file" + else + # Return empty JSON for legacy backups + echo "{}" + fi +} + +function detectBackupServices() { + local backup_path="$1" + local services=() + + # Check for volume backups + if [[ -d "$backup_path/volumes" ]]; then + for volume_file in "$backup_path/volumes"/*; do + if [[ -f "$volume_file" ]]; 
then + local service_name=$(basename "$volume_file" | sed 's/\.tar.*//') + services+=("$service_name") + fi + done + else + # Legacy format detection + if [[ -f "$backup_path/db.tar.gz" ]]; then + services+=("db") + fi + if [[ -f "$backup_path/redis.tar.gz" ]]; then + services+=("redis") + fi + if [[ -f "$backup_path/es.tar.gz" ]]; then + services+=("elasticsearch") + fi + fi + + echo "${services[@]}" +} + +function stopEnvironment() { + if [[ $RESTORE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would stop environment" + return 0 + fi + + logMessage INFO "Stopping environment for consistent restore..." + + local running_containers=$(roll env ps --services --filter "status=running" 2>/dev/null | grep 'php-fpm' | sed 's/ *$//g') + if [[ -n "$running_containers" ]]; then + "${ROLL_DIR}/bin/roll" env down >/dev/null 2>&1 + fi +} + +function getVolumeMapping() { + local service_name="$1" + + case "$service_name" in + db) + case "${DB_DISTRIBUTION:-mariadb}" in + mysql|mariadb) echo "${ROLL_ENV_NAME}_dbdata:mysql" ;; + postgres) echo "${ROLL_ENV_NAME}_dbdata:postgres" ;; + *) echo "${ROLL_ENV_NAME}_dbdata:mysql" ;; + esac + ;; + redis) echo "${ROLL_ENV_NAME}_redis:redis" ;; + dragonfly) echo "${ROLL_ENV_NAME}_dragonfly:dragonfly" ;; + elasticsearch) echo "${ROLL_ENV_NAME}_esdata:elasticsearch" ;; + opensearch) echo "${ROLL_ENV_NAME}_osdata:opensearch" ;; + mongodb) echo "${ROLL_ENV_NAME}_mongodb:mongodb" ;; + rabbitmq) echo "${ROLL_ENV_NAME}_rabbitmq:rabbitmq" ;; + varnish) echo "${ROLL_ENV_NAME}_varnish:varnish" ;; + *) echo "${ROLL_ENV_NAME}_${service_name}:generic" ;; + esac +} + +function restoreVolume() { + local service_name="$1" + local backup_path="$2" + local step="$3" + local total="$4" + + showProgress $step $total "Restoring $service_name volume" + + local volume_mapping=$(getVolumeMapping "$service_name") + IFS=':' read -r volume_name service_type <<< "$volume_mapping" + + # Determine backup file location (check for both encrypted and unencrypted) + 
local backup_file="" + local is_encrypted=false + + # Check for encrypted files first (.gpg extension) + if [[ -f "$backup_path/volumes/${service_name}.tar.gz.gpg" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.gz.gpg" + is_encrypted=true + elif [[ -f "$backup_path/volumes/${service_name}.tar.xz.gpg" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.xz.gpg" + is_encrypted=true + elif [[ -f "$backup_path/volumes/${service_name}.tar.lz4.gpg" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.lz4.gpg" + is_encrypted=true + elif [[ -f "$backup_path/volumes/${service_name}.tar.gpg" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.gpg" + is_encrypted=true + # Check for unencrypted files + elif [[ -f "$backup_path/volumes/${service_name}.tar.gz" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.gz" + elif [[ -f "$backup_path/volumes/${service_name}.tar.xz" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.xz" + elif [[ -f "$backup_path/volumes/${service_name}.tar.lz4" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar.lz4" + elif [[ -f "$backup_path/volumes/${service_name}.tar" ]]; then + backup_file="$backup_path/volumes/${service_name}.tar" + elif [[ -f "$backup_path/${service_name}.tar.gz" ]]; then + # Legacy format + backup_file="$backup_path/${service_name}.tar.gz" + else + logMessage WARNING "Backup file not found for service: $service_name" + return 0 + fi + + if [[ $RESTORE_DRY_RUN -eq 1 ]]; then + if [[ $is_encrypted == true ]]; then + logMessage INFO "[DRY RUN] Would decrypt and restore $service_name from $backup_file to volume $volume_name" + else + logMessage INFO "[DRY RUN] Would restore $service_name from $backup_file to volume $volume_name" + fi + return 0 + fi + + # Validate decryption password if file is encrypted + if [[ $is_encrypted == true ]]; then + if [[ -z "$RESTORE_DECRYPT" ]]; then + logMessage ERROR "Encrypted backup file found but no 
decryption password provided" + return 1 + fi + fi + + # Get Docker Compose version for proper labeling + local docker_compose_version=$(docker compose version 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+' | head -1) + local volume_base_name=$(echo "$volume_name" | sed "s/${ROLL_ENV_NAME}_//") + + # Remove existing volume if it exists + if docker volume inspect "$volume_name" >/dev/null 2>&1; then + if [[ $RESTORE_FORCE -eq 1 ]]; then + logMessage INFO "Removing existing volume: $volume_name" + docker volume rm "$volume_name" >/dev/null 2>&1 + else + logMessage ERROR "Volume $volume_name already exists. Use --force to overwrite." + return 1 + fi + fi + + # Create new volume with proper labels + docker volume create "$volume_name" \ + --label com.docker.compose.project="$ROLL_ENV_NAME" \ + --label com.docker.compose.version="$docker_compose_version" \ + --label com.docker.compose.volume="$volume_base_name" >/dev/null 2>&1 + + # Restore the volume data with decryption if needed + local temp_container="${ROLL_ENV_NAME}_restore_${service_name}_$$" + + if [[ $is_encrypted == true ]]; then + # Decrypt and decompress pipeline - use ubuntu and original tar approach with strip components + local restore_cmd="gpg --batch --yes --quiet --passphrase \"$RESTORE_DECRYPT\" --decrypt \"$backup_file\" | docker run --rm --name \"$temp_container\" --mount source=\"$volume_name\",target=/data -i ubuntu bash -c \"cd /data && tar -xf - --strip-components=1\"" + + if eval "$restore_cmd" 2>/dev/null; then + logMessage SUCCESS "Successfully restored and decrypted $service_name volume" + return 0 + else + logMessage ERROR "Failed to decrypt and restore $service_name volume" + return 1 + fi + else + # Regular restore without decryption - use ubuntu and original tar approach with strip components + # For compressed files, we need to handle decompression properly + case "$backup_file" in + *.tar.gz) + if docker run --rm --name "$temp_container" \ + --mount source="$volume_name",target=/data 
\ + -v "$(dirname "$backup_file")":/backup \ + ubuntu bash \ + -c "cd /data && tar -xzf /backup/$(basename "$backup_file") --strip-components=1" 2>/dev/null; then + + logMessage SUCCESS "Successfully restored $service_name volume" + return 0 + else + logMessage ERROR "Failed to restore $service_name volume" + return 1 + fi + ;; + *.tar.xz) + if docker run --rm --name "$temp_container" \ + --mount source="$volume_name",target=/data \ + -v "$(dirname "$backup_file")":/backup \ + ubuntu bash \ + -c "cd /data && tar -xJf /backup/$(basename "$backup_file") --strip-components=1" 2>/dev/null; then + + logMessage SUCCESS "Successfully restored $service_name volume" + return 0 + else + logMessage ERROR "Failed to restore $service_name volume" + return 1 + fi + ;; + *.tar.lz4) + if docker run --rm --name "$temp_container" \ + --mount source="$volume_name",target=/data \ + -v "$(dirname "$backup_file")":/backup \ + ubuntu bash \ + -c "cd /data && lz4 -d /backup/$(basename "$backup_file") - | tar -xf - --strip-components=1" 2>/dev/null; then + + logMessage SUCCESS "Successfully restored $service_name volume" + return 0 + else + logMessage ERROR "Failed to restore $service_name volume" + return 1 + fi + ;; + *.tar) + if docker run --rm --name "$temp_container" \ + --mount source="$volume_name",target=/data \ + -v "$(dirname "$backup_file")":/backup \ + ubuntu bash \ + -c "cd /data && tar -xf /backup/$(basename "$backup_file") --strip-components=1" 2>/dev/null; then + + logMessage SUCCESS "Successfully restored $service_name volume" + return 0 + else + logMessage ERROR "Failed to restore $service_name volume" + return 1 + fi + ;; + *) + logMessage ERROR "Unsupported backup file format: $backup_file" + return 1 + ;; + esac + fi +} + +function restoreConfigurations() { + local backup_path="$1" + local step="$2" + local total="$3" + + if [[ $RESTORE_CONFIG -eq 0 ]]; then + return 0 + fi + + showProgress $step $total "Restoring configuration files" + + local 
config_source_dir="$backup_path/config" + local current_dir="$(pwd)" + + # Legacy format support + if [[ ! -d "$config_source_dir" ]]; then + # Check for legacy files in backup root (both encrypted and unencrypted) + local legacy_files=("env.php" "auth.json") + for file in "${legacy_files[@]}"; do + local source_file="" + local is_encrypted=false + + # Check for encrypted version first + if [[ -f "$backup_path/${file}.gpg" ]]; then + source_file="$backup_path/${file}.gpg" + is_encrypted=true + elif [[ -f "$backup_path/$file" ]]; then + source_file="$backup_path/$file" + fi + + if [[ -n "$source_file" ]]; then + local target_path="" + case "$file" in + env.php) target_path="$current_dir/app/etc/env.php" ;; + auth.json) target_path="$current_dir/auth.json" ;; + esac + + if [[ -n "$target_path" ]]; then + if [[ $RESTORE_DRY_RUN -eq 1 ]]; then + if [[ $is_encrypted == true ]]; then + logMessage INFO "[DRY RUN] Would decrypt and restore $file to $target_path" + else + logMessage INFO "[DRY RUN] Would restore $file to $target_path" + fi + else + mkdir -p "$(dirname "$target_path")" + + if [[ $is_encrypted == true ]]; then + # Decrypt the file directly to target location + if [[ -n "$RESTORE_DECRYPT" ]]; then + if gpg --batch --yes --quiet --passphrase "$RESTORE_DECRYPT" --decrypt "$source_file" > "$target_path"; then + logMessage INFO "Decrypted and restored $file" + else + logMessage ERROR "Failed to decrypt $file" + return 1 + fi + else + logMessage ERROR "Encrypted config file found but no decryption password provided" + return 1 + fi + else + cp "$source_file" "$target_path" + logMessage INFO "Restored $file" + fi + fi + fi + fi + done + return 0 + fi + + # New format with structured config directory + if [[ $RESTORE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would restore configuration files from $config_source_dir" + return 0 + fi + + # Restore configuration files (both encrypted and unencrypted) + if [[ -d "$config_source_dir" ]]; then + # Process all 
files including .gpg files + find "$config_source_dir" -type f | while read -r config_file; do + local relative_path="${config_file#$config_source_dir/}" + local is_encrypted=false + + # Check if file is encrypted + if [[ "$config_file" == *.gpg ]]; then + is_encrypted=true + # Remove .gpg extension for target path + relative_path="${relative_path%.gpg}" + fi + + local target_path="$current_dir/$relative_path" + + # Create target directory if needed + mkdir -p "$(dirname "$target_path")" + + # Backup existing file if it exists + if [[ -f "$target_path" ]]; then + if [[ $is_encrypted == true ]]; then + # For encrypted files, we can't easily compare so always backup + cp "$target_path" "$target_path.backup.$(date +%s)" + logMessage INFO "Backed up existing $relative_path" + elif ! cmp -s "$config_file" "$target_path"; then + cp "$target_path" "$target_path.backup.$(date +%s)" + logMessage INFO "Backed up existing $relative_path" + fi + fi + + if [[ $is_encrypted == true ]]; then + # Decrypt the file + if [[ -n "$RESTORE_DECRYPT" ]]; then + if gpg --batch --yes --quiet --passphrase "$RESTORE_DECRYPT" --decrypt "$config_file" > "$target_path"; then + logMessage INFO "Decrypted and restored $relative_path" + else + logMessage ERROR "Failed to decrypt $relative_path" + return 1 + fi + else + logMessage ERROR "Encrypted config file found but no decryption password provided" + return 1 + fi + else + # Copy unencrypted file + cp "$config_file" "$target_path" + logMessage INFO "Restored $relative_path" + fi + done + fi + + logMessage SUCCESS "Configuration restore completed" +} + +function restoreSourceCode() { + local backup_path="$1" + local target_dir="$2" + local step="$3" + local total="$4" + + showProgress $step $total "Restoring source code" + + local src_file="" + local is_encrypted=false + + for ext in ".tar.gz" ".tar.xz" ".tar.lz4" ".tar"; do + if [[ -f "$backup_path/source${ext}.gpg" ]]; then + src_file="$backup_path/source${ext}.gpg" + is_encrypted=true + break + 
elif [[ -f "$backup_path/source${ext}" ]]; then + src_file="$backup_path/source${ext}" + break + fi + done + + if [[ -z "$src_file" ]]; then + logMessage INFO "No source code archive found in backup" + return 0 + fi + + if [[ $RESTORE_DRY_RUN -eq 1 ]]; then + logMessage INFO "[DRY RUN] Would extract source code to $target_dir" + return 0 + fi + + mkdir -p "$target_dir" + + local decompress_cmd="cat" + case "$src_file" in + *.tar.gz*) decompress_cmd="gzip -dc" ;; + *.tar.xz*) decompress_cmd="xz -dc" ;; + *.tar.lz4*) decompress_cmd="lz4 -dc" ;; + esac + + if [[ $is_encrypted == true ]]; then + if [[ -z "$RESTORE_DECRYPT" ]]; then + logMessage ERROR "Encrypted source archive found but no decryption password provided" + return 1 + fi + if gpg --batch --yes --quiet --passphrase "$RESTORE_DECRYPT" --decrypt "$src_file" | $decompress_cmd | tar -xf - -C "$target_dir"; then + logMessage SUCCESS "Source code restored" + return 0 + else + logMessage ERROR "Failed to restore source code" + return 1 + fi + else + if $decompress_cmd "$src_file" | tar -xf - -C "$target_dir"; then + logMessage SUCCESS "Source code restored" + return 0 + else + logMessage ERROR "Failed to restore source code" + return 1 + fi + fi +} + +function performRestore() { + local backup_id="$1" + + # Perform legacy migration if needed + performLegacyMigration + + # Validate database environment + if [[ ${ROLL_DB:-1} -eq 0 ]]; then + logMessage ERROR "Database environment is not enabled (ROLL_DB=0)" + exit 1 + fi + + # Find backup if not specified + if [[ -z "$backup_id" ]]; then + backup_id=$(findLatestBackup) + if [[ -z "$backup_id" ]]; then + logMessage ERROR "No backups found and no backup ID specified" + exit 1 + fi + logMessage INFO "Using latest backup: $backup_id" + fi + + # Determine backup path + local backup_path="" + + if [[ -n "$RESTORE_BACKUP_FILE" ]]; then + if [[ -f "$RESTORE_BACKUP_FILE" ]]; then + backup_path=$(extractBackupArchiveFile "$RESTORE_BACKUP_FILE") + backup_id="$(basename 
"$RESTORE_BACKUP_FILE" | grep -o '[0-9]\{10\}' || echo "$backup_id")" + elif [[ -d "$RESTORE_BACKUP_FILE" ]]; then + backup_path="$RESTORE_BACKUP_FILE" + else + logMessage ERROR "Backup file not found: $RESTORE_BACKUP_FILE" + exit 1 + fi + else + backup_path="$(pwd)/.roll/backups/$backup_id" + if [[ ! -d "$backup_path" ]]; then + backup_path=$(extractBackupArchive "$backup_id") + if [[ $? -ne 0 ]]; then + logMessage ERROR "Backup not found: $backup_id" + exit 1 + fi + fi + fi + + # Detect if backup is encrypted and handle password prompting + if detectEncryptedBackup "$backup_path"; then + if [[ -z "$RESTORE_DECRYPT" ]]; then + # No password provided, prompt for it + RESTORE_DECRYPT=$(promptPassword "Encrypted backup detected. Enter decryption password") + elif [[ "$RESTORE_DECRYPT" == "PROMPT" ]]; then + # Explicit prompt requested + RESTORE_DECRYPT=$(promptPassword "Enter decryption password") + fi + + if [[ -z "$RESTORE_DECRYPT" ]]; then + logMessage ERROR "Encrypted backup requires a password. Use --decrypt=password or --decrypt to prompt." 
+ exit 1 + fi + + logMessage INFO "Encrypted backup detected, will decrypt during restoration" + fi + + # Validate backup + validateBackup "$backup_path" || exit 1 + + # Get backup metadata + local metadata=$(getBackupMetadata "$backup_path") + logMessage INFO "Restoring backup: $backup_id" + + local source_exists=0 + for ext in ".tar.gz" ".tar.xz" ".tar.lz4" ".tar"; do + if [[ -f "$backup_path/source${ext}" ]] || [[ -f "$backup_path/source${ext}.gpg" ]]; then + source_exists=1 + break + fi + done + + if [[ $ROLL_ENV_LOADED -eq 0 ]]; then + ROLL_ENV_NAME=$(echo "$metadata" | grep -o '"environment"[ ]*:[ ]*"[^"]*"' | head -1 | sed 's/.*"environment"[ ]*:[ ]*"\([^"]*\)".*/\1/') + fi + + # Detect available services in backup + local available_services=($(detectBackupServices "$backup_path")) + if [[ ${#available_services[@]} -eq 0 ]]; then + logMessage ERROR "No services found in backup" + exit 1 + fi + + logMessage INFO "Available services in backup: ${available_services[*]}" + + # Determine which services to restore + local services_to_restore=() + if [[ ${#RESTORE_SERVICES[@]} -gt 0 ]]; then + # Use specified services + for service in "${RESTORE_SERVICES[@]}"; do + if containsElement "$service" "${available_services[@]}"; then + services_to_restore+=("$service") + else + logMessage WARNING "Service $service not found in backup, skipping" + fi + done + else + # Restore all available services + services_to_restore=("${available_services[@]}") + fi + + if [[ ${#services_to_restore[@]} -eq 0 ]]; then + logMessage ERROR "No services to restore" + exit 1 + fi + + logMessage INFO "Restoring services: ${services_to_restore[*]}" + + # Stop environment + stopEnvironment + + # Calculate total steps + local total_steps=${#services_to_restore[@]} + if [[ $RESTORE_CONFIG -eq 1 ]]; then + ((total_steps++)) + fi + if [[ $source_exists -eq 1 ]]; then + ((total_steps++)) + fi + + local current_step=0 + + # Restore source code if available + if [[ $source_exists -eq 1 ]]; then + 
((current_step++)) + restoreSourceCode "$backup_path" "$ROLL_ENV_PATH" $current_step $total_steps + fi + + # Restore configurations + if [[ $RESTORE_CONFIG -eq 1 ]]; then + ((current_step++)) + restoreConfigurations "$backup_path" $current_step $total_steps + if [[ $ROLL_ENV_LOADED -eq 0 ]]; then + loadEnvConfig "$ROLL_ENV_PATH" || exit 1 + ROLL_ENV_LOADED=1 + fi + fi + + # Restore volumes + for service in "${services_to_restore[@]}"; do + ((current_step++)) + restoreVolume "$service" "$backup_path" $current_step $total_steps + done + + # Clean up extracted backup if it was temporary + if [[ "$backup_path" =~ _extracted$ ]]; then + rm -rf "$backup_path" + fi + + if [[ $RESTORE_DRY_RUN -eq 1 ]]; then + logMessage SUCCESS "Dry run completed successfully!" + else + logMessage SUCCESS "Restore completed successfully!" + logMessage INFO "You can now start your environment with: roll env up" + fi +} + +# Main execution +if [[ -z "$RESTORE_BACKUP_ID" ]]; then + # If no backup ID provided, use the latest + RESTORE_BACKUP_ID=$(findLatestBackup) + if [[ -z "$RESTORE_BACKUP_ID" ]]; then + error "No backups found. Please create a backup first with: roll backup" + exit 1 + fi + logMessage INFO "No backup ID specified, using latest: $RESTORE_BACKUP_ID" +fi + +performRestore "$RESTORE_BACKUP_ID" diff --git a/commands/restore-full.help b/commands/restore-full.help new file mode 100644 index 0000000..ee9d302 --- /dev/null +++ b/commands/restore-full.help @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +[[ ! 
${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +ROLL_USAGE=$(cat <' + • Use --force to overwrite existing volumes if they conflict + • Use --no-verify to skip checksums if backup is known to be good + • Check .roll/backups/ directory for available backups + • Legacy backups (old format) are automatically detected and supported +EOF +) diff --git a/commands/usage.help b/commands/usage.help index 0059778..0087eed 100755 --- a/commands/usage.help +++ b/commands/usage.help @@ -53,6 +53,9 @@ RollDev version $(cat ${ROLL_DIR}/version) registry Manage and inspect command registry (see \033[31m'roll registry -h'\033[0m for details) db Interacts with the db service on an environment (see \033[31m'roll db -h'\033[0m for details) redis Interacts with the redis service on an environment (see \033[31m'roll redis -h'\033[0m for details) + backup Create archive backups of an environment (see \033[31m'roll backup -h'\033[0m for details) + restore Restore data and configuration from a backup (see \033[31m'roll restore -h'\033[0m for details) + restore-full Restore an entire environment including source code (see \033[31m'roll restore-full -h'\033[0m for details) install Initializes or updates roll configuration on host machine shell Launches into a shell within the current project environment status Display list of all running RollDev project environments diff --git a/docs/backup-restore-quick-reference.md b/docs/backup-restore-quick-reference.md index 00a4ad2..b50e07d 100644 --- a/docs/backup-restore-quick-reference.md +++ b/docs/backup-restore-quick-reference.md @@ -39,6 +39,21 @@ roll restore --decrypt=password # Decrypt backup roll restore --quiet # Silent operation ``` +### Full Environment Restore +```bash +# Restore into current directory +roll restore-full backup.tar.gz + +# Restore into new directory +roll restore-full backup.tar.gz /path/newenv + +# Restore encrypted backup with password +roll restore-full 
--decrypt=password backup.tar.gz /path/newenv + +# Restore encrypted backup with prompt +roll restore-full --decrypt backup.tar.gz +``` + ## Common Use Cases ### Daily Development @@ -119,4 +134,4 @@ roll restore --dry-run # Verify backup integrity roll backup info -``` \ No newline at end of file +``` diff --git a/docs/backup-restore.md b/docs/backup-restore.md index 62fff64..c076e4d 100644 --- a/docs/backup-restore.md +++ b/docs/backup-restore.md @@ -139,6 +139,29 @@ roll restore --no-verify roll restore --no-legacy-migration ``` +## Full Environment Restore + +A full backup created with `roll backup --include-source` can be restored +directly using the `restore-full` command. If you don't specify an output +directory, the archive is extracted in the current path. + +```bash +# Restore to the current directory +roll restore-full backup_envname_1672531200.tar.gz + +# Restore to a new environment path +roll restore-full backup_envname_1672531200.tar.gz /path/to/newenv + +# Quiet forced restore of a specific archive +roll restore-full --quiet --force backup_envname_1672531200.tar.gz /path/to/env + +# Restore encrypted backup with password +roll restore-full --decrypt=mypassword backup_envname_1672531200.tar.gz /path/to/env + +# Restore encrypted backup with prompt +roll restore-full --decrypt backup_envname_1672531200.tar.gz +``` + ## Backup Structure RollDev creates organized backup archives with the following structure: From a446252c15cdf6b5a8a6be8bf16938b87862ddf1 Mon Sep 17 00:00:00 2001 From: Rick Date: Fri, 6 Jun 2025 23:24:24 +0200 Subject: [PATCH 23/69] Show running services in status --- commands/status.cmd | 51 +++++++++++++++++++++++++++++++++++++------- commands/status.help | 12 ++++++++++- 2 files changed, 54 insertions(+), 9 deletions(-) diff --git a/commands/status.cmd b/commands/status.cmd index 4e005e2..5e17fc7 100644 --- a/commands/status.cmd +++ b/commands/status.cmd @@ -3,7 +3,7 @@ assertDockerRunning -rollNetworkName=$(cat 
${ROLL_DIR}/docker/docker-compose.yml | grep -A3 'networks:' | tail -n1 | sed -e 's/[[:blank:]]*name:[[:blank:]]*//g') +rollNetworkName=$(grep -A3 'networks:' "${ROLL_DIR}/docker/docker-compose.yml" | tail -n1 | sed -e 's/[[:blank:]]*name:[[:blank:]]*//g') rollNetworkId=$(docker network ls -q --filter name="${rollNetworkName}") if [[ -z "${rollNetworkId}" ]]; then @@ -12,10 +12,11 @@ fi OLDIFS="$IFS"; IFS=$'\n' -projectNetworkList=( $(docker network ls --format '{{.Name}}' -q --filter "label=dev.roll.environment.name") ) +mapfile -t projectNetworkList < <(docker network ls --format '{{.Name}}' -q --filter "label=dev.roll.environment.name") IFS="$OLDIFS" messageList=() +lastNetwork="${projectNetworkList[-1]}" for projectNetwork in "${projectNetworkList[@]}"; do [[ -z "${projectNetwork}" || "${projectNetwork}" == "${rollNetworkName}" ]] && continue # Skip empty project network names (if any) @@ -28,19 +29,22 @@ for projectNetwork in "${projectNetworkList[@]}"; do [[ -z "${container}" ]] && continue # Project is not running, skip it projectDir=$(docker container inspect --format '{{ index .Config.Labels "com.docker.compose.project.working_dir"}}' "$container") - projectName=$(cat "${projectDir}/.env.roll" | grep '^ROLL_ENV_NAME=' | sed -e 's/ROLL_ENV_NAME=[[:space:]]*//g' | tr -d '\r') - projectType=$(cat "${projectDir}/.env.roll" | grep '^ROLL_ENV_TYPE=' | sed -e 's/ROLL_ENV_TYPE=[[:space:]]*//g' | tr -d '\r') - traefikDomain=$(cat "${projectDir}/.env.roll" | grep '^TRAEFIK_DOMAIN=' | sed -e 's/TRAEFIK_DOMAIN=[[:space:]]*//g' | tr -d '\r') - traefikSubDomain=$(cat "${projectDir}/.env.roll" | grep '^TRAEFIK_SUBDOMAIN=' | sed -e 's/TRAEFIK_SUBDOMAIN=[[:space:]]*//g' | tr -d '\r') + projectName=$(grep -m1 '^ROLL_ENV_NAME=' "${projectDir}/.env.roll" | cut -d '=' -f2- | tr -d '\r') + projectType=$(grep -m1 '^ROLL_ENV_TYPE=' "${projectDir}/.env.roll" | cut -d '=' -f2- | tr -d '\r') + traefikDomain=$(grep -m1 '^TRAEFIK_DOMAIN=' "${projectDir}/.env.roll" | cut -d '=' -f2- | 
tr -d '\r') + traefikSubDomain=$(grep -m1 '^TRAEFIK_SUBDOMAIN=' "${projectDir}/.env.roll" | cut -d '=' -f2- | tr -d '\r') + containerCount=$(echo "$projectContainers" | wc -l | tr -d ' ') messageList+=(" \033[1;35m${projectName}\033[0m a \033[36m${projectType}\033[0m project") messageList+=(" Project Directory: \033[33m${projectDir}\033[0m") messageList+=(" Project URL: \033[94mhttps://${traefikSubDomain}.${traefikDomain}\033[0m") + messageList+=(" Docker Network: \033[33m${projectNetwork}\033[0m") + messageList+=(" Containers Running: \033[33m${containerCount}\033[0m") - [[ "$projectNetwork" != "${projectNetworkList[@]: -1:1}" ]] && messageList+=() + [[ "$projectNetwork" != "$lastNetwork" ]] && messageList+=("") done -if [[ "${#messageList[@]}" > 0 ]]; then +if (( ${#messageList[@]} > 0 )); then if [[ -z "${rollNetworkId}" ]]; then echo -e "Found the following \033[32mrunning\033[0m projects; however, \033[31mRollDev core services are currently not running\033[0m:" else @@ -51,4 +55,35 @@ if [[ "${#messageList[@]}" > 0 ]]; then done else echo "No running environments found." 
+fi + +if [[ -n "${rollNetworkId}" ]]; then + echo + echo -e "RollDev Services (enabled -> running):" + + portainerEnabled=0 + startpageEnabled=1 + if [[ -f "${ROLL_HOME_DIR}/.env" ]]; then + portainerEnabled=$(grep -m1 '^ROLL_SERVICE_PORTAINER=' "${ROLL_HOME_DIR}/.env" | cut -d '=' -f2- | tr -d '\r') + startpageEnabled=$(grep -m1 '^ROLL_SERVICE_STARTPAGE=' "${ROLL_HOME_DIR}/.env" | cut -d '=' -f2- | tr -d '\r') + fi + portainerEnabled=${portainerEnabled:-0} + startpageEnabled=${startpageEnabled:-1} + + services=(traefik dnsmasq mailhog tunnel) + [[ "${portainerEnabled}" == 1 ]] && services+=(portainer) + [[ "${startpageEnabled}" == 1 ]] && services+=(startpage) + + printf ' %-12s %-10s %-20s %s\n' "NAME" "STATE" "STATUS" "PORTS" + for svc in "${services[@]}"; do + name=$(docker ps --filter "name=^${svc}$" --format '{{.Names}}') + state=$(docker ps --filter "name=^${svc}$" --format '{{.State}}') + status=$(docker ps --filter "name=^${svc}$" --format '{{.Status}}') + ports=$(docker ps --filter "name=^${svc}$" --format '{{.Ports}}') + if [[ -z "${name}" ]]; then + printf ' %-12s %-10s %-20s -\n' "${svc}" "stopped" "Exited" + else + printf ' %-12s %-10s %-20s %s\n' "${name}" "${state}" "${status}" "${ports}" + fi + done fi \ No newline at end of file diff --git a/commands/status.help b/commands/status.help index 8a0150d..398a904 100644 --- a/commands/status.help +++ b/commands/status.help @@ -1,4 +1,14 @@ #!/usr/bin/env bash [[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 -ROLL_USAGE="Provides listing of projects that are currently running that RollDev has been used to start" \ No newline at end of file +ROLL_USAGE=$(cat <<'USAGE' +\033[33mUsage:\033[0m + status Display list of all running RollDev project environments + +The command shows each project's name, directory, primary URL and other +details such as the Docker network and the number of running containers. 
+When RollDev core services are running, a summary table of enabled services +is also displayed similar to the output of \`docker ps\`. +USAGE +) + From 8b13f22375291c7d279952fe2348c313052491e0 Mon Sep 17 00:00:00 2001 From: Rick Date: Tue, 10 Jun 2025 10:29:52 +0200 Subject: [PATCH 24/69] Fix status command for macOS compatibility --- commands/status.cmd | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/commands/status.cmd b/commands/status.cmd index 5e17fc7..7acab0e 100644 --- a/commands/status.cmd +++ b/commands/status.cmd @@ -10,9 +10,16 @@ if [[ -z "${rollNetworkId}" ]]; then echo -e "[\033[33;1m!!\033[0m] \033[31mRollDev is not currently running.\033[0m Run \033[36mroll svc up\033[0m to start RollDev core services." fi -OLDIFS="$IFS"; +OLDIFS="$IFS" IFS=$'\n' -mapfile -t projectNetworkList < <(docker network ls --format '{{.Name}}' -q --filter "label=dev.roll.environment.name") +if command -v mapfile >/dev/null 2>&1; then + mapfile -t projectNetworkList < <(docker network ls --format '{{.Name}}' -q --filter "label=dev.roll.environment.name") +else + projectNetworkList=() + while IFS= read -r net; do + projectNetworkList+=("$net") + done < <(docker network ls --format '{{.Name}}' -q --filter "label=dev.roll.environment.name") +fi IFS="$OLDIFS" messageList=() @@ -86,4 +93,4 @@ if [[ -n "${rollNetworkId}" ]]; then printf ' %-12s %-10s %-20s %s\n' "${name}" "${state}" "${status}" "${ports}" fi done -fi \ No newline at end of file +fi From b24df268caf632c9b1e1652a6e2da94193274108 Mon Sep 17 00:00:00 2001 From: Rick Date: Tue, 10 Jun 2025 10:33:08 +0200 Subject: [PATCH 25/69] Fix last network lookup in status command --- commands/status.cmd | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/commands/status.cmd b/commands/status.cmd index 7acab0e..65de9fd 100644 --- a/commands/status.cmd +++ b/commands/status.cmd @@ -23,7 +23,8 @@ fi IFS="$OLDIFS" messageList=() -lastNetwork="${projectNetworkList[-1]}" +lastIdx=$(( 
${#projectNetworkList[@]} - 1 )) +lastNetwork="${projectNetworkList[$lastIdx]}" for projectNetwork in "${projectNetworkList[@]}"; do [[ -z "${projectNetwork}" || "${projectNetwork}" == "${rollNetworkName}" ]] && continue # Skip empty project network names (if any) From ba1776f59d12581e234a00ba51aaefb515b8e2fa Mon Sep 17 00:00:00 2001 From: Rick Date: Tue, 10 Jun 2025 11:22:09 +0200 Subject: [PATCH 26/69] Require explicit archive path for restore-full --- commands/restore-full.cmd | 108 +++++-------------------- commands/restore-full.help | 28 ++----- docs/backup-restore-quick-reference.md | 7 +- docs/backup-restore.md | 9 +-- 4 files changed, 29 insertions(+), 123 deletions(-) diff --git a/commands/restore-full.cmd b/commands/restore-full.cmd index 60db2c6..b08f50b 100644 --- a/commands/restore-full.cmd +++ b/commands/restore-full.cmd @@ -5,7 +5,6 @@ assertDockerRunning # Default configuration values -RESTORE_BACKUP_ID="" RESTORE_SERVICES=() RESTORE_CONFIG=1 RESTORE_VERIFY=1 @@ -22,17 +21,13 @@ ROLL_ENV_LOADED=0 RESTORE_LEGACY_MIGRATION=1 # Parse command line arguments -EXTRA_ARGS=() +POSITIONAL_ARGS=() while [[ $# -gt 0 ]]; do case "$1" in --help|-h) roll restore-full --help exit 0 ;; - --backup-id=*|--backup=*) - RESTORE_BACKUP_ID="${1#*=}" - shift - ;; --services=*) IFS=',' read -ra RESTORE_SERVICES <<< "${1#*=}" shift @@ -58,22 +53,6 @@ while [[ $# -gt 0 ]]; do PROGRESS=0 shift ;; - --input|--archive|--file) - RESTORE_BACKUP_FILE="$2" - shift 2 - ;; - --input=*|--archive=*|--file=*) - RESTORE_BACKUP_FILE="${1#*=}" - shift - ;; - --output-dir|-o) - RESTORE_OUTPUT_DIR="$2" - shift 2 - ;; - --output-dir=*|-o=*) - RESTORE_OUTPUT_DIR="${1#*=}" - shift - ;; --decrypt=*) RESTORE_DECRYPT="${1#*=}" shift @@ -100,40 +79,22 @@ while [[ $# -gt 0 ]]; do exit 1 ;; *) - EXTRA_ARGS+=("$1") + POSITIONAL_ARGS+=("$1") shift ;; esac done -# Handle positional arguments for simplified full restore syntax -if [[ ${#EXTRA_ARGS[@]} -gt 0 ]]; then - candidate="${EXTRA_ARGS[0]}" - if [[ 
-z "$RESTORE_BACKUP_FILE" && ( -f "$candidate" || -d "$candidate" || "$candidate" =~ \.tar(\..*)?$ ) ]]; then - RESTORE_BACKUP_FILE="$candidate" - if [[ ${#EXTRA_ARGS[@]} -gt 1 && -z "$RESTORE_OUTPUT_DIR" ]]; then - RESTORE_OUTPUT_DIR="${EXTRA_ARGS[1]}" - if [[ ${#EXTRA_ARGS[@]} -gt 2 ]]; then - error "Too many positional arguments: ${EXTRA_ARGS[2]}" - exit 1 - fi - fi - else - if [[ -z "$RESTORE_BACKUP_ID" ]]; then - RESTORE_BACKUP_ID="$candidate" - else - error "Unexpected argument: $candidate" - exit 1 - fi - if [[ ${#EXTRA_ARGS[@]} -gt 1 ]]; then - error "Unexpected argument: ${EXTRA_ARGS[1]}" - exit 1 - fi - fi +# Expect exactly two positional arguments: archive and output directory +if [[ ${#POSITIONAL_ARGS[@]} -ne 2 ]]; then + error "Usage: roll restore-full [options] archive output-dir" + exit 1 fi +RESTORE_BACKUP_FILE="${POSITIONAL_ARGS[0]}" +RESTORE_OUTPUT_DIR="${POSITIONAL_ARGS[1]}" + # Set environment path -RESTORE_OUTPUT_DIR="${RESTORE_OUTPUT_DIR:-$(pwd)}" mkdir -p "$RESTORE_OUTPUT_DIR" cd "$RESTORE_OUTPUT_DIR" ROLL_ENV_PATH="$(pwd)" @@ -845,7 +806,6 @@ function restoreSourceCode() { } function performRestore() { - local backup_id="$1" # Perform legacy migration if needed performLegacyMigration @@ -856,38 +816,16 @@ function performRestore() { exit 1 fi - # Find backup if not specified - if [[ -z "$backup_id" ]]; then - backup_id=$(findLatestBackup) - if [[ -z "$backup_id" ]]; then - logMessage ERROR "No backups found and no backup ID specified" - exit 1 - fi - logMessage INFO "Using latest backup: $backup_id" - fi - - # Determine backup path + # Determine backup path from archive argument local backup_path="" - if [[ -n "$RESTORE_BACKUP_FILE" ]]; then - if [[ -f "$RESTORE_BACKUP_FILE" ]]; then - backup_path=$(extractBackupArchiveFile "$RESTORE_BACKUP_FILE") - backup_id="$(basename "$RESTORE_BACKUP_FILE" | grep -o '[0-9]\{10\}' || echo "$backup_id")" - elif [[ -d "$RESTORE_BACKUP_FILE" ]]; then - backup_path="$RESTORE_BACKUP_FILE" - else - logMessage 
ERROR "Backup file not found: $RESTORE_BACKUP_FILE" - exit 1 - fi + if [[ -f "$RESTORE_BACKUP_FILE" ]]; then + backup_path=$(extractBackupArchiveFile "$RESTORE_BACKUP_FILE") + elif [[ -d "$RESTORE_BACKUP_FILE" ]]; then + backup_path="$RESTORE_BACKUP_FILE" else - backup_path="$(pwd)/.roll/backups/$backup_id" - if [[ ! -d "$backup_path" ]]; then - backup_path=$(extractBackupArchive "$backup_id") - if [[ $? -ne 0 ]]; then - logMessage ERROR "Backup not found: $backup_id" - exit 1 - fi - fi + logMessage ERROR "Backup file not found: $RESTORE_BACKUP_FILE" + exit 1 fi # Detect if backup is encrypted and handle password prompting @@ -913,7 +851,7 @@ function performRestore() { # Get backup metadata local metadata=$(getBackupMetadata "$backup_path") - logMessage INFO "Restoring backup: $backup_id" + logMessage INFO "Restoring backup from: $(basename \"$RESTORE_BACKUP_FILE\")" local source_exists=0 for ext in ".tar.gz" ".tar.xz" ".tar.lz4" ".tar"; do @@ -1009,14 +947,4 @@ function performRestore() { } # Main execution -if [[ -z "$RESTORE_BACKUP_ID" ]]; then - # If no backup ID provided, use the latest - RESTORE_BACKUP_ID=$(findLatestBackup) - if [[ -z "$RESTORE_BACKUP_ID" ]]; then - error "No backups found. 
Please create a backup first with: roll backup" - exit 1 - fi - logMessage INFO "No backup ID specified, using latest: $RESTORE_BACKUP_ID" -fi - -performRestore "$RESTORE_BACKUP_ID" +performRestore diff --git a/commands/restore-full.help b/commands/restore-full.help index ee9d302..7a6c81f 100644 --- a/commands/restore-full.help +++ b/commands/restore-full.help @@ -3,10 +3,10 @@ ROLL_USAGE=$(cat < Date: Tue, 10 Jun 2025 14:01:22 +0200 Subject: [PATCH 27/69] Fix restore-full argument handling --- commands/restore-full.cmd | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/commands/restore-full.cmd b/commands/restore-full.cmd index b08f50b..078580c 100644 --- a/commands/restore-full.cmd +++ b/commands/restore-full.cmd @@ -22,6 +22,10 @@ RESTORE_LEGACY_MIGRATION=1 # Parse command line arguments POSITIONAL_ARGS=() +# Start with any arguments passed from the main roll script +if [[ -n "${ROLL_PARAMS[*]}" ]]; then + POSITIONAL_ARGS+=("${ROLL_PARAMS[@]}") +fi while [[ $# -gt 0 ]]; do case "$1" in --help|-h) @@ -85,6 +89,9 @@ while [[ $# -gt 0 ]]; do esac done +# Add any remaining arguments after -- to positional args +POSITIONAL_ARGS+=("$@") + # Expect exactly two positional arguments: archive and output directory if [[ ${#POSITIONAL_ARGS[@]} -ne 2 ]]; then error "Usage: roll restore-full [options] archive output-dir" From 42145d843c754c037fd16dc256de25b46cc7d5fc Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Tue, 10 Jun 2025 16:12:28 +0200 Subject: [PATCH 28/69] remove redundant exclude patterns from backup command --- commands/backup.cmd | 2 -- 1 file changed, 2 deletions(-) diff --git a/commands/backup.cmd b/commands/backup.cmd index 1165d24..f99da51 100755 --- a/commands/backup.cmd +++ b/commands/backup.cmd @@ -475,9 +475,7 @@ function backupSourceCode() { showProgress $step $total "Backing up source code" local exclude_patterns=( - "--exclude=.git" "--exclude=node_modules" - "--exclude=vendor" "--exclude=var/cache" "--exclude=var/log" "--exclude=var/session" 
From cecf4c604cbee6b38e74c698dde3148206ef719d Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Tue, 10 Jun 2025 16:12:39 +0200 Subject: [PATCH 29/69] use passphrase-fd for gpg decryption to handle password escaping and add support for multiple archive formats --- commands/restore-full.cmd | 17 ++++++++++++----- commands/restore.cmd | 15 +++++++++++---- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/commands/restore-full.cmd b/commands/restore-full.cmd index 078580c..cee20d8 100644 --- a/commands/restore-full.cmd +++ b/commands/restore-full.cmd @@ -539,9 +539,16 @@ function restoreVolume() { if [[ $is_encrypted == true ]]; then # Decrypt and decompress pipeline - use ubuntu and original tar approach with strip components - local restore_cmd="gpg --batch --yes --quiet --passphrase \"$RESTORE_DECRYPT\" --decrypt \"$backup_file\" | docker run --rm --name \"$temp_container\" --mount source=\"$volume_name\",target=/data -i ubuntu bash -c \"cd /data && tar -xf - --strip-components=1\"" + # Use passphrase-fd to avoid shell escaping issues with passwords + # Determine the correct tar command based on the backup file format + local tar_cmd="tar -xf -" + case "$backup_file" in + *.tar.gz.gpg) tar_cmd="tar -xzf -" ;; + *.tar.xz.gpg) tar_cmd="tar -xJf -" ;; + *.tar.lz4.gpg) tar_cmd="lz4 -d - | tar -xf -" ;; + esac - if eval "$restore_cmd" 2>/dev/null; then + if echo "$RESTORE_DECRYPT" | gpg --batch --yes --quiet --passphrase-fd 0 --decrypt "$backup_file" | docker run --rm --name "$temp_container" --mount source="$volume_name",target=/data -i ubuntu bash -c "cd /data && $tar_cmd --strip-components=1" 2>/dev/null; then logMessage SUCCESS "Successfully restored and decrypted $service_name volume" return 0 else @@ -666,7 +673,7 @@ function restoreConfigurations() { if [[ $is_encrypted == true ]]; then # Decrypt the file directly to target location if [[ -n "$RESTORE_DECRYPT" ]]; then - if gpg --batch --yes --quiet --passphrase "$RESTORE_DECRYPT" --decrypt 
"$source_file" > "$target_path"; then + if echo "$RESTORE_DECRYPT" | gpg --batch --yes --quiet --passphrase-fd 0 --decrypt "$source_file" > "$target_path"; then logMessage INFO "Decrypted and restored $file" else logMessage ERROR "Failed to decrypt $file" @@ -727,7 +734,7 @@ function restoreConfigurations() { if [[ $is_encrypted == true ]]; then # Decrypt the file if [[ -n "$RESTORE_DECRYPT" ]]; then - if gpg --batch --yes --quiet --passphrase "$RESTORE_DECRYPT" --decrypt "$config_file" > "$target_path"; then + if echo "$RESTORE_DECRYPT" | gpg --batch --yes --quiet --passphrase-fd 0 --decrypt "$config_file" > "$target_path"; then logMessage INFO "Decrypted and restored $relative_path" else logMessage ERROR "Failed to decrypt $relative_path" @@ -794,7 +801,7 @@ function restoreSourceCode() { logMessage ERROR "Encrypted source archive found but no decryption password provided" return 1 fi - if gpg --batch --yes --quiet --passphrase "$RESTORE_DECRYPT" --decrypt "$src_file" | $decompress_cmd | tar -xf - -C "$target_dir"; then + if echo "$RESTORE_DECRYPT" | gpg --batch --yes --quiet --passphrase-fd 0 --decrypt "$src_file" | $decompress_cmd | tar -xf - -C "$target_dir"; then logMessage SUCCESS "Source code restored" return 0 else diff --git a/commands/restore.cmd b/commands/restore.cmd index f08854e..6b4935b 100755 --- a/commands/restore.cmd +++ b/commands/restore.cmd @@ -493,9 +493,16 @@ function restoreVolume() { if [[ $is_encrypted == true ]]; then # Decrypt and decompress pipeline - use ubuntu and original tar approach with strip components - local restore_cmd="gpg --batch --yes --quiet --passphrase \"$RESTORE_DECRYPT\" --decrypt \"$backup_file\" | docker run --rm --name \"$temp_container\" --mount source=\"$volume_name\",target=/data -i ubuntu bash -c \"cd /data && tar -xf - --strip-components=1\"" + # Use passphrase-fd to avoid shell escaping issues with passwords + # Determine the correct tar command based on the backup file format + local tar_cmd="tar -xf -" + 
case "$backup_file" in + *.tar.gz.gpg) tar_cmd="tar -xzf -" ;; + *.tar.xz.gpg) tar_cmd="tar -xJf -" ;; + *.tar.lz4.gpg) tar_cmd="lz4 -d - | tar -xf -" ;; + esac - if eval "$restore_cmd" 2>/dev/null; then + if echo "$RESTORE_DECRYPT" | gpg --batch --yes --quiet --passphrase-fd 0 --decrypt "$backup_file" | docker run --rm --name "$temp_container" --mount source="$volume_name",target=/data -i ubuntu bash -c "cd /data && $tar_cmd --strip-components=1" 2>/dev/null; then logMessage SUCCESS "Successfully restored and decrypted $service_name volume" return 0 else @@ -620,7 +627,7 @@ function restoreConfigurations() { if [[ $is_encrypted == true ]]; then # Decrypt the file directly to target location if [[ -n "$RESTORE_DECRYPT" ]]; then - if gpg --batch --yes --quiet --passphrase "$RESTORE_DECRYPT" --decrypt "$source_file" > "$target_path"; then + if echo "$RESTORE_DECRYPT" | gpg --batch --yes --quiet --passphrase-fd 0 --decrypt "$source_file" > "$target_path"; then logMessage INFO "Decrypted and restored $file" else logMessage ERROR "Failed to decrypt $file" @@ -681,7 +688,7 @@ function restoreConfigurations() { if [[ $is_encrypted == true ]]; then # Decrypt the file if [[ -n "$RESTORE_DECRYPT" ]]; then - if gpg --batch --yes --quiet --passphrase "$RESTORE_DECRYPT" --decrypt "$config_file" > "$target_path"; then + if echo "$RESTORE_DECRYPT" | gpg --batch --yes --quiet --passphrase-fd 0 --decrypt "$config_file" > "$target_path"; then logMessage INFO "Decrypted and restored $relative_path" else logMessage ERROR "Failed to decrypt $relative_path" From 484102517f1fcac8eb8b756715303d5830b26f58 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Tue, 10 Jun 2025 16:12:44 +0200 Subject: [PATCH 30/69] improve backup ID extraction logic to handle warnings in command output --- commands/duplicate.cmd | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/commands/duplicate.cmd b/commands/duplicate.cmd index 81400d6..eca89cf 100644 --- 
a/commands/duplicate.cmd +++ b/commands/duplicate.cmd @@ -231,13 +231,19 @@ function createBackup() { logMessage INFO "Creating backup..." - # The --output-id flag outputs ONLY the backup ID (no warnings) - if backup_id=$("${ROLL_DIR}/bin/roll" backup "${backup_args[@]}" 2>&1); then + # The --output-id flag should output ONLY the backup ID + if backup_output=$("${ROLL_DIR}/bin/roll" backup "${backup_args[@]}" 2>&1); then backup_exit_code=0 - # Remove any whitespace (should just be a number) - backup_id=$(echo "$backup_id" | tr -d ' \n\r\t') + # First try to use the output directly (clean --output-id output) + backup_id=$(echo "$backup_output" | tr -d ' \n\r\t') + + # If --output-id didn't work cleanly (warnings mixed in), fall back to regex + if [[ ! "$backup_id" =~ ^[0-9]+$ ]]; then + backup_id=$(echo "$backup_output" | grep -o '[0-9]\{10\}' | tail -1) + fi else backup_exit_code=$? + backup_output="" backup_id="" fi @@ -247,7 +253,8 @@ function createBackup() { return 0 else logMessage ERROR "Failed to create backup or get valid backup ID" - logMessage ERROR "Backup command output: '$backup_id'" + logMessage ERROR "Backup command output: '$backup_output'" + logMessage ERROR "Extracted backup ID: '$backup_id'" logMessage ERROR "Exit code: $backup_exit_code" return 1 fi From 99835671947cda37537e8ad2a0fbcc5516d26fcd Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Tue, 10 Jun 2025 16:12:49 +0200 Subject: [PATCH 31/69] add restore-full command to ROLL_CMD_ANYARGS list --- bin/roll | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/roll b/bin/roll index ef26c45..fdb59ba 100755 --- a/bin/roll +++ b/bin/roll @@ -40,7 +40,7 @@ declare ROLL_PARAMS=() declare ROLL_CMD_VERB= declare ROLL_CMD_EXEC= declare ROLL_CMD_HELP= -declare ROLL_CMD_ANYARGS=(svc env db redis sync shell debug rootnotty rootshell clinotty root node npm cli copyfromcontainer copytocontainer composer grunt magento magerun backup restore duplicate) +declare ROLL_CMD_ANYARGS=(svc env db 
redis sync shell debug rootnotty rootshell clinotty root node npm cli copyfromcontainer copytocontainer composer grunt magento magerun backup restore restore-full duplicate) ## parse first argument as command and determine validity if (( "$#" )); then From bc804af3e7f6472ee2d4822f0fc26146522b7cc1 Mon Sep 17 00:00:00 2001 From: github-actions Date: Tue, 10 Jun 2025 14:41:32 +0000 Subject: [PATCH 32/69] Tagged 0.3.1 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index be58634..9e11b32 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.3 +0.3.1 From 0d3ea194ab5c1b20148b497788051f6dc3c9ffb1 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Tue, 10 Jun 2025 21:27:24 +0200 Subject: [PATCH 33/69] add magento2-init command, improve usage loading with dynamic paths, and enhance global/environment-specific config handling --- bin/roll | 2 +- commands/registry.cmd | 30 +------------------ commands/usage.cmd | 37 ++++++++++++++++------- commands/usage.help | 1 + utils/config.sh | 43 ++++++++++++++++++++++++++- utils/registry.sh | 68 ++++++++++++++++++++++++++++++++++++------- 6 files changed, 129 insertions(+), 52 deletions(-) diff --git a/bin/roll b/bin/roll index fdb59ba..a124320 100755 --- a/bin/roll +++ b/bin/roll @@ -40,7 +40,7 @@ declare ROLL_PARAMS=() declare ROLL_CMD_VERB= declare ROLL_CMD_EXEC= declare ROLL_CMD_HELP= -declare ROLL_CMD_ANYARGS=(svc env db redis sync shell debug rootnotty rootshell clinotty root node npm cli copyfromcontainer copytocontainer composer grunt magento magerun backup restore restore-full duplicate) +declare ROLL_CMD_ANYARGS=(svc env db redis sync shell debug rootnotty rootshell clinotty root node npm cli copyfromcontainer copytocontainer composer grunt magento magerun backup restore restore-full duplicate magento2-init) ## parse first argument as command and determine validity if (( "$#" )); then diff --git a/commands/registry.cmd b/commands/registry.cmd index 903e7e7..4cc7f9f 100644 --- 
a/commands/registry.cmd +++ b/commands/registry.cmd @@ -163,35 +163,7 @@ case "${ROLL_PARAMS[0]}" in paths) # Show command search paths and their priorities - echo -e "\033[33mCommand Search Paths (by priority):\033[0m" - echo "" - - echo -e "\033[36mGlobal Command Paths:\033[0m" - for search_path in "${ROLL_COMMAND_SEARCH_PATHS[@]}"; do - priority="${search_path%%:*}" - directory="${search_path##*:}" - status="❌" - [[ -d "$directory" ]] && status="✅" - - printf " %s Priority %s: %s\n" "$status" "$priority" "$directory" - done - - if [[ -n "${ROLL_ENV_TYPE}" ]]; then - echo "" - echo -e "\033[36mEnvironment-Specific Paths (${ROLL_ENV_TYPE}):\033[0m" - while IFS= read -r env_path; do - [[ -z "$env_path" ]] && continue - priority="${env_path%%:*}" - directory="${env_path##*:}" - status="❌" - [[ -d "$directory" ]] && status="✅" - - printf " %s Priority %s: %s\n" "$status" "$priority" "$directory" - done < <(getEnvCommandPaths) - else - echo "" - info "No environment loaded - environment-specific paths not shown" - fi + showRegistryPaths ;; *) diff --git a/commands/usage.cmd b/commands/usage.cmd index d87b712..6d7fe4b 100644 --- a/commands/usage.cmd +++ b/commands/usage.cmd @@ -6,21 +6,38 @@ ROLL_ENV_PATH="$(locateEnvPath 2>/dev/null)" || true if [[ -n "$ROLL_ENV_PATH" ]];then loadEnvConfig "${ROLL_ENV_PATH}" || true - if [[ -n "$ROLL_ENV_TYPE" && -f "${ROLL_DIR}/commands/${ROLL_ENV_TYPE}/usage.help" ]]; then - source "${ROLL_DIR}/commands/${ROLL_ENV_TYPE}/usage.help" - fi - if [[ -n "$ROLL_ENV_TYPE" && -f "${HOME}/.roll/reclu/${ROLL_ENV_TYPE}/usage.help" ]]; then - source "${HOME}/.roll/reclu/${ROLL_ENV_TYPE}/usage.help" - fi + + # Pre-load environment-specific usage fragments to set variables for global usage.help + if [[ -n "$ROLL_ENV_TYPE" ]]; then + # Load system environment-specific usage fragments + if [[ -f "${ROLL_DIR}/commands/${ROLL_ENV_TYPE}/usage.help" ]]; then + source "${ROLL_DIR}/commands/${ROLL_ENV_TYPE}/usage.help" + fi + + # Load global 
environment-specific usage fragments (new structure) + if [[ -f "${ROLL_HOME_DIR:-$HOME/.roll}/commands/${ROLL_ENV_TYPE}/usage.help" ]]; then + source "${ROLL_HOME_DIR:-$HOME/.roll}/commands/${ROLL_ENV_TYPE}/usage.help" + fi + + # Load global environment-specific usage fragments (legacy structure) + if [[ -f "${ROLL_HOME_DIR:-$HOME/.roll}/reclu/${ROLL_ENV_TYPE}/usage.help" ]]; then + source "${ROLL_HOME_DIR:-$HOME/.roll}/reclu/${ROLL_ENV_TYPE}/usage.help" + fi + fi fi - -## load usage info for the given command falling back on default usage text +## Load usage info for the given command falling back on default usage text if [[ -f "${ROLL_CMD_HELP}" ]]; then + # Load command-specific help file source "${ROLL_CMD_HELP}" -elif [[ -f "${HOME}/.roll/reclu/usage.help" ]]; then - source "${HOME}/.roll/reclu/usage.help" +elif [[ -f "${ROLL_HOME_DIR:-$HOME/.roll}/commands/usage.help" ]]; then + # Load global usage (variables are already set above) + source "${ROLL_HOME_DIR:-$HOME/.roll}/commands/usage.help" +elif [[ -f "${ROLL_HOME_DIR:-$HOME/.roll}/reclu/usage.help" ]]; then + # Load legacy global usage + source "${ROLL_HOME_DIR:-$HOME/.roll}/reclu/usage.help" else + # Load system default usage (fragments already loaded above if needed) source "${ROLL_DIR}/commands/usage.help" fi diff --git a/commands/usage.help b/commands/usage.help index 0087eed..9d64daf 100755 --- a/commands/usage.help +++ b/commands/usage.help @@ -48,6 +48,7 @@ RollDev version $(cat ${ROLL_DIR}/version) \033[33mCommands:\033[0m svc Orchestrates global services such as traefik, portainer and dnsmasq via docker-compose env-init Configure environment by adding \033[31m'.env.roll'\033[0m file to the current working directory + magento2-init Scaffold a complete Magento 2 project from scratch env Controls an environment from any point within the root project directory config Manage and validate Roll configuration (see \033[31m'roll config -h'\033[0m for details) registry Manage and inspect command registry 
(see \033[31m'roll registry -h'\033[0m for details) diff --git a/utils/config.sh b/utils/config.sh index dcbb70a..d485b01 100644 --- a/utils/config.sh +++ b/utils/config.sh @@ -140,6 +140,11 @@ function initConfigSchema() { ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ENV_SHELL_COMMAND); ROLL_CONFIG_SCHEMA_VALUES+=("string:bash") ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ENV_SHELL_DEBUG_CONTAINER); ROLL_CONFIG_SCHEMA_VALUES+=("string:php-debug") + # Global service configuration + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_SERVICE_STARTPAGE); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:1") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_SERVICE_PORTAINER); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:1") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_SERVICE_DOMAIN); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + # XDebug configuration ROLL_CONFIG_SCHEMA_KEYS+=(XDEBUG_CONNECT_BACK_HOST); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") ROLL_CONFIG_SCHEMA_KEYS+=(XDEBUG_VERSION); ROLL_CONFIG_SCHEMA_VALUES+=("string:debug") @@ -312,7 +317,28 @@ function loadRollConfig() { # Initialize schema if not done initConfigSchema - # Load configuration from file + # Load global configuration first from ROLL_HOME_DIR + local global_config_loaded=0 + + # Check for new-style global config file + if [[ -f "${ROLL_HOME_DIR}/.env.roll" ]]; then + if loadConfigFromFile "${ROLL_HOME_DIR}/.env.roll"; then + global_config_loaded=1 + else + warning "Failed to load global configuration from ${ROLL_HOME_DIR}/.env.roll" + fi + fi + + # Check for legacy global config file + if [[ -f "${ROLL_HOME_DIR}/.env" ]]; then + if loadConfigFromFile "${ROLL_HOME_DIR}/.env"; then + global_config_loaded=1 + else + warning "Failed to load global configuration from ${ROLL_HOME_DIR}/.env" + fi + fi + + # Load project-specific configuration (this will override global settings) if ! 
loadConfigFromFile "$config_file"; then return 1 fi @@ -508,6 +534,21 @@ function showConfig() { echo -e "\033[33mRoll Configuration:\033[0m" echo "Environment: ${ROLL_ENV_NAME:-} (${ROLL_ENV_TYPE:-})" echo "Platform: ${ROLL_ENV_SUBT:-}" + + # Show loaded configuration files + if [[ ${#ROLL_CONFIG_LOADED_FILES[@]} -gt 0 ]]; then + echo "" + echo -e "\033[33mLoaded configuration files:\033[0m" + local loaded_file + for loaded_file in "${ROLL_CONFIG_LOADED_FILES[@]}"; do + if [[ "$loaded_file" =~ ${ROLL_HOME_DIR} ]]; then + echo " ${loaded_file} (global)" + else + echo " ${loaded_file} (project)" + fi + done + fi + echo "" local i=0 diff --git a/utils/registry.sh b/utils/registry.sh index 96b56d5..4bd57bb 100644 --- a/utils/registry.sh +++ b/utils/registry.sh @@ -15,11 +15,18 @@ ROLL_REGISTRY_PRIORITIES=() ROLL_REGISTRY_INITIALIZED=0 # Command search paths with priorities (lower number = higher priority) -ROLL_COMMAND_SEARCH_PATHS=( - "2:${ROLL_HOME_DIR}/commands" - "3:${ROLL_HOME_DIR}/reclu" - "4:${ROLL_DIR}/commands" -) +# Note: ROLL_HOME_DIR may not be available when this script is sourced, so we define this dynamically +ROLL_COMMAND_SEARCH_PATHS=() + +# Function to get command search paths (called when registry is initialized) +function getCommandSearchPaths() { + local search_paths=( + "2:${ROLL_HOME_DIR:-$HOME/.roll}/commands" + "3:${ROLL_HOME_DIR:-$HOME/.roll}/reclu" + "4:${ROLL_DIR}/commands" + ) + printf '%s\n' "${search_paths[@]}" +} # Environment-specific command paths (added dynamically if env is available) function getEnvCommandPaths() { @@ -32,7 +39,11 @@ function getEnvCommandPaths() { # Add environment-specific commands if ROLL_ENV_TYPE is available if [[ -n "${ROLL_ENV_TYPE}" ]]; then - [[ -d "${ROLL_HOME_DIR}/reclu/${ROLL_ENV_TYPE}" ]] && env_paths+=("1:${ROLL_HOME_DIR}/reclu/${ROLL_ENV_TYPE}") + # Check for commands in ${ROLL_HOME_DIR}/commands/${ROLL_ENV_TYPE} (new structure) + [[ -d "${ROLL_HOME_DIR:-$HOME/.roll}/commands/${ROLL_ENV_TYPE}" ]] && 
env_paths+=("1:${ROLL_HOME_DIR:-$HOME/.roll}/commands/${ROLL_ENV_TYPE}") + # Check for commands in ${ROLL_HOME_DIR}/reclu/${ROLL_ENV_TYPE} (legacy structure) + [[ -d "${ROLL_HOME_DIR:-$HOME/.roll}/reclu/${ROLL_ENV_TYPE}" ]] && env_paths+=("1:${ROLL_HOME_DIR:-$HOME/.roll}/reclu/${ROLL_ENV_TYPE}") + # System environment-specific commands [[ -d "${ROLL_DIR}/commands/${ROLL_ENV_TYPE}" ]] && env_paths+=("2:${ROLL_DIR}/commands/${ROLL_ENV_TYPE}") fi @@ -164,11 +175,10 @@ function initializeRegistry() { [[ -n "$env_path" ]] && scanCommandDirectory "$env_path" "environment" done < <(getEnvCommandPaths) - # Scan global command directories - local search_path - for search_path in "${ROLL_COMMAND_SEARCH_PATHS[@]}"; do - scanCommandDirectory "$search_path" "global" - done + # Scan global command directories using dynamic search paths + while IFS= read -r search_path; do + [[ -n "$search_path" ]] && scanCommandDirectory "$search_path" "global" + done < <(getCommandSearchPaths) ROLL_REGISTRY_INITIALIZED=1 } @@ -355,6 +365,42 @@ function showRegistryStats() { done } +## Display command search paths +function showRegistryPaths() { + echo "Command Search Paths (by priority):" + echo "" + + # Show environment-specific paths first + local env_paths + env_paths=($(getEnvCommandPaths)) + if [[ ${#env_paths[@]} -gt 0 ]]; then + echo "Environment-Specific Paths (${ROLL_ENV_TYPE:-unknown}):" + local env_path + for env_path in "${env_paths[@]}"; do + local priority="${env_path%%:*}" + local directory="${env_path##*:}" + if [[ -d "$directory" ]]; then + echo " ✅ Priority $priority: $directory" + else + echo " ❌ Priority $priority: $directory" + fi + done + echo "" + fi + + # Show global command paths + echo "Global Command Paths:" + while IFS= read -r search_path; do + local priority="${search_path%%:*}" + local directory="${search_path##*:}" + if [[ -d "$directory" ]]; then + echo " ✅ Priority $priority: $directory" + else + echo " ❌ Priority $priority: $directory" + fi + done < 
<(getCommandSearchPaths) +} + ## Export command list for external tools function exportCommands() { local format="${1:-simple}" From a028ac136373e4e082b62cbb6f2e489f4bf69a09 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Tue, 10 Jun 2025 21:27:43 +0200 Subject: [PATCH 34/69] add magento2-init command for project initialization with dynamic configuration and compatibility checks --- commands/magento2-init.cmd | 699 ++++++++++++++++++++++++++++++++++++ commands/magento2-init.help | 77 ++++ 2 files changed, 776 insertions(+) create mode 100755 commands/magento2-init.cmd create mode 100755 commands/magento2-init.help diff --git a/commands/magento2-init.cmd b/commands/magento2-init.cmd new file mode 100755 index 0000000..41f80a0 --- /dev/null +++ b/commands/magento2-init.cmd @@ -0,0 +1,699 @@ +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +# Default Magento version (minimum supported: 2.4.6) +DEFAULT_MAGENTO_VERSION="2.4.x" + +# Extract parameters +PROJECT_NAME="${ROLL_PARAMS[0]:-}" +MAGENTO_VERSION="${ROLL_PARAMS[1]:-$DEFAULT_MAGENTO_VERSION}" +TARGET_DIR="${ROLL_PARAMS[2]:-}" + +# Function to display usage information +show_usage() { + echo -e "\033[33mUsage:\033[0m" + echo " roll magento2-init [magento_version] [target_directory]" + echo "" + echo -e "\033[33mArguments:\033[0m" + echo " project_name Name of the Magento 2 project" + echo " magento_version Magento version to install (default: 2.4.x)" + echo " Supports: 2.4.6+, 2.4.7, 2.4.7-p3, 2.4.8, etc." 
+ echo " Minimum supported version: 2.4.6" + echo " target_directory Directory to create project in (default: current directory)" + echo "" + echo -e "\033[33mExamples:\033[0m" + echo " roll magento2-init myproject" + echo " roll magento2-init myproject 2.4.7" + echo " roll magento2-init myproject 2.4.7-p3" + echo " roll magento2-init myproject 2.4.8" + echo " roll magento2-init myproject 2.4.x ~/Sites/myproject" + exit 1 +} + +# Validate project name +if [ -z "${PROJECT_NAME}" ]; then + echo -e "\033[31mError: Project name is required.\033[0m" + show_usage +fi + +# Validate project name format +if [[ ! "${PROJECT_NAME}" =~ ^[a-z0-9][a-z0-9-]*[a-z0-9]$|^[a-z0-9]$ ]]; then + echo -e "\033[31mError: Project name should contain only lowercase letters, numbers, and hyphens.\033[0m" + echo -e "\033[31mIt should start and end with a letter or number.\033[0m" + exit 1 +fi + +# Validate Magento version format and minimum version (2.4.6+) +if [[ ! "${MAGENTO_VERSION}" =~ ^2\.4(\.[6-9x]+)?(-p[0-9]+)?$ ]] && [[ ! 
"${MAGENTO_VERSION}" =~ ^2\.[5-9](\.[0-9x]+)?(-p[0-9]+)?$ ]]; then + echo -e "\033[31mError: Invalid Magento version format.\033[0m" + echo -e "\033[31mSupported formats: 2.4.6+, 2.4.7, 2.4.7-p3, 2.4.8, etc.\033[0m" + exit 1 +fi + +# Check minimum version requirement (2.4.6+) +if [[ "${MAGENTO_VERSION}" =~ ^2\.4\.([0-5])($|-p) ]]; then + echo -e "\033[31mError: Magento version ${MAGENTO_VERSION} is not supported.\033[0m" + echo -e "\033[31mMinimum supported version is 2.4.6\033[0m" + echo -e "\033[33mFor older Magento versions, please use manual installation or upgrade to 2.4.6+\033[0m" + exit 1 +fi + +# Function to get compatible software versions based on Magento version (2.4.6+ only) +get_software_versions() { + local magento_version="$1" + local base_version + local patch_version="" + + # Extract base version and patch version + if [[ "${magento_version}" =~ ^([0-9]+\.[0-9]+\.[0-9x]+)(-p([0-9]+))?$ ]]; then + base_version="${BASH_REMATCH[1]}" + patch_version="${BASH_REMATCH[3]:-0}" + else + base_version="${magento_version}" + patch_version="0" + fi + + # Set default values for 2.4.6+ + PHP_VERSION="8.2" + DB_DISTRIBUTION_VERSION="10.6" + ELASTICSEARCH_VERSION="7.17" + REDIS_VERSION="7.0" + RABBITMQ_VERSION="3.9" + VARNISH_VERSION="7.1" + COMPOSER_VERSION="2" + NODE_VERSION="19" + + # Version mapping based on Magento compatibility matrix (2.4.6+ only) + case "${base_version}" in + "2.4.9"*) + PHP_VERSION="8.4" + DB_DISTRIBUTION_VERSION="11.4" + ELASTICSEARCH_VERSION="2.19" # OpenSearch + REDIS_VERSION="8" # Valkey + RABBITMQ_VERSION="4.1" + VARNISH_VERSION="7.7" + COMPOSER_VERSION="2" + ;; + "2.4.8"*) + PHP_VERSION="8.3" + DB_DISTRIBUTION_VERSION="11.4" + ELASTICSEARCH_VERSION="2.19" # OpenSearch + REDIS_VERSION="8" # Valkey + RABBITMQ_VERSION="4.1" + VARNISH_VERSION="7.7" + COMPOSER_VERSION="2" + ;; + "2.4.7"*) + PHP_VERSION="8.3" + if [[ "${patch_version}" -ge 6 ]]; then + DB_DISTRIBUTION_VERSION="10.11" + REDIS_VERSION="7.2" + VARNISH_VERSION="7.7" + 
COMPOSER_VERSION="2" + elif [[ "${patch_version}" -ge 3 ]]; then + DB_DISTRIBUTION_VERSION="10.6" + REDIS_VERSION="7.2" + VARNISH_VERSION="7.5" + COMPOSER_VERSION="2" + else + DB_DISTRIBUTION_VERSION="10.6" + REDIS_VERSION="7.2" + VARNISH_VERSION="7.5" + COMPOSER_VERSION="2" + fi + ELASTICSEARCH_VERSION="7.17" + RABBITMQ_VERSION="3.13" + ;; + "2.4.6"*) + PHP_VERSION="8.2" + DB_DISTRIBUTION_VERSION="10.6" + ELASTICSEARCH_VERSION="7.17" + REDIS_VERSION="7.0" + RABBITMQ_VERSION="3.9" + VARNISH_VERSION="7.1" + COMPOSER_VERSION="2" + if [[ "${patch_version}" -ge 8 ]]; then + REDIS_VERSION="7.2" + VARNISH_VERSION="7.5" + fi + ;; + "2.4.x"|"2.4"*) + # Default to latest stable versions for 2.4.x + PHP_VERSION="8.3" + DB_DISTRIBUTION_VERSION="10.6" + ELASTICSEARCH_VERSION="7.17" + REDIS_VERSION="7.2" + RABBITMQ_VERSION="3.13" + VARNISH_VERSION="7.5" + COMPOSER_VERSION="2" + ;; + esac + + echo -e "\033[33mConfigured software versions for Magento ${magento_version}:\033[0m" + echo -e " PHP: ${PHP_VERSION}" + echo -e " MariaDB: ${DB_DISTRIBUTION_VERSION}" + if [[ "${ELASTICSEARCH_VERSION}" == "2."* ]]; then + echo -e " Search Engine: OpenSearch ${ELASTICSEARCH_VERSION}" + else + echo -e " Search Engine: Elasticsearch ${ELASTICSEARCH_VERSION}" + fi + echo -e " Redis: ${REDIS_VERSION}" + echo -e " RabbitMQ: ${RABBITMQ_VERSION}" + echo -e " Varnish: ${VARNISH_VERSION}" + echo -e " Composer: ${COMPOSER_VERSION}" + echo -e " Node.js: ${NODE_VERSION}" +} + +# Set target directory +if [ -z "${TARGET_DIR}" ]; then + TARGET_DIR="$(pwd)/${PROJECT_NAME}" +else + # Handle relative paths and ensure absolute path + if [[ "${TARGET_DIR}" != /* ]]; then + TARGET_DIR="$(pwd)/${TARGET_DIR}" + fi + TARGET_DIR="${TARGET_DIR}/${PROJECT_NAME}" +fi + +echo -e "\033[32mInitializing Magento 2 project: ${PROJECT_NAME}\033[0m" +echo -e "\033[32mMagento version: ${MAGENTO_VERSION}\033[0m" +echo -e "\033[32mTarget directory: ${TARGET_DIR}\033[0m" + +# Check if target directory already exists +if [ -d 
"${TARGET_DIR}" ]; then + echo -e "\033[31mError: Directory ${TARGET_DIR} already exists.\033[0m" + exit 1 +fi + +# Create project directory +echo -e "\033[36m[1/10] Creating project directory...\033[0m" +mkdir -p "${TARGET_DIR}" +cd "${TARGET_DIR}" + +# Get compatible software versions for this Magento version +echo -e "\033[36m[2/10] Determining compatible software versions...\033[0m" +get_software_versions "${MAGENTO_VERSION}" + +# Initialize environment +echo -e "\033[36m[3/10] Initializing environment configuration...\033[0m" +"${ROLL_DIR}/bin/roll" env-init "${PROJECT_NAME}" magento2 + +# Update .env.roll with version-specific software versions +echo -e "\033[36m[4/10] Updating environment with compatible software versions...\033[0m" +ENV_FILE="${TARGET_DIR}/.env.roll" + +# Update software versions in .env.roll file +sed -i.bak "s/^PHP_VERSION=.*/PHP_VERSION=${PHP_VERSION}/" "${ENV_FILE}" +sed -i.bak "s/^DB_DISTRIBUTION_VERSION=.*/DB_DISTRIBUTION_VERSION=${DB_DISTRIBUTION_VERSION}/" "${ENV_FILE}" +sed -i.bak "s/^COMPOSER_VERSION=.*/COMPOSER_VERSION=${COMPOSER_VERSION}/" "${ENV_FILE}" +sed -i.bak "s/^NODE_VERSION=.*/NODE_VERSION=${NODE_VERSION}/" "${ENV_FILE}" +sed -i.bak "s/^RABBITMQ_VERSION=.*/RABBITMQ_VERSION=${RABBITMQ_VERSION}/" "${ENV_FILE}" +sed -i.bak "s/^VARNISH_VERSION=.*/VARNISH_VERSION=${VARNISH_VERSION}/" "${ENV_FILE}" + +# Handle search engine configuration (OpenSearch vs Elasticsearch) +if [[ "${ELASTICSEARCH_VERSION}" == "2."* ]]; then + # Use OpenSearch for newer Magento versions (2.4.8+) + OPENSEARCH_VERSION="${ELASTICSEARCH_VERSION}" + sed -i.bak "s/^ROLL_ELASTICSEARCH=.*/ROLL_ELASTICSEARCH=0/" "${ENV_FILE}" + sed -i.bak "s/^ROLL_OPENSEARCH=.*/ROLL_OPENSEARCH=1/" "${ENV_FILE}" + # Set OpenSearch version + if ! 
grep -q "^OPENSEARCH_VERSION=" "${ENV_FILE}"; then + echo "OPENSEARCH_VERSION=${OPENSEARCH_VERSION}" >> "${ENV_FILE}" + else + sed -i.bak "s/^OPENSEARCH_VERSION=.*/OPENSEARCH_VERSION=${OPENSEARCH_VERSION}/" "${ENV_FILE}" + fi + # Keep Elasticsearch version for compatibility, but disabled + sed -i.bak "s/^ELASTICSEARCH_VERSION=.*/ELASTICSEARCH_VERSION=7.17/" "${ENV_FILE}" + # Set actual search engine version for configuration + ELASTICSEARCH_VERSION="7.17" # Fallback version + echo -e " OpenSearch: ${OPENSEARCH_VERSION} (primary)" + echo -e " Elasticsearch: ${ELASTICSEARCH_VERSION} (fallback)" +else + # Use Elasticsearch for older versions + sed -i.bak "s/^ROLL_ELASTICSEARCH=.*/ROLL_ELASTICSEARCH=1/" "${ENV_FILE}" + sed -i.bak "s/^ROLL_OPENSEARCH=.*/ROLL_OPENSEARCH=0/" "${ENV_FILE}" + sed -i.bak "s/^ELASTICSEARCH_VERSION=.*/ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION}/" "${ENV_FILE}" + # Ensure OpenSearch is disabled + if grep -q "^OPENSEARCH_VERSION=" "${ENV_FILE}"; then + sed -i.bak "/^OPENSEARCH_VERSION=/d" "${ENV_FILE}" + fi +fi + +# Handle Redis vs Valkey (Valkey for version 8+) +if [[ "${REDIS_VERSION}" == "8" ]]; then + # Use Valkey (Redis fork) for version 8 + sed -i.bak "s/^ROLL_REDIS=.*/ROLL_REDIS=0/" "${ENV_FILE}" + sed -i.bak "s/^ROLL_DRAGONFLY=.*/ROLL_DRAGONFLY=1/" "${ENV_FILE}" + # Add Dragonfly/Valkey version if not present + if ! 
grep -q "^DRAGONFLY_VERSION=" "${ENV_FILE}"; then + echo "DRAGONFLY_VERSION=${REDIS_VERSION}" >> "${ENV_FILE}" + else + sed -i.bak "s/^DRAGONFLY_VERSION=.*/DRAGONFLY_VERSION=${REDIS_VERSION}/" "${ENV_FILE}" + fi +else + # Use Redis for traditional versions + sed -i.bak "s/^ROLL_REDIS=.*/ROLL_REDIS=1/" "${ENV_FILE}" + sed -i.bak "s/^ROLL_DRAGONFLY=.*/ROLL_DRAGONFLY=0/" "${ENV_FILE}" + sed -i.bak "s/^REDIS_VERSION=.*/REDIS_VERSION=${REDIS_VERSION}/" "${ENV_FILE}" +fi + +# Clean up backup file +rm -f "${ENV_FILE}.bak" + +# Sign SSL certificate +echo -e "\033[36m[5/10] Signing SSL certificate...\033[0m" +"${ROLL_DIR}/bin/roll" sign-certificate "${PROJECT_NAME}.test" + +# Start environment +echo -e "\033[36m[6/10] Starting project environment...\033[0m" +"${ROLL_DIR}/bin/roll" env up + +# Wait for services to be ready +echo -e "\033[36m[7/10] Waiting for services to be ready...\033[0m" +echo -e "\033[33mChecking service status...\033[0m" + +# Wait for database to be ready +echo -n "Waiting for database... " +timeout=60 +while [ $timeout -gt 0 ]; do + if "${ROLL_DIR}/bin/roll" db connect -e "SELECT 1;" >/dev/null 2>&1; then + echo "✅ Ready" + break + fi + echo -n "." + sleep 2 + timeout=$((timeout-2)) +done + +if [ $timeout -le 0 ]; then + echo "❌ Database not ready after 60 seconds" + exit 1 +fi + +# Wait for search engine to be ready +echo -n "Waiting for search engine... 
" +timeout=60 +SEARCH_HOST="elasticsearch" +SEARCH_PORT="9200" + +# Determine search engine host based on configuration +# Check environment variables to see which service is actually enabled +if grep -q "^ROLL_OPENSEARCH=1" "${ENV_FILE}" 2>/dev/null; then + SEARCH_HOST="opensearch" + echo -n "(OpenSearch) " +else + echo -n "(Elasticsearch) " +fi + +while [ $timeout -gt 0 ]; do + if "${ROLL_DIR}/bin/roll" cli bash -c "timeout 5 bash -c '/dev/null; then + # Double check with HTTP request if port is open + if "${ROLL_DIR}/bin/roll" cli curl -f -s "http://${SEARCH_HOST}:${SEARCH_PORT}/_cluster/health" >/dev/null 2>&1; then + echo "✅ Ready" + break + fi + fi + echo -n "." + sleep 2 + timeout=$((timeout-2)) +done + +if [ $timeout -le 0 ]; then + echo "❌ Search engine not ready after 60 seconds" + echo "Debug: Checking ${SEARCH_HOST}:${SEARCH_PORT}" + "${ROLL_DIR}/bin/roll" cli bash -c "timeout 5 bash -c '&1 || echo "Port not accessible" + echo "Tip: Make sure ${SEARCH_HOST} service is running with 'roll env up'" + exit 1 +fi + +# Wait for Redis to be ready +echo -n "Waiting for Redis... " +timeout=30 +while [ $timeout -gt 0 ]; do + if "${ROLL_DIR}/bin/roll" redis ping 2>/dev/null | grep -q PONG; then + echo "✅ Ready" + break + fi + echo -n "." 
+ sleep 2 + timeout=$((timeout-2)) +done + +if [ $timeout -le 0 ]; then + echo "❌ Redis not ready after 30 seconds" + exit 1 +fi + +echo -e "\033[32m✅ All services are ready!\033[0m" + +# Drop into shell for setup +echo -e "\033[36m[8/12] Setting up Magento project files...\033[0m" + +# Check if composer global auth is configured +echo -e "\033[33mNote: This process requires Magento Marketplace credentials.\033[0m" +echo -e "\033[33mIf you haven't configured them globally, you'll be prompted during composer install.\033[0m" + +# Meta package for Magento 2.4.6+ +META_PACKAGE="magento/project-community-edition" + +# Create project using composer inside container +"${ROLL_DIR}/bin/roll" cli bash -c " + set -e + + echo 'Creating Magento project with composer...' + composer create-project --repository-url=https://repo.magento.com/ \\ + '${META_PACKAGE}' /tmp/${PROJECT_NAME} '${MAGENTO_VERSION}' + + echo 'Moving files to web root...' + rsync -a /tmp/${PROJECT_NAME}/ /var/www/html/ + rm -rf /tmp/${PROJECT_NAME}/ + + echo 'Setting proper file permissions...' + find /var/www/html -type f -exec chmod 644 {} \\; + find /var/www/html -type d -exec chmod 755 {} \\; + chmod u+x /var/www/html/bin/magento +" + +# Apply Magento 2.4.4 patch for ReflectionUnionType::getName() error +if [[ "${MAGENTO_VERSION}" == "2.4.4"* ]]; then + echo -e "\033[36m[8.5/12] Applying Magento 2.4.4 patches...\033[0m" + echo -e "\033[33m🔧 Detected Magento 2.4.4 - applying ACSD-59280 patch for ReflectionUnionType issue\033[0m" + + "${ROLL_DIR}/bin/roll" cli bash -c " + set -e + + echo 'Installing Quality Patches Tool...' + if ! composer show magento/quality-patches >/dev/null 2>&1; then + composer require magento/quality-patches --no-update + composer update magento/quality-patches --no-dev + fi + + echo 'Checking available patches...' + if vendor/bin/magento-patches status | grep -q 'ACSD-59280'; then + echo 'Applying ACSD-59280 patch for ReflectionUnionType issue...' 
+ vendor/bin/magento-patches apply ACSD-59280 || echo 'Patch may already be applied or not needed' + else + echo 'ACSD-59280 patch not found, may not be needed for this version' + fi + + echo 'Clearing generated code after patching...' + rm -rf generated/metadata generated/code var/generation + " + + echo -e "\033[32m✅ Magento 2.4.4 patches applied successfully\033[0m" +fi + +echo -e "\033[36m[9/12] Installing Magento application...\033[0m" + +# Determine search engine parameters based on version (2.4.6+ only) +echo -e "\033[33mConfiguring search engine parameters...\033[0m" + +# Determine search engine type based on environment configuration +if grep -q "^ROLL_OPENSEARCH=1" "${ENV_FILE}" 2>/dev/null; then + # OpenSearch configuration for Magento 2.4.8+ + OPENSEARCH_VER=$(grep "^OPENSEARCH_VERSION=" "${ENV_FILE}" | cut -d'=' -f2 || echo "2.19") + SEARCH_ENGINE_PARAMS=" + --search-engine=opensearch \\ + --opensearch-host=opensearch \\ + --opensearch-port=9200 \\ + --opensearch-index-prefix=magento2 \\ + --opensearch-enable-auth=0 \\ + --opensearch-timeout=15" + echo -e "\033[33mUsing OpenSearch ${OPENSEARCH_VER} for Magento 2.4.8+\033[0m" +else + # Elasticsearch configuration for Magento 2.4.6-2.4.7 + SEARCH_ENGINE_PARAMS=" + --search-engine=elasticsearch7 \\ + --elasticsearch-host=elasticsearch \\ + --elasticsearch-port=9200 \\ + --elasticsearch-index-prefix=magento2 \\ + --elasticsearch-enable-auth=0 \\ + --elasticsearch-timeout=15" + echo -e "\033[33mUsing Elasticsearch ${ELASTICSEARCH_VERSION}\033[0m" +fi + +# Debug: Show search engine parameters +echo -e "\033[33mSearch engine parameters:\033[0m" +echo "${SEARCH_ENGINE_PARAMS}" + +# Install Magento with fallback mechanism +echo -e "\033[33mAttempting Magento installation with configured search engine...\033[0m" + +# Build the installation command based on search engine type +if grep -q "^ROLL_OPENSEARCH=1" "${ENV_FILE}" 2>/dev/null; then + # OpenSearch installation command + INSTALL_COMMAND="bin/magento 
setup:install \\ + --backend-frontname=shopmanager \\ + --amqp-host=rabbitmq \\ + --amqp-port=5672 \\ + --amqp-user=guest \\ + --amqp-password=guest \\ + --db-host=db \\ + --db-name=magento \\ + --db-user=magento \\ + --db-password=magento \\ + --search-engine=opensearch \\ + --opensearch-host=opensearch \\ + --opensearch-port=9200 \\ + --opensearch-index-prefix=magento2 \\ + --opensearch-enable-auth=0 \\ + --opensearch-timeout=15 \\ + --http-cache-hosts=varnish:80 \\ + --session-save=redis \\ + --session-save-redis-host=redis \\ + --session-save-redis-port=6379 \\ + --session-save-redis-db=2 \\ + --session-save-redis-max-concurrency=20 \\ + --cache-backend=redis \\ + --cache-backend-redis-server=redis \\ + --cache-backend-redis-db=0 \\ + --cache-backend-redis-port=6379 \\ + --page-cache=redis \\ + --page-cache-redis-server=redis \\ + --page-cache-redis-db=1 \\ + --page-cache-redis-port=6379" +else + # Elasticsearch installation command + INSTALL_COMMAND="bin/magento setup:install \\ + --backend-frontname=shopmanager \\ + --amqp-host=rabbitmq \\ + --amqp-port=5672 \\ + --amqp-user=guest \\ + --amqp-password=guest \\ + --db-host=db \\ + --db-name=magento \\ + --db-user=magento \\ + --db-password=magento \\ + --search-engine=elasticsearch7 \\ + --elasticsearch-host=elasticsearch \\ + --elasticsearch-port=9200 \\ + --elasticsearch-index-prefix=magento2 \\ + --elasticsearch-enable-auth=0 \\ + --elasticsearch-timeout=15 \\ + --http-cache-hosts=varnish:80 \\ + --session-save=redis \\ + --session-save-redis-host=redis \\ + --session-save-redis-port=6379 \\ + --session-save-redis-db=2 \\ + --session-save-redis-max-concurrency=20 \\ + --cache-backend=redis \\ + --cache-backend-redis-server=redis \\ + --cache-backend-redis-db=0 \\ + --cache-backend-redis-port=6379 \\ + --page-cache=redis \\ + --page-cache-redis-server=redis \\ + --page-cache-redis-db=1 \\ + --page-cache-redis-port=6379" +fi + +if ! 
"${ROLL_DIR}/bin/roll" cli bash -c " + set -e + + echo 'Installing Magento application...' + echo 'Search engine parameters:' + echo '${SEARCH_ENGINE_PARAMS}' + ${INSTALL_COMMAND} +"; then + echo -e "\033[33m⚠️ Primary search engine installation failed, trying fallback to Elasticsearch...\033[0m" + + # Fallback to Elasticsearch 7 + FALLBACK_COMMAND="bin/magento setup:install \\ + --backend-frontname=shopmanager \\ + --amqp-host=rabbitmq \\ + --amqp-port=5672 \\ + --amqp-user=guest \\ + --amqp-password=guest \\ + --db-host=db \\ + --db-name=magento \\ + --db-user=magento \\ + --db-password=magento \\ + --search-engine=elasticsearch7 \\ + --elasticsearch-host=elasticsearch \\ + --elasticsearch-port=9200 \\ + --elasticsearch-index-prefix=magento2 \\ + --elasticsearch-enable-auth=0 \\ + --elasticsearch-timeout=15 \\ + --http-cache-hosts=varnish:80 \\ + --session-save=redis \\ + --session-save-redis-host=redis \\ + --session-save-redis-port=6379 \\ + --session-save-redis-db=2 \\ + --session-save-redis-max-concurrency=20 \\ + --cache-backend=redis \\ + --cache-backend-redis-server=redis \\ + --cache-backend-redis-db=0 \\ + --cache-backend-redis-port=6379 \\ + --page-cache=redis \\ + --page-cache-redis-server=redis \\ + --page-cache-redis-db=1 \\ + --page-cache-redis-port=6379" + + "${ROLL_DIR}/bin/roll" cli bash -c " + set -e + + echo 'Retrying with Elasticsearch 7 fallback...' + ${FALLBACK_COMMAND} + " + + echo -e "\033[32m✅ Installation completed with Elasticsearch fallback\033[0m" + USED_FALLBACK=1 +else + echo -e "\033[32m✅ Installation completed with configured search engine\033[0m" + USED_FALLBACK=0 +fi + +echo -e "\033[36m[10/12] Configuring Magento application...\033[0m" + +# Configure Magento +"${ROLL_DIR}/bin/roll" cli bash -c " + set -e + + echo 'Configuring base URLs...' 
+ bin/magento config:set --lock-env web/unsecure/base_url \\ + \"https://app.${PROJECT_NAME}.test/\" + + bin/magento config:set --lock-env web/secure/base_url \\ + \"https://app.${PROJECT_NAME}.test/\" + + bin/magento config:set --lock-env web/secure/offloader_header X-Forwarded-Proto + bin/magento config:set --lock-env web/secure/use_in_frontend 1 + bin/magento config:set --lock-env web/secure/use_in_adminhtml 1 + bin/magento config:set --lock-env web/seo/use_rewrites 1 + + echo 'Configuring cache settings...' + bin/magento config:set --lock-env system/full_page_cache/caching_application 2 + bin/magento config:set --lock-env system/full_page_cache/ttl 604800 + bin/magento config:set --lock-env catalog/search/enable_eav_indexer 1 + bin/magento config:set --lock-env dev/static/sign 0 + + echo 'Setting developer mode...' + bin/magento deploy:mode:set -s developer + bin/magento cache:disable block_html full_page +" + +# Configure search engine post-installation for OpenSearch fallback scenarios +if [[ "${USED_FALLBACK}" == "1" ]] && grep -q "^ROLL_OPENSEARCH=1" "${ENV_FILE}" 2>/dev/null; then + echo -e "\033[36mInstallation used Elasticsearch fallback, but OpenSearch is configured for runtime...\033[0m" + echo -e "\033[33m📝 Note: You can manually configure OpenSearch later using:\033[0m" + echo -e "\033[33m bin/magento config:set catalog/search/engine opensearch\033[0m" + echo -e "\033[33m bin/magento config:set catalog/search/opensearch_server_hostname opensearch\033[0m" + echo -e "\033[33m bin/magento config:set catalog/search/opensearch_server_port 9200\033[0m" +fi + +echo -e "\033[36m[11/12] Running initial indexing...\033[0m" +"${ROLL_DIR}/bin/roll" cli bash -c " + bin/magento indexer:reindex + bin/magento cache:flush +" + +echo -e "\033[36m[11/12] Creating admin user and configuring 2FA...\033[0m" + +# Generate admin user and 2FA setup for Magento 2.4.6+ (all supported versions require 2FA) +"${ROLL_DIR}/bin/roll" cli bash -c " + set -e + + # Generate admin 
credentials + ADMIN_PASS=\"\$(pwgen -n1 16)\" + ADMIN_USER=admin + + echo 'Creating admin user...' + bin/magento admin:user:create \\ + --admin-password=\"\${ADMIN_PASS}\" \\ + --admin-user=\"\${ADMIN_USER}\" \\ + --admin-firstname=\"Local\" \\ + --admin-lastname=\"Admin\" \\ + --admin-email=\"\${ADMIN_USER}@example.com\" + + echo \"Admin Username: \${ADMIN_USER}\" + echo \"Admin Password: \${ADMIN_PASS}\" + + # Configure 2FA + echo 'Configuring 2FA...' + TFA_SECRET=\$(python3 -c \"import base64; print(base64.b32encode('\$(pwgen -A1 128)'.encode()).decode().strip('='))\") + OTPAUTH_URL=\$(printf \"otpauth://totp/%s%%3Alocaladmin%%40example.com?issuer=%s&secret=%s\" \\ + \"app.${PROJECT_NAME}.test\" \"app.${PROJECT_NAME}.test\" \"\${TFA_SECRET}\" + ) + + bin/magento config:set --lock-env twofactorauth/general/force_providers google + bin/magento security:tfa:google:set-secret \"\${ADMIN_USER}\" \"\${TFA_SECRET}\" + + echo \"2FA Setup URL: \${OTPAUTH_URL}\" + echo \"2FA Backup Codes:\" + oathtool -s 30 -w 10 --totp --base32 \"\${TFA_SECRET}\" + + # Generate QR code + segno \"\${OTPAUTH_URL}\" -s 4 -o \"pub/media/\${ADMIN_USER}-totp-qr.png\" + QR_URL=\"https://app.${PROJECT_NAME}.test/media/\${ADMIN_USER}-totp-qr.png?t=\$(date +%s)\" + echo \"QR Code URL: \${QR_URL}\" + + # Save credentials to file for user reference + cat > /var/www/html/admin-credentials.txt << EOL +Magento Admin Credentials +======================== +Username: \${ADMIN_USER} +Password: \${ADMIN_PASS} +2FA Setup URL: \${OTPAUTH_URL} +QR Code URL: \${QR_URL} + +Admin Panel: https://app.${PROJECT_NAME}.test/shopmanager/ +Frontend: https://app.${PROJECT_NAME}.test/ + +Generated on: \$(date) +EOL + + echo 'Admin credentials saved to admin-credentials.txt' +" + +echo -e "\033[36m[12/12] Finalizing setup...\033[0m" + +echo -e "\033[32m✅ Magento 2 project '${PROJECT_NAME}' has been successfully created!\033[0m" +echo "" +echo -e "\033[33m🔗 Access URLs:\033[0m" +echo -e " Frontend: 
https://app.${PROJECT_NAME}.test/" +echo -e " Admin: https://app.${PROJECT_NAME}.test/shopmanager/" +echo -e " RabbitMQ: https://rabbitmq.${PROJECT_NAME}.test/" +echo -e " Elasticsearch: https://elasticsearch.${PROJECT_NAME}.test/" +echo "" +echo -e "\033[33m📁 Project Location:\033[0m" +echo -e " ${TARGET_DIR}" +echo "" +echo -e "\033[33m🔑 Admin Credentials:\033[0m" +echo -e " Check the file: admin-credentials.txt in your project root" +echo "" +echo -e "\033[33m💡 Next Steps:\033[0m" +echo -e " 1. Navigate to your project: cd ${TARGET_DIR}" +echo -e " 2. Access the shell: roll shell" +echo -e " 3. Open your browser to: https://app.${PROJECT_NAME}.test/" + +if [[ "${USED_FALLBACK}" == "1" ]]; then + echo "" + echo -e "\033[33m⚠️ Installation Note:\033[0m" + echo -e " Installation used Elasticsearch fallback due to OpenSearch connectivity issues" + echo -e " OpenSearch is configured in your environment for future use" + echo -e " Check the manual configuration commands shown above to switch to OpenSearch" +fi + +echo "" +echo -e "\033[33m🛑 To destroy this environment:\033[0m" +echo -e " roll env down -v" +echo "" \ No newline at end of file diff --git a/commands/magento2-init.help b/commands/magento2-init.help new file mode 100755 index 0000000..a693fbc --- /dev/null +++ b/commands/magento2-init.help @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +[[ ! 
${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1
+
+ROLL_USAGE=$(cat <<EOF
+\033[33mUsage:\033[0m
+  roll magento2-init <project_name> [magento_version] [target_directory]
+
+\033[33mArguments:\033[0m
+  project_name        Name of the Magento 2 project (required)
+                      Should contain only lowercase letters, numbers, and hyphens
+                      Must start and end with a letter or number
+
+  magento_version     Magento version to install (optional, default: 2.4.x)
+                      Supports: major.minor.patch and patch versions
+                      Examples: 2.4.x, 2.4.7, 2.4.7-p3, 2.4.6-p2, 2.3.7
+
+  target_directory    Directory to create project in (optional)
+                      Default: current directory
+                      Project will be created in a subdirectory named after the project
+
+\033[33mDescription:\033[0m
+  This command scaffolds a complete Magento 2 project from scratch, including:
+
+  • Automatic compatible software version configuration
+  • Environment initialization with Docker services
+  • SSL certificate generation
+  • Magento installation via Composer
+  • Database setup and configuration
+  • Redis/Valkey and Elasticsearch/OpenSearch configuration
+  • Admin user creation with 2FA setup (for Magento 2.4.x)
+  • Developer mode configuration
+
+\033[33mExamples:\033[0m
+  # Create a new project with default Magento version (2.4.x)
+  roll magento2-init mystore
+
+  # Create a project with specific Magento version
+  roll magento2-init mystore 2.4.7
+
+  # Create a project with patch version
+  roll magento2-init mystore 2.4.7-p3
+
+  # Create a project in a specific directory
+  roll magento2-init mystore 2.4.x ~/Sites/
+
+  # Create a project with Magento 2.3.x (without mandatory 2FA)
+  roll magento2-init legacystore 2.3.7
+
+\033[33mPrerequisites:\033[0m
+  • RollDev services must be running (roll svc up)
+  • Magento Marketplace credentials configured globally:
+    composer global config http-basic.repo.magento.com
+
+\033[33mPost-Installation:\033[0m
+  After successful installation, you will have access to:
+  • Frontend: https://app.<project_name>.test/
+  • Admin Panel: 
https://app..test/shopmanager/ + • Admin credentials in admin-credentials.txt file + • 2FA QR code (for Magento 2.4.x) accessible via web interface + +\033[33mAutomatic Version Configuration:\033[0m + The command automatically configures compatible software versions based on + the Magento version you specify: + • Magento 2.4.8+: PHP 8.3+, MariaDB 11.4, OpenSearch 2.19, Valkey 8 + • Magento 2.4.7: PHP 8.3, MariaDB 10.6+, Elasticsearch 7.17, Redis 7.2 + • Magento 2.4.6: PHP 8.2, MariaDB 10.6, Elasticsearch 7.17, Redis 7.0+ + • Magento 2.4.5: PHP 8.1, MariaDB 10.4, Elasticsearch 7.17, Redis 6.2+ + • Magento 2.3.x: PHP 7.4, MariaDB 10.3, Elasticsearch 7.6, Redis 5.0 + +\033[33mNote:\033[0m + This process may take several minutes depending on your internet connection + and system performance. The command will handle all setup steps automatically. + +EOF +) + +echo -e "${ROLL_USAGE}" \ No newline at end of file From e0c03b692401236412cea7f763321e1a089a2752 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 11 Jun 2025 10:11:35 +0200 Subject: [PATCH 35/69] add extensive documentation for magento2-init command, including a quick reference guide, usage examples, compatibility matrix, and troubleshooting steps --- docs/environments/magento2.md | 76 +++++ docs/index.md | 2 + docs/magento2-init-quick-reference.md | 99 +++++++ docs/magento2-init.md | 385 ++++++++++++++++++++++++++ docs/usage.md | 6 + 5 files changed, 568 insertions(+) create mode 100644 docs/magento2-init-quick-reference.md create mode 100644 docs/magento2-init.md diff --git a/docs/environments/magento2.md b/docs/environments/magento2.md index af4c6d5..27fe8f6 100644 --- a/docs/environments/magento2.md +++ b/docs/environments/magento2.md @@ -1,5 +1,81 @@ # Installing Magento 2 +## Quick Start with `magento2-init` (Recommended) + +RollDev 3.2+ provides an automated `magento2-init` command that handles the complete Magento 2 setup process in a single command. This is the recommended approach for new projects. 
+
+### Basic Usage
+
+```bash
+roll magento2-init <project-name> [magento_version]
+```
+
+### Examples
+
+```bash
+# Create project with latest Magento version
+roll magento2-init mystore
+
+# Create project with specific version
+roll magento2-init mystore 2.4.7
+
+# Create project with patch version
+roll magento2-init mystore 2.4.7-p3
+
+# Create project with OpenSearch (Magento 2.4.8+)
+roll magento2-init mystore 2.4.8
+```
+
+### What it Does
+
+The `magento2-init` command automates all the manual steps listed below:
+
+1. **Environment Setup**: Creates `.env.roll` with optimized configuration for the specified Magento version
+2. **Version Compatibility**: Automatically configures compatible PHP, MariaDB, Elasticsearch/OpenSearch, Redis, RabbitMQ, and Composer versions
+3. **SSL Certificate**: Generates and signs SSL certificate for local development
+4. **Service Startup**: Starts all required Docker services (database, search, cache, etc.)
+5. **Magento Installation**: Downloads and installs Magento via Composer
+6. **Database Configuration**: Sets up database, Redis, and search engine connections
+7. **Admin User**: Creates admin user with 2FA setup (for Magento 2.4.x)
+8. 
**Developer Mode**: Configures development-optimized settings + +### Software Version Matrix + +The command automatically selects compatible software versions: + +| Magento Version | PHP | MariaDB | Search Engine | Redis | RabbitMQ | Varnish | +|-----------------|-----|---------|---------------|-------|----------|---------| +| 2.4.8+ | 8.3 | 11.4 | OpenSearch 2.19 | Valkey 8 | 4.1 | 7.7 | +| 2.4.7 | 8.3 | 10.6+ | Elasticsearch 7.17 | Redis 7.2 | 3.13 | 7.5+ | +| 2.4.6 | 8.2 | 10.6 | Elasticsearch 7.17 | Redis 7.0+ | 3.9 | 7.1+ | + +### Prerequisites + +- RollDev services running: `roll svc up` +- Magento Marketplace credentials configured globally: + ```bash + composer global config http-basic.repo.magento.com + ``` + +### Post-Installation Access + +After successful installation: + +- **Frontend**: `https://app..test/` +- **Admin Panel**: `https://app..test/shopmanager/` +- **Admin Credentials**: Check `admin-credentials.txt` in project root +- **2FA QR Code**: Available via web interface for easy mobile setup + +### OpenSearch vs Elasticsearch + +For Magento 2.4.8+, the command automatically configures OpenSearch. If OpenSearch setup fails, it automatically falls back to Elasticsearch 7.17 with instructions for manual OpenSearch configuration. + +--- + +## Manual Installation (Alternative) + +If you prefer manual setup or need custom configuration, follow the detailed steps below. + The below example demonstrates the from-scratch setup of the Magento 2 application for local development. A similar process can easily be used to configure an environment of any other type. This assumes that RollDev has been previously started via `roll svc up` as part of the installation procedure. 1. 
Create a new directory on your host machine at the location of your choice and then jump into the new directory to get started: diff --git a/docs/index.md b/docs/index.md index 7ca8292..d9fc478 100644 --- a/docs/index.md +++ b/docs/index.md @@ -28,6 +28,8 @@ caption: Getting Started installing services +magento2-init +magento2-init-quick-reference usage duplicate backup-restore diff --git a/docs/magento2-init-quick-reference.md b/docs/magento2-init-quick-reference.md new file mode 100644 index 0000000..1e8b38c --- /dev/null +++ b/docs/magento2-init-quick-reference.md @@ -0,0 +1,99 @@ +# Magento 2 Init - Quick Reference + +## Basic Commands + +```bash +# Create project with latest version +roll magento2-init mystore + +# Create with specific version +roll magento2-init mystore 2.4.7 + +# Create with patch version +roll magento2-init mystore 2.4.7-p3 + +# Create with OpenSearch (2.4.8+) +roll magento2-init mystore 2.4.8 + +# Create in custom directory +roll magento2-init mystore 2.4.7 ~/Sites/ +``` + +## Prerequisites Checklist + +- [ ] RollDev services running: `roll svc up` +- [ ] Magento credentials configured: + ```bash + composer global config http-basic.repo.magento.com + ``` + +## Post-Installation URLs + +| Service | URL | +|---------|-----| +| Frontend | `https://app..test/` | +| Admin | `https://app..test/shopmanager/` | +| RabbitMQ | `https://rabbitmq..test/` | +| Search | `https://elasticsearch..test/` | + +## Version Matrix (Auto-Selected) + +| Magento | PHP | MariaDB | Search | Redis | RabbitMQ | +|---------|-----|---------|--------|-------|----------| +| 2.4.8+ | 8.3 | 11.4 | OpenSearch 2.19 | Valkey 8 | 4.1 | +| 2.4.7 | 8.3 | 10.6+ | Elasticsearch 7.17 | Redis 7.2 | 3.13 | +| 2.4.6 | 8.2 | 10.6 | Elasticsearch 7.17 | Redis 7.0+ | 3.9 | + +## Common Post-Install Tasks + +```bash +# Enter project directory +cd + +# Access shell +roll shell + +# Check admin credentials +cat admin-credentials.txt + +# Run Magento commands +roll cli bin/magento cache:flush 
+roll cli bin/magento indexer:reindex +``` + +## Troubleshooting + +```bash +# Check service status +roll env ps + +# View logs +roll env logs --tail 50 + +# Restart services +roll env restart + +# Test connectivity +roll db connect -e "SELECT 1;" +roll redis ping +``` + +## OpenSearch Manual Config (if needed) + +```bash +roll shell +bin/magento config:set catalog/search/engine opensearch +bin/magento config:set catalog/search/opensearch_server_hostname opensearch +bin/magento config:set catalog/search/opensearch_server_port 9200 +bin/magento indexer:reindex catalogsearch_fulltext +``` + +## Help + +```bash +# Command help +roll magento2-init --help + +# Full documentation +https://rolldev.readthedocs.io/magento2-init/ +``` \ No newline at end of file diff --git a/docs/magento2-init.md b/docs/magento2-init.md new file mode 100644 index 0000000..9091452 --- /dev/null +++ b/docs/magento2-init.md @@ -0,0 +1,385 @@ +# Magento 2 Project Initialization + +## Overview + +The `magento2-init` command provides a fully automated way to scaffold new Magento 2 projects from scratch. Introduced in RollDev 3.2, this command eliminates the need for manual setup steps and ensures consistent, optimized project configurations. 
+ +## Quick Start + +```bash +# Create a new Magento 2 project with default settings +roll magento2-init mystore + +# Create with specific Magento version +roll magento2-init mystore 2.4.7-p3 + +# Create in specific directory +roll magento2-init mystore 2.4.8 ~/Sites/ +``` + +## Command Syntax + +```bash +roll magento2-init [magento_version] [target_directory] +``` + +### Parameters + +| Parameter | Required | Description | Default | +|-----------|----------|-------------|---------| +| `project_name` | Yes | Name of the project (lowercase, alphanumeric, hyphens only) | - | +| `magento_version` | No | Magento version to install | `2.4.x` (latest) | +| `target_directory` | No | Directory to create project in | Current directory | + +### Supported Magento Versions + +- **2.4.6+** (minimum supported) +- **2.4.7** and patch versions (`2.4.7-p1`, `2.4.7-p3`, etc.) +- **2.4.8+** with OpenSearch support +- **2.4.x** for latest stable version + +## Automated Setup Process + +The command performs 12 automated steps: + +### 1. Project Directory Creation +- Creates the project directory structure +- Validates project name format + +### 2. Software Version Compatibility +- Automatically determines compatible software versions +- Configures PHP, MariaDB, search engine, Redis, RabbitMQ, Varnish versions + +### 3. Environment Initialization +- Creates `.env.roll` configuration file +- Sets up RollDev environment for Magento 2 + +### 4. Version-Specific Configuration +- Updates environment file with compatible software versions +- Configures OpenSearch for 2.4.8+ or Elasticsearch for older versions +- Sets up Redis/Valkey based on version requirements + +### 5. SSL Certificate Generation +- Creates and signs SSL certificate for `.test` domain +- Enables HTTPS for local development + +### 6. Docker Services Startup +- Starts all required Docker containers +- Database, search engine, Redis, RabbitMQ, Varnish, web server + +### 7. 
Service Health Checks +- Waits for database connectivity +- Verifies search engine cluster health +- Confirms Redis availability + +### 8. Magento Project Files +- Downloads Magento via Composer +- Uses `magento/project-community-edition` meta-package +- Sets proper file permissions + +### 9. Magento Installation +- Runs `setup:install` with optimized parameters +- Configures database, Redis, search engine connections +- Sets up RabbitMQ for message queues + +### 10. Application Configuration +- Sets base URLs for frontend and admin +- Configures SSL and security settings +- Optimizes cache and search settings + +### 11. Initial Indexing +- Runs all Magento indexers +- Flushes cache for clean start + +### 12. Admin User & 2FA Setup +- Creates admin user with random password +- Configures Two-Factor Authentication (2FA) +- Generates TOTP QR code for mobile authenticator apps + +## Software Compatibility Matrix + +The command automatically configures compatible software versions based on the Magento version: + +### Magento 2.4.8+ +- **PHP**: 8.3 +- **Database**: MariaDB 11.4 +- **Search**: OpenSearch 2.19 (with Elasticsearch 7.17 fallback) +- **Cache**: Valkey 8 (Redis fork) +- **Queue**: RabbitMQ 4.1 +- **HTTP Cache**: Varnish 7.7 +- **Package Manager**: Composer 2 +- **JavaScript**: Node.js 19 + +### Magento 2.4.7 +- **PHP**: 8.3 +- **Database**: MariaDB 10.6+ (10.11 for p6+) +- **Search**: Elasticsearch 7.17 +- **Cache**: Redis 7.2 +- **Queue**: RabbitMQ 3.13 +- **HTTP Cache**: Varnish 7.5+ (7.7 for p6+) +- **Package Manager**: Composer 2 +- **JavaScript**: Node.js 19 + +### Magento 2.4.6 +- **PHP**: 8.2 +- **Database**: MariaDB 10.6 +- **Search**: Elasticsearch 7.17 +- **Cache**: Redis 7.0+ (7.2 for p8+) +- **Queue**: RabbitMQ 3.9 +- **HTTP Cache**: Varnish 7.1+ (7.5 for p8+) +- **Package Manager**: Composer 2 +- **JavaScript**: Node.js 19 + +## OpenSearch Support + +For Magento 2.4.8 and later versions, the command automatically configures OpenSearch as the 
primary search engine: + +### Automatic Configuration +- Sets `ROLL_OPENSEARCH=1` in environment +- Configures OpenSearch version 2.19 +- Uses `opensearch` hostname for connections + +### Fallback Mechanism +If OpenSearch installation fails: +- Automatically falls back to Elasticsearch 7.17 +- Provides manual configuration instructions +- Maintains full functionality with fallback + +### Manual OpenSearch Configuration +To manually switch to OpenSearch after installation: + +```bash +roll shell +bin/magento config:set catalog/search/engine opensearch +bin/magento config:set catalog/search/opensearch_server_hostname opensearch +bin/magento config:set catalog/search/opensearch_server_port 9200 +bin/magento indexer:reindex catalogsearch_fulltext +``` + +## Prerequisites + +### Required Setup +1. **RollDev Services**: Must be running (`roll svc up`) +2. **Magento Marketplace Credentials**: Configure globally: + ```bash + composer global config http-basic.repo.magento.com + ``` + +### Magento Marketplace Authentication +To obtain credentials: +1. Visit [Magento Marketplace](https://marketplace.magento.com/) +2. Go to My Profile → Access Keys +3. Generate new Access Key +4. Use **Public Key** as username and **Private Key** as password + +## Post-Installation + +### Access URLs +After successful installation, your project will be available at: + +- **Frontend**: `https://app..test/` +- **Admin Panel**: `https://app..test/shopmanager/` +- **RabbitMQ Management**: `https://rabbitmq..test/` +- **Elasticsearch/OpenSearch**: `https://elasticsearch..test/` or `https://opensearch..test/` + +### Admin Credentials +Check the `admin-credentials.txt` file in your project root for: +- Admin username and password +- 2FA setup URL +- QR code URL for mobile authenticator apps +- Backup codes for emergency access + +### 2FA Setup +1. Open the QR code URL in your browser +2. Scan with authenticator app (Google Authenticator, Authy, etc.) +3. 
Use generated codes to log into admin panel + +## Examples + +### Basic Project Creation +```bash +# Create with latest Magento version +roll magento2-init mystore +cd mystore +``` + +### Specific Version +```bash +# Create with Magento 2.4.7 +roll magento2-init ecommerce-site 2.4.7 +cd ecommerce-site +``` + +### Patch Version +```bash +# Create with specific patch version +roll magento2-init secure-shop 2.4.7-p3 +cd secure-shop +``` + +### Custom Location +```bash +# Create in specific directory +roll magento2-init client-project 2.4.8 ~/Sites/clients/ +cd ~/Sites/clients/client-project +``` + +### OpenSearch Project +```bash +# Create with OpenSearch (2.4.8+) +roll magento2-init modern-store 2.4.8 +cd modern-store +``` + +## Troubleshooting + +### Common Issues + +#### Composer Authentication +**Error**: `Could not authenticate package information` +**Solution**: Configure Magento Marketplace credentials: +```bash +composer global config http-basic.repo.magento.com +``` + +#### Service Connectivity +**Error**: `Database/Redis/Search engine not ready` +**Solution**: Ensure services are running: +```bash +roll env up +roll env logs +``` + +#### Permission Issues +**Error**: `Permission denied` during installation +**Solution**: Check Docker permissions and volume mounts: +```bash +roll env restart +``` + +#### Search Engine Fallback +**Warning**: `Installation used Elasticsearch fallback` +**Info**: This is normal for OpenSearch configurations that fail. The project will work with Elasticsearch. You can manually configure OpenSearch later using the provided commands. 
+ +### Debug Commands + +Check service status: +```bash +roll env ps +roll env logs --tail 50 +``` + +Verify database connectivity: +```bash +roll db connect -e "SELECT 1;" +``` + +Check search engine health: +```bash +roll cli curl -f "http://elasticsearch:9200/_cluster/health" +roll cli curl -f "http://opensearch:9200/_cluster/health" +``` + +Test Redis connection: +```bash +roll redis ping +``` + +## Environment Management + +### Starting/Stopping +```bash +# Start environment +roll env up + +# Stop environment +roll env stop + +# Restart environment +roll env restart + +# Remove environment completely +roll env down -v +``` + +### Shell Access +```bash +# Enter project shell +roll shell + +# Run single command +roll cli bin/magento cache:flush +``` + +### Database Operations +```bash +# Connect to database +roll db connect + +# Import database dump +pv dump.sql.gz | gunzip -c | roll db import + +# Export database +roll db export > backup.sql +``` + +## Performance Tips + +### Development Mode +The installation automatically sets developer mode for optimal development: +- Disables block and page cache +- Enables file-based generation +- Shows detailed error messages + +### Production Simulation +To test production-like performance: +```bash +roll shell +bin/magento deploy:mode:set production +bin/magento static:content:deploy +bin/magento indexer:reindex +``` + +### Cache Management +```bash +# Flush all caches +roll cli bin/magento cache:flush + +# Enable/disable specific caches +roll cli bin/magento cache:enable block_html +roll cli bin/magento cache:disable full_page +``` + +## Advanced Configuration + +### Custom Environment Variables +Modify `.env.roll` after installation for custom configurations: + +```bash +# Enable additional services +ROLL_BLACKFIRE=1 +ROLL_MAGEPACK=1 +ROLL_SELENIUM=1 + +# Adjust service versions +PHP_VERSION=8.4 +ELASTICSEARCH_VERSION=8.0 +``` + +### Multi-Store Setup +Configure additional domains after installation: +```bash +roll 
sign-certificate store2.test +# Configure stores in Magento admin +``` + +### Custom SSL Certificates +```bash +# Sign additional certificates +roll sign-certificate api.myproject.test +roll sign-certificate admin.myproject.test +``` + +--- + +*For more information about RollDev environments and customization, see the [Environment Types](environments/types.md) and [Customization](environments/customizing.md) documentation.* \ No newline at end of file diff --git a/docs/usage.md b/docs/usage.md index 7084c84..84edd6b 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -2,6 +2,12 @@ ## Common Commands +### Project Initialization + +Create a new Magento 2 project (automated setup): + + roll magento2-init myproject 2.4.7 + Launch a shell session within the project environment's `php-fpm` container: roll shell From 6cdd27925772fca053574ffca3088172bd234f4c Mon Sep 17 00:00:00 2001 From: github-actions Date: Wed, 11 Jun 2025 08:12:01 +0000 Subject: [PATCH 36/69] Tagged 3.2 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index 9e11b32..a3ec5a4 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.3.1 +3.2 From 76299c2296b25f0da7d6720114f0045f235ad7d0 Mon Sep 17 00:00:00 2001 From: github-actions Date: Wed, 11 Jun 2025 08:15:37 +0000 Subject: [PATCH 37/69] Tagged 0.3.2 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index a3ec5a4..d15723f 100644 --- a/version +++ b/version @@ -1 +1 @@ -3.2 +0.3.2 From e74fae086770516248332087e1391984a5529016 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Mon, 16 Jun 2025 13:40:15 +0200 Subject: [PATCH 38/69] add RedisInsight support to roll-docker-stack with configuration, environment updates, and service definition --- commands/env.cmd | 3 ++ commands/magento2-init.cmd | 36 +++++++-------------- environments/includes/redisinsight.base.yml | 20 ++++++++++++ environments/laravel/init.env | 1 + environments/magento1/init.env | 1 + 
environments/magento2/init.env | 1 + environments/shopware/init.env | 1 + environments/symfony/init.env | 1 + environments/typo3/init.env | 1 + utils/config.sh | 1 + 10 files changed, 41 insertions(+), 25 deletions(-) create mode 100644 environments/includes/redisinsight.base.yml diff --git a/commands/env.cmd b/commands/env.cmd index 30e063c..e59ee39 100644 --- a/commands/env.cmd +++ b/commands/env.cmd @@ -150,6 +150,9 @@ fi [[ ${ROLL_REDIS} -eq 1 ]] \ && appendEnvPartialIfExists "redis" +[[ ${ROLL_REDISINSIGHT} -eq 1 ]] \ + && appendEnvPartialIfExists "redisinsight" + [[ ${ROLL_DRAGONFLY} -eq 1 ]] \ && appendEnvPartialIfExists "dragonfly" diff --git a/commands/magento2-init.cmd b/commands/magento2-init.cmd index 41f80a0..a1b352a 100755 --- a/commands/magento2-init.cmd +++ b/commands/magento2-init.cmd @@ -89,7 +89,7 @@ get_software_versions() { PHP_VERSION="8.4" DB_DISTRIBUTION_VERSION="11.4" ELASTICSEARCH_VERSION="2.19" # OpenSearch - REDIS_VERSION="8" # Valkey + REDIS_VERSION="8.0" RABBITMQ_VERSION="4.1" VARNISH_VERSION="7.7" COMPOSER_VERSION="2" @@ -98,7 +98,7 @@ get_software_versions() { PHP_VERSION="8.3" DB_DISTRIBUTION_VERSION="11.4" ELASTICSEARCH_VERSION="2.19" # OpenSearch - REDIS_VERSION="8" # Valkey + REDIS_VERSION="8.0" RABBITMQ_VERSION="4.1" VARNISH_VERSION="7.7" COMPOSER_VERSION="2" @@ -216,16 +216,10 @@ if [[ "${ELASTICSEARCH_VERSION}" == "2."* ]]; then OPENSEARCH_VERSION="${ELASTICSEARCH_VERSION}" sed -i.bak "s/^ROLL_ELASTICSEARCH=.*/ROLL_ELASTICSEARCH=0/" "${ENV_FILE}" sed -i.bak "s/^ROLL_OPENSEARCH=.*/ROLL_OPENSEARCH=1/" "${ENV_FILE}" - # Set OpenSearch version - if ! 
grep -q "^OPENSEARCH_VERSION=" "${ENV_FILE}"; then - echo "OPENSEARCH_VERSION=${OPENSEARCH_VERSION}" >> "${ENV_FILE}" - else - sed -i.bak "s/^OPENSEARCH_VERSION=.*/OPENSEARCH_VERSION=${OPENSEARCH_VERSION}/" "${ENV_FILE}" - fi - # Keep Elasticsearch version for compatibility, but disabled - sed -i.bak "s/^ELASTICSEARCH_VERSION=.*/ELASTICSEARCH_VERSION=7.17/" "${ENV_FILE}" + # Replace ELASTICSEARCH_VERSION with OPENSEARCH_VERSION + sed -i.bak "s/^ELASTICSEARCH_VERSION=.*/OPENSEARCH_VERSION=${OPENSEARCH_VERSION}/" "${ENV_FILE}" # Set actual search engine version for configuration - ELASTICSEARCH_VERSION="7.17" # Fallback version + ELASTICSEARCH_VERSION="7.17" # Fallback version for installation compatibility echo -e " OpenSearch: ${OPENSEARCH_VERSION} (primary)" echo -e " Elasticsearch: ${ELASTICSEARCH_VERSION} (fallback)" else @@ -233,28 +227,20 @@ else sed -i.bak "s/^ROLL_ELASTICSEARCH=.*/ROLL_ELASTICSEARCH=1/" "${ENV_FILE}" sed -i.bak "s/^ROLL_OPENSEARCH=.*/ROLL_OPENSEARCH=0/" "${ENV_FILE}" sed -i.bak "s/^ELASTICSEARCH_VERSION=.*/ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION}/" "${ENV_FILE}" - # Ensure OpenSearch is disabled + # Ensure OpenSearch version is not present if grep -q "^OPENSEARCH_VERSION=" "${ENV_FILE}"; then sed -i.bak "/^OPENSEARCH_VERSION=/d" "${ENV_FILE}" fi fi -# Handle Redis vs Valkey (Valkey for version 8+) -if [[ "${REDIS_VERSION}" == "8" ]]; then - # Use Valkey (Redis fork) for version 8 - sed -i.bak "s/^ROLL_REDIS=.*/ROLL_REDIS=0/" "${ENV_FILE}" - sed -i.bak "s/^ROLL_DRAGONFLY=.*/ROLL_DRAGONFLY=1/" "${ENV_FILE}" - # Add Dragonfly/Valkey version if not present - if ! 
grep -q "^DRAGONFLY_VERSION=" "${ENV_FILE}"; then - echo "DRAGONFLY_VERSION=${REDIS_VERSION}" >> "${ENV_FILE}" - else - sed -i.bak "s/^DRAGONFLY_VERSION=.*/DRAGONFLY_VERSION=${REDIS_VERSION}/" "${ENV_FILE}" - fi -else - # Use Redis for traditional versions +# Handle Redis configuration (always use Redis for Magento 2) +# Magento 2 works best with traditional Redis, so we always use Redis regardless of version sed -i.bak "s/^ROLL_REDIS=.*/ROLL_REDIS=1/" "${ENV_FILE}" sed -i.bak "s/^ROLL_DRAGONFLY=.*/ROLL_DRAGONFLY=0/" "${ENV_FILE}" sed -i.bak "s/^REDIS_VERSION=.*/REDIS_VERSION=${REDIS_VERSION}/" "${ENV_FILE}" +# Ensure Dragonfly version is not present +if grep -q "^DRAGONFLY_VERSION=" "${ENV_FILE}"; then + sed -i.bak "/^DRAGONFLY_VERSION=/d" "${ENV_FILE}" fi # Clean up backup file diff --git a/environments/includes/redisinsight.base.yml b/environments/includes/redisinsight.base.yml new file mode 100644 index 0000000..31f9e01 --- /dev/null +++ b/environments/includes/redisinsight.base.yml @@ -0,0 +1,20 @@ +services: + redisinsight: + hostname: "${ROLL_ENV_NAME}-redisinsight" + image: redis/redisinsight:latest + labels: + - traefik.enable=true + - traefik.http.routers.${ROLL_ENV_NAME}-redisinsight.tls=true + - traefik.http.routers.${ROLL_ENV_NAME}-redisinsight.rule=Host(`redisinsight.${TRAEFIK_DOMAIN}`) + - traefik.http.services.${ROLL_ENV_NAME}-redisinsight.loadbalancer.server.port=5540 + - traefik.docker.network=${ROLL_ENV_NAME}_default + environment: + - RI_REDIS_HOST=${ROLL_ENV_NAME}-redis + - RI_REDIS_PORT=6379 + - RI_REDIS_ALIAS=${ROLL_ENV_NAME} Redis + - RI_DATABASE_MANAGEMENT=false + volumes: + - redisinsight:/data + +volumes: + redisinsight: \ No newline at end of file diff --git a/environments/laravel/init.env b/environments/laravel/init.env index 3a10cc8..1f8ed51 100644 --- a/environments/laravel/init.env +++ b/environments/laravel/init.env @@ -10,6 +10,7 @@ MONGODB_VERSION=7 ROLL_DB=1 ROLL_REDIS=1 +ROLL_REDISINSIGHT=1 ROLL_MONGODB=0 ## Laravel Config diff 
--git a/environments/magento1/init.env b/environments/magento1/init.env index e45f38b..6583e92 100644 --- a/environments/magento1/init.env +++ b/environments/magento1/init.env @@ -1,6 +1,7 @@ ROLL_DB=1 ROLL_REDIS=1 +ROLL_REDISINSIGHT=1 ROLL_DRAGONFLY=0 DB_DISTRIBUTION=mariadb diff --git a/environments/magento2/init.env b/environments/magento2/init.env index 66edd3b..3e1b135 100644 --- a/environments/magento2/init.env +++ b/environments/magento2/init.env @@ -6,6 +6,7 @@ ROLL_ELASTICVUE=0 ROLL_VARNISH=1 ROLL_RABBITMQ=1 ROLL_REDIS=1 +ROLL_REDISINSIGHT=1 ROLL_DRAGONFLY=0 DB_DISTRIBUTION=mariadb diff --git a/environments/shopware/init.env b/environments/shopware/init.env index c5fb5ac..de898f7 100644 --- a/environments/shopware/init.env +++ b/environments/shopware/init.env @@ -1,6 +1,7 @@ ROLL_DB=1 ROLL_REDIS=1 +ROLL_REDISINSIGHT=1 ROLL_DRAGONFLY=0 ROLL_RABBITMQ=0 ROLL_ELASTICSEARCH=0 diff --git a/environments/symfony/init.env b/environments/symfony/init.env index c5fb5ac..de898f7 100644 --- a/environments/symfony/init.env +++ b/environments/symfony/init.env @@ -1,6 +1,7 @@ ROLL_DB=1 ROLL_REDIS=1 +ROLL_REDISINSIGHT=1 ROLL_DRAGONFLY=0 ROLL_RABBITMQ=0 ROLL_ELASTICSEARCH=0 diff --git a/environments/typo3/init.env b/environments/typo3/init.env index c15f747..ff048b3 100644 --- a/environments/typo3/init.env +++ b/environments/typo3/init.env @@ -1,6 +1,7 @@ ROLL_DB=1 ROLL_REDIS=1 +ROLL_REDISINSIGHT=1 ROLL_DRAGONFLY=0 DB_DISTRIBUTION=mariadb diff --git a/utils/config.sh b/utils/config.sh index d485b01..a7d0230 100644 --- a/utils/config.sh +++ b/utils/config.sh @@ -75,6 +75,7 @@ function initConfigSchema() { ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ELASTICSEARCH); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_OPENSEARCH); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ELASTICVUE); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_REDISINSIGHT); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") 
ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_RABBITMQ); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_MONGODB); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_BROWSERSYNC); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") From 7107268fadf1544ac5f5185169e10c22623c0556 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Mon, 16 Jun 2025 13:54:06 +0200 Subject: [PATCH 39/69] add comprehensive README to roll-docker-stack with installation steps, feature overview, supported environments, and contribution guide --- README.md | 105 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 104 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c941a36..b834627 100644 --- a/README.md +++ b/README.md @@ -1 +1,104 @@ -# roll-docker-stack +# Roll Docker Stack + +🚀 **A powerful, flexible Docker development environment for modern web applications** + +Roll Docker Stack provides pre-configured Docker environments for various frameworks and CMS platforms, making it easy to spin up consistent development environments with all the tools you need. + +## 🌟 Features + +- **Multi-Framework Support**: Magento 2, Laravel, Symfony, TYPO3, Shopware, WordPress, and more +- **Service Integration**: PHP-FPM, Nginx, MySQL/MariaDB, Redis, Elasticsearch, RabbitMQ, Varnish +- **Developer Tools**: Xdebug, MailPit (Better Mailhog Alternative), Redis Insight, ElasticVue, and more +- **Cross-Platform**: macOS, Linux, and Windows (WSL2) support +- **Local Development**: Optimized for local development environments +- **Easy Configuration**: Environment-specific settings with sensible defaults + +## 🚀 Installation + +### Installing via Homebrew (Recommended) + +RollDev may be installed via Homebrew on both macOS and Linux hosts: + +```bash +brew install dockergiant/roll/roll +roll svc up +``` + +**Updating via Homebrew:** +```bash +brew upgrade dockergiant/roll/roll +roll svc restart +``` + + +### Windows Installation (via WSL2) + +1. 
Install and enable WSL2 in Windows 10 +2. Install Ubuntu 20.04 or other compatible Linux version from the Windows store +3. Launch Docker for Windows, ensure WSL2 integration is enabled +4. Launch WSL from your terminal: + +```bash +wsl +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" +brew install dockergiant/roll/roll +roll svc up +``` + +> **⚠️ Performance Warning**: For optimal performance, code should be located in the WSL Linux home path (`~/code/projectname`) NOT the default `/mnt/c` path mapping. + +> **💡 GUI Tools**: Windows GUI tools should use network paths: `\\wsl$\Ubuntu-20.04\home\\` + +## ⚙️ Next Steps + +### Automatic DNS Resolution + +- **Linux**: Configure DNS to resolve `*.test` to `127.0.0.1` or use `/etc/hosts` entries +- **macOS**: Automatic via BSD per-TLD resolver at `/etc/resolver/test` +- **Windows**: Manual configuration of network adapter DNS server required + +### Trusted CA Root Certificate + +RollDev uses a CA root certificate for trusted SSL certificates. The CA root is located at `~/.roll/ssl/rootca/certs/ca.cert.pem`. 
+ +- **macOS**: Automatically added to Keychain (search for 'RollDev Proxy Local CA') +- **Linux**: Added to system trust bundle automatically +- **Firefox**: Import CA manually via Preferences → Privacy & Security → View Certificates → Authorities → Import +- **Chrome (Linux)**: Import CA via Settings → Privacy And Security → Manage Certificates → Authorities → Import + +## 📚 Full Documentation + +For complete installation instructions, configuration options, troubleshooting, and advanced usage, visit our comprehensive documentation: + +**👉 [dockergiant.github.io/rolldev](https://dockergiant.github.io/rolldev)** + +## 🛠️ Supported Environments + +- **Magento 2** - Complete e-commerce development stack +- **Magento 1** - Legacy Magento support +- **Laravel** - Modern PHP framework environment +- **Symfony** - Professional PHP development +- **TYPO3** - Enterprise CMS platform +- **Shopware** - E-commerce platform +- **WordPress** - Popular CMS environment +- **Akeneo** - PIM platform support +- **PHP** - Generic PHP development environment + +## 🤝 Contributing + +We welcome contributions! Please see our [contribution guidelines](https://dockergiant.github.io/rolldev/contributing/) for details. + +## 📄 License + +This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 
+ +## 🔗 Links + +- **Documentation**: [dockergiant.github.io/rolldev](https://dockergiant.github.io/rolldev) +- **CLI Repository**: [github.com/dockergiant/rolldev](https://github.com/dockergiant/rolldev) +- **Issues**: [github.com/dockergiant/rolldev/issues](https://github.com/dockergiant/rolldev/issues) +- **Container Packages**: [github.com/orgs/dockergiant/packages](https://github.com/orgs/dockergiant/packages?repo_name=rolldev) + +--- + +**Made with ❤️ by the Docker Giant & Disrex team** From 1b8e57d4257748e084d7c2d9a3a13a9b4ffb4dc6 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Mon, 16 Jun 2025 13:54:06 +0200 Subject: [PATCH 40/69] add comprehensive README to roll-docker-stack with installation steps, feature overview, supported environments, and contribution guide --- README.md | 105 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 104 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c941a36..b834627 100644 --- a/README.md +++ b/README.md @@ -1 +1,104 @@ -# roll-docker-stack +# Roll Docker Stack + +🚀 **A powerful, flexible Docker development environment for modern web applications** + +Roll Docker Stack provides pre-configured Docker environments for various frameworks and CMS platforms, making it easy to spin up consistent development environments with all the tools you need. 
+ +## 🌟 Features + +- **Multi-Framework Support**: Magento 2, Laravel, Symfony, TYPO3, Shopware, WordPress, and more +- **Service Integration**: PHP-FPM, Nginx, MySQL/MariaDB, Redis, Elasticsearch, RabbitMQ, Varnish +- **Developer Tools**: Xdebug, MailPit (Better Mailhog Alternative), Redis Insight, ElasticVue, and more +- **Cross-Platform**: macOS, Linux, and Windows (WSL2) support +- **Local Development**: Optimized for local development environments +- **Easy Configuration**: Environment-specific settings with sensible defaults + +## 🚀 Installation + +### Installing via Homebrew (Recommended) + +RollDev may be installed via Homebrew on both macOS and Linux hosts: + +```bash +brew install dockergiant/roll/roll +roll svc up +``` + +**Updating via Homebrew:** +```bash +brew upgrade dockergiant/roll/roll +roll svc restart +``` + + +### Windows Installation (via WSL2) + +1. Install and enable WSL2 in Windows 10 +2. Install Ubuntu 20.04 or other compatible Linux version from the Windows store +3. Launch Docker for Windows, ensure WSL2 integration is enabled +4. Launch WSL from your terminal: + +```bash +wsl +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" +brew install dockergiant/roll/roll +roll svc up +``` + +> **⚠️ Performance Warning**: For optimal performance, code should be located in the WSL Linux home path (`~/code/projectname`) NOT the default `/mnt/c` path mapping. + +> **💡 GUI Tools**: Windows GUI tools should use network paths: `\\wsl$\Ubuntu-20.04\home\\` + +## ⚙️ Next Steps + +### Automatic DNS Resolution + +- **Linux**: Configure DNS to resolve `*.test` to `127.0.0.1` or use `/etc/hosts` entries +- **macOS**: Automatic via BSD per-TLD resolver at `/etc/resolver/test` +- **Windows**: Manual configuration of network adapter DNS server required + +### Trusted CA Root Certificate + +RollDev uses a CA root certificate for trusted SSL certificates. 
The CA root is located at `~/.roll/ssl/rootca/certs/ca.cert.pem`. + +- **macOS**: Automatically added to Keychain (search for 'RollDev Proxy Local CA') +- **Linux**: Added to system trust bundle automatically +- **Firefox**: Import CA manually via Preferences → Privacy & Security → View Certificates → Authorities → Import +- **Chrome (Linux)**: Import CA via Settings → Privacy And Security → Manage Certificates → Authorities → Import + +## 📚 Full Documentation + +For complete installation instructions, configuration options, troubleshooting, and advanced usage, visit our comprehensive documentation: + +**👉 [dockergiant.github.io/rolldev](https://dockergiant.github.io/rolldev)** + +## 🛠️ Supported Environments + +- **Magento 2** - Complete e-commerce development stack +- **Magento 1** - Legacy Magento support +- **Laravel** - Modern PHP framework environment +- **Symfony** - Professional PHP development +- **TYPO3** - Enterprise CMS platform +- **Shopware** - E-commerce platform +- **WordPress** - Popular CMS environment +- **Akeneo** - PIM platform support +- **PHP** - Generic PHP development environment + +## 🤝 Contributing + +We welcome contributions! Please see our [contribution guidelines](https://dockergiant.github.io/rolldev/contributing/) for details. + +## 📄 License + +This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 
+ +## 🔗 Links + +- **Documentation**: [dockergiant.github.io/rolldev](https://dockergiant.github.io/rolldev) +- **CLI Repository**: [github.com/dockergiant/rolldev](https://github.com/dockergiant/rolldev) +- **Issues**: [github.com/dockergiant/rolldev/issues](https://github.com/dockergiant/rolldev/issues) +- **Container Packages**: [github.com/orgs/dockergiant/packages](https://github.com/orgs/dockergiant/packages?repo_name=rolldev) + +--- + +**Made with ❤️ by the Docker Giant & Disrex team** From 94a8e2b94a7df7decb0ff908d7260ba8de8b620b Mon Sep 17 00:00:00 2001 From: github-actions Date: Mon, 16 Jun 2025 11:58:40 +0000 Subject: [PATCH 41/69] Tagged 0.4.0 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index d15723f..1d0ba9e 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.3.2 +0.4.0 From 3ad13c65f7d509809c4e38e3cc7275012a958006 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Thu, 19 Jun 2025 14:02:46 +0200 Subject: [PATCH 42/69] fix: resolve Magento 2.4.8+ 2FA configuration bug in magento2-init Implements workaround for Adobe Commerce core issue #39836 where using --lock-env with twofactorauth/general/force_providers causes DuoSecurity provider to fail due to array vs string storage mismatch in env.php. --- commands/magento2-init.cmd | 180 ++++++++++++++++++++++++++++--------- 1 file changed, 140 insertions(+), 40 deletions(-) diff --git a/commands/magento2-init.cmd b/commands/magento2-init.cmd index a1b352a..ca0b78b 100755 --- a/commands/magento2-init.cmd +++ b/commands/magento2-init.cmd @@ -594,46 +594,87 @@ echo -e "\033[36m[11/12] Running initial indexing...\033[0m" echo -e "\033[36m[11/12] Creating admin user and configuring 2FA...\033[0m" -# Generate admin user and 2FA setup for Magento 2.4.6+ (all supported versions require 2FA) -"${ROLL_DIR}/bin/roll" cli bash -c " - set -e - - # Generate admin credentials - ADMIN_PASS=\"\$(pwgen -n1 16)\" - ADMIN_USER=admin - - echo 'Creating admin user...' 
- bin/magento admin:user:create \\ - --admin-password=\"\${ADMIN_PASS}\" \\ - --admin-user=\"\${ADMIN_USER}\" \\ - --admin-firstname=\"Local\" \\ - --admin-lastname=\"Admin\" \\ - --admin-email=\"\${ADMIN_USER}@example.com\" - - echo \"Admin Username: \${ADMIN_USER}\" - echo \"Admin Password: \${ADMIN_PASS}\" - - # Configure 2FA - echo 'Configuring 2FA...' - TFA_SECRET=\$(python3 -c \"import base64; print(base64.b32encode('\$(pwgen -A1 128)'.encode()).decode().strip('='))\") - OTPAUTH_URL=\$(printf \"otpauth://totp/%s%%3Alocaladmin%%40example.com?issuer=%s&secret=%s\" \\ - \"app.${PROJECT_NAME}.test\" \"app.${PROJECT_NAME}.test\" \"\${TFA_SECRET}\" - ) - - bin/magento config:set --lock-env twofactorauth/general/force_providers google - bin/magento security:tfa:google:set-secret \"\${ADMIN_USER}\" \"\${TFA_SECRET}\" +# Function to check if version is 2.4.8 or higher +is_magento_248_or_higher() { + local version="$1" + local base_version - echo \"2FA Setup URL: \${OTPAUTH_URL}\" - echo \"2FA Backup Codes:\" - oathtool -s 30 -w 10 --totp --base32 \"\${TFA_SECRET}\" + # Extract base version (remove patch info) + if [[ "${version}" =~ ^([0-9]+\.[0-9]+\.[0-9x]+) ]]; then + base_version="${BASH_REMATCH[1]}" + else + base_version="${version}" + fi - # Generate QR code - segno \"\${OTPAUTH_URL}\" -s 4 -o \"pub/media/\${ADMIN_USER}-totp-qr.png\" - QR_URL=\"https://app.${PROJECT_NAME}.test/media/\${ADMIN_USER}-totp-qr.png?t=\$(date +%s)\" - echo \"QR Code URL: \${QR_URL}\" + # Check if version is 2.4.8+ or 2.4.x (which defaults to latest) + case "${base_version}" in + "2.4.x"|"2.4.9"*|"2.4.8"*) + return 0 # true + ;; + *) + return 1 # false + ;; + esac +} + +# Generate admin user and 2FA setup for Magento 2.4.6+ (all supported versions require 2FA) +if is_magento_248_or_higher "${MAGENTO_VERSION}"; then + echo -e "\033[33m🔧 Detected Magento 2.4.8+ - Using workaround for 2FA configuration issue\033[0m" + echo -e "\033[33m (Adobe Commerce core issue #39836 - DuoSecurity 
provider array handling)\033[0m" - # Save credentials to file for user reference - cat > /var/www/html/admin-credentials.txt << EOL + # Magento 2.4.8+ workaround for 2FA configuration bug + "${ROLL_DIR}/bin/roll" cli bash -c " + set -e + + # Generate admin credentials + ADMIN_PASS=\"\$(pwgen -n1 16)\" + ADMIN_USER=admin + + echo 'Creating admin user...' + bin/magento admin:user:create \\ + --admin-password=\"\${ADMIN_PASS}\" \\ + --admin-user=\"\${ADMIN_USER}\" \\ + --admin-firstname=\"Local\" \\ + --admin-lastname=\"Admin\" \\ + --admin-email=\"\${ADMIN_USER}@example.com\" + + echo \"Admin Username: \${ADMIN_USER}\" + echo \"Admin Password: \${ADMIN_PASS}\" + + # Configure 2FA - using workaround for 2.4.8+ core bug + echo 'Configuring 2FA (using 2.4.8+ workaround)...' + TFA_SECRET=\$(python3 -c \"import base64; print(base64.b32encode('\$(pwgen -A1 128)'.encode()).decode().strip('='))\") + OTPAUTH_URL=\$(printf \"otpauth://totp/%s%%3Alocaladmin%%40example.com?issuer=%s&secret=%s\" \\ + \"app.${PROJECT_NAME}.test\" \"app.${PROJECT_NAME}.test\" \"\${TFA_SECRET}\" + ) + + # Step 1: Set 2FA provider without --lock-env to avoid array storage bug + echo 'Setting 2FA provider (step 1/4)...' + bin/magento config:set twofactorauth/general/force_providers google + + # Step 2: Run DI compile to ensure TFA commands are available + echo 'Compiling DI container (step 2/4)...' + bin/magento setup:di:compile --quiet + + # Step 3: Set the TFA secret + echo 'Setting 2FA secret (step 3/4)...' + bin/magento security:tfa:google:set-secret \"\${ADMIN_USER}\" \"\${TFA_SECRET}\" + + # Step 4: Run setup:upgrade to ensure all configs are applied + echo 'Running setup upgrade (step 4/4)...' 
+ bin/magento setup:upgrade --keep-generated + + echo \"2FA Setup URL: \${OTPAUTH_URL}\" + echo \"2FA Backup Codes:\" + oathtool -s 30 -w 10 --totp --base32 \"\${TFA_SECRET}\" + + # Generate QR code + segno \"\${OTPAUTH_URL}\" -s 4 -o \"pub/media/\${ADMIN_USER}-totp-qr.png\" + QR_URL=\"https://app.${PROJECT_NAME}.test/media/\${ADMIN_USER}-totp-qr.png?t=\$(date +%s)\" + echo \"QR Code URL: \${QR_URL}\" + + # Save credentials to file for user reference + cat > /var/www/html/admin-credentials.txt << EOL Magento Admin Credentials ======================== Username: \${ADMIN_USER} @@ -645,10 +686,69 @@ Admin Panel: https://app.${PROJECT_NAME}.test/shopmanager/ Frontend: https://app.${PROJECT_NAME}.test/ Generated on: \$(date) + +Note: This installation used the 2.4.8+ workaround for Adobe Commerce core issue #39836 EOL - - echo 'Admin credentials saved to admin-credentials.txt' -" + + echo 'Admin credentials saved to admin-credentials.txt' + " +else + # Standard 2FA setup for Magento 2.4.6-2.4.7 + "${ROLL_DIR}/bin/roll" cli bash -c " + set -e + + # Generate admin credentials + ADMIN_PASS=\"\$(pwgen -n1 16)\" + ADMIN_USER=admin + + echo 'Creating admin user...' + bin/magento admin:user:create \\ + --admin-password=\"\${ADMIN_PASS}\" \\ + --admin-user=\"\${ADMIN_USER}\" \\ + --admin-firstname=\"Local\" \\ + --admin-lastname=\"Admin\" \\ + --admin-email=\"\${ADMIN_USER}@example.com\" + + echo \"Admin Username: \${ADMIN_USER}\" + echo \"Admin Password: \${ADMIN_PASS}\" + + # Configure 2FA - standard method for 2.4.6-2.4.7 + echo 'Configuring 2FA...' 
+ TFA_SECRET=\$(python3 -c \"import base64; print(base64.b32encode('\$(pwgen -A1 128)'.encode()).decode().strip('='))\") + OTPAUTH_URL=\$(printf \"otpauth://totp/%s%%3Alocaladmin%%40example.com?issuer=%s&secret=%s\" \\ + \"app.${PROJECT_NAME}.test\" \"app.${PROJECT_NAME}.test\" \"\${TFA_SECRET}\" + ) + + bin/magento config:set --lock-env twofactorauth/general/force_providers google + bin/magento security:tfa:google:set-secret \"\${ADMIN_USER}\" \"\${TFA_SECRET}\" + + echo \"2FA Setup URL: \${OTPAUTH_URL}\" + echo \"2FA Backup Codes:\" + oathtool -s 30 -w 10 --totp --base32 \"\${TFA_SECRET}\" + + # Generate QR code + segno \"\${OTPAUTH_URL}\" -s 4 -o \"pub/media/\${ADMIN_USER}-totp-qr.png\" + QR_URL=\"https://app.${PROJECT_NAME}.test/media/\${ADMIN_USER}-totp-qr.png?t=\$(date +%s)\" + echo \"QR Code URL: \${QR_URL}\" + + # Save credentials to file for user reference + cat > /var/www/html/admin-credentials.txt << EOL +Magento Admin Credentials +======================== +Username: \${ADMIN_USER} +Password: \${ADMIN_PASS} +2FA Setup URL: \${OTPAUTH_URL} +QR Code URL: \${QR_URL} + +Admin Panel: https://app.${PROJECT_NAME}.test/shopmanager/ +Frontend: https://app.${PROJECT_NAME}.test/ + +Generated on: \$(date) +EOL + + echo 'Admin credentials saved to admin-credentials.txt' + " +fi echo -e "\033[36m[12/12] Finalizing setup...\033[0m" From 1a1f74622fc7c90541c61be21b076aac7c4a1822 Mon Sep 17 00:00:00 2001 From: github-actions Date: Thu, 19 Jun 2025 12:03:58 +0000 Subject: [PATCH 43/69] Tagged 0.4.1 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index 1d0ba9e..267577d 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.4.0 +0.4.1 From a92428c157e918addc380ca69763e348f4a9018f Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Sun, 22 Jun 2025 15:18:32 +0200 Subject: [PATCH 44/69] update: switch elasticvue to official image and adjust configuration --- environments/includes/elasticvue.base.yml | 10 +++------- 1 file changed, 3 
insertions(+), 7 deletions(-) diff --git a/environments/includes/elasticvue.base.yml b/environments/includes/elasticvue.base.yml index 331d179..869622c 100644 --- a/environments/includes/elasticvue.base.yml +++ b/environments/includes/elasticvue.base.yml @@ -1,16 +1,12 @@ services: elasticvue: + image: cars10/elasticvue hostname: "${ROLL_ENV_NAME}-elasticvue" - image: rollupdev/elasticvue:latest labels: - traefik.enable=true - - traefik.http.routers.${ROLL_ENV_NAME}-elasticvue.tls=true - traefik.http.routers.${ROLL_ENV_NAME}-elasticvue.rule=Host(`elasticvue.${TRAEFIK_DOMAIN}`) + - traefik.http.routers.${ROLL_ENV_NAME}-elasticvue.tls=true - traefik.http.services.${ROLL_ENV_NAME}-elasticvue.loadbalancer.server.port=8080 - traefik.docker.network=${ROLL_ENV_NAME}_default environment: - - DEFAULT_NAME=${ROLL_ENV_NAME} - - DEFAULT_HOST=https://elasticsearch.${TRAEFIK_DOMAIN} - -volumes: - esdata: + - ELASTICVUE_CLUSTERS=[{"name":"${ROLL_ENV_NAME}","uri":"https://elasticsearch.${TRAEFIK_DOMAIN}"}] From ac4c7eefc9928b09009c2291af9dcb352fd30fac Mon Sep 17 00:00:00 2001 From: github-actions Date: Sun, 22 Jun 2025 13:22:00 +0000 Subject: [PATCH 45/69] Tagged 0.4.2 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index 267577d..2b7c5ae 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.4.1 +0.4.2 From bb559f1c9fb7cf8d628637825dc8fbcd2cfcc665 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Tue, 19 Aug 2025 10:14:25 +0200 Subject: [PATCH 46/69] Update README --- README.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b834627..ddd38a9 100644 --- a/README.md +++ b/README.md @@ -101,4 +101,14 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file --- -**Made with ❤️ by the Docker Giant & Disrex team** +Disrex T-Rex Mascot Waving + +## Sponsored by + + + + Disrex Logo + + + +**Made with ❤️** From 5ec5a3d2d9c7e28865181f14b2ec60227f9b706d Mon Sep 17 
00:00:00 2001 From: Rick Bouma Date: Tue, 19 Aug 2025 10:15:24 +0200 Subject: [PATCH 47/69] fix: handle empty projectNetworkList in status.cmd to avoid errors --- commands/status.cmd | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/commands/status.cmd b/commands/status.cmd index 65de9fd..ad420b1 100644 --- a/commands/status.cmd +++ b/commands/status.cmd @@ -23,8 +23,12 @@ fi IFS="$OLDIFS" messageList=() -lastIdx=$(( ${#projectNetworkList[@]} - 1 )) -lastNetwork="${projectNetworkList[$lastIdx]}" +if (( ${#projectNetworkList[@]} > 0 )); then + lastIdx=$(( ${#projectNetworkList[@]} - 1 )) + lastNetwork="${projectNetworkList[$lastIdx]}" +else + lastNetwork="" +fi for projectNetwork in "${projectNetworkList[@]}"; do [[ -z "${projectNetwork}" || "${projectNetwork}" == "${rollNetworkName}" ]] && continue # Skip empty project network names (if any) From d39b4b08125958684e8e9505e85cc6cc7b786951 Mon Sep 17 00:00:00 2001 From: github-actions Date: Tue, 19 Aug 2025 08:16:17 +0000 Subject: [PATCH 48/69] Tagged 0.4.3 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index 2b7c5ae..17b2ccd 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.4.2 +0.4.3 From b88b1acc472335067a736f787b78be5c8acb0ace Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Thu, 11 Sep 2025 16:38:15 +0200 Subject: [PATCH 49/69] add: New Relic monitoring configuration to all environment init files --- environments/akeneo/init.env | 6 ++++++ environments/laravel/init.env | 6 ++++++ environments/magento1/init.env | 7 ++++++- environments/magento2/init.env | 8 +++++++- environments/php/init.env | 6 ++++++ environments/shopware/init.env | 6 ++++++ environments/symfony/init.env | 6 ++++++ environments/typo3/init.env | 7 ++++++- environments/wordpress/init.env | 6 ++++++ 9 files changed, 55 insertions(+), 3 deletions(-) diff --git a/environments/akeneo/init.env b/environments/akeneo/init.env index e6056ab..3422488 100644 --- 
a/environments/akeneo/init.env +++ b/environments/akeneo/init.env @@ -9,3 +9,9 @@ COMPOSER_VERSION=2 PHP_VERSION=7.4 PHP_XDEBUG_3=1 + +# New Relic license key (can be set globally in $HOME/.roll/.env) +# NEWRELIC_LICENSE_KEY=your_license_key_here + +# New Relic monitoring (set to 1 to enable) +ROLL_NEWRELIC=0 diff --git a/environments/laravel/init.env b/environments/laravel/init.env index 1f8ed51..6e6dc30 100644 --- a/environments/laravel/init.env +++ b/environments/laravel/init.env @@ -34,3 +34,9 @@ REDIS_HOST=redis REDIS_PORT=6379 MAIL_DRIVER=sendmail + +# New Relic license key (can be set globally in $HOME/.roll/.env) +# NEWRELIC_LICENSE_KEY=your_license_key_here + +# New Relic monitoring (set to 1 to enable) +ROLL_NEWRELIC=0 diff --git a/environments/magento1/init.env b/environments/magento1/init.env index 6583e92..46c4ada 100644 --- a/environments/magento1/init.env +++ b/environments/magento1/init.env @@ -14,4 +14,9 @@ REDIS_VERSION=5.0 DRAGONFLY_VERSION=0.15 # Set to 1 for enable static content browser caching -ROLL_MAGENTO_STATIC_CACHING=1 \ No newline at end of file +ROLL_MAGENTO_STATIC_CACHING=1 +# New Relic license key (can be set globally in $HOME/.roll/.env) +# NEWRELIC_LICENSE_KEY=your_license_key_here + +# New Relic monitoring (set to 1 to enable) +ROLL_NEWRELIC=0 diff --git a/environments/magento2/init.env b/environments/magento2/init.env index 3e1b135..2337c8f 100644 --- a/environments/magento2/init.env +++ b/environments/magento2/init.env @@ -31,4 +31,10 @@ ROLL_MAGENTO_STATIC_CACHING=1 # Auto login prefilling fields when accessing admin url on /shopmanager or /backend # Only works when you did run the auto login setup script > roll setup-autologin (only available for Magento 2 projects) -ROLL_ADMIN_AUTOLOGIN=0 \ No newline at end of file +ROLL_ADMIN_AUTOLOGIN=0 + +# New Relic license key (can be set globally in $HOME/.roll/.env) +# NEWRELIC_LICENSE_KEY=your_license_key_here + +# New Relic monitoring (set to 1 to enable) +ROLL_NEWRELIC=0 \ No 
newline at end of file diff --git a/environments/php/init.env b/environments/php/init.env index 9c7e204..75d2687 100644 --- a/environments/php/init.env +++ b/environments/php/init.env @@ -10,3 +10,9 @@ PHP_XDEBUG_3=1 REDIS_VERSION=6.2 DRAGONFLY_VERSION=1.3 +# New Relic license key (can be set globally in $HOME/.roll/.env) +# NEWRELIC_LICENSE_KEY=your_license_key_here + +# New Relic monitoring (set to 1 to enable) +ROLL_NEWRELIC=0 + diff --git a/environments/shopware/init.env b/environments/shopware/init.env index de898f7..5e04d7c 100644 --- a/environments/shopware/init.env +++ b/environments/shopware/init.env @@ -18,3 +18,9 @@ REDIS_VERSION=5.0 DRAGONFLY_VERSION=1.3 VARNISH_VERSION=6.0 + +# New Relic license key (can be set globally in $HOME/.roll/.env) +# NEWRELIC_LICENSE_KEY=your_license_key_here + +# New Relic monitoring (set to 1 to enable) +ROLL_NEWRELIC=0 diff --git a/environments/symfony/init.env b/environments/symfony/init.env index de898f7..5e04d7c 100644 --- a/environments/symfony/init.env +++ b/environments/symfony/init.env @@ -18,3 +18,9 @@ REDIS_VERSION=5.0 DRAGONFLY_VERSION=1.3 VARNISH_VERSION=6.0 + +# New Relic license key (can be set globally in $HOME/.roll/.env) +# NEWRELIC_LICENSE_KEY=your_license_key_here + +# New Relic monitoring (set to 1 to enable) +ROLL_NEWRELIC=0 diff --git a/environments/typo3/init.env b/environments/typo3/init.env index ff048b3..97cac8a 100644 --- a/environments/typo3/init.env +++ b/environments/typo3/init.env @@ -11,4 +11,9 @@ COMPOSER_VERSION=2 PHP_VERSION=7.4 PHP_XDEBUG_3=1 REDIS_VERSION=5.0 -DRAGONFLY_VERSION=1.3 \ No newline at end of file +DRAGONFLY_VERSION=1.3 +# New Relic license key (can be set globally in $HOME/.roll/.env) +# NEWRELIC_LICENSE_KEY=your_license_key_here + +# New Relic monitoring (set to 1 to enable) +ROLL_NEWRELIC=0 diff --git a/environments/wordpress/init.env b/environments/wordpress/init.env index 1c1ea09..56ebdc1 100644 --- a/environments/wordpress/init.env +++ b/environments/wordpress/init.env 
@@ -19,3 +19,9 @@ DB_PORT=3306 DB_DATABASE=wordpress DB_USERNAME=wordpress DB_PASSWORD=wordpress + +# New Relic license key (can be set globally in $HOME/.roll/.env) +# NEWRELIC_LICENSE_KEY=your_license_key_here + +# New Relic monitoring (set to 1 to enable) +ROLL_NEWRELIC=0 From 50c7c845f9460e64b7d95181d28559359cb40d89 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Thu, 11 Sep 2025 16:38:23 +0200 Subject: [PATCH 50/69] remove: Blackfire profiling documentation from roll-docker-stack --- docs/configuration/blackfire.md | 22 ---------------------- 1 file changed, 22 deletions(-) delete mode 100644 docs/configuration/blackfire.md diff --git a/docs/configuration/blackfire.md b/docs/configuration/blackfire.md deleted file mode 100644 index 16eeddc..0000000 --- a/docs/configuration/blackfire.md +++ /dev/null @@ -1,22 +0,0 @@ -# Blackfire Profiling - -For information on what Blackfire is, please see the [introduction to Blackfire](https://blackfire.io/docs/introduction) in Blackfire documentation. - -Blackfire may be enabled on both `magento1` and `magento2` env types by adding the following to the project's `.env.roll` file (or exporting them to environment variables prior to starting the environment): - -``` -ROLL_BLACKFIRE=1 - -BLACKFIRE_CLIENT_ID= -BLACKFIRE_CLIENT_TOKEN= -BLACKFIRE_SERVER_ID= -BLACKFIRE_SERVER_TOKEN= -``` - -Note: You can obtain the IDs and Tokens used in the above from within your Blackfire account under Account Settings -> Credentials or from the credentials are of the environment you're pushing profile information into. - -## CLI Tool - -To use the Blackfire CLI Tool, you can run `ollblackfire [arguments]`. - -For more information on the CLI tool, please see [Profiling CLI Commands](https://blackfire.io/docs/cookbooks/profiling-cli) in Blackfire's documentation. 
From d82f9b691cfc264d723b1ced170d81edd7d6d0c1 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Thu, 11 Sep 2025 16:38:28 +0200 Subject: [PATCH 51/69] add: documentation for enabling New Relic monitoring with configuration examples and troubleshooting steps --- docs/configuration/newrelic.md | 159 +++++++++++++++++++++++++++++++++ docs/services.md | 3 + 2 files changed, 162 insertions(+) create mode 100644 docs/configuration/newrelic.md diff --git a/docs/configuration/newrelic.md b/docs/configuration/newrelic.md new file mode 100644 index 0000000..4221234 --- /dev/null +++ b/docs/configuration/newrelic.md @@ -0,0 +1,159 @@ +# New Relic Monitoring + +New Relic may be enabled on all environment types to provide application performance monitoring for PHP applications. + +New Relic integration provides: +- **PHP APM** - Application Performance Monitoring for PHP applications with distributed tracing +- **Conditional Loading** - Zero performance impact when disabled +- **Local Development Optimized** - Raw SQL queries and enhanced debugging for local development + +## Configuration Levels + +### Global Configuration (One-time setup) + +Set your New Relic license key globally in `~/.roll/.env`: + +``` +NEWRELIC_LICENSE_KEY=your_license_key_here +``` + +### Project Configuration + +New Relic may be enabled by adding the following to the project's `.env` file: + +``` +ROLL_NEWRELIC=1 + +# Optional: Set custom app name (defaults to "RollDev-LocalEnv-{project-name}") +NEWRELIC_APP_NAME=my-awesome-app + +# Optional: Override global license key for this project +NEWRELIC_LICENSE_KEY=project_specific_key +``` + +**Note**: All environment `init.env` files include a commented example of the license key variable with instructions about global configuration: + +``` +# New Relic license key (can be set globally in $HOME/.roll/.env) +# NEWRELIC_LICENSE_KEY=your_license_key_here +``` + +## Usage Examples + +### Enable New Relic with Global License Key + +1. 
Set global license key (one-time setup): + ```bash + echo "NEWRELIC_LICENSE_KEY=your_license_key" >> ~/.roll/.env + ``` + +2. Enable for specific project: + ```bash + echo "ROLL_NEWRELIC=1" >> .env + echo "NEWRELIC_APP_NAME=my-project" >> .env + ``` + +3. Start/restart containers: + ```bash + roll up + # or + roll restart + ``` + +### Disable New Relic + +```bash +# Disable for current project +echo "ROLL_NEWRELIC=0" >> .env +roll restart +``` + +## How It Works + +### PHP Agent +- **Installed but not loaded** by default in all PHP-FPM containers +- **Conditionally loaded** only when `ROLL_NEWRELIC=1` +- **Zero overhead** when disabled +- **Configuration**: Uses template at `/usr/local/etc/php/conf.d/newrelic.ini.template` +- **Auto App Naming**: Automatically generates app names using `RollDev-LocalEnv-{project-name}` pattern +- **Local Development**: Optimized with raw SQL queries, 1ms threshold, and enhanced debugging + +### Configuration Precedence +1. **Project `.env`** (highest priority) +2. **Global `$HOME/.roll/.env`** (fallback) +3. 
**Default values** (disabled) + +## Troubleshooting + +### Check if New Relic is Enabled + +```bash +# Check PHP configuration +roll exec php-fpm php -m | grep newrelic + +# Check environment variables +roll exec php-fpm env | grep NEWRELIC + +# Check New Relic logs +roll exec php-fpm tail -f /var/log/newrelic/php_agent.log + +# Check daemon logs +roll exec php-fpm tail -f /var/log/newrelic/newrelic-daemon.log +``` + +### Common Issues + +**"New Relic enabled but no license key"** +- Ensure `NEWRELIC_LICENSE_KEY` is set in project `.env` or global `~/.roll/.env` + +**"PHP extension not loading"** +- Verify `ROLL_NEWRELIC=1` in project `.env` +- Restart containers: `roll restart` + +**"Transaction data too large"** +- New Relic daemon shows "maximum message size exceeded" errors +- This is normal for large database queries with many segments +- Data is still captured but trace details may be limited + +## Environment Variables Reference + +| Variable | Scope | Default | Description | +|----------|-------|---------|-------------| +| `ROLL_NEWRELIC` | Project | `0` | Enable/disable New Relic (0/1) | +| `NEWRELIC_LICENSE_KEY` | Global/Project | - | New Relic license key | +| `NEWRELIC_APP_NAME` | Project | `RollDev-LocalEnv-{project-name}` | Application name in New Relic | + +## PHP Versions Supported + +New Relic PHP agent is installed using the reliable `install-php-extensions` tool: + +| PHP Version | Status | +|-------------|---------| +| 8.4+ | ✅ Supported | +| 8.3+ | ✅ Supported | +| 8.2 | ✅ Supported | +| 8.1 | ✅ Supported | +| 8.0 | ✅ Supported | +| 7.4 | ✅ Supported | +| 7.3 | ✅ Supported | +| 7.2 | ✅ Supported | +| 7.1 | ✅ Supported | +| 7.0 | ✅ Supported | +| < 7.0 | ❌ Not Supported | + +## Architecture + +``` +Project Level: +├── .env (ROLL_NEWRELIC=1, NEWRELIC_APP_NAME=app) +│ +Global Level: +├── ~/.roll/.env (NEWRELIC_LICENSE_KEY=xxx) +│ +Docker Stack: +├── PHP-FPM (conditional New Relic PHP extension) +├── PHP-Debug (conditional New Relic PHP extension) 
+└── Environment Templates (ROLL_NEWRELIC=0 by default) +``` + +This integration follows Roll Docker Stack patterns for optional services while providing PHP application performance monitoring capabilities optimized for local development debugging. \ No newline at end of file diff --git a/docs/services.md b/docs/services.md index 8ad857f..7b44e0d 100644 --- a/docs/services.md +++ b/docs/services.md @@ -7,6 +7,9 @@ After running `roll svc up` for the first time following installation, the follo * [https://dnsmasq.roll.test/](https://dnsmasq.roll.test/) * [https://mailhog.roll.test/](https://mailhog.roll.test/) +Additional services available include: +* New Relic Infrastructure monitoring (when enabled with `--profile newrelic`) + ## Customizable Settings When spinning up global services via `docker-compose` RollDev uses `~/.roll` as the project directory allowing a `.env.roll` placed at `~/.roll/.env.roll` to function for overriding variables in the `docker-compose` configuration used to deploy these services. 
From e129c6663f94e9a3f549ba9e752738bd6b001027 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Sat, 13 Sep 2025 19:08:03 +0200 Subject: [PATCH 52/69] update: refine New Relic PHP agent documentation and add environment configuration support --- docs/configuration/newrelic.md | 30 +++++++++++++------------- environments/includes/php-fpm.base.yml | 8 +++++++ utils/config.sh | 5 +++++ 3 files changed, 28 insertions(+), 15 deletions(-) diff --git a/docs/configuration/newrelic.md b/docs/configuration/newrelic.md index 4221234..a07dc05 100644 --- a/docs/configuration/newrelic.md +++ b/docs/configuration/newrelic.md @@ -125,21 +125,21 @@ roll exec php-fpm tail -f /var/log/newrelic/newrelic-daemon.log ## PHP Versions Supported -New Relic PHP agent is installed using the reliable `install-php-extensions` tool: - -| PHP Version | Status | -|-------------|---------| -| 8.4+ | ✅ Supported | -| 8.3+ | ✅ Supported | -| 8.2 | ✅ Supported | -| 8.1 | ✅ Supported | -| 8.0 | ✅ Supported | -| 7.4 | ✅ Supported | -| 7.3 | ✅ Supported | -| 7.2 | ✅ Supported | -| 7.1 | ✅ Supported | -| 7.0 | ✅ Supported | -| < 7.0 | ❌ Not Supported | +New Relic PHP agent is installed in RollDev containers: + +| PHP Version | amd64 (x86_64) | arm64 (Apple Silicon) | +|-------------|----------------|----------------------| +| 8.4+ | ✅ Supported | ✅ Supported | +| 8.3+ | ✅ Supported | ✅ Supported | +| 8.2 | ✅ Supported | ✅ Supported | +| 8.1 | ✅ Supported | ✅ Supported | +| 8.0 | ✅ Supported | ✅ Supported | +| 7.4 | ✅ Supported | ❌ Not Available | +| < 7.4 | ❌ Not Available | ❌ Not Available | + +**Note:** +- New Relic is only installed in PHP 7.4+ containers +- On arm64 architecture (Apple Silicon), New Relic PHP agent is only available for PHP 8.0 and higher ## Architecture diff --git a/environments/includes/php-fpm.base.yml b/environments/includes/php-fpm.base.yml index 09110a8..5593fcf 100644 --- a/environments/includes/php-fpm.base.yml +++ b/environments/includes/php-fpm.base.yml @@ -32,6 +32,10 @@ 
services: - GROUP_ID=${GROUP_ID:-33} - OSTYPE=${OSTYPE:-linux} - ADD_PHP_EXT=${ADD_PHP_EXT:-} + - ROLL_ENV_NAME=${ROLL_ENV_NAME:-} + - ROLL_NEWRELIC=${ROLL_NEWRELIC:-0} + - NEWRELIC_LICENSE_KEY=${NEWRELIC_LICENSE_KEY:-} + - NEWRELIC_APP_NAME=${NEWRELIC_APP_NAME:-} volumes: *volumes extra_hosts: *extra_hosts @@ -52,6 +56,10 @@ services: - GROUP_ID=${GROUP_ID:-33} - OSTYPE=${OSTYPE:-linux} - ADD_PHP_EXT=${ADD_PHP_EXT:-} + - ROLL_ENV_NAME=${ROLL_ENV_NAME:-} + - ROLL_NEWRELIC=${ROLL_NEWRELIC:-0} + - NEWRELIC_LICENSE_KEY=${NEWRELIC_LICENSE_KEY:-} + - NEWRELIC_APP_NAME=${NEWRELIC_APP_NAME:-} volumes: *volumes extra_hosts: *extra_hosts depends_on: diff --git a/utils/config.sh b/utils/config.sh index a7d0230..69c1087 100644 --- a/utils/config.sh +++ b/utils/config.sh @@ -136,6 +136,11 @@ function initConfigSchema() { # Extensions and customizations ROLL_CONFIG_SCHEMA_KEYS+=(ADD_PHP_EXT); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + # New Relic configuration + ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_NEWRELIC); ROLL_CONFIG_SCHEMA_VALUES+=("boolean:0") + ROLL_CONFIG_SCHEMA_KEYS+=(NEWRELIC_LICENSE_KEY); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + ROLL_CONFIG_SCHEMA_KEYS+=(NEWRELIC_APP_NAME); ROLL_CONFIG_SCHEMA_VALUES+=("string:optional") + # Container configuration ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ENV_SHELL_CONTAINER); ROLL_CONFIG_SCHEMA_VALUES+=("string:php-fpm") ROLL_CONFIG_SCHEMA_KEYS+=(ROLL_ENV_SHELL_COMMAND); ROLL_CONFIG_SCHEMA_VALUES+=("string:bash") From 64f71682192e8e151175c62bdedb933d52a67d06 Mon Sep 17 00:00:00 2001 From: github-actions Date: Sat, 13 Sep 2025 17:10:17 +0000 Subject: [PATCH 53/69] Tagged 0.5.0 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index 17b2ccd..8f0916f 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.4.3 +0.5.0 From b3735143539880b3d809e65c39def7757cd59974 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 10 Dec 2025 14:40:31 +0100 Subject: [PATCH 54/69] add: multistore command for managing 
Magento multi-store configurations with automated config generation and SSL support --- bin/roll | 2 +- commands/multistore.cmd | 232 +++++++++++++++++++++++++++++++++++++++ commands/multistore.help | 55 ++++++++++ 3 files changed, 288 insertions(+), 1 deletion(-) create mode 100755 commands/multistore.cmd create mode 100644 commands/multistore.help diff --git a/bin/roll b/bin/roll index a124320..a18422f 100755 --- a/bin/roll +++ b/bin/roll @@ -40,7 +40,7 @@ declare ROLL_PARAMS=() declare ROLL_CMD_VERB= declare ROLL_CMD_EXEC= declare ROLL_CMD_HELP= -declare ROLL_CMD_ANYARGS=(svc env db redis sync shell debug rootnotty rootshell clinotty root node npm cli copyfromcontainer copytocontainer composer grunt magento magerun backup restore restore-full duplicate magento2-init) +declare ROLL_CMD_ANYARGS=(svc env db redis sync shell debug rootnotty rootshell clinotty root node npm cli copyfromcontainer copytocontainer composer grunt magento magerun backup restore restore-full duplicate magento2-init tapsync) ## parse first argument as command and determine validity if (( "$#" )); then diff --git a/commands/multistore.cmd b/commands/multistore.cmd new file mode 100755 index 0000000..52814b2 --- /dev/null +++ b/commands/multistore.cmd @@ -0,0 +1,232 @@ +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +ROLL_ENV_PATH="$(locateEnvPath)" || exit $? +loadEnvConfig "${ROLL_ENV_PATH}" || exit $? 
+ +STORES_JSON="${ROLL_ENV_PATH}/.roll/stores.json" +ROLL_ENV_YML="${ROLL_ENV_PATH}/.roll/roll-env.yml" +NGINX_MAP_DIR="${ROLL_ENV_PATH}/.roll/nginx" +NGINX_MAP_FILE="${NGINX_MAP_DIR}/stores.map" + +function show_help() { + echo "Usage: roll multistore " + echo "" + echo "Commands:" + echo " init Generate configs from .roll/stores.json and sign SSL certificates" + echo " refresh Regenerate configs from .roll/stores.json (no SSL signing)" + echo " list Show current store configuration" + echo " help Show this help message" + echo "" + echo "Configuration:" + echo " Create .roll/stores.json with hostname to store code mapping:" + echo "" + echo ' {' + echo ' "stores": {' + echo ' "store-nl.test": "store_nl",' + echo ' "store-be.test": "store_be",' + echo ' "main.test": ""' + echo ' },' + echo ' "run_type": "store"' + echo ' }' + echo "" + echo "Generated files:" + echo " .roll/roll-env.yml - Docker Compose override for Traefik routing" + echo " .roll/nginx/stores.map - Nginx hostname to store code mapping" +} + +function check_stores_json() { + if [[ ! -f "${STORES_JSON}" ]]; then + fatal "Missing ${STORES_JSON}. Please create it first. Run 'roll multistore help' for format." + fi + + if ! command -v jq &> /dev/null; then + fatal "jq is required for parsing JSON. Please install it: brew install jq" + fi +} + +function get_all_hostnames() { + jq -r '.stores | keys[]' "${STORES_JSON}" +} + +function get_store_code() { + local hostname="$1" + jq -r --arg h "$hostname" '.stores[$h] // ""' "${STORES_JSON}" +} + +function get_run_type() { + jq -r '.run_type // "store"' "${STORES_JSON}" +} + +function escape_hostname_for_regex() { + echo "$1" | sed 's/\./\\\\./g' +} + +function generate_nginx_map() { + info "Generating nginx store mapping..." 
+ + mkdir -p "${NGINX_MAP_DIR}" + + local run_type + run_type=$(get_run_type) + + cat > "${NGINX_MAP_FILE}" <> "${NGINX_MAP_FILE}" + else + echo " # ${hostname} - uses default store code" >> "${NGINX_MAP_FILE}" + fi + done < <(get_all_hostnames) + + cat >> "${NGINX_MAP_FILE}" < "${ROLL_ENV_YML}" < + +Commands: + init Generate configs from .roll/stores.json and sign SSL certificates + refresh Regenerate configs from .roll/stores.json (without re-signing SSL) + list Show current store configuration and status + help Show this help message + +Configuration: + Create .roll/stores.json in your project root with hostname to store code mapping: + + { + "stores": { + "store-nl.test": "store_nl", + "store-be.test": "store_be", + "main.test": "" + }, + "run_type": "store" + } + + - "stores": Object mapping hostnames to Magento store codes + - Empty string ("") means use the default store code + - "run_type": Either "store" or "website" (default: "store") + +Generated Files: + .roll/roll-env.yml Docker Compose override with: + - Traefik routing rules for all hostnames + - Nginx volume mount for store mapping + - extra_hosts for PHP containers + + .roll/nginx/stores.map Nginx configuration with: + - $mage_run_code variable mapping + - $mage_run_type variable + +Workflow: + 1. Create .roll/stores.json with your store configuration + 2. Run: roll multistore init + 3. Run: roll env up + + To add/modify stores later: + 1. Edit .roll/stores.json + 2. Run: roll multistore refresh + 3. 
Run: roll env restart nginx + +Prerequisites: + - jq must be installed (brew install jq) + - Project must have .env.roll configured + +Examples: + roll multistore init # First time setup + roll multistore refresh # After editing stores.json + roll multistore list # Check current configuration From 27e387ea114cbaab27a3b2f41aeaecf71eee5aa9 Mon Sep 17 00:00:00 2001 From: github-actions Date: Wed, 10 Dec 2025 13:44:07 +0000 Subject: [PATCH 55/69] Tagged 0.5.1 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index 8f0916f..4b9fcbe 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.5.0 +0.5.1 From 822b6e2b94de75f3e70bc8bd44706b8941a13fac Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Wed, 10 Dec 2025 16:46:34 +0100 Subject: [PATCH 56/69] add: describe command to display RollDev environment details, including service status, URLs, and configurations --- commands/describe.cmd | 176 +++++++++++++++++++++++++++++++++++++++++ commands/describe.help | 22 ++++++ 2 files changed, 198 insertions(+) create mode 100755 commands/describe.cmd create mode 100644 commands/describe.help diff --git a/commands/describe.cmd b/commands/describe.cmd new file mode 100755 index 0000000..fe9dba7 --- /dev/null +++ b/commands/describe.cmd @@ -0,0 +1,176 @@ +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +ROLL_ENV_PATH="$(locateEnvPath)" || exit $? +loadEnvConfig "${ROLL_ENV_PATH}" || exit $? 
+ +# Colors +GREEN='\033[32m' +RED='\033[31m' +CYAN='\033[36m' +BOLD='\033[1m' +DIM='\033[2m' +NC='\033[0m' + +# Get container status (returns "running" or "stopped") +get_status_text() { + local service=$1 + local container="${ROLL_ENV_NAME}-${service}-1" + local status=$(docker inspect --format '{{.State.Status}}' "$container" 2>/dev/null) + if [[ "$status" == "running" ]]; then + echo "running" + else + echo "stopped" + fi +} + +# Print status with color +print_status() { + local status=$1 + if [[ "$status" == "running" ]]; then + printf "${GREEN}%-8s${NC}" "running" + else + printf "${RED}%-8s${NC}" "stopped" + fi +} + +# Table dimensions +W=95 + +# Horizontal lines +line_top() { + printf "${CYAN}┌" + printf '─%.0s' $(seq 1 $((W-2))) + printf "┐${NC}\n" +} + +line_mid() { + printf "${CYAN}├" + printf '─%.0s' $(seq 1 14) + printf "┼" + printf '─%.0s' $(seq 1 10) + printf "┼" + printf '─%.0s' $(seq 1 45) + printf "┼" + printf '─%.0s' $(seq 1 22) + printf "┤${NC}\n" +} + +line_bot() { + printf "${CYAN}└" + printf '─%.0s' $(seq 1 14) + printf "┴" + printf '─%.0s' $(seq 1 10) + printf "┴" + printf '─%.0s' $(seq 1 45) + printf "┴" + printf '─%.0s' $(seq 1 22) + printf "┘${NC}\n" +} + +# Header row +header_row() { + printf "${CYAN}│${NC} ${BOLD}%-12s${NC} ${CYAN}│${NC} ${BOLD}%-8s${NC} ${CYAN}│${NC} ${BOLD}%-43s${NC} ${CYAN}│${NC} ${BOLD}%-20s${NC} ${CYAN}│${NC}\n" "$1" "$2" "$3" "$4" +} + +# Data row with status +data_row() { + local name=$1 + local status=$2 + local url=$3 + local info=$4 + printf "${CYAN}│${NC} %-12s ${CYAN}│${NC} " "$name" + print_status "$status" + printf " ${CYAN}│${NC} %-43s ${CYAN}│${NC} %-20s ${CYAN}│${NC}\n" "$url" "$info" +} + +# Sub row (continuation, no status) +sub_row() { + printf "${CYAN}│${NC} %-12s ${CYAN}│${NC} %-8s ${CYAN}│${NC} ${DIM}%-43s${NC} ${CYAN}│${NC} %-20s ${CYAN}│${NC}\n" "" "" "$1" "$2" +} + +# Info row (spans columns) +info_row() { + printf "${CYAN}│${NC} ${BOLD}%-12s${NC} ${CYAN}│${NC} %-76s ${CYAN}│${NC}\n" "$1" "$2" 
+} + +# Text row (spans columns, for URLs) +text_row() { + printf "${CYAN}│${NC} %-12s ${CYAN}│${NC} %-76s ${CYAN}│${NC}\n" "" "$1" +} + +echo "" + +# Header box +line_top +printf "${CYAN}│${NC} ${BOLD}Project:${NC} %-83s ${CYAN}│${NC}\n" "${ROLL_ENV_NAME} ${ROLL_ENV_PATH}" +printf "${CYAN}│${NC} ${BOLD}Domain:${NC} %-83s ${CYAN}│${NC}\n" "https://${TRAEFIK_SUBDOMAIN:-app}.${TRAEFIK_DOMAIN}" +printf "${CYAN}│${NC} ${BOLD}Type:${NC} %-83s ${CYAN}│${NC}\n" "${ROLL_ENV_TYPE} PHP ${PHP_VERSION:-8.2} | Node ${NODE_VERSION:-18}" +printf "${CYAN}│${NC} ${BOLD}Router:${NC} %-83s ${CYAN}│${NC}\n" "traefik" +line_mid + +# Table header +header_row "SERVICE" "STATUS" "URL/PORT" "INFO" +line_mid + +# Services +data_row "nginx" "$(get_status_text nginx)" "https://${TRAEFIK_SUBDOMAIN:-app}.${TRAEFIK_DOMAIN}" "${ROLL_ENV_TYPE}" +sub_row "InDocker: nginx:80,443" "Server: nginx-fpm" + +data_row "php-fpm" "$(get_status_text php-fpm)" "InDocker: php-fpm:9000" "PHP ${PHP_VERSION:-8.2}" + +if [[ "${ROLL_XDEBUG:-0}" == "1" ]] || [[ "${PHP_XDEBUG_3:-0}" == "1" ]]; then + data_row "php-debug" "$(get_status_text php-debug)" "InDocker: php-debug:9000" "Xdebug 3" +fi + +if [[ "${ROLL_DB:-1}" == "1" ]]; then + DB_TYPE="${DB_DISTRIBUTION:-mariadb}:${DB_DISTRIBUTION_VERSION:-10.4}" + data_row "db" "$(get_status_text db)" "InDocker: db:3306" "${DB_TYPE}" + sub_row "" "magento/magento" +fi + +if [[ "${ROLL_REDIS:-0}" == "1" ]]; then + data_row "redis" "$(get_status_text redis)" "InDocker: redis:6379" "Redis ${REDIS_VERSION:-7.2}" +fi + +if [[ "${ROLL_REDISINSIGHT:-0}" == "1" ]]; then + data_row "redisinsight" "$(get_status_text redisinsight)" "https://insight.${TRAEFIK_DOMAIN}" "" +fi + +if [[ "${ROLL_ELASTICSEARCH:-0}" == "1" ]]; then + data_row "elasticsearch" "$(get_status_text elasticsearch)" "InDocker: elasticsearch:9200" "ES ${ELASTICSEARCH_VERSION:-7.17}" +fi + +if [[ "${ROLL_OPENSEARCH:-0}" == "1" ]]; then + data_row "opensearch" "$(get_status_text opensearch)" "InDocker: opensearch:9200" 
"OS ${OPENSEARCH_VERSION:-2.5}" +fi + +if [[ "${ROLL_RABBITMQ:-0}" == "1" ]]; then + data_row "rabbitmq" "$(get_status_text rabbitmq)" "https://rabbitmq.${TRAEFIK_DOMAIN}" "Management UI" +fi + +if [[ "${ROLL_VARNISH:-0}" == "1" ]]; then + data_row "varnish" "$(get_status_text varnish)" "InDocker: varnish:80" "" +fi + +if docker ps -a --format '{{.Names}}' | grep -q "${ROLL_ENV_NAME}-mailhog-1"; then + data_row "mailhog" "$(get_status_text mailhog)" "https://mailhog.${TRAEFIK_DOMAIN}" "Mail catcher" +fi + +line_mid + +# Project URLs +if [[ -f "${ROLL_ENV_PATH}/.roll/stores.json" ]] && command -v jq &> /dev/null; then + info_row "Project URLs" "" + + # Main URL + text_row "https://${TRAEFIK_SUBDOMAIN:-app}.${TRAEFIK_DOMAIN}" + + # Store URLs + jq -r '.stores | keys[]' "${ROLL_ENV_PATH}/.roll/stores.json" 2>/dev/null | while read hostname; do + text_row "https://${hostname}" + done +fi + +line_bot +echo "" diff --git a/commands/describe.help b/commands/describe.help new file mode 100644 index 0000000..f695bba --- /dev/null +++ b/commands/describe.help @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 + +ROLL_USAGE=$(cat < Date: Wed, 10 Dec 2025 16:46:48 +0100 Subject: [PATCH 57/69] add: describe subcommand to env.cmd and enhance multistore documentation with improved clarity and structure --- commands/env.cmd | 6 ++++++ commands/multistore.cmd | 2 +- commands/multistore.help | 45 +++++++++++++++++++--------------------- 3 files changed, 28 insertions(+), 25 deletions(-) diff --git a/commands/env.cmd b/commands/env.cmd index e59ee39..62b2cb6 100644 --- a/commands/env.cmd +++ b/commands/env.cmd @@ -186,6 +186,12 @@ else export ROLL_SELENIUM_DEBUG= fi +## handle describe subcommand +if [[ "${ROLL_PARAMS[0]}" == "describe" ]]; then + source "${ROLL_DIR}/commands/describe.cmd" + exit $? 
+fi + ## disconnect peered service containers from environment network if [[ "${ROLL_PARAMS[0]}" == "down" ]]; then disconnectPeeredServices "$(renderEnvNetworkName)" diff --git a/commands/multistore.cmd b/commands/multistore.cmd index 52814b2..acd0409 100755 --- a/commands/multistore.cmd +++ b/commands/multistore.cmd @@ -117,7 +117,7 @@ function generate_roll_env_yml() { else traefik_rules+=" || " fi - traefik_rules+="HostRegexp(\\\`{subdomain:.+}.${hostname}\\\`) || Host(\\\`${hostname}\\\`)" + traefik_rules+="HostRegexp(\`{subdomain:.+}.${hostname}\`) || Host(\`${hostname}\`)" done < <(get_all_hostnames) # Build extra_hosts entries diff --git a/commands/multistore.help b/commands/multistore.help index 1905a8f..c7557dc 100644 --- a/commands/multistore.help +++ b/commands/multistore.help @@ -1,16 +1,21 @@ -Manage Magento multi-store configuration with automatic config generation. +#!/usr/bin/env bash +[[ ! ${ROLL_DIR} ]] && >&2 echo -e "\033[31mThis script is not intended to be run directly!\033[0m" && exit 1 -Usage: +ROLL_USAGE=$(cat < -Commands: +\033[33mCommands:\033[0m init Generate configs from .roll/stores.json and sign SSL certificates refresh Regenerate configs from .roll/stores.json (without re-signing SSL) list Show current store configuration and status help Show this help message -Configuration: - Create .roll/stores.json in your project root with hostname to store code mapping: +\033[33mConfiguration:\033[0m + Create .roll/stores.json in your project root: { "stores": { @@ -25,31 +30,23 @@ Configuration: - Empty string ("") means use the default store code - "run_type": Either "store" or "website" (default: "store") -Generated Files: - .roll/roll-env.yml Docker Compose override with: - - Traefik routing rules for all hostnames - - Nginx volume mount for store mapping - - extra_hosts for PHP containers - - .roll/nginx/stores.map Nginx configuration with: - - $mage_run_code variable mapping - - $mage_run_type variable +\033[33mGenerated Files:\033[0m + 
.roll/roll-env.yml Traefik routing, nginx volume, extra_hosts + .roll/nginx/stores.map Nginx hostname-to-store-code mapping -Workflow: +\033[33mWorkflow:\033[0m 1. Create .roll/stores.json with your store configuration 2. Run: roll multistore init 3. Run: roll env up - To add/modify stores later: + To add/modify stores: 1. Edit .roll/stores.json 2. Run: roll multistore refresh 3. Run: roll env restart nginx -Prerequisites: - - jq must be installed (brew install jq) - - Project must have .env.roll configured - -Examples: - roll multistore init # First time setup - roll multistore refresh # After editing stores.json - roll multistore list # Check current configuration +\033[33mExamples:\033[0m + roll multistore init First time setup + roll multistore refresh After editing stores.json + roll multistore list Check current configuration +EOF +) From 4bdb7c3674987e7e1c066f5abd5e46ea9323778a Mon Sep 17 00:00:00 2001 From: github-actions Date: Wed, 10 Dec 2025 15:48:06 +0000 Subject: [PATCH 58/69] Tagged 0.5.2 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index 4b9fcbe..cb0c939 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.5.1 +0.5.2 From 0af1b7ad5216740dccbfe185bba672a24559fb61 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Fri, 19 Dec 2025 11:56:33 +0100 Subject: [PATCH 59/69] fix: handle help flag to properly support commands with arguments --- bin/roll | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bin/roll b/bin/roll index a18422f..5c56240 100755 --- a/bin/roll +++ b/bin/roll @@ -69,6 +69,8 @@ fi while (( "$#" )); do case "$1" in -h|--help) + ## pass --help to command if it accepts any args, otherwise show roll help + containsElement "${ROLL_CMD_VERB}" "${ROLL_CMD_ANYARGS[@]}" && break ROLL_HELP=1 break ;; From 85243303dc733cade8dc439acc2bc717f8b6b174 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Mon, 19 Jan 2026 11:53:11 +0100 Subject: [PATCH 60/69] add: verbose logging support for restore and restore-full 
commands + fix issue with restoring full backup for first time - Added `--verbose` (`-v`) option for detailed debug output. - Updated help documentation for both commands. - Integrated additional logging into restore processes, including extracting backups, stopping environments, and restoring volumes. --- commands/restore-full.cmd | 141 ++++++++++++++++++++++++++++++------- commands/restore-full.help | 2 + commands/restore.cmd | 88 +++++++++++++++++------ commands/restore.help | 2 + 4 files changed, 183 insertions(+), 50 deletions(-) diff --git a/commands/restore-full.cmd b/commands/restore-full.cmd index cee20d8..34b42e4 100644 --- a/commands/restore-full.cmd +++ b/commands/restore-full.cmd @@ -11,6 +11,7 @@ RESTORE_VERIFY=1 RESTORE_FORCE=0 RESTORE_DRY_RUN=0 RESTORE_QUIET=0 +RESTORE_VERBOSE=0 RESTORE_DECRYPT="" RESTORE_BACKUP_FILE="" RESTORE_OUTPUT_DIR="" @@ -57,6 +58,10 @@ while [[ $# -gt 0 ]]; do PROGRESS=0 shift ;; + --verbose|-v) + RESTORE_VERBOSE=1 + shift + ;; --decrypt=*) RESTORE_DECRYPT="${1#*=}" shift @@ -187,9 +192,14 @@ function logMessage() { SUCCESS) success "$@" ;; WARNING) warning "$@" ;; ERROR) error "$@" ;; + VERBOSE) [[ $RESTORE_VERBOSE -eq 1 ]] && info "[VERBOSE] $@" ;; esac } +function logVerbose() { + [[ $RESTORE_VERBOSE -eq 1 ]] && logMessage INFO "$@" +} + function performLegacyMigration() { if [[ $RESTORE_LEGACY_MIGRATION -eq 0 ]]; then return 0 @@ -304,7 +314,7 @@ function extractBackupArchive() { *.tar.lz4) decompress_cmd="lz4 -d" ;; esac - if $decompress_cmd < "$archive_file" | tar -xf - -C "$extract_dir" --strip-components=1; then + if $decompress_cmd < "$archive_file" | tar -xf - -C "$extract_dir" --strip-components=1 2>/dev/null; then echo "$extract_dir" return 0 else @@ -321,7 +331,13 @@ function extractBackupArchiveFile() { base_name="${base_name%%.tar*}" local extract_dir="$backup_dir/${base_name}_extracted" + logVerbose "Extracting backup archive file" + logVerbose "Archive file: $archive_file" + logVerbose "Backup directory: 
$backup_dir" + logVerbose "Extract directory: $extract_dir" + if [[ -d "$extract_dir" ]]; then + logVerbose "Found already extracted backup at: $extract_dir" echo "$extract_dir" return 0 fi @@ -335,7 +351,10 @@ function extractBackupArchiveFile() { *.tar.lz4) decompress_cmd="lz4 -d" ;; esac - if $decompress_cmd < "$archive_file" | tar -xf - -C "$extract_dir" --strip-components=1; then + logVerbose "Using decompression command: $decompress_cmd" + + if $decompress_cmd < "$archive_file" | tar -xf - -C "$extract_dir" --strip-components=1 2>/dev/null; then + logVerbose "Successfully extracted to: $extract_dir" echo "$extract_dir" return 0 else @@ -420,12 +439,45 @@ function stopEnvironment() { logMessage INFO "[DRY RUN] Would stop environment" return 0 fi - + logMessage INFO "Stopping environment for consistent restore..." - - local running_containers=$(roll env ps --services --filter "status=running" 2>/dev/null | grep 'php-fpm' | sed 's/ *$//g') - if [[ -n "$running_containers" ]]; then - "${ROLL_DIR}/bin/roll" env down >/dev/null 2>&1 + logVerbose "Environment path: ${ROLL_ENV_PATH}" + + # Check if environment is configured before trying to stop it + # The env commands require .env.roll to exist, which may not be the case + # during a full restore (config comes from backup) + if [[ ! 
-f "${ROLL_ENV_PATH}/.env.roll" ]]; then + logMessage INFO "No environment configuration found yet, skipping stop" + logVerbose "Missing file: ${ROLL_ENV_PATH}/.env.roll" + return 0 + fi + + # Use docker compose directly instead of roll env to avoid exit on error + # when environment is not fully configured + local project_name="${ROLL_ENV_NAME:-}" + if [[ -z "$project_name" ]]; then + # Try to extract from .env.roll if available + logVerbose "ROLL_ENV_NAME not set, extracting from .env.roll" + project_name=$(grep -E "^ROLL_ENV_NAME=" "${ROLL_ENV_PATH}/.env.roll" 2>/dev/null | cut -d'=' -f2 | tr -d '"'"'" || true) + fi + + logVerbose "Project name: ${project_name:-}" + + if [[ -n "$project_name" ]]; then + # Check if any containers are running for this project + logVerbose "Checking for running containers with label: com.docker.compose.project=${project_name}" + local running_containers=$(docker ps --filter "label=com.docker.compose.project=${project_name}" --format '{{.Names}}' 2>/dev/null || true) + if [[ -n "$running_containers" ]]; then + logVerbose "Found running containers: $running_containers" + logMessage INFO "Stopping running containers..." 
+ logVerbose "Executing: docker compose -p ${project_name} down" + docker compose -p "${project_name}" down >/dev/null 2>&1 || true + logVerbose "Containers stopped" + else + logVerbose "No running containers found" + fi + else + logVerbose "No project name available, skipping container stop" fi } @@ -456,16 +508,20 @@ function restoreVolume() { local backup_path="$2" local step="$3" local total="$4" - + showProgress $step $total "Restoring $service_name volume" - + local volume_mapping=$(getVolumeMapping "$service_name") IFS=':' read -r volume_name service_type <<< "$volume_mapping" - + + logVerbose "Restoring service: $service_name" + logVerbose "Volume mapping: $volume_mapping" + logVerbose "Volume name: $volume_name, Service type: $service_type" + # Determine backup file location (check for both encrypted and unencrypted) local backup_file="" local is_encrypted=false - + # Check for encrypted files first (.gpg extension) if [[ -f "$backup_path/volumes/${service_name}.tar.gz.gpg" ]]; then backup_file="$backup_path/volumes/${service_name}.tar.gz.gpg" @@ -493,8 +549,12 @@ function restoreVolume() { backup_file="$backup_path/${service_name}.tar.gz" else logMessage WARNING "Backup file not found for service: $service_name" + logVerbose "Searched in: $backup_path/volumes/ and $backup_path/" return 0 fi + + logVerbose "Found backup file: $backup_file" + logVerbose "Encrypted: $is_encrypted" if [[ $RESTORE_DRY_RUN -eq 1 ]]; then if [[ $is_encrypted == true ]]; then @@ -516,26 +576,37 @@ function restoreVolume() { # Get Docker Compose version for proper labeling local docker_compose_version=$(docker compose version 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+' | head -1) local volume_base_name=$(echo "$volume_name" | sed "s/${ROLL_ENV_NAME}_//") - + + logVerbose "Docker Compose version: $docker_compose_version" + logVerbose "Volume base name: $volume_base_name" + # Remove existing volume if it exists if docker volume inspect "$volume_name" >/dev/null 2>&1; then if [[ 
$RESTORE_FORCE -eq 1 ]]; then logMessage INFO "Removing existing volume: $volume_name" + logVerbose "Executing: docker volume rm $volume_name" docker volume rm "$volume_name" >/dev/null 2>&1 else logMessage ERROR "Volume $volume_name already exists. Use --force to overwrite." return 1 fi + else + logVerbose "Volume $volume_name does not exist yet" fi - + # Create new volume with proper labels + logVerbose "Creating volume: $volume_name with labels" + logVerbose " - com.docker.compose.project=$ROLL_ENV_NAME" + logVerbose " - com.docker.compose.version=$docker_compose_version" + logVerbose " - com.docker.compose.volume=$volume_base_name" docker volume create "$volume_name" \ --label com.docker.compose.project="$ROLL_ENV_NAME" \ --label com.docker.compose.version="$docker_compose_version" \ --label com.docker.compose.volume="$volume_base_name" >/dev/null 2>&1 - + # Restore the volume data with decryption if needed local temp_container="${ROLL_ENV_NAME}_restore_${service_name}_$$" + logVerbose "Temp container name: $temp_container" if [[ $is_encrypted == true ]]; then # Decrypt and decompress pipeline - use ubuntu and original tar approach with strip components @@ -801,7 +872,7 @@ function restoreSourceCode() { logMessage ERROR "Encrypted source archive found but no decryption password provided" return 1 fi - if echo "$RESTORE_DECRYPT" | gpg --batch --yes --quiet --passphrase-fd 0 --decrypt "$src_file" | $decompress_cmd | tar -xf - -C "$target_dir"; then + if echo "$RESTORE_DECRYPT" | gpg --batch --yes --quiet --passphrase-fd 0 --decrypt "$src_file" | $decompress_cmd | tar -xf - -C "$target_dir" 2>/dev/null; then logMessage SUCCESS "Source code restored" return 0 else @@ -809,7 +880,7 @@ function restoreSourceCode() { return 1 fi else - if $decompress_cmd "$src_file" | tar -xf - -C "$target_dir"; then + if $decompress_cmd "$src_file" | tar -xf - -C "$target_dir" 2>/dev/null; then logMessage SUCCESS "Source code restored" return 0 else @@ -820,27 +891,37 @@ function 
restoreSourceCode() { } function performRestore() { - + + logVerbose "Starting full restore process" + logVerbose "Backup file: $RESTORE_BACKUP_FILE" + logVerbose "Output directory: $RESTORE_OUTPUT_DIR" + logVerbose "Environment path: $ROLL_ENV_PATH" + # Perform legacy migration if needed performLegacyMigration - + # Validate database environment if [[ ${ROLL_DB:-1} -eq 0 ]]; then logMessage ERROR "Database environment is not enabled (ROLL_DB=0)" exit 1 fi - + # Determine backup path from archive argument local backup_path="" + logVerbose "Checking backup file type..." if [[ -f "$RESTORE_BACKUP_FILE" ]]; then + logVerbose "Backup is a file, extracting..." backup_path=$(extractBackupArchiveFile "$RESTORE_BACKUP_FILE") elif [[ -d "$RESTORE_BACKUP_FILE" ]]; then + logVerbose "Backup is a directory" backup_path="$RESTORE_BACKUP_FILE" else logMessage ERROR "Backup file not found: $RESTORE_BACKUP_FILE" exit 1 fi + + logVerbose "Backup path resolved to: $backup_path" # Detect if backup is encrypted and handle password prompting if detectEncryptedBackup "$backup_path"; then @@ -866,17 +947,21 @@ function performRestore() { # Get backup metadata local metadata=$(getBackupMetadata "$backup_path") logMessage INFO "Restoring backup from: $(basename \"$RESTORE_BACKUP_FILE\")" + logVerbose "Backup metadata: $metadata" local source_exists=0 for ext in ".tar.gz" ".tar.xz" ".tar.lz4" ".tar"; do if [[ -f "$backup_path/source${ext}" ]] || [[ -f "$backup_path/source${ext}.gpg" ]]; then + logVerbose "Found source archive: source${ext}" source_exists=1 break fi done + logVerbose "Source code exists in backup: $source_exists" if [[ $ROLL_ENV_LOADED -eq 0 ]]; then ROLL_ENV_NAME=$(echo "$metadata" | grep -o '"environment"[^"]*"' | head -1 | sed 's/.*"environment"[ ]*:[ ]*"\([^"]*\)".*/\1/') + logVerbose "Extracted ROLL_ENV_NAME from metadata: ${ROLL_ENV_NAME:-}" fi # Detect available services in backup @@ -887,10 +972,11 @@ function performRestore() { fi logMessage INFO "Available services 
in backup: ${available_services[*]}" - + # Determine which services to restore local services_to_restore=() if [[ ${#RESTORE_SERVICES[@]} -gt 0 ]]; then + logVerbose "User specified services to restore: ${RESTORE_SERVICES[*]}" # Use specified services for service in "${RESTORE_SERVICES[@]}"; do if containsElement "$service" "${available_services[@]}"; then @@ -900,6 +986,7 @@ function performRestore() { fi done else + logVerbose "No specific services requested, restoring all available" # Restore all available services services_to_restore=("${available_services[@]}") fi @@ -917,23 +1004,23 @@ function performRestore() { # Calculate total steps local total_steps=${#services_to_restore[@]} if [[ $RESTORE_CONFIG -eq 1 ]]; then - ((total_steps++)) + total_steps=$((total_steps + 1)) fi if [[ $source_exists -eq 1 ]]; then - ((total_steps++)) + total_steps=$((total_steps + 1)) fi - + local current_step=0 - + # Restore source code if available if [[ $source_exists -eq 1 ]]; then - ((current_step++)) + current_step=$((current_step + 1)) restoreSourceCode "$backup_path" "$ROLL_ENV_PATH" $current_step $total_steps fi # Restore configurations if [[ $RESTORE_CONFIG -eq 1 ]]; then - ((current_step++)) + current_step=$((current_step + 1)) restoreConfigurations "$backup_path" $current_step $total_steps if [[ $ROLL_ENV_LOADED -eq 0 ]]; then loadEnvConfig "$ROLL_ENV_PATH" || exit 1 @@ -943,7 +1030,7 @@ function performRestore() { # Restore volumes for service in "${services_to_restore[@]}"; do - ((current_step++)) + current_step=$((current_step + 1)) restoreVolume "$service" "$backup_path" $current_step $total_steps done diff --git a/commands/restore-full.help b/commands/restore-full.help index 7a6c81f..b022d5b 100644 --- a/commands/restore-full.help +++ b/commands/restore-full.help @@ -10,6 +10,7 @@ ROLL_USAGE=$(cat </dev/null | head -1) fi - + if [[ -z "$archive_file" ]]; then logMessage ERROR "Backup archive not found for ID: $backup_id" return 1 fi - + logMessage INFO "Extracting 
backup archive: $(basename "$archive_file")" - + logVerbose "Full archive path: $archive_file" + logVerbose "Extract destination: $extract_dir" + mkdir -p "$extract_dir" - + # Determine decompression command based on file extension local decompress_cmd="cat" case "$archive_file" in @@ -288,8 +306,10 @@ function extractBackupArchive() { *.tar.xz) decompress_cmd="xz -d" ;; *.tar.lz4) decompress_cmd="lz4 -d" ;; esac - - if $decompress_cmd < "$archive_file" | tar -xf - -C "$extract_dir" --strip-components=1; then + logVerbose "Using decompression command: $decompress_cmd" + + if $decompress_cmd < "$archive_file" | tar -xf - -C "$extract_dir" --strip-components=1 2>/dev/null; then + logVerbose "Successfully extracted archive to: $extract_dir" echo "$extract_dir" return 0 else @@ -374,12 +394,19 @@ function stopEnvironment() { logMessage INFO "[DRY RUN] Would stop environment" return 0 fi - + logMessage INFO "Stopping environment for consistent restore..." - - local running_containers=$(roll env ps --services --filter "status=running" 2>/dev/null | grep 'php-fpm' | sed 's/ *$//g') + + logVerbose "Checking for running containers with project: ${ROLL_ENV_NAME}" + local running_containers=$(docker ps --filter "label=com.docker.compose.project=${ROLL_ENV_NAME}" --format '{{.Names}}' 2>/dev/null || true) + if [[ -n "$running_containers" ]]; then - "${ROLL_DIR}/bin/roll" env down >/dev/null 2>&1 + logVerbose "Found running containers: $running_containers" + logVerbose "Executing: roll env down" + "${ROLL_DIR}/bin/roll" env down >/dev/null 2>&1 || true + logVerbose "Environment stopped" + else + logVerbose "No running containers found for project ${ROLL_ENV_NAME}" fi } @@ -410,12 +437,16 @@ function restoreVolume() { local backup_path="$2" local step="$3" local total="$4" - + showProgress $step $total "Restoring $service_name volume" - + local volume_mapping=$(getVolumeMapping "$service_name") IFS=':' read -r volume_name service_type <<< "$volume_mapping" - + + logVerbose 
"Restoring service: $service_name" + logVerbose "Volume mapping: $volume_mapping" + logVerbose "Volume name: $volume_name, Service type: $service_type" + # Determine backup file location (check for both encrypted and unencrypted) local backup_file="" local is_encrypted=false @@ -447,8 +478,12 @@ function restoreVolume() { backup_file="$backup_path/${service_name}.tar.gz" else logMessage WARNING "Backup file not found for service: $service_name" + logVerbose "Searched in: $backup_path/volumes/ and $backup_path/" return 0 fi + + logVerbose "Found backup file: $backup_file" + logVerbose "Encrypted: $is_encrypted" if [[ $RESTORE_DRY_RUN -eq 1 ]]; then if [[ $is_encrypted == true ]]; then @@ -475,14 +510,21 @@ function restoreVolume() { if docker volume inspect "$volume_name" >/dev/null 2>&1; then if [[ $RESTORE_FORCE -eq 1 ]]; then logMessage INFO "Removing existing volume: $volume_name" + logVerbose "Executing: docker volume rm $volume_name" docker volume rm "$volume_name" >/dev/null 2>&1 else logMessage ERROR "Volume $volume_name already exists. Use --force to overwrite." 
return 1 fi + else + logVerbose "Volume $volume_name does not exist yet" fi - + # Create new volume with proper labels + logVerbose "Creating volume: $volume_name with labels" + logVerbose " - com.docker.compose.project=$ROLL_ENV_NAME" + logVerbose " - com.docker.compose.version=$docker_compose_version" + logVerbose " - com.docker.compose.volume=$volume_base_name" docker volume create "$volume_name" \ --label com.docker.compose.project="$ROLL_ENV_NAME" \ --label com.docker.compose.version="$docker_compose_version" \ @@ -807,20 +849,20 @@ function performRestore() { # Calculate total steps local total_steps=${#services_to_restore[@]} if [[ $RESTORE_CONFIG -eq 1 ]]; then - ((total_steps++)) + total_steps=$((total_steps + 1)) fi - + local current_step=0 - + # Restore volumes for service in "${services_to_restore[@]}"; do - ((current_step++)) + current_step=$((current_step + 1)) restoreVolume "$service" "$backup_path" $current_step $total_steps done - + # Restore configurations if [[ $RESTORE_CONFIG -eq 1 ]]; then - ((current_step++)) + current_step=$((current_step + 1)) restoreConfigurations "$backup_path" $current_step $total_steps fi diff --git a/commands/restore.help b/commands/restore.help index 36b6b75..6cb9214 100755 --- a/commands/restore.help +++ b/commands/restore.help @@ -11,6 +11,7 @@ ROLL_USAGE=$(cat < Date: Mon, 19 Jan 2026 11:53:38 +0100 Subject: [PATCH 61/69] update: remove Traefik version pinning in docker-compose and fix indentation in svc.cmd for traefik v3 --- commands/svc.cmd | 6 +++--- docker/docker-compose.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/commands/svc.cmd b/commands/svc.cmd index 2f4b752..20b5cc9 100644 --- a/commands/svc.cmd +++ b/commands/svc.cmd @@ -70,9 +70,9 @@ if [[ "${ROLL_PARAMS[0]}" == "up" ]]; then tls: stores: default: - defaultCertificate: - certFile: /etc/ssl/certs/${ROLL_SERVICE_DOMAIN}.crt.pem - keyFile: /etc/ssl/certs/${ROLL_SERVICE_DOMAIN}.key.pem + defaultCertificate: + certFile: 
/etc/ssl/certs/${ROLL_SERVICE_DOMAIN}.crt.pem + keyFile: /etc/ssl/certs/${ROLL_SERVICE_DOMAIN}.key.pem certificates: EOT diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index d77d116..a12e8e1 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,7 +1,7 @@ services: traefik: container_name: traefik - image: traefik:${TRAEFIK_VERSION:-2.2} + image: traefik ports: - "${TRAEFIK_LISTEN:-127.0.0.1}:80:80" # The HTTP port - "${TRAEFIK_LISTEN:-127.0.0.1}:443:443" # The HTTPS port From 750d732fa64dca9ca4044dfe6e316bdb7e219011 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Mon, 19 Jan 2026 11:56:50 +0100 Subject: [PATCH 62/69] add: automated SSL certificate signing for environment domains in restore and restore-full commands --- commands/restore-full.cmd | 54 +++++++++++++++++++++++++++++++++++++++ commands/restore.cmd | 54 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+) diff --git a/commands/restore-full.cmd b/commands/restore-full.cmd index 34b42e4..4f332f4 100644 --- a/commands/restore-full.cmd +++ b/commands/restore-full.cmd @@ -1043,9 +1043,63 @@ function performRestore() { logMessage SUCCESS "Dry run completed successfully!" else logMessage SUCCESS "Restore completed successfully!" + + # Auto-sign SSL certificate for the environment domain + signEnvironmentCertificate + logMessage INFO "You can now start your environment with: roll env up" fi } +function signEnvironmentCertificate() { + # Determine the domain to sign certificate for + local domain="${TRAEFIK_DOMAIN:-}" + + # If TRAEFIK_DOMAIN not set, try to construct from env name + if [[ -z "$domain" ]]; then + domain="${ROLL_ENV_NAME}.test" + fi + + if [[ -z "$domain" ]]; then + logVerbose "No domain found for certificate signing, skipping" + return 0 + fi + + # Check if root CA exists + if [[ ! -f "${ROLL_SSL_DIR}/rootca/certs/ca.cert.pem" ]]; then + logMessage WARNING "Root CA not found. Run 'roll install' first to enable SSL certificates." 
+ return 0 + fi + + logMessage INFO "Signing SSL certificates for ${domain}..." + + # Sign certificate for the main domain (includes domain and *.domain) + logVerbose "Signing certificate for: ${domain}" + if [[ $RESTORE_VERBOSE -eq 1 ]]; then + "${ROLL_DIR}/bin/roll" sign-certificate "$domain" || { + logMessage WARNING "Failed to sign certificate for ${domain}" + } + else + "${ROLL_DIR}/bin/roll" sign-certificate "$domain" >/dev/null 2>&1 || { + logMessage WARNING "Failed to sign certificate for ${domain}" + } + fi + + # Sign separate certificate for wildcard domain + local wildcard_domain="*.${domain}" + logVerbose "Signing certificate for: ${wildcard_domain}" + if [[ $RESTORE_VERBOSE -eq 1 ]]; then + "${ROLL_DIR}/bin/roll" sign-certificate "$wildcard_domain" || { + logMessage WARNING "Failed to sign certificate for ${wildcard_domain}" + } + else + "${ROLL_DIR}/bin/roll" sign-certificate "$wildcard_domain" >/dev/null 2>&1 || { + logMessage WARNING "Failed to sign certificate for ${wildcard_domain}" + } + fi + + logMessage SUCCESS "SSL certificates signed for ${domain}" +} + # Main execution performRestore diff --git a/commands/restore.cmd b/commands/restore.cmd index 1a10da9..528325b 100755 --- a/commands/restore.cmd +++ b/commands/restore.cmd @@ -875,10 +875,64 @@ function performRestore() { logMessage SUCCESS "Dry run completed successfully!" else logMessage SUCCESS "Restore completed successfully!" 
+ + # Auto-sign SSL certificate for the environment domain + signEnvironmentCertificate + logMessage INFO "You can now start your environment with: roll env up" fi } +function signEnvironmentCertificate() { + # Determine the domain to sign certificate for + local domain="${TRAEFIK_DOMAIN:-}" + + # If TRAEFIK_DOMAIN not set, try to construct from env name + if [[ -z "$domain" ]]; then + domain="${ROLL_ENV_NAME}.test" + fi + + if [[ -z "$domain" ]]; then + logVerbose "No domain found for certificate signing, skipping" + return 0 + fi + + # Check if root CA exists + if [[ ! -f "${ROLL_SSL_DIR}/rootca/certs/ca.cert.pem" ]]; then + logMessage WARNING "Root CA not found. Run 'roll install' first to enable SSL certificates." + return 0 + fi + + logMessage INFO "Signing SSL certificates for ${domain}..." + + # Sign certificate for the main domain (includes domain and *.domain) + logVerbose "Signing certificate for: ${domain}" + if [[ $RESTORE_VERBOSE -eq 1 ]]; then + "${ROLL_DIR}/bin/roll" sign-certificate "$domain" || { + logMessage WARNING "Failed to sign certificate for ${domain}" + } + else + "${ROLL_DIR}/bin/roll" sign-certificate "$domain" >/dev/null 2>&1 || { + logMessage WARNING "Failed to sign certificate for ${domain}" + } + fi + + # Sign separate certificate for wildcard domain + local wildcard_domain="*.${domain}" + logVerbose "Signing certificate for: ${wildcard_domain}" + if [[ $RESTORE_VERBOSE -eq 1 ]]; then + "${ROLL_DIR}/bin/roll" sign-certificate "$wildcard_domain" || { + logMessage WARNING "Failed to sign certificate for ${wildcard_domain}" + } + else + "${ROLL_DIR}/bin/roll" sign-certificate "$wildcard_domain" >/dev/null 2>&1 || { + logMessage WARNING "Failed to sign certificate for ${wildcard_domain}" + } + fi + + logMessage SUCCESS "SSL certificates signed for ${domain}" +} + # Main execution if [[ -z "$RESTORE_BACKUP_ID" ]]; then # If no backup ID provided, use the latest From f8bc097c99430f4db66b27893b4d1ee05be32acc Mon Sep 17 00:00:00 2001 From: 
Rick Bouma Date: Mon, 19 Jan 2026 12:08:27 +0100 Subject: [PATCH 63/69] update: refine Traefik rules for host matching v3 and add Traefik configuration regeneration in restore commands v3 --- commands/multistore.cmd | 4 ++-- commands/restore-full.cmd | 12 ++++++++++++ commands/restore.cmd | 12 ++++++++++++ docs/configuration/multipledomains.md | 12 ++++++------ environments/includes/nginx.base.yml | 2 +- environments/includes/varnish.base.yml | 2 +- environments/magento2/magento2.base.yml | 2 +- 7 files changed, 35 insertions(+), 11 deletions(-) diff --git a/commands/multistore.cmd b/commands/multistore.cmd index acd0409..20bec0d 100755 --- a/commands/multistore.cmd +++ b/commands/multistore.cmd @@ -117,7 +117,7 @@ function generate_roll_env_yml() { else traefik_rules+=" || " fi - traefik_rules+="HostRegexp(\`{subdomain:.+}.${hostname}\`) || Host(\`${hostname}\`)" + traefik_rules+="HostRegexp(\`^.+\\.${hostname//./\\.}\$\$\`) || Host(\`${hostname}\`)" done < <(get_all_hostnames) # Build extra_hosts entries @@ -134,7 +134,7 @@ services: nginx: labels: - traefik.http.routers.\${ROLL_ENV_NAME}-nginx.rule= - HostRegexp(\`{subdomain:.+}.\${TRAEFIK_DOMAIN}\`) || Host(\`\${TRAEFIK_DOMAIN}\`) + HostRegexp(\`^.+\\.\${TRAEFIK_DOMAIN}\$\$\`) || Host(\`\${TRAEFIK_DOMAIN}\`) || ${traefik_rules} volumes: - ./.roll/nginx/stores.map:/etc/nginx/default.d/stores.map:ro diff --git a/commands/restore-full.cmd b/commands/restore-full.cmd index 4f332f4..6c62dd7 100644 --- a/commands/restore-full.cmd +++ b/commands/restore-full.cmd @@ -1098,6 +1098,18 @@ function signEnvironmentCertificate() { } fi + # Regenerate traefik dynamic config and restart to pick up new certificates + logVerbose "Regenerating traefik configuration..." 
+ if [[ $RESTORE_VERBOSE -eq 1 ]]; then + "${ROLL_DIR}/bin/roll" svc up traefik || { + logMessage WARNING "Failed to restart traefik" + } + else + "${ROLL_DIR}/bin/roll" svc up traefik >/dev/null 2>&1 || { + logMessage WARNING "Failed to restart traefik" + } + fi + logMessage SUCCESS "SSL certificates signed for ${domain}" } diff --git a/commands/restore.cmd b/commands/restore.cmd index 528325b..2693899 100755 --- a/commands/restore.cmd +++ b/commands/restore.cmd @@ -930,6 +930,18 @@ function signEnvironmentCertificate() { } fi + # Regenerate traefik dynamic config and restart to pick up new certificates + logVerbose "Regenerating traefik configuration..." + if [[ $RESTORE_VERBOSE -eq 1 ]]; then + "${ROLL_DIR}/bin/roll" svc up traefik || { + logMessage WARNING "Failed to restart traefik" + } + else + "${ROLL_DIR}/bin/roll" svc up traefik >/dev/null 2>&1 || { + logMessage WARNING "Failed to restart traefik" + } + fi + logMessage SUCCESS "SSL certificates signed for ${domain}" } diff --git a/docs/configuration/multipledomains.md b/docs/configuration/multipledomains.md index 0039ee2..d752433 100644 --- a/docs/configuration/multipledomains.md +++ b/docs/configuration/multipledomains.md @@ -16,20 +16,20 @@ Multiple top-level domains may also be setup by following the instructions below varnish: labels: - traefik.http.routers.${ROLL_ENV_NAME}-varnish.rule= - HostRegexp(`{subdomain:.+}.${TRAEFIK_DOMAIN}`) + HostRegexp(`^.+\.${TRAEFIK_DOMAIN}$$`) || Host(`${TRAEFIK_DOMAIN}`) - || HostRegexp(`{subdomain:.+}.alternate1.test`) + || HostRegexp(`^.+\.alternate1\.test$$`) || Host(`alternate1.test`) - || HostRegexp(`{subdomain:.+}.alternate2.test`) + || HostRegexp(`^.+\.alternate2\.test$$`) || Host(`alternate2.test`) nginx: labels: - traefik.http.routers.${ROLL_ENV_NAME}-nginx.rule= - HostRegexp(`{subdomain:.+}.${TRAEFIK_DOMAIN}`) + HostRegexp(`^.+\.${TRAEFIK_DOMAIN}$$`) || Host(`${TRAEFIK_DOMAIN}`) - || HostRegexp(`{subdomain:.+}.alternate1.test`) + || 
HostRegexp(`^.+\.alternate1\.test$$`) || Host(`alternate1.test`) - || HostRegexp(`{subdomain:.+}.alternate2.test`) + || HostRegexp(`^.+\.alternate2\.test$$`) || Host(`alternate2.test`) ``` diff --git a/environments/includes/nginx.base.yml b/environments/includes/nginx.base.yml index 4e40710..1deaa22 100644 --- a/environments/includes/nginx.base.yml +++ b/environments/includes/nginx.base.yml @@ -7,7 +7,7 @@ services: - traefik.http.routers.${ROLL_ENV_NAME}-nginx.tls=true - traefik.http.routers.${ROLL_ENV_NAME}-nginx.priority=2 - traefik.http.routers.${ROLL_ENV_NAME}-nginx.rule= - HostRegexp(`{subdomain:.+}.${TRAEFIK_DOMAIN}`) || Host(`${TRAEFIK_DOMAIN}`) + HostRegexp(`^.+\.${TRAEFIK_DOMAIN}$$`) || Host(`${TRAEFIK_DOMAIN}`) - traefik.http.services.${ROLL_ENV_NAME}-nginx.loadbalancer.server.port=80 - traefik.docker.network=${ROLL_ENV_NAME}_default volumes: diff --git a/environments/includes/varnish.base.yml b/environments/includes/varnish.base.yml index 17d515e..ad5c37f 100644 --- a/environments/includes/varnish.base.yml +++ b/environments/includes/varnish.base.yml @@ -15,6 +15,6 @@ services: - traefik.http.routers.${ROLL_ENV_NAME}-varnish.tls=true - traefik.http.routers.${ROLL_ENV_NAME}-varnish.priority=1 - traefik.http.routers.${ROLL_ENV_NAME}-varnish.rule= - HostRegexp(`{subdomain:.+}.${TRAEFIK_DOMAIN}`) || Host(`${TRAEFIK_DOMAIN}`) + HostRegexp(`^.+\.${TRAEFIK_DOMAIN}$$`) || Host(`${TRAEFIK_DOMAIN}`) - traefik.http.services.${ROLL_ENV_NAME}-varnish.loadbalancer.server.port=80 - traefik.docker.network=${ROLL_ENV_NAME}_default diff --git a/environments/magento2/magento2.base.yml b/environments/magento2/magento2.base.yml index 01cbdc2..89b86f3 100644 --- a/environments/magento2/magento2.base.yml +++ b/environments/magento2/magento2.base.yml @@ -11,7 +11,7 @@ services: - traefik.http.routers.${ROLL_ENV_NAME}-livereload.tls=true - traefik.http.routers.${ROLL_ENV_NAME}-livereload.priority=3 - traefik.http.routers.${ROLL_ENV_NAME}-livereload.rule= - 
(HostRegexp(`{subdomain:.+}.${TRAEFIK_DOMAIN}`) || Host(`${TRAEFIK_DOMAIN}`)) + (HostRegexp(`^.+\.${TRAEFIK_DOMAIN}$$`) || Host(`${TRAEFIK_DOMAIN}`)) && (Path(`/livereload.js`) || Path(`/livereload`)) - traefik.http.routers.${ROLL_ENV_NAME}-livereload.service=${ROLL_ENV_NAME}-livereload - traefik.http.services.${ROLL_ENV_NAME}-livereload.loadbalancer.server.port=35729 From 70d04b9d0930d6f7f05b57b6b94bcb791714b68a Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Mon, 19 Jan 2026 12:11:44 +0100 Subject: [PATCH 64/69] add: enhance Magento multi-store support with extended docs, `multistore` usage details, and example setups --- commands/magento2/usage.help | 3 +- docs/configuration/multipledomains.md | 4 + docs/environments/magento2.md | 111 ++++++++++++++++++++++++++ 3 files changed, 117 insertions(+), 1 deletion(-) diff --git a/commands/magento2/usage.help b/commands/magento2/usage.help index 508f1de..f89966f 100755 --- a/commands/magento2/usage.help +++ b/commands/magento2/usage.help @@ -10,7 +10,8 @@ ENV_TYPE_USAGE=$(cat <\033[0m Manage multi-store configuration (init, refresh, list)\n EOF ) diff --git a/docs/configuration/multipledomains.md b/docs/configuration/multipledomains.md index d752433..a6449b0 100644 --- a/docs/configuration/multipledomains.md +++ b/docs/configuration/multipledomains.md @@ -2,6 +2,10 @@ If you need multiple domains configured for your project, RollDev will now automatically route all sub-domains of the configured `TRAEFIK_DOMAIN` (as given when running `env-init`) to the Varnish/Nginx containers provided there is not a more specific rule such as for example `rabbitmq.exampleproject.com` which routes to the `rabbitmq` service for the project. +:::{tip} +**Magento 2 Users:** For an easier multi-store setup, use the `roll multistore` command which automates the configuration below. See the [Magento 2 Multi-Store Configuration](../environments/magento2.md#multi-store-configuration) section for details. 
+::: + Multiple top-level domains may also be setup by following the instructions below: 1. Sign certificates for your new domains: diff --git a/docs/environments/magento2.md b/docs/environments/magento2.md index 27fe8f6..6042b2b 100644 --- a/docs/environments/magento2.md +++ b/docs/environments/magento2.md @@ -291,3 +291,114 @@ The below example demonstrates the from-scratch setup of the Magento 2 applicati :::{note} To completely destroy the ``exampleproject`` environment we just created, run ``roll env down -v`` to tear down the project's Docker containers, volumes, etc. ::: + +--- + +## Multi-Store Configuration + +RollDev provides the `multistore` command to easily manage Magento multi-store setups with multiple domains. + +### Quick Setup + +1. Create a configuration file at `.roll/stores.json`: + + ```json + { + "stores": { + "store-nl.test": "store_nl", + "store-be.test": "store_be", + "store-de.test": "store_de" + }, + "run_type": "store" + } + ``` + +2. Initialize the multi-store configuration: + + ```bash + roll multistore init + ``` + +3. 
Restart the environment: + + ```bash + roll env up + ``` + +### Configuration Options + +The `.roll/stores.json` file supports the following options: + +| Option | Description | +|--------|-------------| +| `stores` | Object mapping hostnames to Magento store/website codes | +| `run_type` | Either `"store"` or `"website"` (default: `"store"`) | + +Use an empty string `""` for the store code to use the default store: + +```json +{ + "stores": { + "main-store.test": "", + "secondary.test": "secondary_store" + }, + "run_type": "store" +} +``` + +### Commands + +| Command | Description | +|---------|-------------| +| `roll multistore init` | Generate configs and sign SSL certificates for all domains | +| `roll multistore refresh` | Regenerate configs without re-signing certificates | +| `roll multistore list` | Show current store configuration and status | + +### Generated Files + +The `multistore` command automatically generates: + +- `.roll/roll-env.yml` - Traefik routing rules, nginx volume mounts, and extra_hosts +- `.roll/nginx/stores.map` - Nginx hostname-to-store-code mapping + +### Updating Stores + +When you need to add or modify stores: + +1. Edit `.roll/stores.json` +2. Run `roll multistore refresh` +3. Restart nginx: `roll env restart nginx` + +### Example: Complete Multi-Store Setup + +```bash +# Create stores.json +cat > .roll/stores.json << 'EOF' +{ + "stores": { + "mystore-nl.test": "nl_store", + "mystore-be.test": "be_store", + "mystore-de.test": "de_store" + }, + "run_type": "store" +} +EOF + +# Initialize (signs certificates and generates config) +roll multistore init + +# Start environment +roll env up + +# Verify configuration +roll multistore list +``` + +### Troubleshooting + +If you encounter routing issues after adding new domains: + +1. Verify certificates exist: `ls ~/.roll/ssl/certs/` +2. Check traefik config: `roll env config | grep -A5 traefik` +3. Restart traefik to reload certificates: `roll svc up traefik` +4. 
Restart the environment: `roll env down && roll env up` From dc17d465ab510d47fc9509c6564ac8105ef658db Mon Sep 17 00:00:00 2001 From: github-actions Date: Mon, 19 Jan 2026 11:12:50 +0000 Subject: [PATCH 65/69] Tagged 0.6.0 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index cb0c939..a918a2a 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.5.2 +0.6.0 From 3f7c5c88238d4f91f08077a3b407174143ffbfd0 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Mon, 19 Jan 2026 12:25:36 +0100 Subject: [PATCH 66/69] update: improve logging in restore/restore-full commands for better process visibility - Added informative logs for key steps, including restoring volumes, configurations, and source code. - Ensured fallback behavior for `logVerbose` to prevent errors when verbosity is disabled. --- commands/restore-full.cmd | 9 +++++++-- commands/restore.cmd | 4 +++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/commands/restore-full.cmd b/commands/restore-full.cmd index 6c62dd7..3147aa2 100644 --- a/commands/restore-full.cmd +++ b/commands/restore-full.cmd @@ -197,7 +197,7 @@ function logMessage() { } function logVerbose() { - [[ $RESTORE_VERBOSE -eq 1 ]] && logMessage INFO "$@" + [[ $RESTORE_VERBOSE -eq 1 ]] && logMessage INFO "$@" || true } function performLegacyMigration() { @@ -891,8 +891,8 @@ function restoreSourceCode() { } function performRestore() { + logMessage INFO "Starting full restore from $(basename "$RESTORE_BACKUP_FILE")..." - logVerbose "Starting full restore process" logVerbose "Backup file: $RESTORE_BACKUP_FILE" logVerbose "Output directory: $RESTORE_OUTPUT_DIR" logVerbose "Environment path: $ROLL_ENV_PATH" @@ -911,8 +911,10 @@ function performRestore() { logVerbose "Checking backup file type..." if [[ -f "$RESTORE_BACKUP_FILE" ]]; then + logMessage INFO "Extracting backup archive..." logVerbose "Backup is a file, extracting..." 
backup_path=$(extractBackupArchiveFile "$RESTORE_BACKUP_FILE") + logMessage SUCCESS "Archive extracted" elif [[ -d "$RESTORE_BACKUP_FILE" ]]; then logVerbose "Backup is a directory" backup_path="$RESTORE_BACKUP_FILE" @@ -1015,12 +1017,14 @@ function performRestore() { # Restore source code if available if [[ $source_exists -eq 1 ]]; then current_step=$((current_step + 1)) + logMessage INFO "Restoring source code..." restoreSourceCode "$backup_path" "$ROLL_ENV_PATH" $current_step $total_steps fi # Restore configurations if [[ $RESTORE_CONFIG -eq 1 ]]; then current_step=$((current_step + 1)) + logMessage INFO "Restoring configuration files..." restoreConfigurations "$backup_path" $current_step $total_steps if [[ $ROLL_ENV_LOADED -eq 0 ]]; then loadEnvConfig "$ROLL_ENV_PATH" || exit 1 @@ -1031,6 +1035,7 @@ function performRestore() { # Restore volumes for service in "${services_to_restore[@]}"; do current_step=$((current_step + 1)) + logMessage INFO "Restoring ${service} volume..." restoreVolume "$service" "$backup_path" $current_step $total_steps done diff --git a/commands/restore.cmd b/commands/restore.cmd index 2693899..5e96e9e 100755 --- a/commands/restore.cmd +++ b/commands/restore.cmd @@ -182,7 +182,7 @@ function logMessage() { } function logVerbose() { - [[ $RESTORE_VERBOSE -eq 1 ]] && logMessage INFO "$@" + [[ $RESTORE_VERBOSE -eq 1 ]] && logMessage INFO "$@" || true } function performLegacyMigration() { @@ -857,12 +857,14 @@ function performRestore() { # Restore volumes for service in "${services_to_restore[@]}"; do current_step=$((current_step + 1)) + logMessage INFO "Restoring ${service} volume..." restoreVolume "$service" "$backup_path" $current_step $total_steps done # Restore configurations if [[ $RESTORE_CONFIG -eq 1 ]]; then current_step=$((current_step + 1)) + logMessage INFO "Restoring configuration files..." 
restoreConfigurations "$backup_path" $current_step $total_steps fi From 4bae5e7056550cebc803e306ff06549549c4abcb Mon Sep 17 00:00:00 2001 From: github-actions Date: Mon, 19 Jan 2026 11:36:40 +0000 Subject: [PATCH 67/69] Tagged 0.6.1 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index a918a2a..ee6cdce 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.6.0 +0.6.1 From ba026da7b44e78bd28390432558b4567f0279038 Mon Sep 17 00:00:00 2001 From: Rick Bouma Date: Sun, 25 Jan 2026 16:14:28 +0100 Subject: [PATCH 68/69] update: simplify archive extraction for cross-platform compatibility in restore commands - Replaced decompression commands with direct file reading for improved BSD/GNU tar support. - Adjusted handling of encrypted archives to decrypt into temporary files before extraction. - Enhanced logging for archive extraction steps. --- commands/restore-full.cmd | 37 +++++++++++++++++++++++++------------ commands/restore.cmd | 14 +++++--------- 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/commands/restore-full.cmd b/commands/restore-full.cmd index 3147aa2..5c833a5 100644 --- a/commands/restore-full.cmd +++ b/commands/restore-full.cmd @@ -860,27 +860,40 @@ function restoreSourceCode() { mkdir -p "$target_dir" - local decompress_cmd="cat" - case "$src_file" in - *.tar.gz*) decompress_cmd="gzip -dc" ;; - *.tar.xz*) decompress_cmd="xz -dc" ;; - *.tar.lz4*) decompress_cmd="lz4 -dc" ;; - esac - + # Use direct file reading instead of piping for cross-platform compatibility + # BSD tar (macOS) doesn't reliably handle piped stdin with -C flag + # Both BSD and GNU tar auto-detect compression format with -xf if [[ $is_encrypted == true ]]; then if [[ -z "$RESTORE_DECRYPT" ]]; then logMessage ERROR "Encrypted source archive found but no decryption password provided" return 1 fi - if echo "$RESTORE_DECRYPT" | gpg --batch --yes --quiet --passphrase-fd 0 --decrypt "$src_file" | $decompress_cmd | tar -xf - -C 
"$target_dir" 2>/dev/null; then - logMessage SUCCESS "Source code restored" - return 0 + # Decrypt to temp file first, then extract directly + local temp_file="$backup_path/source_decrypted.tar" + case "$src_file" in + *.tar.gz.gpg) temp_file="$backup_path/source_decrypted.tar.gz" ;; + *.tar.xz.gpg) temp_file="$backup_path/source_decrypted.tar.xz" ;; + *.tar.lz4.gpg) temp_file="$backup_path/source_decrypted.tar.lz4" ;; + esac + + if echo "$RESTORE_DECRYPT" | gpg --batch --yes --quiet --passphrase-fd 0 --decrypt "$src_file" > "$temp_file"; then + if tar -xf "$temp_file" -C "$target_dir"; then + rm -f "$temp_file" + logMessage SUCCESS "Source code restored" + return 0 + else + rm -f "$temp_file" + logMessage ERROR "Failed to extract source code" + return 1 + fi else - logMessage ERROR "Failed to restore source code" + rm -f "$temp_file" 2>/dev/null + logMessage ERROR "Failed to decrypt source archive" return 1 fi else - if $decompress_cmd "$src_file" | tar -xf - -C "$target_dir" 2>/dev/null; then + # Direct extraction - works on both BSD tar (macOS) and GNU tar (Linux) + if tar -xf "$src_file" -C "$target_dir"; then logMessage SUCCESS "Source code restored" return 0 else diff --git a/commands/restore.cmd b/commands/restore.cmd index 5e96e9e..90272a2 100755 --- a/commands/restore.cmd +++ b/commands/restore.cmd @@ -299,16 +299,12 @@ function extractBackupArchive() { mkdir -p "$extract_dir" - # Determine decompression command based on file extension - local decompress_cmd="cat" - case "$archive_file" in - *.tar.gz) decompress_cmd="gzip -d" ;; - *.tar.xz) decompress_cmd="xz -d" ;; - *.tar.lz4) decompress_cmd="lz4 -d" ;; - esac - logVerbose "Using decompression command: $decompress_cmd" + # Use direct file reading for cross-platform compatibility + # BSD tar (macOS) doesn't reliably handle piped stdin with -C flag + # Both BSD and GNU tar auto-detect compression format with -xf + logVerbose "Extracting archive using direct file reading" - if $decompress_cmd < 
"$archive_file" | tar -xf - -C "$extract_dir" --strip-components=1 2>/dev/null; then + if tar -xf "$archive_file" -C "$extract_dir" --strip-components=1; then logVerbose "Successfully extracted archive to: $extract_dir" echo "$extract_dir" return 0 From 8d667f4498515702b73e1416ee172978f4a08dff Mon Sep 17 00:00:00 2001 From: github-actions Date: Sun, 25 Jan 2026 15:15:49 +0000 Subject: [PATCH 69/69] Tagged 0.6.2 --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index ee6cdce..b616048 100644 --- a/version +++ b/version @@ -1 +1 @@ -0.6.1 +0.6.2