Log diagnostics & errors to stderr #450

Open · wants to merge 1 commit into main
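This change routes diagnostic and error messages through a _log helper imported from app/util.py, instead of calling print directly, so they go to stderr and no longer mix with real program output on stdout (for example, the host:port line emitted by the port command, or the output of logs). The hunk that adds _log to app/util.py is not rendered in this view, so the sketch below is an assumption about its shape, not the PR's actual implementation; only the name _log is taken from the imports in the diff.

import sys

def _log(*args, **kwargs):
    # Assumed implementation: delegate to print(), but write to stderr so
    # diagnostics never contaminate data written to stdout.
    print(*args, file=sys.stderr, **kwargs)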
11 changes: 6 additions & 5 deletions app/base.py
@@ -15,8 +15,9 @@
 
 import os
 from abc import ABC, abstractmethod
-from .deploy import get_stack_status
 from decouple import config
+from .deploy import get_stack_status
+from .util import _log
 
 
 def get_stack(config, stack):
@@ -49,7 +50,7 @@ def ensure_available(self):
         url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
         if url_from_environment:
             if self.config.verbose:
-                print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
+                _log(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
             self.url = url_from_environment
         else:
             # Otherwise we expect to use the local package-registry stack
@@ -58,13 +59,13 @@ def ensure_available(self):
         if registry_running:
             # If it is available, get its mapped port and construct its URL
             if self.config.debug:
-                print("Found local package registry stack is up")
+                _log("Found local package registry stack is up")
             # TODO: get url from deploy-stack
             self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
         else:
             # If not, print a message about how to start it and return fail to the caller
-            print("ERROR: The package-registry stack is not running, and no external registry specified with CERC_NPM_REGISTRY_URL")
-            print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
+            _log("ERROR: The package-registry stack is not running, and no external registry specified with CERC_NPM_REGISTRY_URL")
+            _log("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
             return False
         return True
 
32 changes: 16 additions & 16 deletions app/build_containers.py
@@ -27,7 +27,7 @@
 import click
 import importlib.resources
 from pathlib import Path
-from .util import include_exclude_check, get_parsed_stack_config
+from .util import _log, include_exclude_check, get_parsed_stack_config
 from .base import get_npm_registry_url
 
 # TODO: find a place for this
@@ -56,15 +56,15 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
 
     if local_stack:
         dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
-        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
+        _log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
     else:
         dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
 
     if not quiet:
-        print(f'Dev Root is: {dev_root_path}')
+        _log(f'Dev Root is: {dev_root_path}')
 
     if not os.path.isdir(dev_root_path):
-        print('Dev root directory doesn\'t exist, creating')
+        _log('Dev root directory doesn\'t exist, creating')
 
     # See: https://stackoverflow.com/a/20885799/1701505
     from . import data
@@ -79,9 +79,9 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
     containers_in_scope = all_containers
 
     if verbose:
-        print(f'Containers: {containers_in_scope}')
+        _log(f'Containers: {containers_in_scope}')
         if stack:
-            print(f"Stack: {stack}")
+            _log(f"Stack: {stack}")
 
     # TODO: make this configurable
     container_build_env = {
@@ -102,16 +102,16 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
 
     def process_container(container):
         if not quiet:
-            print(f"Building: {container}")
+            _log(f"Building: {container}")
         build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
         build_script_filename = os.path.join(build_dir, "build.sh")
         if verbose:
-            print(f"Build script filename: {build_script_filename}")
+            _log(f"Build script filename: {build_script_filename}")
         if os.path.exists(build_script_filename):
             build_command = build_script_filename
         else:
             if verbose:
-                print(f"No script file found: {build_script_filename}, using default build script")
+                _log(f"No script file found: {build_script_filename}, using default build script")
             repo_dir = container.split('/')[1]
             # TODO: make this less of a hack -- should be specified in some metadata somewhere
             # Check if we have a repo for this container. If not, set the context dir to the container-build subdir
@@ -120,23 +120,23 @@ def process_container(container):
             build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
         if not dry_run:
             if verbose:
-                print(f"Executing: {build_command} with environment: {container_build_env}")
+                _log(f"Executing: {build_command} with environment: {container_build_env}")
             build_result = subprocess.run(build_command, shell=True, env=container_build_env)
             if verbose:
-                print(f"Return code is: {build_result.returncode}")
+                _log(f"Return code is: {build_result.returncode}")
             if build_result.returncode != 0:
-                print(f"Error running build for {container}")
+                _log(f"Error running build for {container}")
                 if not continue_on_error:
-                    print("FATAL Error: container build failed and --continue-on-error not set, exiting")
+                    _log("FATAL Error: container build failed and --continue-on-error not set, exiting")
                     sys.exit(1)
                 else:
-                    print("****** Container Build Error, continuing because --continue-on-error is set")
+                    _log("****** Container Build Error, continuing because --continue-on-error is set")
         else:
-            print("Skipped")
+            _log("Skipped")
 
     for container in containers_in_scope:
         if include_exclude_check(container, include, exclude):
             process_container(container)
         else:
             if verbose:
-                print(f"Excluding: {container}")
+                _log(f"Excluding: {container}")
56 changes: 28 additions & 28 deletions app/deploy.py
@@ -26,7 +26,7 @@
 from python_on_whales import DockerClient, DockerException
 import click
 from pathlib import Path
-from .util import include_exclude_check, get_parsed_stack_config, global_options2
+from .util import _log, include_exclude_check, get_parsed_stack_config, global_options2
 from .deployment_create import create as deployment_create
 from .deployment_create import init as deployment_init
 
@@ -69,7 +69,7 @@ def up_operation(ctx, services_list):
         for attr, value in container_exec_env.items():
             os.environ[attr] = value
         if global_context.verbose:
-            print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
+            _log(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
         for pre_start_command in cluster_context.pre_start_commands:
             _run_command(global_context, cluster_context.cluster, pre_start_command)
         deploy_context.docker.compose.up(detach=True, services=services_list)
@@ -82,7 +82,7 @@ def down_operation(ctx, delete_volumes, extra_args_list):
     global_context = ctx.parent.parent.obj
     if not global_context.dry_run:
         if global_context.verbose:
-            print("Running compose down")
+            _log("Running compose down")
         timeout_arg = None
         if extra_args_list:
             timeout_arg = extra_args_list[0]
@@ -94,7 +94,7 @@ def ps_operation(ctx):
     global_context = ctx.parent.parent.obj
     if not global_context.dry_run:
         if global_context.verbose:
-            print("Running compose ps")
+            _log("Running compose ps")
         container_list = ctx.obj.docker.compose.ps()
         if len(container_list) > 0:
             print("Running containers:")
@@ -120,12 +120,12 @@ def port_operation(ctx, extra_args):
     extra_args_list = list(extra_args) or None
     if not global_context.dry_run:
         if extra_args_list is None or len(extra_args_list) < 2:
-            print("Usage: port <service> <exposed-port>")
+            _log("Usage: port <service> <exposed-port>")
             sys.exit(1)
         service_name = extra_args_list[0]
         exposed_port = extra_args_list[1]
         if global_context.verbose:
-            print(f"Running compose port {service_name} {exposed_port}")
+            _log(f"Running compose port {service_name} {exposed_port}")
         mapped_port_data = ctx.obj.docker.compose.port(service_name, exposed_port)
         print(f"{mapped_port_data[0]}:{mapped_port_data[1]}")
 
@@ -135,25 +135,25 @@ def exec_operation(ctx, extra_args):
     extra_args_list = list(extra_args) or None
     if not global_context.dry_run:
         if extra_args_list is None or len(extra_args_list) < 2:
-            print("Usage: exec <service> <cmd>")
+            _log("Usage: exec <service> <cmd>")
             sys.exit(1)
         service_name = extra_args_list[0]
         command_to_exec = ["sh", "-c"] + extra_args_list[1:]
         container_exec_env = _make_runtime_env(global_context)
         if global_context.verbose:
-            print(f"Running compose exec {service_name} {command_to_exec}")
+            _log(f"Running compose exec {service_name} {command_to_exec}")
         try:
             ctx.obj.docker.compose.execute(service_name, command_to_exec, envs=container_exec_env)
         except DockerException as error:
-            print(f"container command returned error exit status")
+            _log("container command returned error exit status")
 
 
 def logs_operation(ctx, extra_args):
     global_context = ctx.parent.parent.obj
     extra_args_list = list(extra_args) or None
     if not global_context.dry_run:
         if global_context.verbose:
-            print("Running compose logs")
+            _log("Running compose logs")
         logs_output = ctx.obj.docker.compose.logs(services=extra_args_list if extra_args_list is not None else [])
         print(logs_output)
 
@@ -211,15 +211,15 @@ def get_stack_status(ctx, stack):
     docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
     # TODO: refactor to avoid duplicating this code above
     if ctx.verbose:
-        print("Running compose ps")
+        _log("Running compose ps")
     container_list = docker.compose.ps()
     if len(container_list) > 0:
         if ctx.debug:
-            print(f"Container list from compose ps: {container_list}")
+            _log(f"Container list from compose ps: {container_list}")
         return True
     else:
         if ctx.debug:
-            print("No containers found from compose ps")
+            _log("No containers found from compose ps")
         False
 
 
@@ -237,7 +237,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
 
     if ctx.local_stack:
         dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
-        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
+        _log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
     else:
         dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
 
@@ -256,11 +256,11 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
         path = os.path.realpath(sys.argv[0])
         unique_cluster_descriptor = f"{path},{stack},{include},{exclude}"
         if ctx.debug:
-            print(f"pre-hash descriptor: {unique_cluster_descriptor}")
+            _log(f"pre-hash descriptor: {unique_cluster_descriptor}")
         hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
         cluster = f"laconic-{hash}"
         if ctx.verbose:
-            print(f"Using cluster name: {cluster}")
+            _log(f"Using cluster name: {cluster}")
 
     # See: https://stackoverflow.com/a/20885799/1701505
     from . import data
@@ -281,7 +281,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
     pods_in_scope = _convert_to_new_format(pods_in_scope)
 
     if ctx.verbose:
-        print(f"Pods: {pods_in_scope}")
+        _log(f"Pods: {pods_in_scope}")
 
     # Construct a docker compose command suitable for our purpose
 
@@ -307,10 +307,10 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
             compose_files.append(compose_file_name)
         else:
             if ctx.verbose:
-                print(f"Excluding: {pod_name}")
+                _log(f"Excluding: {pod_name}")
 
     if ctx.verbose:
-        print(f"files: {compose_files}")
+        _log(f"files: {compose_files}")
 
     return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)
 
@@ -342,7 +342,7 @@ def _convert_to_new_format(old_pod_array):
 
 def _run_command(ctx, cluster_name, command):
     if ctx.verbose:
-        print(f"Running command: {command}")
+        _log(f"Running command: {command}")
     command_dir = os.path.dirname(command)
     command_file = os.path.join(".", os.path.basename(command))
     command_env = os.environ.copy()
@@ -351,7 +351,7 @@ def _run_command(ctx, cluster_name, command):
         command_env["CERC_SCRIPT_DEBUG"] = "true"
     command_result = subprocess.run(command_file, shell=True, env=command_env, cwd=command_dir)
     if command_result.returncode != 0:
-        print(f"FATAL Error running command: {command}")
+        _log(f"FATAL Error running command: {command}")
         sys.exit(1)
 
 
@@ -368,7 +368,7 @@ class ConfigDirective:
     for container in cluster_config:
         container_config = cluster_config[container]
         if ctx.verbose:
-            print(f"{container} config: {container_config}")
+            _log(f"{container} config: {container_config}")
         for directive in container_config:
             pd = ConfigDirective(
                 container_config[directive].split(".")[0],
@@ -377,7 +377,7 @@ class ConfigDirective:
                 directive
             )
             if ctx.verbose:
-                print(f"Setting {pd.destination_container}.{pd.destination_variable}"
+                _log(f"Setting {pd.destination_container}.{pd.destination_variable}"
                       f" = {pd.source_container}.{pd.source_variable}")
             # TODO: add a timeout
             waiting_for_data = True
@@ -394,19 +394,19 @@ class ConfigDirective:
                                                           envs=container_exec_env)
                 except DockerException as error:
                     if ctx.debug:
-                        print(f"Docker exception reading config source: {error}")
+                        _log(f"Docker exception reading config source: {error}")
                     # If the script executed failed for some reason, we get:
                     # "It returned with code 1"
                     if "It returned with code 1" in str(error):
                         if ctx.verbose:
-                            print("Config export script returned an error, re-trying")
+                            _log("Config export script returned an error, re-trying")
                     # If the script failed to execute (e.g. the file is not there) then we get:
                     # "It returned with code 2"
                     if "It returned with code 2" in str(error):
-                        print(f"Fatal error reading config source: {error}")
+                        _log(f"Fatal error reading config source: {error}")
                 if source_value:
                     if ctx.debug:
-                        print(f"fetched source value: {source_value}")
+                        _log(f"fetched source value: {source_value}")
                     destination_output = docker.compose.execute(pd.destination_container,
                                                                 ["sh", "-c",
                                                                  f"sh /scripts/import-{pd.destination_variable}.sh"
@@ -415,7 +415,7 @@ class ConfigDirective:
                                                                 envs=container_exec_env)
                     waiting_for_data = False
                     if ctx.debug:
-                        print(f"destination output: {destination_output}")
+                        _log(f"destination output: {destination_output}")
 
 
 command.add_command(deployment_init)
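With diagnostics on stderr, callers can capture command output cleanly. The snippet below is one way to verify the new behavior; it is not part of the PR, and the exact laconic-so subcommand layout used here is an assumption for illustration.

import subprocess

# Hypothetical invocation: ask for a mapped port with --verbose enabled.
result = subprocess.run(
    ["laconic-so", "--verbose", "deploy-system", "port", "some-service", "8080"],
    capture_output=True,
    text=True,
)
print("stdout (data):", result.stdout)          # expect only the host:port mapping
print("stderr (diagnostics):", result.stderr)   # expect "Running compose port ..." etc.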