diff --git a/Makefile b/Makefile index fd07fa50130..1754cf7e7e8 100644 --- a/Makefile +++ b/Makefile @@ -41,7 +41,7 @@ trace-documentation: ### ### Workbench ### -workbench-ci: workbench-ci-test ci-test-auto ci-test-autonix ci-test-autonomadpodman +workbench-ci: workbench-ci-test ci-test-auto ci-test-autonix CI_TARGETS := hlint workbench-ci haddock-hoogle ci: ci-report ci-targets ci-report: diff --git a/lib.mk b/lib.mk index 9241a42a92e..f0fe8572888 100644 --- a/lib.mk +++ b/lib.mk @@ -42,8 +42,6 @@ $$(foreach prof,$(1),$$(eval $$(call proftgt,$$(prof)-nix, $$(prof) $$(foreach prof,$(1),$$(eval $$(call proftgt,$$(prof)-autonix, $$(prof), true,false, true,false, false, supervisor))) $$(foreach prof,$(1),$$(eval $$(call proftgt,$$(prof)-nomadexec, $$(prof), true,false,false,false, false, nomadexec))) $$(foreach prof,$(1),$$(eval $$(call proftgt,$$(prof)-nomadexec-auto, $$(prof), true,false, true,false, false, nomadexec))) -$$(foreach prof,$(1),$$(eval $$(call proftgt,$$(prof)-nomadpodman, $$(prof), true,false,false,false, false, nomadpodman))) -$$(foreach prof,$(1),$$(eval $$(call proftgt,$$(prof)-nomadpodman-auto, $$(prof), true,false, true,false, false, nomadpodman))) endef define define_profile_targets_nomadcloud diff --git a/nix/pkgs.nix b/nix/pkgs.nix index 7605f1312b9..c0e16143cff 100644 --- a/nix/pkgs.nix +++ b/nix/pkgs.nix @@ -23,8 +23,6 @@ let import ./workbench/backend/nomad/cloud.nix params; nomadexec = params: import ./workbench/backend/nomad/exec.nix params; - nomadpodman = params: - import ./workbench/backend/nomad/podman.nix params; supervisor = params: import ./workbench/backend/supervisor.nix params; } diff --git a/nix/workbench/backend/nomad-job.nix b/nix/workbench/backend/nomad-job.nix index 11ff8b1684d..7d2a0d57968 100644 --- a/nix/workbench/backend/nomad-job.nix +++ b/nix/workbench/backend/nomad-job.nix @@ -8,7 +8,6 @@ , stateDir , profileData , containerSpecs -, execTaskDriver , generatorTaskName , oneTracerPerNode ? false , withSsh ? 
false @@ -21,8 +20,8 @@ let # Nomad creates a working directory for each allocation on a client. This # directory can be found in the Nomad data_dir at ./alloc/«alloc_id». The # allocation working directory is where Nomad creates task directories and - # directories shared between tasks, write logs for tasks, and downloads - # artifacts or templates. + # directories shared between tasks, writes logs for tasks, downloads artifacts + # and renders templates. # https://developer.hashicorp.com/nomad/docs/concepts/filesystem # # For example: @@ -36,34 +35,26 @@ let # Templates are rendered into the task working directory. Drivers without # filesystem isolation (such as raw_exec) or drivers that build a chroot in # the task working directory (such as exec) can have templates rendered to - # arbitrary paths in the task. But task drivers such as docker can only access - # templates rendered into the NOMAD_ALLOC_DIR, NOMAD_TASK_DIR, or - # NOMAD_SECRETS_DIR. To work around this restriction, you can create a mount - # from the template destination to another location in the task. + # arbitrary paths in the task. ## - https://developer.hashicorp.com/nomad/docs/job-specification/template#template-destinations ## - https://developer.hashicorp.com/nomad/docs/runtime/environment#task-directories ## - https://developer.hashicorp.com/nomad/docs/concepts/filesystem - # Task's filesystem / working directory (maybe container or chroot) defaults: + # Workbench's ${stateDir} will be created/populated inside NOMAD_TASK_DIR. # - # When using the isolated fork task driver ("exec") ## Default values below are stored in the job's "meta" stanza to be able to ## overrided them with 'jq' from a workbench shell. These values in "meta" ## are used to programatically create a "template" with "env = true;" so they ## are automagically reachable as envars inside the Task's entrypoint and ## 'supervisord' programs. 
+ ## Some workbench default values are stored in the job's "meta" stanza to be + ## able to override them with 'jq' from a workbench shell. These values in + ## "meta" are used to programmatically create a "template" with "env = true;" + ## so they are automagically reachable as envars inside the Task's entrypoint + ## and/or 'supervisord' programs. ## Values go: Nix (defaults) -> meta -> template -> envars - # - ## See ./oci-images.nix for further details if using the `podman` driver. - ## For the `exec` driver almost everything is here. - # - # A symlink to the supervisord nix-installed inside the OCI image/chroot. - # We need to be able to `nomad exec supervisorctl ...` , for this the path + # Symlink to the supervisord that is nix-installed inside the deployed chroot. + # We need to be able to do `nomad exec supervisorctl ...` , for this the path # of the installed supervisor binaries is needed. task_supervisor_nix = "${stateDir}/supervisor/nix-store"; # Location of the supervisord config file inside the container. - # This file can be mounted as a volume or created as a template. + # This file is created as a template. task_supervisord_conf = "${stateDir}/supervisor/supervisord.conf"; # The URL to the listening inet or socket of the supervisord server: # The problem is that if we use "127.0.0.1:9001" as parameter (without the @@ -111,11 +102,8 @@ let }" \ > "''${NOMAD_TASK_DIR}"/entrypoint.dirs - # Only needed for "exec" ? - if test "''${TASK_DRIVER}" = "exec" - then - cd "''${NOMAD_TASK_DIR}" - fi + # Move to stateDir's parent directory. 
+ cd "''${NOMAD_TASK_DIR}" # Create a symlink to 'supervisor' Nix Store folder so we can call it from # 'ssh' or 'nomad exec' without having it in PATH or knowing the currently @@ -141,7 +129,7 @@ let # About the JSON Job Specification and my odd assumptions: # # TL;DR; We are using what HashiCorp calls an unespecified format but it's the + # TL;DR; We are using what HashiCorp calls an unspecified format but it's the # same format the SRE team is using. # # At least in Nomad version v1.4.3, the CLI command to submit new jobs @@ -250,7 +238,6 @@ let meta = { # Only top level "KEY=STRING" are allowed, no child objects/attributes! WORKBENCH_STATEDIR = stateDir; - TASK_DRIVER = if execTaskDriver then "exec" else "podman"; SUPERVISORD_LOGLEVEL = task_supervisord_loglevel; ONE_TRACER_PER_NODE = oneTracerPerNode; }; @@ -411,8 +398,8 @@ let # Actually using the interface specified on Nomad Client startup that # for local runs it's forced to "lo" and whatever is automatically # fingerprinted or provided for cloud runs. - # TODO: Use "bridge" mode for podman, this will allow to run isolated - # local cluster with no addresses or ports clashing. + # TODO: Use "bridge" mode for local ?? this will allow running an isolated + # local cluster with no addresses or ports clashing ?? mode = "host"; # Specifies a TCP/UDP port allocation and can be used to specify both # dynamic ports and reserved ports. @@ -453,18 +440,14 @@ let # jobs like load balancers. static = ''${toString portNum}''; - # TODO: When switching the network mode to "bridge" for - # podman use "Mapped Ports" to be able to run isolated - # local cluster with no addresses or ports clashing. + # The "exec" driver does not accept "Mapped Ports". + # to = ''${toString portNum}''; # Applicable when using "bridge" mode to configure port # to map to inside the task's network namespace. Omitting # this field or setting it to -1 sets the mapped port # equal to the dynamic port allocated by the scheduler. # The NOMAD_PORT_