diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index eba830b74..1f6890f71 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,3 +1,23 @@
+
+
+
 ## Proposed Commit Message
-## Checklist
-
-- [ ] My code follows the process laid out in [the documentation](https://cloudinit.readthedocs.io/en/latest/development/index.html)
-- [ ] I have updated or added any [unit tests](https://cloudinit.readthedocs.io/en/latest/development/testing.html) accordingly
-- [ ] I have updated or added any [documentation](https://cloudinit.readthedocs.io/en/latest/development/contribute_docs.html) accordingly
 
 ## Merge type
diff --git a/.github/workflows/alpine-unittests.yml b/.github/workflows/alpine-unittests.yml
new file mode 100644
index 000000000..eac2c7a81
--- /dev/null
+++ b/.github/workflows/alpine-unittests.yml
@@ -0,0 +1,74 @@
+
+name: Alpine Unittests
+
+on:
+  pull_request:
+    branches-ignore:
+      - 'ubuntu/**'
+  push:
+    branches:
+      - main
+
+concurrency:
+  group: 'ci-${{ github.workflow }}-${{ github.ref }}'
+  cancel-in-progress: true
+
+defaults:
+  run:
+    shell: sh -ex {0}
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+
+      - name: "Checkout"
+        uses: actions/checkout@v4
+        with:
+          # Fetch all tags for tools/read-version
+          fetch-depth: 0
+
+      - name: Setup LXD
+        uses: canonical/setup-lxd@v0.1.1
+        with:
+          channel: latest/candidate
+
+      - name: Create alpine container
+        # the current shell doesn't have lxd as one of the groups
+        # so switch groups to run lxd commands
+        run: lxc launch images:alpine/edge alpine
+
+      - name: Check networking (for debugging)
+        run: |
+          lxc exec alpine -- ping -c 1 google.com || true
+          lxc exec alpine -- ping -c 1 dl-cdn.alpinelinux.org || true
+          lxc exec alpine -- nslookup www.google.com || true
+          lxc exec alpine -- ping -c 1 dl-cdn.alpinelinux.org || true
+
+      - name: Install dependencies
+        run: lxc exec alpine -- apk add py3-tox git tzdata
+
+      - name: Mount source into container directory
+        run: lxc config device add alpine gitdir disk source=$(pwd) path=/root/cloud-init-ro
+
+      - name: Create a r/w directory to run tests in
+        # without this, tox fails during package install
+        run: |
+          lxc exec alpine -- git config --global --add safe.directory /root/cloud-init-ro/.git
+          lxc exec alpine -- git clone cloud-init-ro cloud-init-rw
+
+      - name: Set a timezone
+        # test for regression of GH-5158
+        # https://github.com/canonical/cloud-init/issues/5158
+        run: lxc exec alpine -- ln -s /usr/share/zoneinfo/Europe/Brussels /etc/localtime
+
+      - name: Set up tox environment
+        # Setup the environment and then tell pytest to do essentially nothing
+        run: lxc exec alpine --cwd /root/cloud-init-rw -- tox -e py3 -- --cache-show=
+
+      - name: Stop network
+        # Take down network interfaces to ensure tests don't use network
+        run: lxc exec alpine -- ifdown -a
+
+      - name: Run unittests
+        run: lxc exec alpine --cwd /root/cloud-init-rw -- tox -e py3
diff --git a/.github/workflows/check_format.yml b/.github/workflows/check_format.yml
index 70f04afa9..ba44e6658 100644
--- a/.github/workflows/check_format.yml
+++ b/.github/workflows/check_format.yml
@@ -18,11 +18,8 @@ jobs:
       fail-fast: false
       matrix:
         env: [ruff, mypy, pylint, black, isort]
-        lint-with:
-          - {tip-versions: false, os: ubuntu-20.04}
-          - {tip-versions: true, os: ubuntu-latest}
-    name: Check ${{ matrix.lint-with.tip-versions && 'tip-' || '' }}${{ matrix.env }}
-    runs-on: ${{ matrix.lint-with.os }}
+    name: Check ${{ matrix.env }}
+    runs-on: ubuntu-20.04
     steps:
       - name: "Checkout #1"
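For context on the new job above (before the check_format hunk resumes): a rough local equivalent of the alpine-unittests steps. A sketch only — it assumes LXD is installed and the invoking user may run `lxc` (e.g. is in the lxd group); container and directory names mirror the job:

```python
#!/usr/bin/env python3
"""Approximate local reproduction of .github/workflows/alpine-unittests.yml."""
import os
import subprocess


def lxc(*args: str) -> None:
    # Thin wrapper so each workflow step reads as one call.
    subprocess.run(["lxc", *args], check=True)


lxc("launch", "images:alpine/edge", "alpine")
lxc("exec", "alpine", "--", "apk", "add", "py3-tox", "git", "tzdata")
# Mount the checkout read-only, then clone it to a writable tree:
# without the r/w copy, tox fails during package install.
lxc("config", "device", "add", "alpine", "gitdir", "disk",
    f"source={os.getcwd()}", "path=/root/cloud-init-ro")
lxc("exec", "alpine", "--", "git", "config", "--global",
    "--add", "safe.directory", "/root/cloud-init-ro/.git")
lxc("exec", "alpine", "--", "git", "clone", "cloud-init-ro", "cloud-init-rw")
# A non-UTC localtime guards against a GH-5158 regression.
lxc("exec", "alpine", "--", "ln", "-s",
    "/usr/share/zoneinfo/Europe/Brussels", "/etc/localtime")
# Prime the tox env while online, drop networking, then run for real.
lxc("exec", "alpine", "--cwd", "/root/cloud-init-rw", "--",
    "tox", "-e", "py3", "--", "--cache-show=")
lxc("exec", "alpine", "--", "ifdown", "-a")
lxc("exec", "alpine", "--cwd", "/root/cloud-init-rw", "--", "tox", "-e", "py3")
```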
uses: actions/checkout@v3.0.0 @@ -41,17 +38,10 @@ jobs: run: python3 --version - name: Test - if: ${{ !matrix.lint-with.tip-versions }} env: # matrix env: not to be confused w/environment variables or testenv TOXENV: ${{ matrix.env }} run: tox - - name: Test (tip versions) - if: matrix.lint-with.tip-versions - continue-on-error: true - env: - TOXENV: tip-${{ matrix.env }} - run: tox schema-format: strategy: fail-fast: false diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml index c9bfa15b7..aeae6840a 100644 --- a/.github/workflows/linkcheck.yml +++ b/.github/workflows/linkcheck.yml @@ -1,16 +1,10 @@ -name: linkcheck in CI - +name: scheduled-linkcheck on: - push: - branches: - - main - pull_request: {} - workflow_dispatch: - inputs: - failOnError: - description: 'Fail job on link check error' - required: false - default: 'false' + schedule: + - cron: '3 14 * * *' +concurrency: + group: 'ci-${{ github.workflow }}-${{ github.ref }}' + cancel-in-progress: true jobs: linkcheck: diff --git a/.github/workflows/scheduled.yml b/.github/workflows/scheduled.yml index ac4af1e7f..aca4f9b7e 100644 --- a/.github/workflows/scheduled.yml +++ b/.github/workflows/scheduled.yml @@ -1,24 +1,18 @@ -name: scheduled +name: scheduled-daily on: schedule: - cron: '3 14 * * *' concurrency: group: 'ci-${{ github.workflow }}-${{ github.ref }}' - cancel-in-progress: true + cancel-in-progress: false jobs: - unittests: - strategy: - matrix: - toxenv: [ hypothesis-slow ] - name: unittest / ${{ matrix.toxenv }} + hypothesis: + name: unittest / hypothesis-slow runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v3 - with: - # Fetch all tags for tools/read-version - fetch-depth: 0 - name: Install dependencies env: DEBIAN_FRONTEND: noninteractive @@ -29,4 +23,37 @@ jobs: env: PYTEST_ADDOPTS: -v HYPOTHESIS_PROFILE: ci - run: tox -e ${{ matrix.toxenv }} + run: tox -e hypothesis-slow + devel_tests: + name: unittest / 3.13-dev + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Install Python 3.13-dev + uses: actions/setup-python@v4 + with: + python-version: 3.13-dev + check-latest: true + - name: Install tox + run: pip install tox + - name: Run unittest + run: tox -e py3 + format_tip: + strategy: + fail-fast: false + matrix: + env: [tip-ruff, tip-mypy, tip-pylint, tip-black, tip-isort] + name: format-tip + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Install dependencies + run: | + sudo apt-get -qy update + sudo apt-get -qy install tox + - name: Run Linters tip + env: + TOXENV: ${{ matrix.env }} + run: tox diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index ec054f86c..5eda03c8e 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -25,11 +25,6 @@ jobs: continue-on-error: false check-latest: false experimental: false - - python-version: "3.13-dev" - toxenv: py3 - check-latest: true - experimental: true - continue-on-error: true name: unittest / ${{ matrix.toxenv }} / python ${{matrix.python-version}} runs-on: ubuntu-20.04 continue-on-error: ${{ matrix.experimental }} diff --git a/.gitignore b/.gitignore index 5abb9ee63..8a85858a4 100644 --- a/.gitignore +++ b/.gitignore @@ -31,7 +31,7 @@ cloud-init_*.buildinfo cloud-init_*.changes cloud-init_*.deb cloud-init_*.dsc -cloud-init_*.orig.tar.gz +cloud-init_*.tar.gz cloud-init_*.tar.xz cloud-init_*.upload diff --git a/.pc/applied-patches b/.pc/applied-patches index 5aa214b34..7dffcbd01 100644 --- 
a/.pc/applied-patches +++ b/.pc/applied-patches @@ -5,15 +5,10 @@ netplan99-cannot-use-default.patch retain-old-groups.patch keep-dhclient-as-priority-client.patch revert-551f560d-cloud-config-after-snap-seeding.patch -do-not-block-user-login.patch status-do-not-remove-duplicated-data.patch retain-apt-pre-deb822.patch status-retain-recoverable-error-exit-code.patch retain-ec2-default-net-update-events.patch -cpick-a6f7577d-bug-package_update-avoid-snap-refresh-in-images-without cli-retain-file-argument-as-main-cmd-arg.patch -cpick-51c6569f-fix-snapd-ubuntu-do-not-snap-refresh-when-snap-absent -cpick-417ee551-fix-ec2-Ensure-metadata-exists-before-configuring-PBR -cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321 -cpick-74dc7cce-test-Fix-failing-test_ec2.py-test-5324 -cpick-d771d1f4-fix-ec2-Correctly-identify-netplan-renderer-5361 +drop-unsupported-systemd-condition-environment.patch +deprecation-version-boundary.patch diff --git a/.pc/cli-retain-file-argument-as-main-cmd-arg.patch/cloudinit/cmd/main.py b/.pc/cli-retain-file-argument-as-main-cmd-arg.patch/cloudinit/cmd/main.py index e077a7fc6..4a1c8b2e2 100644 --- a/.pc/cli-retain-file-argument-as-main-cmd-arg.patch/cloudinit/cmd/main.py +++ b/.pc/cli-retain-file-argument-as-main-cmd-arg.patch/cloudinit/cmd/main.py @@ -16,10 +16,10 @@ import json import os import sys -import time import traceback import logging -from typing import Tuple +import yaml +from typing import Tuple, Callable from cloudinit import netinfo from cloudinit import signal_handler @@ -35,16 +35,8 @@ from cloudinit.config import cc_set_hostname from cloudinit.config.modules import Modules from cloudinit.config.schema import validate_cloudconfig_schema -from cloudinit.log import ( - LogExporter, - setup_basic_logging, - setup_logging, - reset_logging, - configure_root_logger, - DEPRECATED, -) +from cloudinit import log from cloudinit.reporting import events -from cloudinit.safeyaml import load from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG # Welcome message template @@ -64,6 +56,14 @@ "once": PER_ONCE, } +# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html +STAGE_NAME = { + "init-local": "Local Stage", + "init": "Network Stage", + "modules-config": "Config Stage", + "modules-final": "Final Stage", +} + LOG = logging.getLogger(__name__) @@ -102,6 +102,20 @@ def welcome_format(action): ) +def close_stdin(logger: Callable[[str], None] = LOG.debug): + """ + reopen stdin as /dev/null to ensure no side effects + + logger: a function for logging messages + """ + if not os.isatty(sys.stdin.fileno()): + logger("Closing stdin") + with open(os.devnull) as fp: + os.dup2(fp.fileno(), sys.stdin.fileno()) + else: + logger("Not closing stdin, stdin is a tty.") + + def extract_fns(args): # Files are already opened so lets just pass that along # since it would of broke if it couldn't have @@ -222,17 +236,12 @@ def attempt_cmdline_url(path, network=True, cmdline=None) -> Tuple[int, str]: is_cloud_cfg = False if is_cloud_cfg: if cmdline_name == "url": - return ( - DEPRECATED, - str( - util.deprecate( - deprecated="The kernel command line key `url`", - deprecated_version="22.3", - extra_message=" Please use `cloud-config-url` " - "kernel command line parameter instead", - return_log=True, - ), - ), + return util.deprecate( + deprecated="The kernel command line key `url`", + deprecated_version="22.3", + extra_message=" Please use `cloud-config-url` " + "kernel command line parameter instead", + skip_log=True, ) else: if cmdline_name 
== "cloud-config-url": @@ -335,9 +344,8 @@ def main_init(name, args): outfmt = None errfmt = None try: - early_logs.append((logging.DEBUG, "Closing stdin.")) - util.close_stdin() - (outfmt, errfmt) = util.fixup_output(init.cfg, name) + close_stdin(lambda msg: early_logs.append((logging.DEBUG, msg))) + outfmt, errfmt = util.fixup_output(init.cfg, name) except Exception: msg = "Failed to setup output redirection!" util.logexc(LOG, msg) @@ -348,8 +356,8 @@ def main_init(name, args): LOG.debug( "Logging being reset, this logger may no longer be active shortly" ) - reset_logging() - setup_logging(init.cfg) + log.reset_logging() + log.setup_logging(init.cfg) apply_reporting_cfg(init.cfg) # Any log usage prior to setup_logging above did not have local user log @@ -488,7 +496,7 @@ def main_init(name, args): cloud_cfg_path = init.paths.get_ipath_cur("cloud_config") if os.path.exists(cloud_cfg_path) and os.stat(cloud_cfg_path).st_size != 0: validate_cloudconfig_schema( - config=load(util.load_text_file(cloud_cfg_path)), + config=yaml.safe_load(util.load_text_file(cloud_cfg_path)), strict=False, log_details=False, log_deprecations=True, @@ -510,7 +518,7 @@ def main_init(name, args): (outfmt, errfmt) = util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to re-adjust output redirection!") - setup_logging(mods.cfg) + log.setup_logging(mods.cfg) # give the activated datasource a chance to adjust init.activate_datasource() @@ -605,8 +613,7 @@ def main_modules(action_name, args): mods = Modules(init, extract_fns(args), reporter=args.reporter) # Stage 4 try: - LOG.debug("Closing stdin") - util.close_stdin() + close_stdin() util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to setup output redirection!") @@ -615,13 +622,20 @@ def main_modules(action_name, args): LOG.debug( "Logging being reset, this logger may no longer be active shortly" ) - reset_logging() - setup_logging(mods.cfg) + log.reset_logging() + log.setup_logging(mods.cfg) apply_reporting_cfg(init.cfg) # now that logging is setup and stdout redirected, send welcome welcome(name, msg=w_msg) + if name == "init": + util.deprecate( + deprecated="`--mode init`", + deprecated_version="24.1", + extra_message="Use `cloud-init init` instead.", + ) + # Stage 5 return run_module_section(mods, name, name) @@ -667,8 +681,7 @@ def main_single(name, args): mod_freq = FREQ_SHORT_NAMES.get(mod_freq) # Stage 4 try: - LOG.debug("Closing stdin") - util.close_stdin() + close_stdin() util.fixup_output(mods.cfg, None) except Exception: util.logexc(LOG, "Failed to setup output redirection!") @@ -677,8 +690,8 @@ def main_single(name, args): LOG.debug( "Logging being reset, this logger may no longer be active shortly" ) - reset_logging() - setup_logging(mods.cfg) + log.reset_logging() + log.setup_logging(mods.cfg) apply_reporting_cfg(init.cfg) # now that logging is setup and stdout redirected, send welcome @@ -697,12 +710,10 @@ def main_single(name, args): return 0 -def status_wrapper(name, args, data_d=None, link_d=None): - if data_d is None: - paths = read_cfg_paths() - data_d = paths.get_cpath("data") - if link_d is None: - link_d = os.path.normpath("/run/cloud-init") +def status_wrapper(name, args): + paths = read_cfg_paths() + data_d = paths.get_cpath("data") + link_d = os.path.normpath(paths.run_dir) status_path = os.path.join(data_d, "status.json") status_link = os.path.join(link_d, "status.json") @@ -729,18 +740,26 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: raise ValueError("unknown name: 
%s" % name) - modes = ( - "init", - "init-local", - "modules-config", - "modules-final", - ) - if mode not in modes: + if mode not in STAGE_NAME: raise ValueError( "Invalid cloud init mode specified '{0}'".format(mode) ) - status = None + nullstatus = { + "errors": [], + "recoverable_errors": {}, + "start": None, + "finished": None, + } + status = { + "v1": { + "datasource": None, + "init": nullstatus.copy(), + "init-local": nullstatus.copy(), + "modules-config": nullstatus.copy(), + "modules-final": nullstatus.copy(), + } + } if mode == "init-local": for f in (status_link, result_link, status_path, result_path): util.del_file(f) @@ -750,25 +769,22 @@ def status_wrapper(name, args, data_d=None, link_d=None): except Exception: pass - nullstatus = { - "errors": [], - "start": None, - "finished": None, - } - - if status is None: - status = {"v1": {}} - status["v1"]["datasource"] = None - - for m in modes: - if m not in status["v1"]: - status["v1"][m] = nullstatus.copy() + if mode not in status["v1"]: + # this should never happen, but leave it just to be safe + status["v1"][mode] = nullstatus.copy() v1 = status["v1"] v1["stage"] = mode - v1[mode]["start"] = time.time() - v1[mode]["recoverable_errors"] = next( - filter(lambda h: isinstance(h, LogExporter), root_logger.handlers) + if v1[mode]["start"] and not v1[mode]["finished"]: + # This stage was restarted, which isn't expected. + LOG.warning( + "Unexpected start time found for %s. Was this stage restarted?", + STAGE_NAME[mode], + ) + + v1[mode]["start"] = float(util.uptime()) + preexisting_recoverable_errors = next( + filter(lambda h: isinstance(h, log.LogExporter), root_logger.handlers) ).export_logs() # Write status.json prior to running init / module code @@ -786,27 +802,56 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: errors = ret - v1[mode]["errors"] = [str(e) for e in errors] - + v1[mode]["errors"].extend([str(e) for e in errors]) except Exception as e: - util.logexc(LOG, "failed stage %s", mode) + LOG.exception("failed stage %s", mode) print_exc("failed run of stage %s" % mode) - v1[mode]["errors"] = [str(e)] - - v1[mode]["finished"] = time.time() - v1["stage"] = None + v1[mode]["errors"].append(str(e)) + except SystemExit as e: + # All calls to sys.exit() resume running here. + # silence a pylint false positive + # https://github.com/pylint-dev/pylint/issues/9556 + if e.code: # pylint: disable=using-constant-test + # Only log errors when sys.exit() is called with a non-zero + # exit code + LOG.exception("failed stage %s", mode) + print_exc("failed run of stage %s" % mode) + v1[mode]["errors"].append(f"sys.exit({str(e.code)}) called") + finally: + # Before it exits, cloud-init will: + # 1) Write status.json (and result.json if in Final stage). + # 2) Write the final log message containing module run time. + # 3) Flush any queued reporting event handlers. 
+ v1[mode]["finished"] = float(util.uptime()) + v1["stage"] = None + + # merge new recoverable errors into existing recoverable error list + new_recoverable_errors = next( + filter( + lambda h: isinstance(h, log.LogExporter), root_logger.handlers + ) + ).export_logs() + for key in new_recoverable_errors.keys(): + if key in preexisting_recoverable_errors: + v1[mode]["recoverable_errors"][key] = list( + set( + preexisting_recoverable_errors[key] + + new_recoverable_errors[key] + ) + ) + else: + v1[mode]["recoverable_errors"][key] = new_recoverable_errors[ + key + ] - # Write status.json after running init / module code - v1[mode]["recoverable_errors"] = next( - filter(lambda h: isinstance(h, LogExporter), root_logger.handlers) - ).export_logs() - atomic_helper.write_json(status_path, status) + # Write status.json after running init / module code + atomic_helper.write_json(status_path, status) if mode == "modules-final": # write the 'finished' file errors = [] - for m in modes: - if v1[m]["errors"]: + for m in v1.keys(): + if isinstance(v1[m], dict) and v1[m].get("errors"): errors.extend(v1[m].get("errors", [])) atomic_helper.write_json( @@ -856,7 +901,7 @@ def main_features(name, args): def main(sysv_args=None): - configure_root_logger() + log.configure_root_logger() if not sysv_args: sysv_args = sys.argv parser = argparse.ArgumentParser(prog=sysv_args.pop(0)) @@ -918,11 +963,20 @@ def main(sysv_args=None): parser_mod = subparsers.add_parser( "modules", help="Activate modules using a given configuration key." ) + extra_help = util.deprecate( + deprecated="`init`", + deprecated_version="24.1", + extra_message="Use `cloud-init init` instead.", + skip_log=True, + ).message parser_mod.add_argument( "--mode", "-m", action="store", - help="Module configuration name to use (default: %(default)s).", + help=( + f"Module configuration name to use (default: %(default)s)." + f" {extra_help}" + ), default="config", choices=("init", "config", "final"), ) @@ -1032,7 +1086,7 @@ def main(sysv_args=None): handle_collect_logs_args, ) - logs_parser(parser_collect_logs) + logs_parser(parser=parser_collect_logs) parser_collect_logs.set_defaults( action=("collect-logs", handle_collect_logs_args) ) @@ -1080,9 +1134,11 @@ def main(sysv_args=None): # - if --debug is passed, logging.DEBUG # - if --debug is not passed, logging.WARNING if name not in ("init", "modules"): - setup_basic_logging(logging.DEBUG if args.debug else logging.WARNING) + log.setup_basic_logging( + logging.DEBUG if args.debug else logging.WARNING + ) elif args.debug: - setup_basic_logging() + log.setup_basic_logging() # Setup signal handlers before running signal_handler.attach_handlers() @@ -1132,6 +1188,12 @@ def main(sysv_args=None): args=(name, args), ) reporting.flush_events() + + # handle return code for main_modules, as it is not wrapped by + # status_wrapped when mode == init + if "modules" == name and "init" == args.mode: + retval = len(retval) + return retval diff --git a/.pc/cli-retain-file-argument-as-main-cmd-arg.patch/tests/unittests/cmd/test_main.py b/.pc/cli-retain-file-argument-as-main-cmd-arg.patch/tests/unittests/cmd/test_main.py index 2a9e063fe..f9b3faab1 100644 --- a/.pc/cli-retain-file-argument-as-main-cmd-arg.patch/tests/unittests/cmd/test_main.py +++ b/.pc/cli-retain-file-argument-as-main-cmd-arg.patch/tests/unittests/cmd/test_main.py @@ -54,6 +54,18 @@ def setUp(self): self.patchUtils(self.new_root) self.stderr = StringIO() self.patchStdoutAndStderr(stderr=self.stderr) + # Every cc_ module calls get_meta_doc on import. 
+ # This call will fail if filesystem redirection mocks are in place + # and the module hasn't already been imported which can depend + # on test ordering. + self.m_doc = mock.patch( + "cloudinit.config.schema.get_meta_doc", return_value={} + ) + self.m_doc.start() + + def tearDown(self): + self.m_doc.stop() + super().tearDown() def test_main_init_run_net_runs_modules(self): """Modules like write_files are run in 'net' mode.""" @@ -68,7 +80,7 @@ def test_main_init_run_net_runs_modules(self): (_item1, item2) = wrap_and_call( "cloudinit.cmd.main", { - "util.close_stdin": True, + "close_stdin": True, "netinfo.debug_info": "my net debug info", "util.fixup_output": ("outfmt", "errfmt"), }, @@ -137,7 +149,7 @@ def set_hostname(name, cfg, cloud, args): (_item1, item2) = wrap_and_call( "cloudinit.cmd.main", { - "util.close_stdin": True, + "close_stdin": True, "netinfo.debug_info": "my net debug info", "cc_set_hostname.handle": {"side_effect": set_hostname}, "util.fixup_output": ("outfmt", "errfmt"), diff --git a/.pc/cpick-417ee551-fix-ec2-Ensure-metadata-exists-before-configuring-PBR/cloudinit/sources/DataSourceEc2.py b/.pc/cpick-417ee551-fix-ec2-Ensure-metadata-exists-before-configuring-PBR/cloudinit/sources/DataSourceEc2.py deleted file mode 100644 index 45952dda7..000000000 --- a/.pc/cpick-417ee551-fix-ec2-Ensure-metadata-exists-before-configuring-PBR/cloudinit/sources/DataSourceEc2.py +++ /dev/null @@ -1,1205 +0,0 @@ -# Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Scott Moser -# Author: Juerg Hafliger -# Author: Joshua Harlow -# -# This file is part of cloud-init. See LICENSE file for license information. - -import copy -import logging -import os -import time -from typing import Dict, List - -from cloudinit import dmi, net, sources -from cloudinit import url_helper as uhelp -from cloudinit import util, warnings -from cloudinit.distros import Distro -from cloudinit.event import EventScope, EventType -from cloudinit.net import activators -from cloudinit.net.dhcp import NoDHCPLeaseError -from cloudinit.net.ephemeral import EphemeralIPNetwork -from cloudinit.sources.helpers import ec2 - -LOG = logging.getLogger(__name__) - -SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) - -STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") -STRICT_ID_DEFAULT = "warn" - - -class CloudNames: - ALIYUN = "aliyun" - AWS = "aws" - BRIGHTBOX = "brightbox" - ZSTACK = "zstack" - E24CLOUD = "e24cloud" - OUTSCALE = "outscale" - # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', - # then an attempt at the Ec2 Metadata service will be made. - UNKNOWN = "unknown" - # NO_EC2_METADATA indicates this platform does not have a Ec2 metadata - # service available. No attempt at the Ec2 Metadata service will be made. - NO_EC2_METADATA = "no-ec2-metadata" - - -# Drop when LP: #1988157 tag handling is fixed -def skip_404_tag_errors(exception): - return exception.code == 404 and "meta-data/tags/" in exception.url - - -# Cloud platforms that support IMDSv2 style metadata server -IDMSV2_SUPPORTED_CLOUD_PLATFORMS = [CloudNames.AWS, CloudNames.ALIYUN] - -# Only trigger hook-hotplug on NICs with Ec2 drivers. Avoid triggering -# it on docker virtual NICs and the like. 
LP: #1946003 -_EXTRA_HOTPLUG_UDEV_RULES = """ -ENV{ID_NET_DRIVER}=="vif|ena|ixgbevf", GOTO="cloudinit_hook" -GOTO="cloudinit_end" -""" - - -class DataSourceEc2(sources.DataSource): - dsname = "Ec2" - # Default metadata urls that will be used if none are provided - # They will be checked for 'resolveability' and some of the - # following may be discarded if they do not resolve - metadata_urls = [ - "http://169.254.169.254", - "http://[fd00:ec2::254]", - "http://instance-data.:8773", - ] - - # The minimum supported metadata_version from the ec2 metadata apis - min_metadata_version = "2009-04-04" - - # Priority ordered list of additional metadata versions which will be tried - # for extended metadata content. IPv6 support comes in 2016-09-02. - # Tags support comes in 2021-03-23. - extended_metadata_versions: List[str] = [ - "2021-03-23", - "2018-09-24", - "2016-09-02", - ] - - # Setup read_url parameters per get_url_params. - url_max_wait = 120 - url_timeout = 50 - - _api_token = None # API token for accessing the metadata service - _network_config = sources.UNSET # Used to cache calculated network cfg v1 - - # Whether we want to get network configuration from the metadata service. - perform_dhcp_setup = False - - supported_update_events = { - EventScope.NETWORK: { - EventType.BOOT_NEW_INSTANCE, - EventType.BOOT, - EventType.BOOT_LEGACY, - EventType.HOTPLUG, - } - } - - extra_hotplug_udev_rules = _EXTRA_HOTPLUG_UDEV_RULES - - def __init__(self, sys_cfg, distro, paths): - super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) - self.metadata_address = None - - def _unpickle(self, ci_pkl_version: int) -> None: - super()._unpickle(ci_pkl_version) - self.extra_hotplug_udev_rules = _EXTRA_HOTPLUG_UDEV_RULES - - def _get_cloud_name(self): - """Return the cloud name as identified during _get_data.""" - return identify_platform() - - def _get_data(self): - strict_mode, _sleep = read_strict_mode( - util.get_cfg_by_path( - self.sys_cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT - ), - ("warn", None), - ) - - LOG.debug( - "strict_mode: %s, cloud_name=%s cloud_platform=%s", - strict_mode, - self.cloud_name, - self.platform, - ) - if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN: - return False - elif self.cloud_name == CloudNames.NO_EC2_METADATA: - return False - - if self.perform_dhcp_setup: # Setup networking in init-local stage. - if util.is_FreeBSD(): - LOG.debug("FreeBSD doesn't support running dhclient with -sf") - return False - try: - with EphemeralIPNetwork( - self.distro, - self.distro.fallback_interface, - ipv4=True, - ipv6=True, - ) as netw: - state_msg = f" {netw.state_msg}" if netw.state_msg else "" - self._crawled_metadata = util.log_time( - logfunc=LOG.debug, - msg=f"Crawl of metadata service{state_msg}", - func=self.crawl_metadata, - ) - - except NoDHCPLeaseError: - return False - else: - self._crawled_metadata = util.log_time( - logfunc=LOG.debug, - msg="Crawl of metadata service", - func=self.crawl_metadata, - ) - if not self._crawled_metadata: - return False - self.metadata = self._crawled_metadata.get("meta-data", None) - self.userdata_raw = self._crawled_metadata.get("user-data", None) - self.identity = ( - self._crawled_metadata.get("dynamic", {}) - .get("instance-identity", {}) - .get("document", {}) - ) - return True - - def is_classic_instance(self): - """Report if this instance type is Ec2 Classic (non-vpc).""" - if not self.metadata: - # Can return False on inconclusive as we are also called in - # network_config where metadata will be present. 
- # Secondary call site is in packaging postinst script. - return False - ifaces_md = self.metadata.get("network", {}).get("interfaces", {}) - for _mac, mac_data in ifaces_md.get("macs", {}).items(): - if "vpc-id" in mac_data: - return False - return True - - @property - def launch_index(self): - if not self.metadata: - return None - return self.metadata.get("ami-launch-index") - - @property - def platform(self): - # Handle upgrade path of pickled ds - if not hasattr(self, "_platform_type"): - self._platform_type = DataSourceEc2.dsname.lower() - if not self._platform_type: - self._platform_type = DataSourceEc2.dsname.lower() - return self._platform_type - - # IMDSv2 related parameters from the ec2 metadata api document - @property - def api_token_route(self): - return "latest/api/token" - - @property - def imdsv2_token_ttl_seconds(self): - return "21600" - - @property - def imdsv2_token_put_header(self): - return "X-aws-ec2-metadata-token" - - @property - def imdsv2_token_req_header(self): - return self.imdsv2_token_put_header + "-ttl-seconds" - - @property - def imdsv2_token_redact(self): - return [self.imdsv2_token_put_header, self.imdsv2_token_req_header] - - def get_metadata_api_version(self): - """Get the best supported api version from the metadata service. - - Loop through all extended support metadata versions in order and - return the most-fully featured metadata api version discovered. - - If extended_metadata_versions aren't present, return the datasource's - min_metadata_version. - """ - # Assumes metadata service is already up - url_tmpl = "{0}/{1}/meta-data/instance-id" - headers = self._get_headers() - for api_ver in self.extended_metadata_versions: - url = url_tmpl.format(self.metadata_address, api_ver) - try: - resp = uhelp.readurl( - url=url, - headers=headers, - headers_redact=self.imdsv2_token_redact, - ) - except uhelp.UrlError as e: - LOG.debug("url %s raised exception %s", url, e) - else: - if resp.code == 200: - LOG.debug("Found preferred metadata version %s", api_ver) - return api_ver - elif resp.code == 404: - msg = "Metadata api version %s not present. Headers: %s" - LOG.debug(msg, api_ver, resp.headers) - return self.min_metadata_version - - def get_instance_id(self): - if self.cloud_name == CloudNames.AWS: - # Prefer the ID from the instance identity document, but fall back - if not getattr(self, "identity", None): - # If re-using cached datasource, it's get_data run didn't - # setup self.identity. So we need to do that now. - api_version = self.get_metadata_api_version() - self.identity = ec2.get_instance_identity( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=self.imdsv2_token_redact, - exception_cb=self._refresh_stale_aws_token_cb, - ).get("document", {}) - return self.identity.get( - "instanceId", self.metadata["instance-id"] - ) - else: - return self.metadata["instance-id"] - - def _maybe_fetch_api_token(self, mdurls): - """Get an API token for EC2 Instance Metadata Service. - - On EC2. IMDS will always answer an API token, unless - the instance owner has disabled the IMDS HTTP endpoint or - the network topology conflicts with the configured hop-limit. 
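The token properties above define the whole IMDSv2 handshake: a PUT to `latest/api/token` carrying a TTL header, then the returned token on every subsequent read. A hand-rolled sketch of the same exchange — `requests` is an assumption here; cloud-init itself goes through `url_helper` with redaction and retry callbacks:

```python
import requests

IMDS = "http://169.254.169.254"

# PUT with the TTL header yields a session token (21600 s = 6 hours).
token = requests.put(
    f"{IMDS}/latest/api/token",
    headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
    timeout=2,
).text

# Later reads present the token in X-aws-ec2-metadata-token.
instance_id = requests.get(
    f"{IMDS}/2021-03-23/meta-data/instance-id",
    headers={"X-aws-ec2-metadata-token": token},
    timeout=2,
).text
print(instance_id)
```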
- """ - if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - return - - urls = [] - url2base = {} - url_path = self.api_token_route - request_method = "PUT" - for url in mdurls: - cur = "{0}/{1}".format(url, url_path) - urls.append(cur) - url2base[cur] = url - - # use the self._imds_exception_cb to check for Read errors - LOG.debug("Fetching Ec2 IMDSv2 API Token") - - response = None - url = None - url_params = self.get_url_params() - try: - url, response = uhelp.wait_for_url( - urls=urls, - max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, - status_cb=LOG.warning, - headers_cb=self._get_headers, - exception_cb=self._imds_exception_cb, - request_method=request_method, - headers_redact=self.imdsv2_token_redact, - connect_synchronously=False, - ) - except uhelp.UrlError: - # We use the raised exception to interrupt the retry loop. - # Nothing else to do here. - pass - - if url and response: - self._api_token = response - return url2base[url] - - # If we get here, then wait_for_url timed out, waiting for IMDS - # or the IMDS HTTP endpoint is disabled - return None - - def wait_for_metadata_service(self): - mcfg = self.ds_cfg - - url_params = self.get_url_params() - if url_params.max_wait_seconds <= 0: - return False - - # Remove addresses from the list that wont resolve. - mdurls = mcfg.get("metadata_urls", self.metadata_urls) - filtered = [x for x in mdurls if util.is_resolvable_url(x)] - - if set(filtered) != set(mdurls): - LOG.debug( - "Removed the following from metadata urls: %s", - list((set(mdurls) - set(filtered))), - ) - - if len(filtered): - mdurls = filtered - else: - LOG.warning("Empty metadata url list! using default list") - mdurls = self.metadata_urls - - # try the api token path first - metadata_address = self._maybe_fetch_api_token(mdurls) - # When running on EC2, we always access IMDS with an API token. - # If we could not get an API token, then we assume the IMDS - # endpoint was disabled and we move on without a data source. 
- # Fallback to IMDSv1 if not running on EC2 - if ( - not metadata_address - and self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS - ): - # if we can't get a token, use instance-id path - urls = [] - url2base = {} - url_path = "{ver}/meta-data/instance-id".format( - ver=self.min_metadata_version - ) - request_method = "GET" - for url in mdurls: - cur = "{0}/{1}".format(url, url_path) - urls.append(cur) - url2base[cur] = url - - start_time = time.time() - url, _ = uhelp.wait_for_url( - urls=urls, - max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, - status_cb=LOG.warning, - headers_redact=self.imdsv2_token_redact, - headers_cb=self._get_headers, - request_method=request_method, - ) - - if url: - metadata_address = url2base[url] - - if metadata_address: - self.metadata_address = metadata_address - LOG.debug("Using metadata source: '%s'", self.metadata_address) - elif self.cloud_name in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - LOG.warning("IMDS's HTTP endpoint is probably disabled") - else: - LOG.critical( - "Giving up on md from %s after %s seconds", - urls, - int(time.time() - start_time), - ) - - return bool(metadata_address) - - def device_name_to_device(self, name): - # Consult metadata service, that has - # ephemeral0: sdb - # and return 'sdb' for input 'ephemeral0' - if "block-device-mapping" not in self.metadata: - return None - - # Example: - # 'block-device-mapping': - # {'ami': '/dev/sda1', - # 'ephemeral0': '/dev/sdb', - # 'root': '/dev/sda1'} - found = None - bdm = self.metadata["block-device-mapping"] - if not isinstance(bdm, dict): - LOG.debug("block-device-mapping not a dictionary: '%s'", bdm) - return None - - for entname, device in bdm.items(): - if entname == name: - found = device - break - # LP: #513842 mapping in Euca has 'ephemeral' not 'ephemeral0' - if entname == "ephemeral" and name == "ephemeral0": - found = device - - if found is None: - LOG.debug("Unable to convert %s to a device", name) - return None - - ofound = found - if not found.startswith("/"): - found = "/dev/%s" % found - - if os.path.exists(found): - return found - - remapped = self._remap_device(os.path.basename(found)) - if remapped: - LOG.debug("Remapped device name %s => %s", found, remapped) - return remapped - - # On t1.micro, ephemeral0 will appear in block-device-mapping from - # metadata, but it will not exist on disk (and never will) - # at this point, we've verified that the path did not exist - # in the special case of 'ephemeral0' return None to avoid bogus - # fstab entry (LP: #744019) - if name == "ephemeral0": - return None - return ofound - - @property - def availability_zone(self): - try: - if self.cloud_name == CloudNames.AWS: - return self.identity.get( - "availabilityZone", - self.metadata["placement"]["availability-zone"], - ) - else: - return self.metadata["placement"]["availability-zone"] - except KeyError: - return None - - @property - def region(self): - if self.cloud_name == CloudNames.AWS: - region = self.identity.get("region") - # Fallback to trimming the availability zone if region is missing - if self.availability_zone and not region: - region = self.availability_zone[:-1] - return region - else: - az = self.availability_zone - if az is not None: - return az[:-1] - return None - - def activate(self, cfg, is_new_instance): - if not is_new_instance: - return - if self.cloud_name == CloudNames.UNKNOWN: - warn_if_necessary( - util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT), - cfg, - ) - - @property - def network_config(self): - """Return 
a network config dict for rendering ENI or netplan files.""" - if self._network_config != sources.UNSET: - return self._network_config - - if self.metadata is None: - # this would happen if get_data hadn't been called. leave as UNSET - LOG.warning( - "Unexpected call to network_config when metadata is None." - ) - return None - - result = None - no_network_metadata_on_aws = bool( - "network" not in self.metadata - and self.cloud_name == CloudNames.AWS - ) - if no_network_metadata_on_aws: - LOG.debug( - "Metadata 'network' not present:" - " Refreshing stale metadata from prior to upgrade." - ) - util.log_time( - logfunc=LOG.debug, - msg="Re-crawl of metadata service", - func=self.get_data, - ) - - iface = self.distro.fallback_interface - net_md = self.metadata.get("network") - if isinstance(net_md, dict): - # SRU_BLOCKER: xenial, bionic and eoan should default - # apply_full_imds_network_config to False to retain original - # behavior on those releases. - result = convert_ec2_metadata_network_config( - net_md, - self.distro, - fallback_nic=iface, - full_network_config=util.get_cfg_option_bool( - self.ds_cfg, "apply_full_imds_network_config", True - ), - ) - - # Non-VPC (aka Classic) Ec2 instances need to rewrite the - # network config file every boot due to MAC address change. - if self.is_classic_instance(): - self.default_update_events = copy.deepcopy( - self.default_update_events - ) - self.default_update_events[EventScope.NETWORK].add( - EventType.BOOT - ) - self.default_update_events[EventScope.NETWORK].add( - EventType.BOOT_LEGACY - ) - else: - LOG.warning("Metadata 'network' key not valid: %s.", net_md) - self._network_config = result - - return self._network_config - - def crawl_metadata(self): - """Crawl metadata service when available. - - @returns: Dictionary of crawled metadata content containing the keys: - meta-data, user-data and dynamic. - """ - if not self.wait_for_metadata_service(): - return {} - api_version = self.get_metadata_api_version() - redact = self.imdsv2_token_redact - crawled_metadata = {} - if self.cloud_name in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - exc_cb = self._refresh_stale_aws_token_cb - exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb - skip_cb = None - elif self.cloud_name == CloudNames.OUTSCALE: - exc_cb = exc_cb_ud = None - skip_cb = skip_404_tag_errors - else: - exc_cb = exc_cb_ud = skip_cb = None - try: - raw_userdata = ec2.get_instance_userdata( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=redact, - exception_cb=exc_cb_ud, - ) - crawled_metadata["user-data"] = util.maybe_b64decode(raw_userdata) - crawled_metadata["meta-data"] = ec2.get_instance_metadata( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=redact, - exception_cb=exc_cb, - retrieval_exception_ignore_cb=skip_cb, - ) - if self.cloud_name == CloudNames.AWS: - identity = ec2.get_instance_identity( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=redact, - exception_cb=exc_cb, - ) - crawled_metadata["dynamic"] = {"instance-identity": identity} - except Exception: - util.logexc( - LOG, - "Failed reading from metadata address %s", - self.metadata_address, - ) - return {} - crawled_metadata["_metadata_api_version"] = api_version - return crawled_metadata - - def _refresh_api_token(self, seconds=None): - """Request new metadata API token. - @param seconds: The lifetime of the token in seconds - - @return: The API token or None if unavailable. 
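For reference, the shape `crawl_metadata()` hands back on success — keys per the code above, values illustrative (the `dynamic` key is only populated on AWS):

```python
crawled_metadata = {
    "user-data": b"#cloud-config\n",  # maybe_b64decode()'d raw userdata
    "meta-data": {
        "instance-id": "i-0a33f80f09c96477f",
        "placement": {"availability-zone": "us-east-2b"},
    },
    "dynamic": {"instance-identity": {"document": {"region": "us-west-2"}}},
    "_metadata_api_version": "2021-03-23",
}
```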
- """ - if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - return None - - if seconds is None: - seconds = self.imdsv2_token_ttl_seconds - - LOG.debug("Refreshing Ec2 metadata API token") - request_header = {self.imdsv2_token_req_header: seconds} - token_url = "{}/{}".format(self.metadata_address, self.api_token_route) - try: - response = uhelp.readurl( - token_url, - headers=request_header, - headers_redact=self.imdsv2_token_redact, - request_method="PUT", - ) - except uhelp.UrlError as e: - LOG.warning( - "Unable to get API token: %s raised exception %s", token_url, e - ) - return None - return response.contents - - def _skip_or_refresh_stale_aws_token_cb(self, msg, exception): - """Callback will not retry on SKIP_USERDATA_CODES or if no token - is available.""" - retry = ec2.skip_retry_on_codes( - ec2.SKIP_USERDATA_CODES, msg, exception - ) - if not retry: - return False # False raises exception - return self._refresh_stale_aws_token_cb(msg, exception) - - def _refresh_stale_aws_token_cb(self, msg, exception): - """Exception handler for Ec2 to refresh token if token is stale.""" - if isinstance(exception, uhelp.UrlError) and exception.code == 401: - # With _api_token as None, _get_headers will _refresh_api_token. - LOG.debug("Clearing cached Ec2 API token due to expiry") - self._api_token = None - return True # always retry - - def _imds_exception_cb(self, msg, exception=None): - """Fail quickly on proper AWS if IMDSv2 rejects API token request - - Guidance from Amazon is that if IMDSv2 had disabled token requests - by returning a 403, or cloud-init malformed requests resulting in - other 40X errors, we want the datasource detection to fail quickly - without retries as those symptoms will likely not be resolved by - retries. - - Exceptions such as requests.ConnectionError due to IMDS being - temporarily unroutable or unavailable will still retry due to the - callsite wait_for_url. - """ - if isinstance(exception, uhelp.UrlError): - # requests.ConnectionError will have exception.code == None - if exception.code and exception.code >= 400: - if exception.code == 403: - LOG.warning( - "Ec2 IMDS endpoint returned a 403 error. " - "HTTP endpoint is disabled. Aborting." - ) - else: - LOG.warning( - "Fatal error while requesting Ec2 IMDSv2 API tokens" - ) - raise exception - - def _get_headers(self, url=""): - """Return a dict of headers for accessing a url. - - If _api_token is unset on AWS, attempt to refresh the token via a PUT - and then return the updated token header. - """ - if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - return {} - # Request a 6 hour token if URL is api_token_route - request_token_header = { - self.imdsv2_token_req_header: self.imdsv2_token_ttl_seconds - } - if self.api_token_route in url: - return request_token_header - if not self._api_token: - # If we don't yet have an API token, get one via a PUT against - # api_token_route. This _api_token may get unset by a 403 due - # to an invalid or expired token - self._api_token = self._refresh_api_token() - if not self._api_token: - return {} - return {self.imdsv2_token_put_header: self._api_token} - - -class DataSourceEc2Local(DataSourceEc2): - """Datasource run at init-local which sets up network to query metadata. - - In init-local, no network is available. This subclass sets up minimal - networking with dhclient on a viable nic so that it can talk to the - metadata service. 
If the metadata service provides network configuration - then render the network configuration for that instance based on metadata. - """ - - perform_dhcp_setup = True # Use dhcp before querying metadata - - def get_data(self): - supported_platforms = (CloudNames.AWS, CloudNames.OUTSCALE) - if self.cloud_name not in supported_platforms: - LOG.debug( - "Local Ec2 mode only supported on %s, not %s", - supported_platforms, - self.cloud_name, - ) - return False - return super(DataSourceEc2Local, self).get_data() - - -def read_strict_mode(cfgval, default): - try: - return parse_strict_mode(cfgval) - except ValueError as e: - LOG.warning(e) - return default - - -def parse_strict_mode(cfgval): - # given a mode like: - # true, false, warn,[sleep] - # return tuple with string mode (true|false|warn) and sleep. - if cfgval is True: - return "true", None - if cfgval is False: - return "false", None - - if not cfgval: - return "warn", 0 - - mode, _, sleep = cfgval.partition(",") - if mode not in ("true", "false", "warn"): - raise ValueError( - "Invalid mode '%s' in strict_id setting '%s': " - "Expected one of 'true', 'false', 'warn'." % (mode, cfgval) - ) - - if sleep: - try: - sleep = int(sleep) - except ValueError as e: - raise ValueError( - "Invalid sleep '%s' in strict_id setting '%s': not an integer" - % (sleep, cfgval) - ) from e - else: - sleep = None - - return mode, sleep - - -def warn_if_necessary(cfgval, cfg): - try: - mode, sleep = parse_strict_mode(cfgval) - except ValueError as e: - LOG.warning(e) - return - - if mode == "false": - return - - warnings.show_warning("non_ec2_md", cfg, mode=True, sleep=sleep) - - -def identify_aliyun(data): - if data["product_name"] == "Alibaba Cloud ECS": - return CloudNames.ALIYUN - - -def identify_aws(data): - # data is a dictionary returned by _collect_platform_data. - if data["uuid"].startswith("ec2") and ( - data["uuid_source"] == "hypervisor" or data["uuid"] == data["serial"] - ): - return CloudNames.AWS - - return None - - -def identify_brightbox(data): - if data["serial"].endswith(".brightbox.com"): - return CloudNames.BRIGHTBOX - - -def identify_zstack(data): - if data["asset_tag"].endswith(".zstack.io"): - return CloudNames.ZSTACK - - -def identify_e24cloud(data): - if data["vendor"] == "e24cloud": - return CloudNames.E24CLOUD - - -def identify_outscale(data): - if ( - data["product_name"] == "3DS Outscale VM".lower() - and data["vendor"] == "3DS Outscale".lower() - ): - return CloudNames.OUTSCALE - - -def identify_platform(): - # identify the platform and return an entry in CloudNames. - data = _collect_platform_data() - checks = ( - identify_aws, - identify_brightbox, - identify_zstack, - identify_e24cloud, - identify_outscale, - identify_aliyun, - lambda x: CloudNames.UNKNOWN, - ) - for checker in checks: - try: - result = checker(data) - if result: - return result - except Exception as e: - LOG.warning( - "calling %s with %s raised exception: %s", checker, data, e - ) - - -def _collect_platform_data(): - """Returns a dictionary of platform info from dmi or /sys/hypervisor. 
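`parse_strict_mode` above accepts booleans or a `"mode[,sleep]"` string. A few worked cases, assuming the function is imported from the in-tree module that this `.pc` snapshot mirrors:

```python
from cloudinit.sources.DataSourceEc2 import parse_strict_mode

assert parse_strict_mode(True) == ("true", None)
assert parse_strict_mode(None) == ("warn", 0)  # unset defaults to warn
assert parse_strict_mode("warn,300") == ("warn", 300)
assert parse_strict_mode("false") == ("false", None)
```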
- - Keys in the dictionary are as follows: - uuid: system-uuid from dmi or /sys/hypervisor - uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' - serial: dmi 'system-serial-number' (/sys/.../product_serial) - asset_tag: 'dmidecode -s chassis-asset-tag' - vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor) - product_name: dmi 'system-product-name' (/sys/.../system-manufacturer) - - On Ec2 instances experimentation is that product_serial is upper case, - and product_uuid is lower case. This returns lower case values for both. - """ - data = {} - try: - uuid = util.load_text_file("/sys/hypervisor/uuid").strip() - data["uuid_source"] = "hypervisor" - except Exception: - uuid = dmi.read_dmi_data("system-uuid") - data["uuid_source"] = "dmi" - - if uuid is None: - uuid = "" - data["uuid"] = uuid.lower() - - serial = dmi.read_dmi_data("system-serial-number") - if serial is None: - serial = "" - - data["serial"] = serial.lower() - - asset_tag = dmi.read_dmi_data("chassis-asset-tag") - if asset_tag is None: - asset_tag = "" - - data["asset_tag"] = asset_tag.lower() - - vendor = dmi.read_dmi_data("system-manufacturer") - data["vendor"] = (vendor if vendor else "").lower() - - product_name = dmi.read_dmi_data("system-product-name") - data["product_name"] = (product_name if product_name else "").lower() - - return data - - -def _build_nic_order( - macs_metadata: Dict[str, Dict], macs: List[str] -) -> Dict[str, int]: - """ - Builds a dictionary containing macs as keys nad nic orders as values, - taking into account `network-card` and `device-number` if present. - - Note that the first NIC will be the primary NIC as it will be the one with - [network-card] == 0 and device-number == 0 if present. - - @param macs_metadata: dictionary with mac address as key and contents like: - {"device-number": "0", "interface-id": "...", "local-ipv4s": ...} - @macs: list of macs to consider - - @return: Dictionary with macs as keys and nic orders as values. - """ - nic_order: Dict[str, int] = {} - if len(macs) == 0 or len(macs_metadata) == 0: - return nic_order - - valid_macs_metadata = filter( - # filter out nics without metadata (not a physical nic) - lambda mmd: mmd[1] is not None, - # filter by macs - map(lambda mac: (mac, macs_metadata.get(mac)), macs), - ) - - def _get_key_as_int_or(dikt, key, alt_value): - value = dikt.get(key, None) - if value is not None: - return int(value) - return alt_value - - # Sort by (network_card, device_index) as some instances could have - # multiple network cards with repeated device indexes. - # - # On platforms where network-card and device-number are not present, - # as AliYun, the order will be by mac, as before the introduction of this - # function. - return { - mac: i - for i, (mac, _mac_metadata) in enumerate( - sorted( - valid_macs_metadata, - key=lambda mmd: ( - _get_key_as_int_or( - mmd[1], "network-card", float("infinity") - ), - _get_key_as_int_or( - mmd[1], "device-number", float("infinity") - ), - ), - ) - ) - } - - -def _configure_policy_routing( - dev_config: dict, - *, - nic_name: str, - nic_metadata: dict, - distro: Distro, - is_ipv4: bool, - table: int, -) -> None: - """ - Configure policy-based routing on secondary NICs / secondary IPs to - ensure outgoing packets are routed via the correct interface. - - @param: dev_config: network cfg v2 to be updated inplace. - @param: nic_name: nic name. Only used if ipv4. - @param: nic_metadata: nic metadata from IMDS. - @param: distro: Instance of Distro. Only used if ipv4. 
- @param: is_ipv4: Boolean indicating if we are acting over ipv4 or not. - @param: table: Routing table id. - """ - if not dev_config.get("routes"): - dev_config["routes"] = [] - if is_ipv4: - subnet_prefix_routes = nic_metadata["subnet-ipv4-cidr-block"] - ips = nic_metadata["local-ipv4s"] - try: - lease = distro.dhcp_client.dhcp_discovery(nic_name, distro=distro) - gateway = lease["routers"] - except NoDHCPLeaseError as e: - LOG.warning( - "Could not perform dhcp discovery on %s to find its " - "gateway. Not adding default route via the gateway. " - "Error: %s", - nic_name, - e, - ) - else: - # Add default route via the NIC's gateway - dev_config["routes"].append( - { - "to": "0.0.0.0/0", - "via": gateway, - "table": table, - }, - ) - else: - subnet_prefix_routes = nic_metadata["subnet-ipv6-cidr-blocks"] - ips = nic_metadata["ipv6s"] - - subnet_prefix_routes = ( - [subnet_prefix_routes] - if isinstance(subnet_prefix_routes, str) - else subnet_prefix_routes - ) - for prefix_route in subnet_prefix_routes: - dev_config["routes"].append( - { - "to": prefix_route, - "table": table, - }, - ) - - if not dev_config.get("routing-policy"): - dev_config["routing-policy"] = [] - # Packets coming from any IP associated with the current NIC - # will be routed using `table` routing table - ips = [ips] if isinstance(ips, str) else ips - for ip in ips: - dev_config["routing-policy"].append( - { - "from": ip, - "table": table, - }, - ) - - -def convert_ec2_metadata_network_config( - network_md, - distro, - macs_to_nics=None, - fallback_nic=None, - full_network_config=True, -): - """Convert ec2 metadata to network config version 2 data dict. - - @param: network_md: 'network' portion of EC2 metadata. - generally formed as {"interfaces": {"macs": {}} where - 'macs' is a dictionary with mac address as key and contents like: - {"device-number": "0", "interface-id": "...", "local-ipv4s": ...} - @param: distro: instance of Distro. - @param: macs_to_nics: Optional dict of mac addresses and nic names. If - not provided, get_interfaces_by_mac is called to get it from the OS. - @param: fallback_nic: Optionally provide the primary nic interface name. - This nic will be guaranteed to minimally have a dhcp4 configuration. - @param: full_network_config: Boolean set True to configure all networking - presented by IMDS. This includes rendering secondary IPv4 and IPv6 - addresses on all NICs and rendering network config on secondary NICs. - If False, only the primary nic will be configured and only with dhcp - (IPv4/IPv6). - - @return A dict of network config version 2 based on the metadata and macs. 
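To make the conversion concrete: roughly what `convert_ec2_metadata_network_config` emits for a two-NIC AWS instance on a netplan distro, per the logic above. The MACs, addresses and gateway are invented for the example; the secondary NIC's routes land in `table = 100 + nic_idx`, i.e. 101:

```python
example_netcfg = {
    "version": 2,
    "ethernets": {
        "eth0": {  # primary NIC: device-number 0, route-metric 100
            "dhcp4": True,
            "dhcp4-overrides": {"route-metric": 100},
            "dhcp6": False,
            "match": {"macaddress": "0a:07:84:3d:bd:4e"},
            "set-name": "eth0",
        },
        "eth1": {  # secondary NIC: policy routes keep replies on this NIC
            "dhcp4": True,
            "dhcp4-overrides": {"route-metric": 200, "use-routes": True},
            "dhcp6": False,
            "match": {"macaddress": "0a:07:84:3d:bd:4f"},
            "set-name": "eth1",
            "routes": [
                {"to": "0.0.0.0/0", "via": "172.31.16.1", "table": 101},
                {"to": "172.31.16.0/20", "table": 101},
            ],
            "routing-policy": [{"from": "172.31.20.5", "table": 101}],
        },
    },
}
```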
- """ - netcfg = {"version": 2, "ethernets": {}} - if not macs_to_nics: - macs_to_nics = net.get_interfaces_by_mac() - macs_metadata = network_md["interfaces"]["macs"] - - if not full_network_config: - for mac, nic_name in macs_to_nics.items(): - if nic_name == fallback_nic: - break - dev_config = { - "dhcp4": True, - "dhcp6": False, - "match": {"macaddress": mac.lower()}, - "set-name": nic_name, - } - nic_metadata = macs_metadata.get(mac) - if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured - dev_config["dhcp6"] = True - netcfg["ethernets"][nic_name] = dev_config - return netcfg - # Apply network config for all nics and any secondary IPv4/v6 addresses - is_netplan = distro.network_activator == activators.NetplanActivator - macs = sorted(macs_to_nics.keys()) - nic_order = _build_nic_order(macs_metadata, macs) - for mac in macs: - nic_name = macs_to_nics[mac] - nic_metadata = macs_metadata.get(mac) - if not nic_metadata: - continue # Not a physical nic represented in metadata - nic_idx = nic_order[mac] - is_primary_nic = nic_idx == 0 - # nic_idx + 1 to start route_metric at 100 (nic_idx is 0-indexed) - dhcp_override = {"route-metric": (nic_idx + 1) * 100} - dev_config = { - "dhcp4": True, - "dhcp4-overrides": dhcp_override, - "dhcp6": False, - "match": {"macaddress": mac.lower()}, - "set-name": nic_name, - } - # This config only works on systems using Netplan because Networking - # config V2 does not support `routing-policy`, but this config is - # passed through on systems using Netplan. - # See: https://github.com/canonical/cloud-init/issues/4862 - # - # If device-number is not present (AliYun or other ec2-like platforms), - # do not configure source-routing as we cannot determine which is the - # primary NIC. - table = 100 + nic_idx - if ( - is_netplan - and nic_metadata.get("device-number") - and not is_primary_nic - ): - dhcp_override["use-routes"] = True - _configure_policy_routing( - dev_config, - distro=distro, - nic_name=nic_name, - nic_metadata=nic_metadata, - is_ipv4=True, - table=table, - ) - if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured - dev_config["dhcp6"] = True - dev_config["dhcp6-overrides"] = dhcp_override - if ( - is_netplan - and nic_metadata.get("device-number") - and not is_primary_nic - ): - _configure_policy_routing( - dev_config, - distro=distro, - nic_name=nic_name, - nic_metadata=nic_metadata, - is_ipv4=False, - table=table, - ) - dev_config["addresses"] = get_secondary_addresses(nic_metadata, mac) - if not dev_config["addresses"]: - dev_config.pop("addresses") # Since we found none configured - - netcfg["ethernets"][nic_name] = dev_config - # Remove route-metric dhcp overrides and routes / routing-policy if only - # one nic configured - if len(netcfg["ethernets"]) == 1: - for nic_name in netcfg["ethernets"].keys(): - netcfg["ethernets"][nic_name].pop("dhcp4-overrides") - netcfg["ethernets"][nic_name].pop("dhcp6-overrides", None) - netcfg["ethernets"][nic_name].pop("routes", None) - netcfg["ethernets"][nic_name].pop("routing-policy", None) - return netcfg - - -def get_secondary_addresses(nic_metadata, mac): - """Parse interface-specific nic metadata and return any secondary IPs - - :return: List of secondary IPv4 or IPv6 addresses to configure on the - interface - """ - ipv4s = nic_metadata.get("local-ipv4s") - ipv6s = nic_metadata.get("ipv6s") - addresses = [] - # In version < 2018-09-24 local_ipv4s or ipv6s is a str with one IP - if bool(isinstance(ipv4s, list) and len(ipv4s) > 1): - addresses.extend( - _get_secondary_addresses( - 
nic_metadata, "subnet-ipv4-cidr-block", mac, ipv4s, "24" - ) - ) - if bool(isinstance(ipv6s, list) and len(ipv6s) > 1): - addresses.extend( - _get_secondary_addresses( - nic_metadata, "subnet-ipv6-cidr-block", mac, ipv6s, "128" - ) - ) - return sorted(addresses) - - -def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix): - """Return list of IP addresses as CIDRs for secondary IPs - - The CIDR prefix will be default_prefix if cidr_key is absent or not - parseable in nic_metadata. - """ - addresses = [] - cidr = nic_metadata.get(cidr_key) - prefix = default_prefix - if not cidr or len(cidr.split("/")) != 2: - ip_type = "ipv4" if "ipv4" in cidr_key else "ipv6" - LOG.warning( - "Could not parse %s %s for mac %s. %s network" - " config prefix defaults to /%s", - cidr_key, - cidr, - mac, - ip_type, - prefix, - ) - else: - prefix = cidr.split("/")[1] - # We know we have > 1 ips for in metadata for this IP type - for ip in ips[1:]: - addresses.append("{ip}/{prefix}".format(ip=ip, prefix=prefix)) - return addresses - - -# Used to match classes to dependencies -datasources = [ - (DataSourceEc2Local, (sources.DEP_FILESYSTEM,)), # Run at init-local - (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), -] - - -# Return a list of data sources that match this set of dependencies -def get_datasource_list(depends): - return sources.list_from_depends(depends, datasources) diff --git a/.pc/cpick-417ee551-fix-ec2-Ensure-metadata-exists-before-configuring-PBR/tests/unittests/sources/test_ec2.py b/.pc/cpick-417ee551-fix-ec2-Ensure-metadata-exists-before-configuring-PBR/tests/unittests/sources/test_ec2.py deleted file mode 100644 index 599d70d8e..000000000 --- a/.pc/cpick-417ee551-fix-ec2-Ensure-metadata-exists-before-configuring-PBR/tests/unittests/sources/test_ec2.py +++ /dev/null @@ -1,1527 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -import copy -import json -import threading -from unittest import mock - -import pytest -import requests -import responses - -from cloudinit import helpers -from cloudinit.net import activators -from cloudinit.sources import DataSourceEc2 as ec2 -from tests.unittests import helpers as test_helpers -from tests.unittests.util import MockDistro - -DYNAMIC_METADATA = { - "instance-identity": { - "document": json.dumps( - { - "devpayProductCodes": None, - "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], - "availabilityZone": "us-west-2b", - "privateIp": "10.158.112.84", - "version": "2017-09-30", - "instanceId": "my-identity-id", - "billingProducts": None, - "instanceType": "t2.micro", - "accountId": "123456789012", - "imageId": "ami-5fb8c835", - "pendingTime": "2016-11-19T16:32:11Z", - "architecture": "x86_64", - "kernelId": None, - "ramdiskId": None, - "region": "us-west-2", - } - ) - } -} - - -# collected from api version 2016-09-02/ with -# python3 -c 'import json -# from cloudinit.sources.helpers.ec2 import get_instance_metadata as gm -# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))' -# Note that the MAC addresses have been modified to sort in the opposite order -# to the device-number attribute, to test LP: #1876312 -DEFAULT_METADATA = { - "ami-id": "ami-8b92b4ee", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, - "hostname": "ip-172-31-31-158.us-east-2.compute.internal", - "instance-action": "none", - "instance-id": "i-0a33f80f09c96477f", - "instance-type": "t2.small", - "local-hostname": "ip-172-3-3-15.us-east-2.compute.internal", - "local-ipv4": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "metrics": {"vhostmd": ''}, - "network": { - "interfaces": { - "macs": { - "06:17:04:d7:26:09": { - "device-number": "0", - "interface-id": "eni-e44ef49e", - "ipv4-associations": {"13.59.77.202": "172.3.3.15"}, - "ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc", - "local-hostname": ( - "ip-172-3-3-15.us-east-2.compute.internal" - ), - "local-ipv4s": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "owner-id": "950047163771", - "public-hostname": ( - "ec2-13-59-77-202.us-east-2.compute.amazonaws.com" - ), - "public-ipv4s": "13.59.77.202", - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "2600:1f16:aeb:b20b::/64", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56", - }, - "06:17:04:d7:26:08": { - "device-number": "1", # Only IPv4 local config - "interface-id": "eni-e44ef49f", - "ipv4-associations": {"": "172.3.3.16"}, - "ipv6s": "", # No IPv6 config - "local-hostname": ( - "ip-172-3-3-16.us-east-2.compute.internal" - ), - "local-ipv4s": "172.3.3.16", - "mac": "06:17:04:d7:26:08", - "owner-id": "950047163771", - "public-hostname": ( - "ec2-172-3-3-16.us-east-2.compute.amazonaws.com" - ), - "public-ipv4s": "", # No public ipv4 config - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "", - }, - } - } - }, - "placement": {"availability-zone": "us-east-2b"}, - "profile": "default-hvm", - 
"public-hostname": "ec2-13-59-77-202.us-east-2.compute.amazonaws.com", - "public-ipv4": "13.59.77.202", - "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, - "reservation-id": "r-01efbc9996bac1bd6", - "security-groups": "my-wide-open", - "services": {"domain": "amazonaws.com", "partition": "aws"}, -} - -# collected from api version 2018-09-24/ with -# python3 -c 'import json -# from cloudinit.sources.helpers.ec2 import get_instance_metadata as gm -# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))' - -NIC1_MD_IPV4_IPV6_MULTI_IP = { - "device-number": "0", - "interface-id": "eni-0d6335689899ce9cc", - "ipv4-associations": {"18.218.219.181": "172.31.44.13"}, - "ipv6s": [ - "2600:1f16:292:100:c187:593c:4349:136", - "2600:1f16:292:100:f153:12a3:c37c:11f9", - "2600:1f16:292:100:f152:2222:3333:4444", - ], - "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "local-ipv4s": ["172.31.44.13", "172.31.45.70"], - "mac": "0a:07:84:3d:6e:38", - "owner-id": "329910648901", - "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com", - "public-ipv4s": "18.218.219.181", - "security-group-ids": "sg-0c387755222ba8d2e", - "security-groups": "launch-wizard-4", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "subnet_ipv6_cidr_blocks": "2600:1f16:292:100::/64", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56", -} - -NIC2_MD = { - "device-number": "1", - "interface-id": "eni-043cdce36ded5e79f", - "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", - "local-ipv4s": "172.31.47.221", - "mac": "0a:75:69:92:e2:16", - "owner-id": "329910648901", - "security-group-ids": "sg-0d68fef37d8cc9b77", - "security-groups": "launch-wizard-17", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", -} - -NIC2_MD_IPV4_IPV6_MULTI_IP = { - "device-number": "1", - "interface-id": "eni-043cdce36ded5e79f", - "ipv6s": [ - "2600:1f16:292:100:c187:593c:4349:136", - "2600:1f16:292:100:f153:12a3:c37c:11f9", - ], - "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", - "local-ipv4s": "172.31.47.221", - "mac": "0a:75:69:92:e2:16", - "owner-id": "329910648901", - "security-group-ids": "sg-0d68fef37d8cc9b77", - "security-groups": "launch-wizard-17", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "subnet-ipv6-cidr-blocks": "2600:1f16:292:100::/64", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:292:100::/56", -} - -SECONDARY_IP_METADATA_2018_09_24 = { - "ami-id": "ami-0986c2ac728528ac2", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, - "events": {"maintenance": {"history": "[]", "scheduled": "[]"}}, - "hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "identity-credentials": { - "ec2": { - "info": { - "AccountId": "329910648901", - "Code": "Success", - "LastUpdated": "2019-07-06T14:22:56Z", - } - } - }, - "instance-action": "none", - "instance-id": "i-069e01e8cc43732f8", - "instance-type": "t2.micro", - "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "local-ipv4": "172.31.44.13", - "mac": "0a:07:84:3d:6e:38", - "metrics": {"vhostmd": ''}, 
- "network": { - "interfaces": { - "macs": { - "0a:07:84:3d:6e:38": NIC1_MD_IPV4_IPV6_MULTI_IP, - } - } - }, - "placement": {"availability-zone": "us-east-2c"}, - "profile": "default-hvm", - "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com", - "public-ipv4": "18.218.219.181", - "public-keys": {"yourkeyname,e": ["ssh-rsa AAAAW...DZ yourkeyname"]}, - "reservation-id": "r-09b4917135cdd33be", - "security-groups": "launch-wizard-4", - "services": {"domain": "amazonaws.com", "partition": "aws"}, -} - -M_PATH = "cloudinit.sources.DataSourceEc2." -M_PATH_NET = "cloudinit.sources.DataSourceEc2.net." - -TAGS_METADATA_2021_03_23: dict = { - **DEFAULT_METADATA, - "tags": { - "instance": { - "Environment": "production", - "Application": "test", - "TagWithoutValue": "", - } - }, -} - - -@pytest.fixture(autouse=True) -def disable_is_resolvable(): - with mock.patch("cloudinit.sources.DataSourceEc2.util.is_resolvable"): - yield - - -def _register_ssh_keys(rfunc, base_url, keys_data): - """handle ssh key inconsistencies. - - public-keys in the ec2 metadata is inconsistently formated compared - to other entries. - Given keys_data of {name1: pubkey1, name2: pubkey2} - - This registers the following urls: - base_url 0={name1}\n1={name2} # (for each name) - base_url/ 0={name1}\n1={name2} # (for each name) - base_url/0 openssh-key - base_url/0/ openssh-key - base_url/0/openssh-key {pubkey1} - base_url/0/openssh-key/ {pubkey1} - ... - """ - - base_url = base_url.rstrip("/") - odd_index = "\n".join( - ["{0}={1}".format(n, name) for n, name in enumerate(sorted(keys_data))] - ) - - rfunc(base_url, odd_index) - rfunc(base_url + "/", odd_index) - - for n, name in enumerate(sorted(keys_data)): - val = keys_data[name] - if isinstance(val, list): - val = "\n".join(val) - burl = base_url + "/%s" % n - rfunc(burl, "openssh-key") - rfunc(burl + "/", "openssh-key") - rfunc(burl + "/%s/openssh-key" % name, val) - rfunc(burl + "/%s/openssh-key/" % name, val) - - -def register_mock_metaserver(base_url, data, responses_mock=None): - """Register with responses a ec2 metadata like service serving 'data'. - - If given a dictionary, it will populate urls under base_url for - that dictionary. For example, input of - {"instance-id": "i-abc", "mac": "00:16:3e:00:00:00"} - populates - base_url with 'instance-id\nmac' - base_url/ with 'instance-id\nmac' - base_url/instance-id with i-abc - base_url/mac with 00:16:3e:00:00:00 - In the index, references to lists or dictionaries have a trailing /. 
- """ - responses_mock = responses_mock or responses - - def register_helper(register, base_url, body): - if not isinstance(base_url, str): - register(base_url, body) - return - base_url = base_url.rstrip("/") - if isinstance(body, str): - register(base_url, body) - elif isinstance(body, list): - register(base_url, "\n".join(body) + "\n") - register(base_url + "/", "\n".join(body) + "\n") - elif isinstance(body, dict): - vals = [] - for k, v in body.items(): - if k == "public-keys": - _register_ssh_keys(register, base_url + "/public-keys/", v) - continue - suffix = k.rstrip("/") - if not isinstance(v, (str, list)): - suffix += "/" - vals.append(suffix) - url = base_url + "/" + suffix - register_helper(register, url, v) - register(base_url, "\n".join(vals) + "\n") - register(base_url + "/", "\n".join(vals) + "\n") - elif body is None: - register(base_url, "not found", status=404) - - def myreg(*argc, **kwargs): - url, body = argc - method = responses.PUT if "latest/api/token" in url else responses.GET - status = kwargs.get("status", 200) - return responses_mock.add(method, url, body, status=status) - - register_helper(myreg, base_url, data) - - -class TestEc2(test_helpers.ResponsesTestCase): - with_logs = True - maxDiff = None - - valid_platform_data = { - "uuid": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", - "uuid_source": "dmi", - "serial": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", - } - - def setUp(self): - super(TestEc2, self).setUp() - self.datasource = ec2.DataSourceEc2 - self.metadata_addr = self.datasource.metadata_urls[0] - self.tmp = self.tmp_dir() - - def data_url(self, version, data_item="meta-data"): - """Return a metadata url based on the version provided.""" - return "/".join([self.metadata_addr, version, data_item]) - - def _patch_add_cleanup(self, mpath, *args, **kwargs): - p = mock.patch(mpath, *args, **kwargs) - p.start() - self.addCleanup(p.stop) - - def _setup_ds( - self, sys_cfg, platform_data, md, md_version=None, distro=None - ): - self.uris = [] - distro = distro or mock.MagicMock() - distro.get_tmp_exec_path = self.tmp_dir - paths = helpers.Paths({"run_dir": self.tmp}) - if sys_cfg is None: - sys_cfg = {} - ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) - event = threading.Event() - p = mock.patch("time.sleep", event.wait) - p.start() - - def _mock_sleep(): - event.set() - p.stop() - - self.addCleanup(_mock_sleep) - if not md_version: - md_version = ds.min_metadata_version - if platform_data is not None: - self._patch_add_cleanup( - "cloudinit.sources.DataSourceEc2._collect_platform_data", - return_value=platform_data, - ) - - if md: - all_versions = [ - ds.min_metadata_version - ] + ds.extended_metadata_versions - token_url = self.data_url("latest", data_item="api/token") - register_mock_metaserver(token_url, "API-TOKEN", self.responses) - for version in all_versions: - metadata_url = self.data_url(version) + "/" - if version == md_version: - # Register all metadata for desired version - register_mock_metaserver( - metadata_url, - md.get("md", DEFAULT_METADATA), - self.responses, - ) - userdata_url = self.data_url( - version, data_item="user-data" - ) - register_mock_metaserver( - userdata_url, md.get("ud", ""), self.responses - ) - identity_url = self.data_url( - version, data_item="dynamic/instance-identity" - ) - register_mock_metaserver( - identity_url, - md.get("id", DYNAMIC_METADATA), - self.responses, - ) - else: - instance_id_url = metadata_url + "instance-id" - if version == ds.min_metadata_version: - # Add min_metadata_version service 
availability check - register_mock_metaserver( - instance_id_url, - DEFAULT_METADATA["instance-id"], - self.responses, - ) - else: - # Register 404s for all unrequested extended versions - register_mock_metaserver( - instance_id_url, None, self.responses - ) - return ds - - def test_network_config_property_returns_version_2_network_data(self): - """network_config property returns network version 2 for metadata""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - find_fallback_path = M_PATH_NET + "find_fallback_nic" - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = "eth9" - ds.get_data() - - mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": "06:17:04:d7:26:09"}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - get_interface_mac_path = M_PATH_NET + "get_interface_mac" - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - m_find_fallback.return_value = "eth9" - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_set_dhcp4(self): - """network_config property configures dhcp4 on nics with local-ipv4s. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. - """ - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - find_fallback_path = M_PATH_NET + "find_fallback_nic" - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = "eth9" - ds.get_data() - - mac1 = "06:17:04:d7:26:08" # IPv4 only in DEFAULT_METADATA - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": mac1.lower()}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - get_interface_mac_path = M_PATH_NET + "get_interface_mac" - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - dhcp_client = ds.distro.dhcp_client - dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - m_find_fallback.return_value = "eth9" - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_secondary_private_ips(self): - """network_config property configures any secondary ipv4 addresses. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. 
- """ - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": SECONDARY_IP_METADATA_2018_09_24}, - ) - find_fallback_path = M_PATH_NET + "find_fallback_nic" - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = "eth9" - ds.get_data() - - mac1 = "0a:07:84:3d:6e:38" # 1 secondary IPv4 and 2 secondary IPv6 - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": mac1}, - "set-name": "eth9", - "addresses": [ - "172.31.45.70/20", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - "dhcp4": True, - "dhcp6": True, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - get_interface_mac_path = M_PATH_NET + "get_interface_mac" - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - m_find_fallback.return_value = "eth9" - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_is_cached_in_datasource(self): - """network_config property is cached in DataSourceEc2.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - ds._network_config = {"cached": "data"} - self.assertEqual({"cached": "data"}, ds.network_config) - - @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") - def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): - """Refresh the network_config Ec2 cache if network key is absent. - - This catches an upgrade issue where obj.pkl contained stale metadata - which lacked newly required network key. 
- """ - old_metadata = copy.deepcopy(DEFAULT_METADATA) - old_metadata.pop("network") - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": old_metadata}, - ) - self.assertTrue(ds.get_data()) - - # Workaround https://github.com/getsentry/responses/issues/212 - if hasattr(self.responses, "_urls"): - # Can be removed when Bionic is EOL - for index, url in enumerate(self.responses._urls): - if url["url"].startswith( - "http://169.254.169.254/2009-04-04/meta-data/" - ): - del self.responses._urls[index] - elif hasattr(self.responses, "_matches"): - # Can be removed when Focal is EOL - for index, response in enumerate(self.responses._matches): - if response.url.startswith( - "http://169.254.169.254/2009-04-04/meta-data/" - ): - del self.responses._matches[index] - - # Provide new revision of metadata that contains network data - register_mock_metaserver( - "http://169.254.169.254/2009-04-04/meta-data/", - DEFAULT_METADATA, - self.responses, - ) - mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA - get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac" - ds.distro.fallback_nic = "eth9" - with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - nc = ds.network_config # Will re-crawl network metadata - self.assertIsNotNone(nc) - self.assertIn( - "Refreshing stale metadata from prior to upgrade", - self.logs.getvalue(), - ) - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - self.assertEqual(expected, ds.network_config) - - def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): - """get_instance-id gets DataSourceEc2Local.identity if not present. - - This handles an upgrade case where the old pickled datasource didn't - set up self.identity, but 'systemctl cloud-init init' runs - get_instance_id which traces on missing self.identity. lp:1748354. - """ - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - # Mock 404s on all versions except latest - all_versions = [ - ds.min_metadata_version - ] + ds.extended_metadata_versions - for ver in all_versions[:-1]: - register_mock_metaserver( - "http://[fd00:ec2::254]/{0}/meta-data/instance-id".format(ver), - None, - self.responses, - ) - - ds.metadata_address = "http://[fd00:ec2::254]" - register_mock_metaserver( - "{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]), - DEFAULT_METADATA, - self.responses, - ) - # Register dynamic/instance-identity document which we now read. 
- register_mock_metaserver( - "{0}/{1}/dynamic/".format(ds.metadata_address, all_versions[-1]), - DYNAMIC_METADATA, - self.responses, - ) - ds._cloud_name = ec2.CloudNames.AWS - # Setup cached metadata on the Datasource - ds.metadata = DEFAULT_METADATA - self.assertEqual("my-identity-id", ds.get_instance_id()) - - def test_classic_instance_true(self): - """If no vpc-id in metadata, is_classic_instance must return true.""" - md_copy = copy.deepcopy(DEFAULT_METADATA) - ifaces_md = md_copy.get("network", {}).get("interfaces", {}) - for _mac, mac_data in ifaces_md.get("macs", {}).items(): - if "vpc-id" in mac_data: - del mac_data["vpc-id"] - - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": md_copy}, - ) - self.assertTrue(ds.get_data()) - self.assertTrue(ds.is_classic_instance()) - - def test_classic_instance_false(self): - """If vpc-id in metadata, is_classic_instance must return false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - self.assertTrue(ds.get_data()) - self.assertFalse(ds.is_classic_instance()) - - def test_aws_inaccessible_imds_service_fails_with_retries(self): - """Inaccessibility of http://169.254.169.254 are retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md=None, - ) - - conn_error = requests.exceptions.ConnectionError( - "[Errno 113] no route to host" - ) - - mock_success = mock.MagicMock(contents=b"fakesuccess") - mock_success.ok.return_value = True - - with mock.patch("cloudinit.url_helper.readurl") as m_readurl: - # yikes, this endpoint needs help - m_readurl.side_effect = ( - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - mock_success, - ) - with mock.patch("cloudinit.url_helper.time.sleep"): - self.assertTrue(ds.wait_for_metadata_service()) - - # Just one /latest/api/token request - self.assertEqual(19, len(m_readurl.call_args_list)) - for readurl_call in m_readurl.call_args_list: - self.assertIn("latest/api/token", readurl_call[0][0]) - - def test_aws_token_403_fails_without_retries(self): - """Verify that 403s fetching AWS tokens are not retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md=None, - ) - - token_url = self.data_url("latest", data_item="api/token") - self.responses.add(responses.PUT, token_url, status=403) - self.assertFalse(ds.get_data()) - # Just one /latest/api/token request - logs = self.logs.getvalue() - expected_logs = [ - "WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is" - " disabled. 
Aborting.", - "WARNING: IMDS's HTTP endpoint is probably disabled", - ] - for log in expected_logs: - self.assertIn(log, logs) - - def test_aws_token_redacted(self): - """Verify that aws tokens are redacted when logged.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - self.assertTrue(ds.get_data()) - all_logs = self.logs.getvalue().splitlines() - REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" - REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" - logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] - logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] - logs_with_token = [log for log in all_logs if "API-TOKEN" in log] - self.assertEqual(1, len(logs_with_redacted_ttl)) - self.assertEqual(83, len(logs_with_redacted)) - self.assertEqual(0, len(logs_with_token)) - - @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") - def test_valid_platform_with_strict_true(self, m_dhcp): - """Valid platform data should return true with strict_id true.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - self.assertEqual(0, m_dhcp.call_count) - self.assertEqual("aws", ds.cloud_name) - self.assertEqual("ec2", ds.platform_type) - self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform) - - def test_valid_platform_with_strict_false(self): - """Valid platform data should return true with strict_id false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - - def test_unknown_platform_with_strict_true(self): - """Unknown platform data with strict_id true should return False.""" - uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" - ds = self._setup_ds( - platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertFalse(ret) - - def test_unknown_platform_with_strict_false(self): - """Unknown platform data with strict_id false should return True.""" - uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" - ds = self._setup_ds( - platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - - def test_ec2_local_returns_false_on_non_aws(self): - """DataSourceEc2Local returns False when platform is not AWS.""" - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - platform_attrs = [ - attr - for attr in ec2.CloudNames.__dict__.keys() - if not attr.startswith("__") - ] - for attr_name in platform_attrs: - platform_name = getattr(ec2.CloudNames, attr_name) - if platform_name not in ["aws", "outscale"]: - ds._cloud_name = platform_name - ret = ds.get_data() - self.assertEqual("ec2", ds.platform_type) - self.assertFalse(ret) - message = ( - "Local Ec2 mode only supported on ('aws', 'outscale')," - " not {0}".format(platform_name) - ) - self.assertIn(message, self.logs.getvalue()) - - 
@mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") - def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): - """DataSourceEc2Local returns False on BSD. - - FreeBSD dhclient doesn't support dhclient -sf to run in a sandbox. - """ - m_is_freebsd.return_value = True - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertFalse(ret) - self.assertIn( - "FreeBSD doesn't support running dhclient with -sf", - self.logs.getvalue(), - ) - - @mock.patch("cloudinit.net.ephemeral.EphemeralIPv6Network") - @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network") - @mock.patch("cloudinit.distros.net.find_fallback_nic") - @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") - @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") - def test_ec2_local_performs_dhcp_on_non_bsd( - self, m_is_bsd, m_dhcp, m_fallback_nic, m_net4, m_net6 - ): - """Ec2Local returns True for valid platform data on non-BSD with dhcp. - - DataSourceEc2Local will setup initial IPv4 network via dhcp discovery. - Then the metadata services is crawled for more network config info. - When the platform data is valid, return True. - """ - - m_fallback_nic.return_value = "eth9" - m_is_bsd.return_value = False - m_dhcp.return_value = { - "interface": "eth9", - "fixed-address": "192.168.2.9", - "routers": "192.168.2.1", - "subnet-mask": "255.255.255.0", - "broadcast-address": "192.168.2.255", - } - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - distro=MockDistro("", {}, {}), - ) - - ret = ds.get_data() - self.assertTrue(ret) - m_dhcp.assert_called_once_with(ds.distro, "eth9", None) - m_net4.assert_called_once_with( - ds.distro, - broadcast="192.168.2.255", - interface="eth9", - ip="192.168.2.9", - prefix_or_mask="255.255.255.0", - router="192.168.2.1", - static_routes=None, - ) - self.assertIn("Crawl of metadata service ", self.logs.getvalue()) - - def test_get_instance_tags(self): - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": TAGS_METADATA_2021_03_23}, - ) - self.assertTrue(ds.get_data()) - self.assertIn("tags", ds.metadata) - self.assertIn("instance", ds.metadata["tags"]) - instance_tags = ds.metadata["tags"]["instance"] - self.assertEqual(instance_tags["Application"], "test") - self.assertEqual(instance_tags["Environment"], "production") - - -class TestGetSecondaryAddresses(test_helpers.CiTestCase): - mac = "06:17:04:d7:26:ff" - with_logs = True - - def test_md_with_no_secondary_addresses(self): - """Empty list is returned when nic metadata contains no secondary ip""" - self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac)) - - def test_md_with_secondary_v4_and_v6_addresses(self): - """All secondary addresses are returned from nic metadata""" - self.assertEqual( - [ - "172.31.45.70/20", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac), - ) - - def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self): - """Any invalid subnet-ipv(4|6)-cidr-block values use defaults""" - invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP) - 
invalid_cidr_md["subnet-ipv4-cidr-block"] = "something-unexpected" - invalid_cidr_md["subnet-ipv6-cidr-block"] = "not/sure/what/this/is" - self.assertEqual( - [ - "172.31.45.70/24", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - ec2.get_secondary_addresses(invalid_cidr_md, self.mac), - ) - expected_logs = [ - "WARNING: Could not parse subnet-ipv4-cidr-block" - " something-unexpected for mac 06:17:04:d7:26:ff." - " ipv4 network config prefix defaults to /24", - "WARNING: Could not parse subnet-ipv6-cidr-block" - " not/sure/what/this/is for mac 06:17:04:d7:26:ff." - " ipv6 network config prefix defaults to /128", - ] - logs = self.logs.getvalue() - for log in expected_logs: - self.assertIn(log, logs) - - -class TestBuildNicOrder: - @pytest.mark.parametrize( - ["macs_metadata", "macs", "expected"], - [ - pytest.param({}, [], {}, id="all_empty"), - pytest.param( - {}, ["0a:f7:8d:96:f2:a1"], {}, id="empty_macs_metadata" - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - } - }, - [], - {}, - id="empty_macs", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], - {"0a:f7:8d:96:f2:a1": 0, "0a:0d:dd:44:cd:7b": 1}, - id="no-device-number-info", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1"], - {"0a:f7:8d:96:f2:a1": 0}, - id="no-device-number-info-subset", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], - {"0a:0d:dd:44:cd:7b": 0, "0a:f7:8d:96:f2:a1": 1}, - id="device-numbers", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "network-card": "0", - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "network-card": "1", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - "0a:f7:8d:96:f2:a2": { - "network-card": "2", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - [ - "0a:f7:8d:96:f2:a1", - "0a:0d:dd:44:cd:7b", - "0a:f7:8d:96:f2:a2", - ], - { - "0a:0d:dd:44:cd:7b": 0, - "0a:f7:8d:96:f2:a1": 1, - "0a:f7:8d:96:f2:a2": 2, - }, - id="network-cardes", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "network-card": "0", - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "network-card": "1", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - "0a:f7:8d:96:f2:a2": { - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - [ - "0a:f7:8d:96:f2:a1", - "0a:0d:dd:44:cd:7b", - "0a:f7:8d:96:f2:a2", - ], - { - "0a:0d:dd:44:cd:7b": 0, - "0a:f7:8d:96:f2:a1": 1, - "0a:f7:8d:96:f2:a2": 2, - }, - id="network-card-partially-missing", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a9"], - {}, - id="macs-not-in-md", - ), - ], - ) - def test_build_nic_order(self, macs_metadata, macs, expected): - assert expected == ec2._build_nic_order(macs_metadata, macs) - - -class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): - def setUp(self): - super(TestConvertEc2MetadataNetworkConfig, self).setUp() - self.mac1 = 
"06:17:04:d7:26:09" - interface_dict = copy.deepcopy( - DEFAULT_METADATA["network"]["interfaces"]["macs"][self.mac1] - ) - # These tests are written assuming the base interface doesn't have IPv6 - interface_dict.pop("ipv6s") - self.network_metadata = { - "interfaces": {"macs": {self.mac1: interface_dict}} - } - - def test_convert_ec2_metadata_network_config_skips_absent_macs(self): - """Any mac absent from metadata is skipped by network config.""" - macs_to_nics = {self.mac1: "eth9", "DE:AD:BE:EF:FF:FF": "vitualnic2"} - - # DE:AD:BE:EF:FF:FF represented by OS but not in metadata - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - self.network_metadata, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self): - """Config dhcp6 when ipv6s is in metadata for a mac.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_local_only_dhcp4(self): - """Config dhcp4 when there are no public addresses in public-ipv4s.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["local-ipv4s"] = "172.3.3.15" - nic1_metadata.pop("public-ipv4s") - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): - """Config dhcp4 on fallback_nic when there are no ipv4 addresses.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["public-ipv4s"] = "" - - # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config. 
- expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): - """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_multi_nics_ipv4(self): - """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. - Source-routing configured for secondary NICs (routing-policy and extra - routing table).""" - mac2 = "06:17:04:d7:26:08" - macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} - network_metadata_both = copy.deepcopy(self.network_metadata) - # Add 2nd nic info - network_metadata_both["interfaces"]["macs"][mac2] = NIC2_MD - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": True, - "dhcp6-overrides": {"route-metric": 100}, - }, - "eth10": { - "match": {"macaddress": mac2}, - "set-name": "eth10", - "dhcp4": True, - "dhcp4-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "dhcp6": False, - "routes": [ - # via DHCP gateway - {"to": "0.0.0.0/0", "via": "172.31.1.0", "table": 101}, - # to NIC2_MD["subnet-ipv4-cidr-block"] - {"to": "172.31.32.0/20", "table": 101}, - ], - "routing-policy": [ - # NIC2_MD["local-ipv4s"] - {"from": "172.31.47.221", "table": 101} - ], - }, - }, - } - distro = mock.Mock() - distro.network_activator = activators.NetplanActivator - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_multi_nics_ipv4_ipv6_multi_ip( - self, - ): - """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. 
- Source-routing configured for secondary NICs (routing-policy and extra - routing table).""" - mac2 = "06:17:04:d7:26:08" - macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} - network_metadata_both = copy.deepcopy(self.network_metadata) - # Add 2nd nic info - network_metadata_both["interfaces"]["macs"][ - mac2 - ] = NIC2_MD_IPV4_IPV6_MULTI_IP - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": True, - "match": {"macaddress": "06:17:04:d7:26:09"}, - "set-name": "eth9", - "dhcp6-overrides": {"route-metric": 100}, - }, - "eth10": { - "dhcp4": True, - "dhcp4-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "dhcp6": True, - "match": {"macaddress": "06:17:04:d7:26:08"}, - "set-name": "eth10", - "routes": [ - # via DHCP gateway - {"to": "0.0.0.0/0", "via": "172.31.1.0", "table": 101}, - # to NIC2_MD["subnet-ipv4-cidr-block"] - {"to": "172.31.32.0/20", "table": 101}, - # to NIC2_MD["subnet-ipv6-cidr-blocks"] - {"to": "2600:1f16:292:100::/64", "table": 101}, - ], - "routing-policy": [ - # NIC2_MD["local-ipv4s"] - {"from": "172.31.47.221", "table": 101}, - { - "from": "2600:1f16:292:100:c187:593c:4349:136", - "table": 101, - }, - { - "from": "2600:1f16:292:100:f153:12a3:c37c:11f9", - "table": 101, - }, - ], - "dhcp6-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "addresses": ["2600:1f16:292:100:f153:12a3:c37c:11f9/128"], - }, - }, - } - distro = mock.Mock() - distro.network_activator = activators.NetplanActivator - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): - """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self): - """Convert Ec2 Metadata calls get_interfaces_by_mac by default.""" - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - distro = mock.Mock() - with mock.patch(patch_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {self.mac1: "eth9"} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - self.network_metadata, distro - ), - ) - - -class TesIdentifyPlatform(test_helpers.CiTestCase): - def collmock(self, **kwargs): - """return non-special _collect_platform_data updated with changes.""" - unspecial = { - 
"asset_tag": "3857-0037-2746-7462-1818-3997-77", - "serial": "H23-C4J3JV-R6", - "uuid": "81c7e555-6471-4833-9551-1ab366c4cfd2", - "uuid_source": "dmi", - "vendor": "tothecloud", - "product_name": "cloudproduct", - } - unspecial.update(**kwargs) - return unspecial - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_aliyun(self, m_collect): - """aliyun should be identified if product name equals to - Alibaba Cloud ECS - """ - m_collect.return_value = self.collmock( - product_name="Alibaba Cloud ECS" - ) - self.assertEqual(ec2.CloudNames.ALIYUN, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_zstack(self, m_collect): - """zstack should be identified if chassis-asset-tag - ends in .zstack.io - """ - m_collect.return_value = self.collmock(asset_tag="123456.zstack.io") - self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_zstack_full_domain_only(self, m_collect): - """zstack asset-tag matching should match only on - full domain boundary. - """ - m_collect.return_value = self.collmock(asset_tag="123456.buzzstack.io") - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_e24cloud(self, m_collect): - """e24cloud identified if vendor is e24cloud""" - m_collect.return_value = self.collmock(vendor="e24cloud") - self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_e24cloud_negative(self, m_collect): - """e24cloud identified if vendor is e24cloud""" - m_collect.return_value = self.collmock(vendor="e24cloudyday") - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - - # Outscale - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_outscale(self, m_collect): - """Should return true if the dmi product data has expected value.""" - m_collect.return_value = self.collmock( - vendor="3DS Outscale".lower(), - product_name="3DS Outscale VM".lower(), - ) - self.assertEqual(ec2.CloudNames.OUTSCALE, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_false_on_wrong_sys_vendor(self, m_collect): - """Should return false on empty value returned.""" - m_collect.return_value = self.collmock( - vendor="Not 3DS Outscale".lower(), - product_name="3DS Outscale VM".lower(), - ) - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_false_on_wrong_product_name(self, m_collect): - """Should return false on an unrelated string.""" - m_collect.return_value = self.collmock( - vendor="3DS Outscale".lower(), - product_name="Not 3DS Outscale VM".lower(), - ) - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) diff --git a/.pc/cpick-51c6569f-fix-snapd-ubuntu-do-not-snap-refresh-when-snap-absent/cloudinit/distros/ubuntu.py b/.pc/cpick-51c6569f-fix-snapd-ubuntu-do-not-snap-refresh-when-snap-absent/cloudinit/distros/ubuntu.py deleted file mode 100644 index 1c7da9a3b..000000000 --- a/.pc/cpick-51c6569f-fix-snapd-ubuntu-do-not-snap-refresh-when-snap-absent/cloudinit/distros/ubuntu.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2012 Canonical Ltd. 
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser
-# Author: Juerg Haefliger
-# Author: Joshua Harlow
-# Author: Ben Howard
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import copy
-
-from cloudinit.distros import PREFERRED_NTP_CLIENTS, debian
-from cloudinit.distros.package_management.snap import Snap
-from cloudinit.net.netplan import CLOUDINIT_NETPLAN_FILE
-
-
-class Distro(debian.Distro):
- def __init__(self, name, cfg, paths):
- super().__init__(name, cfg, paths)
- # Ubuntu-specific network cfg locations
- self.network_conf_fn = {
- "eni": "/etc/network/interfaces.d/50-cloud-init.cfg",
- "netplan": CLOUDINIT_NETPLAN_FILE,
- }
- self.renderer_configs = {
- "eni": {
- "eni_path": self.network_conf_fn["eni"],
- "eni_header": debian.NETWORK_FILE_HEADER,
- },
- "netplan": {
- "netplan_path": self.network_conf_fn["netplan"],
- "netplan_header": debian.NETWORK_FILE_HEADER,
- "postcmds": True,
- },
- }
- self.snap = Snap(self._runner)
- self.package_managers.append(self.snap)
-
- def package_command(self, command, args=None, pkgs=None):
- super().package_command(command, args, pkgs)
- self.snap.upgrade_packages()
-
- @property
- def preferred_ntp_clients(self):
- """The preferred ntp client is dependent on the version."""
- if not self._preferred_ntp_clients:
- self._preferred_ntp_clients = copy.deepcopy(PREFERRED_NTP_CLIENTS)
- return self._preferred_ntp_clients
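The deleted ubuntu.py above layers snap handling onto the Debian distro: a Snap package manager is registered alongside apt, and `package_command` unconditionally triggers a snap refresh after the apt operation (the cherry-pick named in the path exists precisely to skip that refresh when snap is absent). A toy sketch of the override pattern, with all names as illustrative stand-ins:

```python
# Toy model of the subclass hook; not cloud-init's Distro classes.
class BaseDistro:
    def package_command(self, command, args=None, pkgs=None):
        print(f"apt {command} {pkgs or []}")


class SnapManager:
    def upgrade_packages(self):
        print("snap refresh")


class UbuntuLikeDistro(BaseDistro):
    def __init__(self):
        self.snap = SnapManager()

    def package_command(self, command, args=None, pkgs=None):
        super().package_command(command, args, pkgs)
        # The deleted version refreshed snaps unconditionally here; the
        # cherry-picked fix guards this on snapd actually being present.
        self.snap.upgrade_packages()


UbuntuLikeDistro().package_command("upgrade", pkgs=["vim"])
# -> apt upgrade ['vim']
# -> snap refresh
```
diff --git a/.pc/cpick-51c6569f-fix-snapd-ubuntu-do-not-snap-refresh-when-snap-absent/tests/unittests/distros/test_ubuntu.py b/.pc/cpick-51c6569f-fix-snapd-ubuntu-do-not-snap-refresh-when-snap-absent/tests/unittests/distros/test_ubuntu.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/.pc/cpick-74dc7cce-test-Fix-failing-test_ec2.py-test-5324/tests/unittests/sources/test_ec2.py b/.pc/cpick-74dc7cce-test-Fix-failing-test_ec2.py-test-5324/tests/unittests/sources/test_ec2.py
deleted file mode 100644
index 43d237347..000000000
--- a/.pc/cpick-74dc7cce-test-Fix-failing-test_ec2.py-test-5324/tests/unittests/sources/test_ec2.py
+++ /dev/null
@@ -1,1615 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.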
- -import copy -import json -import threading -from unittest import mock - -import pytest -import requests -import responses - -from cloudinit import helpers -from cloudinit.net import netplan -from cloudinit.sources import DataSourceEc2 as ec2 -from tests.unittests import helpers as test_helpers -from tests.unittests.util import MockDistro - -DYNAMIC_METADATA = { - "instance-identity": { - "document": json.dumps( - { - "devpayProductCodes": None, - "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], - "availabilityZone": "us-west-2b", - "privateIp": "10.158.112.84", - "version": "2017-09-30", - "instanceId": "my-identity-id", - "billingProducts": None, - "instanceType": "t2.micro", - "accountId": "123456789012", - "imageId": "ami-5fb8c835", - "pendingTime": "2016-11-19T16:32:11Z", - "architecture": "x86_64", - "kernelId": None, - "ramdiskId": None, - "region": "us-west-2", - } - ) - } -} - - -# collected from api version 2016-09-02/ with -# python3 -c 'import json -# from cloudinit.sources.helpers.ec2 import get_instance_metadata as gm -# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))' -# Note that the MAC addresses have been modified to sort in the opposite order -# to the device-number attribute, to test LP: #1876312 -DEFAULT_METADATA = { - "ami-id": "ami-8b92b4ee", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, - "hostname": "ip-172-31-31-158.us-east-2.compute.internal", - "instance-action": "none", - "instance-id": "i-0a33f80f09c96477f", - "instance-type": "t2.small", - "local-hostname": "ip-172-3-3-15.us-east-2.compute.internal", - "local-ipv4": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "metrics": {"vhostmd": ''}, - "network": { - "interfaces": { - "macs": { - "06:17:04:d7:26:09": { - "device-number": "0", - "interface-id": "eni-e44ef49e", - "ipv4-associations": {"13.59.77.202": "172.3.3.15"}, - "ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc", - "local-hostname": ( - "ip-172-3-3-15.us-east-2.compute.internal" - ), - "local-ipv4s": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "owner-id": "950047163771", - "public-hostname": ( - "ec2-13-59-77-202.us-east-2.compute.amazonaws.com" - ), - "public-ipv4s": "13.59.77.202", - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "2600:1f16:aeb:b20b::/64", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56", - }, - "06:17:04:d7:26:08": { - "device-number": "1", # Only IPv4 local config - "interface-id": "eni-e44ef49f", - "ipv4-associations": {"": "172.3.3.16"}, - "ipv6s": "", # No IPv6 config - "local-hostname": ( - "ip-172-3-3-16.us-east-2.compute.internal" - ), - "local-ipv4s": "172.3.3.16", - "mac": "06:17:04:d7:26:08", - "owner-id": "950047163771", - "public-hostname": ( - "ec2-172-3-3-16.us-east-2.compute.amazonaws.com" - ), - "public-ipv4s": "", # No public ipv4 config - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "", - }, - } - } - }, - "placement": {"availability-zone": "us-east-2b"}, - "profile": "default-hvm", - 
"public-hostname": "ec2-13-59-77-202.us-east-2.compute.amazonaws.com", - "public-ipv4": "13.59.77.202", - "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, - "reservation-id": "r-01efbc9996bac1bd6", - "security-groups": "my-wide-open", - "services": {"domain": "amazonaws.com", "partition": "aws"}, -} - -# collected from api version 2018-09-24/ with -# python3 -c 'import json -# from cloudinit.sources.helpers.ec2 import get_instance_metadata as gm -# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))' - -NIC1_MD_IPV4_IPV6_MULTI_IP = { - "device-number": "0", - "interface-id": "eni-0d6335689899ce9cc", - "ipv4-associations": {"18.218.219.181": "172.31.44.13"}, - "ipv6s": [ - "2600:1f16:292:100:c187:593c:4349:136", - "2600:1f16:292:100:f153:12a3:c37c:11f9", - "2600:1f16:292:100:f152:2222:3333:4444", - ], - "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "local-ipv4s": ["172.31.44.13", "172.31.45.70"], - "mac": "0a:07:84:3d:6e:38", - "owner-id": "329910648901", - "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com", - "public-ipv4s": "18.218.219.181", - "security-group-ids": "sg-0c387755222ba8d2e", - "security-groups": "launch-wizard-4", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "subnet_ipv6_cidr_blocks": "2600:1f16:292:100::/64", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56", -} - -NIC2_MD = { - "device-number": "1", - "interface-id": "eni-043cdce36ded5e79f", - "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", - "local-ipv4s": "172.31.47.221", - "mac": "0a:75:69:92:e2:16", - "owner-id": "329910648901", - "security-group-ids": "sg-0d68fef37d8cc9b77", - "security-groups": "launch-wizard-17", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", -} - -NIC2_MD_IPV4_IPV6_MULTI_IP = { - "device-number": "1", - "interface-id": "eni-043cdce36ded5e79f", - "ipv6s": [ - "2600:1f16:292:100:c187:593c:4349:136", - "2600:1f16:292:100:f153:12a3:c37c:11f9", - ], - "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", - "local-ipv4s": "172.31.47.221", - "mac": "0a:75:69:92:e2:16", - "owner-id": "329910648901", - "security-group-ids": "sg-0d68fef37d8cc9b77", - "security-groups": "launch-wizard-17", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "subnet-ipv6-cidr-blocks": "2600:1f16:292:100::/64", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:292:100::/56", -} - -MULTI_NIC_V6_ONLY_MD = { - "macs": { - "02:6b:df:a2:4b:2b": { - "device-number": "1", - "interface-id": "eni-0669816d0cf606123", - "ipv6s": "2600:1f16:67f:f201:8d2e:4d1f:9e80:4ab9", - "local-hostname": "i-0951b6d0b66337123.us-east-2.compute.internal", - "mac": "02:6b:df:a2:4b:2b", - "owner-id": "483410185123", - "security-group-ids": "sg-0bf34e5c3cde1d123", - "security-groups": "default", - "subnet-id": "subnet-0903f279682c66123", - "subnet-ipv6-cidr-blocks": "2600:1f16:67f:f201:0:0:0:0/64", - "vpc-id": "vpc-0ac1befb8c824a123", - "vpc-ipv4-cidr-block": "192.168.0.0/20", - "vpc-ipv4-cidr-blocks": "192.168.0.0/20", - "vpc-ipv6-cidr-blocks": "2600:1f16:67f:f200:0:0:0:0/56", - }, - "02:7c:03:b8:5c:af": { - "device-number": "0", - 
"interface-id": "eni-0f3cddb84c16e1123", - "ipv6s": "2600:1f16:67f:f201:6613:29a2:dbf7:2f1f", - "local-hostname": "i-0951b6d0b66337123.us-east-2.compute.internal", - "mac": "02:7c:03:b8:5c:af", - "owner-id": "483410185123", - "security-group-ids": "sg-0bf34e5c3cde1d123", - "security-groups": "default", - "subnet-id": "subnet-0903f279682c66123", - "subnet-ipv6-cidr-blocks": "2600:1f16:67f:f201:0:0:0:0/64", - "vpc-id": "vpc-0ac1befb8c824a123", - "vpc-ipv4-cidr-block": "192.168.0.0/20", - "vpc-ipv4-cidr-blocks": "192.168.0.0/20", - "vpc-ipv6-cidr-blocks": "2600:1f16:67f:f200:0:0:0:0/56", - }, - } -} - -SECONDARY_IP_METADATA_2018_09_24 = { - "ami-id": "ami-0986c2ac728528ac2", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, - "events": {"maintenance": {"history": "[]", "scheduled": "[]"}}, - "hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "identity-credentials": { - "ec2": { - "info": { - "AccountId": "329910648901", - "Code": "Success", - "LastUpdated": "2019-07-06T14:22:56Z", - } - } - }, - "instance-action": "none", - "instance-id": "i-069e01e8cc43732f8", - "instance-type": "t2.micro", - "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "local-ipv4": "172.31.44.13", - "mac": "0a:07:84:3d:6e:38", - "metrics": {"vhostmd": ''}, - "network": { - "interfaces": { - "macs": { - "0a:07:84:3d:6e:38": NIC1_MD_IPV4_IPV6_MULTI_IP, - } - } - }, - "placement": {"availability-zone": "us-east-2c"}, - "profile": "default-hvm", - "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com", - "public-ipv4": "18.218.219.181", - "public-keys": {"yourkeyname,e": ["ssh-rsa AAAAW...DZ yourkeyname"]}, - "reservation-id": "r-09b4917135cdd33be", - "security-groups": "launch-wizard-4", - "services": {"domain": "amazonaws.com", "partition": "aws"}, -} - -M_PATH = "cloudinit.sources.DataSourceEc2." -M_PATH_NET = "cloudinit.sources.DataSourceEc2.net." - -TAGS_METADATA_2021_03_23: dict = { - **DEFAULT_METADATA, - "tags": { - "instance": { - "Environment": "production", - "Application": "test", - "TagWithoutValue": "", - } - }, -} - - -@pytest.fixture(autouse=True) -def disable_is_resolvable(): - with mock.patch("cloudinit.sources.DataSourceEc2.util.is_resolvable"): - yield - - -def _register_ssh_keys(rfunc, base_url, keys_data): - """handle ssh key inconsistencies. - - public-keys in the ec2 metadata is inconsistently formated compared - to other entries. - Given keys_data of {name1: pubkey1, name2: pubkey2} - - This registers the following urls: - base_url 0={name1}\n1={name2} # (for each name) - base_url/ 0={name1}\n1={name2} # (for each name) - base_url/0 openssh-key - base_url/0/ openssh-key - base_url/0/openssh-key {pubkey1} - base_url/0/openssh-key/ {pubkey1} - ... - """ - - base_url = base_url.rstrip("/") - odd_index = "\n".join( - ["{0}={1}".format(n, name) for n, name in enumerate(sorted(keys_data))] - ) - - rfunc(base_url, odd_index) - rfunc(base_url + "/", odd_index) - - for n, name in enumerate(sorted(keys_data)): - val = keys_data[name] - if isinstance(val, list): - val = "\n".join(val) - burl = base_url + "/%s" % n - rfunc(burl, "openssh-key") - rfunc(burl + "/", "openssh-key") - rfunc(burl + "/%s/openssh-key" % name, val) - rfunc(burl + "/%s/openssh-key/" % name, val) - - -def register_mock_metaserver(base_url, data, responses_mock=None): - """Register with responses a ec2 metadata like service serving 'data'. 
- - If given a dictionary, it will populate urls under base_url for - that dictionary. For example, input of - {"instance-id": "i-abc", "mac": "00:16:3e:00:00:00"} - populates - base_url with 'instance-id\nmac' - base_url/ with 'instance-id\nmac' - base_url/instance-id with i-abc - base_url/mac with 00:16:3e:00:00:00 - In the index, references to lists or dictionaries have a trailing /. - """ - responses_mock = responses_mock or responses - - def register_helper(register, base_url, body): - if not isinstance(base_url, str): - register(base_url, body) - return - base_url = base_url.rstrip("/") - if isinstance(body, str): - register(base_url, body) - elif isinstance(body, list): - register(base_url, "\n".join(body) + "\n") - register(base_url + "/", "\n".join(body) + "\n") - elif isinstance(body, dict): - vals = [] - for k, v in body.items(): - if k == "public-keys": - _register_ssh_keys(register, base_url + "/public-keys/", v) - continue - suffix = k.rstrip("/") - if not isinstance(v, (str, list)): - suffix += "/" - vals.append(suffix) - url = base_url + "/" + suffix - register_helper(register, url, v) - register(base_url, "\n".join(vals) + "\n") - register(base_url + "/", "\n".join(vals) + "\n") - elif body is None: - register(base_url, "not found", status=404) - - def myreg(*argc, **kwargs): - url, body = argc - method = responses.PUT if "latest/api/token" in url else responses.GET - status = kwargs.get("status", 200) - return responses_mock.add(method, url, body, status=status) - - register_helper(myreg, base_url, data) - - -class TestEc2(test_helpers.ResponsesTestCase): - with_logs = True - maxDiff = None - - valid_platform_data = { - "uuid": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", - "uuid_source": "dmi", - "serial": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", - } - - def setUp(self): - super(TestEc2, self).setUp() - self.datasource = ec2.DataSourceEc2 - self.metadata_addr = self.datasource.metadata_urls[0] - self.tmp = self.tmp_dir() - - def data_url(self, version, data_item="meta-data"): - """Return a metadata url based on the version provided.""" - return "/".join([self.metadata_addr, version, data_item]) - - def _patch_add_cleanup(self, mpath, *args, **kwargs): - p = mock.patch(mpath, *args, **kwargs) - p.start() - self.addCleanup(p.stop) - - def _setup_ds( - self, sys_cfg, platform_data, md, md_version=None, distro=None - ): - self.uris = [] - distro = distro or mock.MagicMock() - distro.get_tmp_exec_path = self.tmp_dir - paths = helpers.Paths({"run_dir": self.tmp}) - if sys_cfg is None: - sys_cfg = {} - ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) - event = threading.Event() - p = mock.patch("time.sleep", event.wait) - p.start() - - def _mock_sleep(): - event.set() - p.stop() - - self.addCleanup(_mock_sleep) - if not md_version: - md_version = ds.min_metadata_version - if platform_data is not None: - self._patch_add_cleanup( - "cloudinit.sources.DataSourceEc2._collect_platform_data", - return_value=platform_data, - ) - - if md: - all_versions = [ - ds.min_metadata_version - ] + ds.extended_metadata_versions - token_url = self.data_url("latest", data_item="api/token") - register_mock_metaserver(token_url, "API-TOKEN", self.responses) - for version in all_versions: - metadata_url = self.data_url(version) + "/" - if version == md_version: - # Register all metadata for desired version - register_mock_metaserver( - metadata_url, - md.get("md", DEFAULT_METADATA), - self.responses, - ) - userdata_url = self.data_url( - version, data_item="user-data" - ) - 
register_mock_metaserver( - userdata_url, md.get("ud", ""), self.responses - ) - identity_url = self.data_url( - version, data_item="dynamic/instance-identity" - ) - register_mock_metaserver( - identity_url, - md.get("id", DYNAMIC_METADATA), - self.responses, - ) - else: - instance_id_url = metadata_url + "instance-id" - if version == ds.min_metadata_version: - # Add min_metadata_version service availability check - register_mock_metaserver( - instance_id_url, - DEFAULT_METADATA["instance-id"], - self.responses, - ) - else: - # Register 404s for all unrequested extended versions - register_mock_metaserver( - instance_id_url, None, self.responses - ) - return ds - - def test_network_config_property_returns_version_2_network_data(self): - """network_config property returns network version 2 for metadata""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - find_fallback_path = M_PATH_NET + "find_fallback_nic" - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = "eth9" - ds.get_data() - - mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": "06:17:04:d7:26:09"}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - get_interface_mac_path = M_PATH_NET + "get_interface_mac" - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - m_find_fallback.return_value = "eth9" - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_set_dhcp4(self): - """network_config property configures dhcp4 on nics with local-ipv4s. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. - """ - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - find_fallback_path = M_PATH_NET + "find_fallback_nic" - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = "eth9" - ds.get_data() - - mac1 = "06:17:04:d7:26:08" # IPv4 only in DEFAULT_METADATA - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": mac1.lower()}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - get_interface_mac_path = M_PATH_NET + "get_interface_mac" - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - dhcp_client = ds.distro.dhcp_client - dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - m_find_fallback.return_value = "eth9" - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_secondary_private_ips(self): - """network_config property configures any secondary ipv4 addresses. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. 
-        """
-        ds = self._setup_ds(
-            platform_data=self.valid_platform_data,
-            sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
-            md={"md": SECONDARY_IP_METADATA_2018_09_24},
-        )
-        find_fallback_path = M_PATH_NET + "find_fallback_nic"
-        with mock.patch(find_fallback_path) as m_find_fallback:
-            m_find_fallback.return_value = "eth9"
-            ds.get_data()
-
-        mac1 = "0a:07:84:3d:6e:38"  # 1 secondary IPv4 and 2 secondary IPv6
-        expected = {
-            "version": 2,
-            "ethernets": {
-                "eth9": {
-                    "match": {"macaddress": mac1},
-                    "set-name": "eth9",
-                    "addresses": [
-                        "172.31.45.70/20",
-                        "2600:1f16:292:100:f152:2222:3333:4444/128",
-                        "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
-                    ],
-                    "dhcp4": True,
-                    "dhcp6": True,
-                }
-            },
-        }
-        patch_path = M_PATH_NET + "get_interfaces_by_mac"
-        get_interface_mac_path = M_PATH_NET + "get_interface_mac"
-        with mock.patch(patch_path) as m_get_interfaces_by_mac:
-            with mock.patch(find_fallback_path) as m_find_fallback:
-                with mock.patch(get_interface_mac_path) as m_get_mac:
-                    m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
-                    m_find_fallback.return_value = "eth9"
-                    m_get_mac.return_value = mac1
-                    self.assertEqual(expected, ds.network_config)
-
-    def test_network_config_property_is_cached_in_datasource(self):
-        """network_config property is cached in DataSourceEc2."""
-        ds = self._setup_ds(
-            platform_data=self.valid_platform_data,
-            sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
-            md={"md": DEFAULT_METADATA},
-        )
-        ds._network_config = {"cached": "data"}
-        self.assertEqual({"cached": "data"}, ds.network_config)
-
-    @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
-    def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp):
-        """Refresh the network_config Ec2 cache if the network key is absent.
-
-        This catches an upgrade issue where obj.pkl contained stale metadata
-        which lacked the newly required network key.
- """ - old_metadata = copy.deepcopy(DEFAULT_METADATA) - old_metadata.pop("network") - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": old_metadata}, - ) - self.assertTrue(ds.get_data()) - - # Workaround https://github.com/getsentry/responses/issues/212 - if hasattr(self.responses, "_urls"): - # Can be removed when Bionic is EOL - for index, url in enumerate(self.responses._urls): - if url["url"].startswith( - "http://169.254.169.254/2009-04-04/meta-data/" - ): - del self.responses._urls[index] - elif hasattr(self.responses, "_matches"): - # Can be removed when Focal is EOL - for index, response in enumerate(self.responses._matches): - if response.url.startswith( - "http://169.254.169.254/2009-04-04/meta-data/" - ): - del self.responses._matches[index] - - # Provide new revision of metadata that contains network data - register_mock_metaserver( - "http://169.254.169.254/2009-04-04/meta-data/", - DEFAULT_METADATA, - self.responses, - ) - mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA - get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac" - ds.distro.fallback_nic = "eth9" - with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - nc = ds.network_config # Will re-crawl network metadata - self.assertIsNotNone(nc) - self.assertIn( - "Refreshing stale metadata from prior to upgrade", - self.logs.getvalue(), - ) - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - self.assertEqual(expected, ds.network_config) - - def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): - """get_instance-id gets DataSourceEc2Local.identity if not present. - - This handles an upgrade case where the old pickled datasource didn't - set up self.identity, but 'systemctl cloud-init init' runs - get_instance_id which traces on missing self.identity. lp:1748354. - """ - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - # Mock 404s on all versions except latest - all_versions = [ - ds.min_metadata_version - ] + ds.extended_metadata_versions - for ver in all_versions[:-1]: - register_mock_metaserver( - "http://[fd00:ec2::254]/{0}/meta-data/instance-id".format(ver), - None, - self.responses, - ) - - ds.metadata_address = "http://[fd00:ec2::254]" - register_mock_metaserver( - "{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]), - DEFAULT_METADATA, - self.responses, - ) - # Register dynamic/instance-identity document which we now read. 
- register_mock_metaserver( - "{0}/{1}/dynamic/".format(ds.metadata_address, all_versions[-1]), - DYNAMIC_METADATA, - self.responses, - ) - ds._cloud_name = ec2.CloudNames.AWS - # Setup cached metadata on the Datasource - ds.metadata = DEFAULT_METADATA - self.assertEqual("my-identity-id", ds.get_instance_id()) - - def test_classic_instance_true(self): - """If no vpc-id in metadata, is_classic_instance must return true.""" - md_copy = copy.deepcopy(DEFAULT_METADATA) - ifaces_md = md_copy.get("network", {}).get("interfaces", {}) - for _mac, mac_data in ifaces_md.get("macs", {}).items(): - if "vpc-id" in mac_data: - del mac_data["vpc-id"] - - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": md_copy}, - ) - self.assertTrue(ds.get_data()) - self.assertTrue(ds.is_classic_instance()) - - def test_classic_instance_false(self): - """If vpc-id in metadata, is_classic_instance must return false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - self.assertTrue(ds.get_data()) - self.assertFalse(ds.is_classic_instance()) - - def test_aws_inaccessible_imds_service_fails_with_retries(self): - """Inaccessibility of http://169.254.169.254 are retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md=None, - ) - - conn_error = requests.exceptions.ConnectionError( - "[Errno 113] no route to host" - ) - - mock_success = mock.MagicMock(contents=b"fakesuccess") - mock_success.ok.return_value = True - - with mock.patch("cloudinit.url_helper.readurl") as m_readurl: - # yikes, this endpoint needs help - m_readurl.side_effect = ( - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - mock_success, - ) - with mock.patch("cloudinit.url_helper.time.sleep"): - self.assertTrue(ds.wait_for_metadata_service()) - - # Just one /latest/api/token request - self.assertEqual(19, len(m_readurl.call_args_list)) - for readurl_call in m_readurl.call_args_list: - self.assertIn("latest/api/token", readurl_call[0][0]) - - def test_aws_token_403_fails_without_retries(self): - """Verify that 403s fetching AWS tokens are not retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md=None, - ) - - token_url = self.data_url("latest", data_item="api/token") - self.responses.add(responses.PUT, token_url, status=403) - self.assertFalse(ds.get_data()) - # Just one /latest/api/token request - logs = self.logs.getvalue() - expected_logs = [ - "WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is" - " disabled. 
Aborting.", - "WARNING: IMDS's HTTP endpoint is probably disabled", - ] - for log in expected_logs: - self.assertIn(log, logs) - - def test_aws_token_redacted(self): - """Verify that aws tokens are redacted when logged.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - self.assertTrue(ds.get_data()) - all_logs = self.logs.getvalue().splitlines() - REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" - REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" - logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] - logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] - logs_with_token = [log for log in all_logs if "API-TOKEN" in log] - self.assertEqual(1, len(logs_with_redacted_ttl)) - self.assertEqual(83, len(logs_with_redacted)) - self.assertEqual(0, len(logs_with_token)) - - @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") - def test_valid_platform_with_strict_true(self, m_dhcp): - """Valid platform data should return true with strict_id true.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - self.assertEqual(0, m_dhcp.call_count) - self.assertEqual("aws", ds.cloud_name) - self.assertEqual("ec2", ds.platform_type) - self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform) - - def test_valid_platform_with_strict_false(self): - """Valid platform data should return true with strict_id false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - - def test_unknown_platform_with_strict_true(self): - """Unknown platform data with strict_id true should return False.""" - uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" - ds = self._setup_ds( - platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertFalse(ret) - - def test_unknown_platform_with_strict_false(self): - """Unknown platform data with strict_id false should return True.""" - uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" - ds = self._setup_ds( - platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - - def test_ec2_local_returns_false_on_non_aws(self): - """DataSourceEc2Local returns False when platform is not AWS.""" - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - platform_attrs = [ - attr - for attr in ec2.CloudNames.__dict__.keys() - if not attr.startswith("__") - ] - for attr_name in platform_attrs: - platform_name = getattr(ec2.CloudNames, attr_name) - if platform_name not in ["aws", "outscale"]: - ds._cloud_name = platform_name - ret = ds.get_data() - self.assertEqual("ec2", ds.platform_type) - self.assertFalse(ret) - message = ( - "Local Ec2 mode only supported on ('aws', 'outscale')," - " not {0}".format(platform_name) - ) - self.assertIn(message, self.logs.getvalue()) - - 
@mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") - def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): - """DataSourceEc2Local returns False on BSD. - - FreeBSD dhclient doesn't support dhclient -sf to run in a sandbox. - """ - m_is_freebsd.return_value = True - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertFalse(ret) - self.assertIn( - "FreeBSD doesn't support running dhclient with -sf", - self.logs.getvalue(), - ) - - @mock.patch("cloudinit.net.ephemeral.EphemeralIPv6Network") - @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network") - @mock.patch("cloudinit.distros.net.find_fallback_nic") - @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") - @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") - def test_ec2_local_performs_dhcp_on_non_bsd( - self, m_is_bsd, m_dhcp, m_fallback_nic, m_net4, m_net6 - ): - """Ec2Local returns True for valid platform data on non-BSD with dhcp. - - DataSourceEc2Local will setup initial IPv4 network via dhcp discovery. - Then the metadata services is crawled for more network config info. - When the platform data is valid, return True. - """ - - m_fallback_nic.return_value = "eth9" - m_is_bsd.return_value = False - m_dhcp.return_value = { - "interface": "eth9", - "fixed-address": "192.168.2.9", - "routers": "192.168.2.1", - "subnet-mask": "255.255.255.0", - "broadcast-address": "192.168.2.255", - } - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - distro=MockDistro("", {}, {}), - ) - - ret = ds.get_data() - self.assertTrue(ret) - m_dhcp.assert_called_once_with(ds.distro, "eth9", None) - m_net4.assert_called_once_with( - ds.distro, - broadcast="192.168.2.255", - interface="eth9", - ip="192.168.2.9", - prefix_or_mask="255.255.255.0", - router="192.168.2.1", - static_routes=None, - ) - self.assertIn("Crawl of metadata service ", self.logs.getvalue()) - - def test_get_instance_tags(self): - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": TAGS_METADATA_2021_03_23}, - ) - self.assertTrue(ds.get_data()) - self.assertIn("tags", ds.metadata) - self.assertIn("instance", ds.metadata["tags"]) - instance_tags = ds.metadata["tags"]["instance"] - self.assertEqual(instance_tags["Application"], "test") - self.assertEqual(instance_tags["Environment"], "production") - - -class TestGetSecondaryAddresses(test_helpers.CiTestCase): - mac = "06:17:04:d7:26:ff" - with_logs = True - - def test_md_with_no_secondary_addresses(self): - """Empty list is returned when nic metadata contains no secondary ip""" - self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac)) - - def test_md_with_secondary_v4_and_v6_addresses(self): - """All secondary addresses are returned from nic metadata""" - self.assertEqual( - [ - "172.31.45.70/20", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac), - ) - - def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self): - """Any invalid subnet-ipv(4|6)-cidr-block values use defaults""" - invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP) - 
invalid_cidr_md["subnet-ipv4-cidr-block"] = "something-unexpected" - invalid_cidr_md["subnet-ipv6-cidr-block"] = "not/sure/what/this/is" - self.assertEqual( - [ - "172.31.45.70/24", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - ec2.get_secondary_addresses(invalid_cidr_md, self.mac), - ) - expected_logs = [ - "WARNING: Could not parse subnet-ipv4-cidr-block" - " something-unexpected for mac 06:17:04:d7:26:ff." - " ipv4 network config prefix defaults to /24", - "WARNING: Could not parse subnet-ipv6-cidr-block" - " not/sure/what/this/is for mac 06:17:04:d7:26:ff." - " ipv6 network config prefix defaults to /128", - ] - logs = self.logs.getvalue() - for log in expected_logs: - self.assertIn(log, logs) - - -class TestBuildNicOrder: - @pytest.mark.parametrize( - ["macs_metadata", "macs", "expected"], - [ - pytest.param({}, [], {}, id="all_empty"), - pytest.param( - {}, ["0a:f7:8d:96:f2:a1"], {}, id="empty_macs_metadata" - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - } - }, - [], - {}, - id="empty_macs", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], - {"0a:f7:8d:96:f2:a1": 0, "0a:0d:dd:44:cd:7b": 1}, - id="no-device-number-info", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1"], - {"0a:f7:8d:96:f2:a1": 0}, - id="no-device-number-info-subset", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], - {"0a:0d:dd:44:cd:7b": 0, "0a:f7:8d:96:f2:a1": 1}, - id="device-numbers", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "network-card": "0", - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "network-card": "1", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - "0a:f7:8d:96:f2:a2": { - "network-card": "2", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - [ - "0a:f7:8d:96:f2:a1", - "0a:0d:dd:44:cd:7b", - "0a:f7:8d:96:f2:a2", - ], - { - "0a:0d:dd:44:cd:7b": 0, - "0a:f7:8d:96:f2:a1": 1, - "0a:f7:8d:96:f2:a2": 2, - }, - id="network-cardes", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "network-card": "0", - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "network-card": "1", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - "0a:f7:8d:96:f2:a2": { - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - [ - "0a:f7:8d:96:f2:a1", - "0a:0d:dd:44:cd:7b", - "0a:f7:8d:96:f2:a2", - ], - { - "0a:0d:dd:44:cd:7b": 0, - "0a:f7:8d:96:f2:a1": 1, - "0a:f7:8d:96:f2:a2": 2, - }, - id="network-card-partially-missing", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a9"], - {}, - id="macs-not-in-md", - ), - ], - ) - def test_build_nic_order(self, macs_metadata, macs, expected): - assert expected == ec2._build_nic_order(macs_metadata, macs) - - -class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): - def setUp(self): - super(TestConvertEc2MetadataNetworkConfig, self).setUp() - self.mac1 = 
"06:17:04:d7:26:09" - interface_dict = copy.deepcopy( - DEFAULT_METADATA["network"]["interfaces"]["macs"][self.mac1] - ) - # These tests are written assuming the base interface doesn't have IPv6 - interface_dict.pop("ipv6s") - self.network_metadata = { - "interfaces": {"macs": {self.mac1: interface_dict}} - } - - def test_convert_ec2_metadata_network_config_skips_absent_macs(self): - """Any mac absent from metadata is skipped by network config.""" - macs_to_nics = {self.mac1: "eth9", "DE:AD:BE:EF:FF:FF": "vitualnic2"} - - # DE:AD:BE:EF:FF:FF represented by OS but not in metadata - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - self.network_metadata, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self): - """Config dhcp6 when ipv6s is in metadata for a mac.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_local_only_dhcp4(self): - """Config dhcp4 when there are no public addresses in public-ipv4s.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["local-ipv4s"] = "172.3.3.15" - nic1_metadata.pop("public-ipv4s") - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): - """Config dhcp4 on fallback_nic when there are no ipv4 addresses.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["public-ipv4s"] = "" - - # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config. 
- expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): - """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_multi_nics_ipv4(self): - """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. - Source-routing configured for secondary NICs (routing-policy and extra - routing table).""" - mac2 = "06:17:04:d7:26:08" - macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} - network_metadata_both = copy.deepcopy(self.network_metadata) - # Add 2nd nic info - network_metadata_both["interfaces"]["macs"][mac2] = NIC2_MD - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": True, - "dhcp6-overrides": {"route-metric": 100}, - }, - "eth10": { - "match": {"macaddress": mac2}, - "set-name": "eth10", - "dhcp4": True, - "dhcp4-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "dhcp6": False, - "routes": [ - # via DHCP gateway - {"to": "0.0.0.0/0", "via": "172.31.1.0", "table": 101}, - # to NIC2_MD["subnet-ipv4-cidr-block"] - {"to": "172.31.32.0/20", "table": 101}, - ], - "routing-policy": [ - # NIC2_MD["local-ipv4s"] - {"from": "172.31.47.221", "table": 101} - ], - }, - }, - } - distro = mock.Mock() - distro.network_renderer = netplan.Renderer - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_multi_nics_ipv4_ipv6_multi_ip( - self, - ): - """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. 
- Source-routing configured for secondary NICs (routing-policy and extra - routing table).""" - mac2 = "06:17:04:d7:26:08" - macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} - network_metadata_both = copy.deepcopy(self.network_metadata) - # Add 2nd nic info - network_metadata_both["interfaces"]["macs"][ - mac2 - ] = NIC2_MD_IPV4_IPV6_MULTI_IP - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": True, - "match": {"macaddress": "06:17:04:d7:26:09"}, - "set-name": "eth9", - "dhcp6-overrides": {"route-metric": 100}, - }, - "eth10": { - "dhcp4": True, - "dhcp4-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "dhcp6": True, - "match": {"macaddress": "06:17:04:d7:26:08"}, - "set-name": "eth10", - "routes": [ - # via DHCP gateway - {"to": "0.0.0.0/0", "via": "172.31.1.0", "table": 101}, - # to NIC2_MD["subnet-ipv4-cidr-block"] - {"to": "172.31.32.0/20", "table": 101}, - # to NIC2_MD["subnet-ipv6-cidr-blocks"] - {"to": "2600:1f16:292:100::/64", "table": 101}, - ], - "routing-policy": [ - # NIC2_MD["local-ipv4s"] - {"from": "172.31.47.221", "table": 101}, - { - "from": "2600:1f16:292:100:c187:593c:4349:136", - "table": 101, - }, - { - "from": "2600:1f16:292:100:f153:12a3:c37c:11f9", - "table": 101, - }, - ], - "dhcp6-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "addresses": ["2600:1f16:292:100:f153:12a3:c37c:11f9/128"], - }, - }, - } - distro = mock.Mock() - distro.network_renderer = netplan.Renderer - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_multi_nics_ipv6_only(self): - """Like above, but only ipv6s are present in metadata.""" - macs_to_nics = { - "02:7c:03:b8:5c:af": "eth0", - "02:6b:df:a2:4b:2b": "eth1", - } - mac_data = copy.deepcopy(MULTI_NIC_V6_ONLY_MD) - network_metadata = {"interfaces": mac_data} - expected = { - "version": 2, - "ethernets": { - "eth0": { - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": True, - "match": {"macaddress": "02:7c:03:b8:5c:af"}, - "set-name": "eth0", - "dhcp6-overrides": {"route-metric": 100}, - }, - "eth1": { - "dhcp4": True, - "dhcp4-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "dhcp6": True, - "match": {"macaddress": "02:6b:df:a2:4b:2b"}, - "set-name": "eth1", - "routes": [ - {"to": "2600:1f16:67f:f201:0:0:0:0/64", "table": 101}, - ], - "routing-policy": [ - { - "from": "2600:1f16:67f:f201:8d2e:4d1f:9e80:4ab9", - "table": 101, - }, - ], - "dhcp6-overrides": { - "route-metric": 200, - "use-routes": True, - }, - }, - }, - } - distro = mock.Mock() - distro.network_activator = activators.NetplanActivator - assert expected == ec2.convert_ec2_metadata_network_config( - network_metadata, distro, macs_to_nics - ) - distro.dhcp_client.dhcp_discovery.assert_not_called() - - def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): - """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_both = copy.deepcopy(self.network_metadata) - 
-        nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
-        nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
-        expected = {
-            "version": 2,
-            "ethernets": {
-                "eth9": {
-                    "match": {"macaddress": self.mac1},
-                    "set-name": "eth9",
-                    "dhcp4": True,
-                    "dhcp6": True,
-                }
-            },
-        }
-        distro = mock.Mock()
-        self.assertEqual(
-            expected,
-            ec2.convert_ec2_metadata_network_config(
-                network_metadata_both, distro, macs_to_nics
-            ),
-        )
-
-    def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self):
-        """Convert Ec2 Metadata calls get_interfaces_by_mac by default."""
-        expected = {
-            "version": 2,
-            "ethernets": {
-                "eth9": {
-                    "match": {"macaddress": self.mac1},
-                    "set-name": "eth9",
-                    "dhcp4": True,
-                    "dhcp6": False,
-                }
-            },
-        }
-        patch_path = M_PATH_NET + "get_interfaces_by_mac"
-        distro = mock.Mock()
-        with mock.patch(patch_path) as m_get_interfaces_by_mac:
-            m_get_interfaces_by_mac.return_value = {self.mac1: "eth9"}
-            self.assertEqual(
-                expected,
-                ec2.convert_ec2_metadata_network_config(
-                    self.network_metadata, distro
-                ),
-            )
-
-
-class TestIdentifyPlatform(test_helpers.CiTestCase):
-    def collmock(self, **kwargs):
-        """Return non-special _collect_platform_data updated with changes."""
-        unspecial = {
-            "asset_tag": "3857-0037-2746-7462-1818-3997-77",
-            "serial": "H23-C4J3JV-R6",
-            "uuid": "81c7e555-6471-4833-9551-1ab366c4cfd2",
-            "uuid_source": "dmi",
-            "vendor": "tothecloud",
-            "product_name": "cloudproduct",
-        }
-        unspecial.update(**kwargs)
-        return unspecial
-
-    @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
-    def test_identify_aliyun(self, m_collect):
-        """aliyun should be identified if the product name equals
-        Alibaba Cloud ECS
-        """
-        m_collect.return_value = self.collmock(
-            product_name="Alibaba Cloud ECS"
-        )
-        self.assertEqual(ec2.CloudNames.ALIYUN, ec2.identify_platform())
-
-    @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
-    def test_identify_zstack(self, m_collect):
-        """zstack should be identified if the chassis-asset-tag
-        ends in .zstack.io
-        """
-        m_collect.return_value = self.collmock(asset_tag="123456.zstack.io")
-        self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform())
-
-    @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
-    def test_identify_zstack_full_domain_only(self, m_collect):
-        """zstack asset-tag matching should match only on
-        a full domain boundary.
-        """
-        m_collect.return_value = self.collmock(asset_tag="123456.buzzstack.io")
-        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
-
-    @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
-    def test_identify_e24cloud(self, m_collect):
-        """e24cloud identified if vendor is e24cloud"""
-        m_collect.return_value = self.collmock(vendor="e24cloud")
-        self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform())
-
-    @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
-    def test_identify_e24cloud_negative(self, m_collect):
-        """e24cloud not identified when vendor merely starts with e24cloud"""
-        m_collect.return_value = self.collmock(vendor="e24cloudyday")
-        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
-
-    # Outscale
-    @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
-    def test_identify_outscale(self, m_collect):
-        """Should identify Outscale when DMI vendor and product name match."""
-        m_collect.return_value = self.collmock(
-            vendor="3DS Outscale".lower(),
-            product_name="3DS Outscale VM".lower(),
-        )
-        self.assertEqual(ec2.CloudNames.OUTSCALE, ec2.identify_platform())
-
-    @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
-    def test_false_on_wrong_sys_vendor(self, m_collect):
-        """Should return UNKNOWN on an unexpected system vendor."""
-        m_collect.return_value = self.collmock(
-            vendor="Not 3DS Outscale".lower(),
-            product_name="3DS Outscale VM".lower(),
-        )
-        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
-
-    @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
-    def test_false_on_wrong_product_name(self, m_collect):
-        """Should return UNKNOWN on an unrelated product name."""
-        m_collect.return_value = self.collmock(
-            vendor="3DS Outscale".lower(),
-            product_name="Not 3DS Outscale VM".lower(),
-        )
-        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
diff --git a/.pc/cpick-a6f7577d-bug-package_update-avoid-snap-refresh-in-images-without/cloudinit/distros/__init__.py b/.pc/cpick-a6f7577d-bug-package_update-avoid-snap-refresh-in-images-without/cloudinit/distros/__init__.py
deleted file mode 100644
index 9c4d6b233..000000000
--- a/.pc/cpick-a6f7577d-bug-package_update-avoid-snap-refresh-in-images-without/cloudinit/distros/__init__.py
+++ /dev/null
@@ -1,1540 +0,0 @@
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser
-# Author: Juerg Haefliger
-# Author: Joshua Harlow
-# Author: Ben Howard
-#
-# This file is part of cloud-init. See LICENSE file for license information.
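
As a companion to the identify_platform tests above, here is a minimal, self-contained sketch of the full-domain-boundary matching they pin down. The helper name is invented for illustration, and the plain endswith() check is an assumption about how such a match can be made, not necessarily the exact code in DataSourceEc2:

def is_zstack_asset_tag(asset_tag: str) -> bool:
    # Match ".zstack.io" only as a whole trailing domain label, so that
    # "123456.zstack.io" is identified while "123456.buzzstack.io" is not.
    return asset_tag.lower().endswith(".zstack.io")

assert is_zstack_asset_tag("123456.zstack.io")
assert not is_zstack_asset_tag("123456.buzzstack.io")
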
- -import abc -import logging -import os -import re -import stat -import string -import urllib.parse -from collections import defaultdict -from contextlib import suppress -from io import StringIO -from typing import ( - Any, - Dict, - List, - Mapping, - MutableMapping, - Optional, - Set, - Tuple, - Type, - Union, -) - -import cloudinit.net.netops.iproute2 as iproute2 -from cloudinit import ( - helpers, - importer, - net, - persistence, - ssh_util, - subp, - temp_utils, - type_utils, - util, -) -from cloudinit.distros.networking import LinuxNetworking, Networking -from cloudinit.distros.package_management.package_manager import PackageManager -from cloudinit.distros.package_management.utils import known_package_managers -from cloudinit.distros.parsers import hosts -from cloudinit.features import ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES -from cloudinit.net import activators, dhcp, renderers -from cloudinit.net.network_state import parse_net_config_data -from cloudinit.net.renderer import Renderer - -# Used when a cloud-config module can be run on all cloud-init distributions. -# The value 'all' is surfaced in module documentation for distro support. -ALL_DISTROS = "all" - -OSFAMILIES = { - "alpine": ["alpine"], - "arch": ["arch"], - "debian": ["debian", "ubuntu"], - "freebsd": ["freebsd", "dragonfly"], - "gentoo": ["gentoo", "cos"], - "netbsd": ["netbsd"], - "openbsd": ["openbsd"], - "redhat": [ - "almalinux", - "amazon", - "centos", - "cloudlinux", - "eurolinux", - "fedora", - "mariner", - "miraclelinux", - "openmandriva", - "photon", - "rhel", - "rocky", - "virtuozzo", - ], - "suse": [ - "opensuse", - "opensuse-leap", - "opensuse-microos", - "opensuse-tumbleweed", - "sle_hpc", - "sle-micro", - "sles", - "suse", - ], - "openeuler": ["openeuler"], - "OpenCloudOS": ["OpenCloudOS", "TencentOS"], -} - -LOG = logging.getLogger(__name__) - -# This is a best guess regex, based on current EC2 AZs on 2017-12-11. -# It could break when Amazon adds new regions and new AZs. -_EC2_AZ_RE = re.compile("^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$") - -# Default NTP Client Configurations -PREFERRED_NTP_CLIENTS = ["chrony", "systemd-timesyncd", "ntp", "ntpdate"] - -# Letters/Digits/Hyphen characters, for use in domain name validation -LDH_ASCII_CHARS = string.ascii_letters + string.digits + "-" - -# Before you try to go rewriting this better using Unions, read -# https://github.com/microsoft/pyright/blob/main/docs/type-concepts.md#generic-types # noqa: E501 -# The Immutable types mentioned there won't work for us because -# we need to distinguish between a str and a Sequence[str] -# This also isn't exhaustive. If you have a unique case that adheres to -# the `packages` schema, you can add it here. 
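
To make those shapes concrete before the PackageList definition that follows, a small illustrative sketch; the package names are invented and the apt/snap keys assume those package managers, so treat this as an example of the schema rather than a definitive list:

# Values a cloud-config `packages` entry may take under this Union:
plain_names = ["git", "htop"]  # List[str]
name_version_pairs = [["libfoo", "1.2"]]  # List[List[str]]
mixed = ["git", ["libfoo", "1.2"]]  # List[Union[str, List[str]]]
per_package_manager = [{"apt": ["git"], "snap": ["juju"]}]  # List[Mapping]
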
-PackageList = Union[ - List[str], - List[Mapping], - List[List[str]], - List[Union[str, List[str]]], - List[Union[str, List[str], Mapping]], -] - - -class PackageInstallerError(Exception): - pass - - -class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): - pip_package_name = "python3-pip" - usr_lib_exec = "/usr/lib" - hosts_fn = "/etc/hosts" - doas_fn = "/etc/doas.conf" - ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users" - hostname_conf_fn = "/etc/hostname" - tz_zone_dir = "/usr/share/zoneinfo" - default_owner = "root:root" - init_cmd = ["service"] # systemctl, service etc - renderer_configs: Mapping[str, MutableMapping[str, Any]] = {} - _preferred_ntp_clients = None - networking_cls: Type[Networking] = LinuxNetworking - # This is used by self.shutdown_command(), and can be overridden in - # subclasses - shutdown_options_map = {"halt": "-H", "poweroff": "-P", "reboot": "-r"} - net_ops = iproute2.Iproute2 - - _ci_pkl_version = 1 - prefer_fqdn = False - resolve_conf_fn = "/etc/resolv.conf" - - osfamily: str - # Directory where the distro stores their DHCP leases. - # The children classes should override this with their dhcp leases - # directory - dhclient_lease_directory: Optional[str] = None - # A regex to match DHCP lease file(s) - # The children classes should override this with a regex matching - # their lease file name format - dhclient_lease_file_regex: Optional[str] = None - - def __init__(self, name, cfg, paths): - self._paths = paths - self._cfg = cfg - self.name = name - self.networking: Networking = self.networking_cls() - self.dhcp_client_priority = dhcp.ALL_DHCP_CLIENTS - self.net_ops = iproute2.Iproute2 - self._runner = helpers.Runners(paths) - self.package_managers: List[PackageManager] = [] - self._dhcp_client = None - self._fallback_interface = None - - def _unpickle(self, ci_pkl_version: int) -> None: - """Perform deserialization fixes for Distro.""" - if "networking" not in self.__dict__ or not self.networking.__dict__: - # This is either a Distro pickle with no networking attribute OR - # this is a Distro pickle with a networking attribute but from - # before ``Networking`` had any state (meaning that - # Networking.__setstate__ will not be called). In either case, we - # want to ensure that `self.networking` is freshly-instantiated: - # either because it isn't present at all, or because it will be - # missing expected instance state otherwise. - self.networking = self.networking_cls() - if not hasattr(self, "_dhcp_client"): - self._dhcp_client = None - if not hasattr(self, "_fallback_interface"): - self._fallback_interface = None - - def _validate_entry(self, entry): - if isinstance(entry, str): - return entry - elif isinstance(entry, (list, tuple)): - if len(entry) == 2: - return tuple(entry) - raise ValueError( - "Invalid 'packages' yaml specification. " - "Check schema definition." - ) - - def _extract_package_by_manager( - self, pkglist: PackageList - ) -> Tuple[Dict[Type[PackageManager], Set], Set]: - """Transform the generic package list to package by package manager. 
- - Additionally, include list of generic packages - """ - packages_by_manager = defaultdict(set) - generic_packages: Set = set() - for entry in pkglist: - if isinstance(entry, dict): - for package_manager, package_list in entry.items(): - for definition in package_list: - definition = self._validate_entry(definition) - try: - packages_by_manager[ - known_package_managers[package_manager] - ].add(definition) - except KeyError: - LOG.error( - "Cannot install packages under '%s' as it is " - "not a supported package manager!", - package_manager, - ) - else: - generic_packages.add(self._validate_entry(entry)) - return dict(packages_by_manager), generic_packages - - def install_packages(self, pkglist: PackageList): - error_message = ( - "Failed to install the following packages: %s. " - "See associated package manager logs for more details." - ) - # If an entry hasn't been included with an explicit package name, - # add it to a 'generic' list of packages - ( - packages_by_manager, - generic_packages, - ) = self._extract_package_by_manager(pkglist) - - # First install packages using package manager(s) - # supported by the distro - total_failed: Set[str] = set() - for manager in self.package_managers: - - manager_packages = packages_by_manager.get( - manager.__class__, set() - ) - - to_try = manager_packages | generic_packages - # Remove any failed we will try for this package manager - total_failed.difference_update(to_try) - if not manager.available(): - LOG.debug("Package manager '%s' not available", manager.name) - total_failed.update(to_try) - continue - if not to_try: - continue - failed = manager.install_packages(to_try) - total_failed.update(failed) - if failed: - LOG.info(error_message, failed) - # Ensure we don't attempt to install packages specific to - # one particular package manager using another package manager - generic_packages = set(failed) - manager_packages - - # Now attempt any specified package managers not explicitly supported - # by distro - for manager_type, packages in packages_by_manager.items(): - if manager_type.name in [p.name for p in self.package_managers]: - # We already installed/attempted these; don't try again - continue - total_failed.update( - manager_type.from_config( - self._runner, self._cfg - ).install_packages(pkglist=packages) - ) - - if total_failed: - raise PackageInstallerError(error_message % total_failed) - - @property - def dhcp_client(self) -> dhcp.DhcpClient: - """access the distro's preferred dhcp client - - if no client has been selected yet select one - uses - self.dhcp_client_priority, which may be overridden in each distro's - object to eliminate checking for clients which will not be provided - by the distro - """ - if self._dhcp_client: - return self._dhcp_client - - # no client has been selected yet, so pick one - # - # set the default priority list to the distro-defined priority list - dhcp_client_priority = self.dhcp_client_priority - - # if the configuration includes a network.dhcp_client_priority list - # then attempt to use it - config_priority = util.get_cfg_by_path( - self._cfg, ("network", "dhcp_client_priority"), [] - ) - - if config_priority: - # user or image builder configured a custom dhcp client priority - # list - found_clients = [] - LOG.debug( - "Using configured dhcp client priority list: %s", - config_priority, - ) - for client_configured in config_priority: - for client_class in dhcp.ALL_DHCP_CLIENTS: - if client_configured == client_class.client_name: - found_clients.append(client_class) - break - else: - 
LOG.warning( - "Configured dhcp client %s is not supported, skipping", - client_configured, - ) - # If dhcp_client_priority is defined in the configuration, but none - # of the defined clients are supported by cloud-init, then we don't - # override the distro default. If at least one client in the - # configured list exists, then we use that for our list of clients - # to check. - if found_clients: - dhcp_client_priority = found_clients - - # iterate through our priority list and use the first client that is - # installed on the system - for client in dhcp_client_priority: - try: - self._dhcp_client = client() - LOG.debug("DHCP client selected: %s", client.client_name) - return self._dhcp_client - except (dhcp.NoDHCPLeaseMissingDhclientError,): - LOG.debug("DHCP client not found: %s", client.client_name) - raise dhcp.NoDHCPLeaseMissingDhclientError() - - @property - def network_activator(self) -> Optional[Type[activators.NetworkActivator]]: - """Return the configured network activator for this environment.""" - priority = util.get_cfg_by_path( - self._cfg, ("network", "activators"), None - ) - try: - return activators.select_activator(priority=priority) - except activators.NoActivatorException: - return None - - def _get_renderer(self) -> Renderer: - priority = util.get_cfg_by_path( - self._cfg, ("network", "renderers"), None - ) - - name, render_cls = renderers.select(priority=priority) - LOG.debug( - "Selected renderer '%s' from priority list: %s", name, priority - ) - renderer = render_cls(config=self.renderer_configs.get(name)) - return renderer - - def _write_network_state(self, network_state, renderer: Renderer): - renderer.render_network_state(network_state) - - def _find_tz_file(self, tz): - tz_file = os.path.join(self.tz_zone_dir, str(tz)) - if not os.path.isfile(tz_file): - raise IOError( - "Invalid timezone %s, no file found at %s" % (tz, tz_file) - ) - return tz_file - - def get_option(self, opt_name, default=None): - return self._cfg.get(opt_name, default) - - def set_option(self, opt_name, value=None): - self._cfg[opt_name] = value - - def set_hostname(self, hostname, fqdn=None): - writeable_hostname = self._select_hostname(hostname, fqdn) - self._write_hostname(writeable_hostname, self.hostname_conf_fn) - self._apply_hostname(writeable_hostname) - - @staticmethod - def uses_systemd(): - """Wrapper to report whether this distro uses systemd or sysvinit.""" - return uses_systemd() - - @abc.abstractmethod - def package_command(self, command, args=None, pkgs=None): - # Long-term, this method should be removed and callers refactored. - # Very few commands are going to be consistent across all package - # managers. 
- raise NotImplementedError() - - def update_package_sources(self): - for manager in self.package_managers: - try: - manager.update_package_sources() - except Exception as e: - LOG.error( - "Failed to update package using %s: %s", manager.name, e - ) - - def get_primary_arch(self): - arch = os.uname()[4] - if arch in ("i386", "i486", "i586", "i686"): - return "i386" - return arch - - def _get_arch_package_mirror_info(self, arch=None): - mirror_info = self.get_option("package_mirrors", []) - if not arch: - arch = self.get_primary_arch() - return _get_arch_package_mirror_info(mirror_info, arch) - - def get_package_mirror_info(self, arch=None, data_source=None): - # This resolves the package_mirrors config option - # down to a single dict of {mirror_name: mirror_url} - arch_info = self._get_arch_package_mirror_info(arch) - return _get_package_mirror_info( - data_source=data_source, mirror_info=arch_info - ) - - def generate_fallback_config(self): - return net.generate_fallback_config() - - def apply_network_config(self, netconfig, bring_up=False) -> bool: - """Apply the network config. - - If bring_up is True, attempt to bring up the passed in devices. If - devices is None, attempt to bring up devices returned by - _write_network_config. - - Returns True if any devices failed to come up, otherwise False. - """ - renderer = self._get_renderer() - network_state = parse_net_config_data(netconfig, renderer=renderer) - self._write_network_state(network_state, renderer) - - # Now try to bring them up - if bring_up: - LOG.debug("Bringing up newly configured network interfaces") - network_activator = self.network_activator - if not network_activator: - LOG.warning( - "No network activator found, not bringing up " - "network interfaces" - ) - return True - network_activator.bring_up_all_interfaces(network_state) - else: - LOG.debug("Not bringing up newly configured network interfaces") - return False - - @abc.abstractmethod - def apply_locale(self, locale, out_fn=None): - raise NotImplementedError() - - @abc.abstractmethod - def set_timezone(self, tz): - raise NotImplementedError() - - def _get_localhost_ip(self): - return "127.0.0.1" - - def get_locale(self): - raise NotImplementedError() - - @abc.abstractmethod - def _read_hostname(self, filename, default=None): - raise NotImplementedError() - - @abc.abstractmethod - def _write_hostname(self, hostname, filename): - raise NotImplementedError() - - @abc.abstractmethod - def _read_system_hostname(self): - raise NotImplementedError() - - def _apply_hostname(self, hostname): - # This really only sets the hostname - # temporarily (until reboot so it should - # not be depended on). Use the write - # hostname functions for 'permanent' adjustments. 
- LOG.debug( - "Non-persistently setting the system hostname to %s", hostname - ) - try: - subp.subp(["hostname", hostname]) - except subp.ProcessExecutionError: - util.logexc( - LOG, - "Failed to non-persistently adjust the system hostname to %s", - hostname, - ) - - def _select_hostname(self, hostname, fqdn): - # Prefer the short hostname over the long - # fully qualified domain name - if ( - util.get_cfg_option_bool( - self._cfg, "prefer_fqdn_over_hostname", self.prefer_fqdn - ) - and fqdn - ): - return fqdn - if not hostname: - return fqdn - return hostname - - @staticmethod - def expand_osfamily(family_list): - distros = [] - for family in family_list: - if family not in OSFAMILIES: - raise ValueError( - "No distributions found for osfamily {}".format(family) - ) - distros.extend(OSFAMILIES[family]) - return distros - - def update_hostname(self, hostname, fqdn, prev_hostname_fn): - applying_hostname = hostname - - # Determine what the actual written hostname should be - hostname = self._select_hostname(hostname, fqdn) - - # If the previous hostname file exists lets see if we - # can get a hostname from it - if prev_hostname_fn and os.path.exists(prev_hostname_fn): - prev_hostname = self._read_hostname(prev_hostname_fn) - else: - prev_hostname = None - - # Lets get where we should write the system hostname - # and what the system hostname is - (sys_fn, sys_hostname) = self._read_system_hostname() - update_files = [] - - # If there is no previous hostname or it differs - # from what we want, lets update it or create the - # file in the first place - if not prev_hostname or prev_hostname != hostname: - update_files.append(prev_hostname_fn) - - # If the system hostname is different than the previous - # one or the desired one lets update it as well - if (not sys_hostname) or ( - sys_hostname == prev_hostname and sys_hostname != hostname - ): - update_files.append(sys_fn) - - # If something else has changed the hostname after we set it - # initially, we should not overwrite those changes (we should - # only be setting the hostname once per instance) - if sys_hostname and prev_hostname and sys_hostname != prev_hostname: - LOG.info( - "%s differs from %s, assuming user maintained hostname.", - prev_hostname_fn, - sys_fn, - ) - return - - # Remove duplicates (incase the previous config filename) - # is the same as the system config filename, don't bother - # doing it twice - update_files = set([f for f in update_files if f]) - LOG.debug( - "Attempting to update hostname to %s in %s files", - hostname, - len(update_files), - ) - - for fn in update_files: - try: - self._write_hostname(hostname, fn) - except IOError: - util.logexc( - LOG, "Failed to write hostname %s to %s", hostname, fn - ) - - # If the system hostname file name was provided set the - # non-fqdn as the transient hostname. 
- if sys_fn in update_files: - self._apply_hostname(applying_hostname) - - def update_etc_hosts(self, hostname, fqdn): - header = "" - if os.path.exists(self.hosts_fn): - eh = hosts.HostsConf(util.load_text_file(self.hosts_fn)) - else: - eh = hosts.HostsConf("") - header = util.make_header(base="added") - local_ip = self._get_localhost_ip() - prev_info = eh.get_entry(local_ip) - need_change = False - if not prev_info: - eh.add_entry(local_ip, fqdn, hostname) - need_change = True - else: - need_change = True - for entry in prev_info: - entry_fqdn = None - entry_aliases = [] - if len(entry) >= 1: - entry_fqdn = entry[0] - if len(entry) >= 2: - entry_aliases = entry[1:] - if entry_fqdn is not None and entry_fqdn == fqdn: - if hostname in entry_aliases: - # Exists already, leave it be - need_change = False - if need_change: - # Doesn't exist, add that entry in... - new_entries = list(prev_info) - new_entries.append([fqdn, hostname]) - eh.del_entries(local_ip) - for entry in new_entries: - if len(entry) == 1: - eh.add_entry(local_ip, entry[0]) - elif len(entry) >= 2: - eh.add_entry(local_ip, *entry) - if need_change: - contents = StringIO() - if header: - contents.write("%s\n" % (header)) - contents.write("%s\n" % (eh)) - util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644) - - @property - def preferred_ntp_clients(self): - """Allow distro to determine the preferred ntp client list""" - if not self._preferred_ntp_clients: - self._preferred_ntp_clients = list(PREFERRED_NTP_CLIENTS) - - return self._preferred_ntp_clients - - def get_default_user(self): - return self.get_option("default_user") - - def add_user(self, name, **kwargs): - """ - Add a user to the system using standard GNU tools - - This should be overridden on distros where useradd is not desirable or - not available. - """ - # XXX need to make add_user idempotent somehow as we - # still want to add groups or modify SSH keys on pre-existing - # users in the image. - if util.is_user(name): - LOG.info("User %s already exists, skipping.", name) - return - - if "create_groups" in kwargs: - create_groups = kwargs.pop("create_groups") - else: - create_groups = True - - useradd_cmd = ["useradd", name] - log_useradd_cmd = ["useradd", name] - if util.system_is_snappy(): - useradd_cmd.append("--extrausers") - log_useradd_cmd.append("--extrausers") - - # Since we are creating users, we want to carefully validate the - # inputs. If something goes wrong, we can end up with a system - # that nobody can login to. 
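That validation concern is why `add_user` builds the `useradd` command from an explicit option table (shown next) while keeping a parallel, redacted copy of the command for logging. The pattern in isolation, as a sketch with a trimmed, hypothetical option set:

# Hypothetical, trimmed version of the option table that follows.
USERADD_OPTS = {"homedir": "--home", "shell": "--shell", "passwd": "--password"}
REDACT_OPTS = {"passwd"}

def build_useradd(name, **kwargs):
    cmd, logged = ["useradd", name], ["useradd", name]
    for key, val in sorted(kwargs.items()):
        if key in USERADD_OPTS and isinstance(val, str) and val:
            cmd += [USERADD_OPTS[key], val]
            # Keep secrets out of anything that reaches the logs.
            logged += [USERADD_OPTS[key], "REDACTED" if key in REDACT_OPTS else val]
    return cmd, logged

cmd, logged = build_useradd("alice", shell="/bin/bash", passwd="$6$hash")
assert "$6$hash" in cmd and "REDACTED" in logged and "$6$hash" not in logged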
- useradd_opts = { - "gecos": "--comment", - "homedir": "--home", - "primary_group": "--gid", - "uid": "--uid", - "groups": "--groups", - "passwd": "--password", - "shell": "--shell", - "expiredate": "--expiredate", - "inactive": "--inactive", - "selinux_user": "--selinux-user", - } - - useradd_flags = { - "no_user_group": "--no-user-group", - "system": "--system", - "no_log_init": "--no-log-init", - } - - redact_opts = ["passwd"] - - # support kwargs having groups=[list] or groups="g1,g2" - groups = kwargs.get("groups") - if groups: - if isinstance(groups, str): - groups = groups.split(",") - - if isinstance(groups, dict): - util.deprecate( - deprecated=f"The user {name} has a 'groups' config value " - "of type dict", - deprecated_version="22.3", - extra_message="Use a comma-delimited string or " - "array instead: group1,group2.", - ) - - # remove any white spaces in group names, most likely - # that came in as a string like: groups: group1, group2 - groups = [g.strip() for g in groups] - - # kwargs.items loop below wants a comma delimited string - # that can go right through to the command. - kwargs["groups"] = ",".join(groups) - - primary_group = kwargs.get("primary_group") - if primary_group: - groups.append(primary_group) - - if create_groups and groups: - for group in groups: - if not util.is_group(group): - self.create_group(group) - LOG.debug("created group '%s' for user '%s'", group, name) - if "uid" in kwargs.keys(): - kwargs["uid"] = str(kwargs["uid"]) - - # Check the values and create the command - for key, val in sorted(kwargs.items()): - if key in useradd_opts and val and isinstance(val, str): - useradd_cmd.extend([useradd_opts[key], val]) - - # Redact certain fields from the logs - if key in redact_opts: - log_useradd_cmd.extend([useradd_opts[key], "REDACTED"]) - else: - log_useradd_cmd.extend([useradd_opts[key], val]) - - elif key in useradd_flags and val: - useradd_cmd.append(useradd_flags[key]) - log_useradd_cmd.append(useradd_flags[key]) - - # Don't create the home directory if directed so or if the user is a - # system user - if kwargs.get("no_create_home") or kwargs.get("system"): - useradd_cmd.append("-M") - log_useradd_cmd.append("-M") - else: - useradd_cmd.append("-m") - log_useradd_cmd.append("-m") - - # Run the command - LOG.debug("Adding user %s", name) - try: - subp.subp(useradd_cmd, logstring=log_useradd_cmd) - except Exception as e: - util.logexc(LOG, "Failed to create user %s", name) - raise e - - def add_snap_user(self, name, **kwargs): - """ - Add a snappy user to the system using snappy tools - """ - - snapuser = kwargs.get("snapuser") - known = kwargs.get("known", False) - create_user_cmd = ["snap", "create-user", "--sudoer", "--json"] - if known: - create_user_cmd.append("--known") - create_user_cmd.append(snapuser) - - # Run the command - LOG.debug("Adding snap user %s", name) - try: - (out, err) = subp.subp( - create_user_cmd, logstring=create_user_cmd, capture=True - ) - LOG.debug("snap create-user returned: %s:%s", out, err) - jobj = util.load_json(out) - username = jobj.get("username", None) - except Exception as e: - util.logexc(LOG, "Failed to create snap user %s", name) - raise e - - return username - - def create_user(self, name, **kwargs): - """ - Creates or partially updates the ``name`` user in the system. - - This defers the actual user creation to ``self.add_user`` or - ``self.add_snap_user``, and most of the keys in ``kwargs`` will be - processed there if and only if the user does not already exist. 
- - Once the existence of the ``name`` user has been ensured, this method - then processes these keys (for both just-created and pre-existing - users): - - * ``plain_text_passwd`` - * ``hashed_passwd`` - * ``lock_passwd`` - * ``doas`` - * ``sudo`` - * ``ssh_authorized_keys`` - * ``ssh_redirect_user`` - """ - - # Add a snap user, if requested - if "snapuser" in kwargs: - return self.add_snap_user(name, **kwargs) - - # Add the user - self.add_user(name, **kwargs) - - # Set password if plain-text password provided and non-empty - if "plain_text_passwd" in kwargs and kwargs["plain_text_passwd"]: - self.set_passwd(name, kwargs["plain_text_passwd"]) - - # Set password if hashed password is provided and non-empty - if "hashed_passwd" in kwargs and kwargs["hashed_passwd"]: - self.set_passwd(name, kwargs["hashed_passwd"], hashed=True) - - # Default locking down the account. 'lock_passwd' defaults to True. - # lock account unless lock_password is False. - if kwargs.get("lock_passwd", True): - self.lock_passwd(name) - - # Configure doas access - if "doas" in kwargs: - if kwargs["doas"]: - self.write_doas_rules(name, kwargs["doas"]) - - # Configure sudo access - if "sudo" in kwargs: - if kwargs["sudo"]: - self.write_sudo_rules(name, kwargs["sudo"]) - elif kwargs["sudo"] is False: - util.deprecate( - deprecated=f"The value of 'false' in user {name}'s " - "'sudo' config", - deprecated_version="22.3", - extra_message="Use 'null' instead.", - ) - - # Import SSH keys - if "ssh_authorized_keys" in kwargs: - # Try to handle this in a smart manner. - keys = kwargs["ssh_authorized_keys"] - if isinstance(keys, str): - keys = [keys] - elif isinstance(keys, dict): - keys = list(keys.values()) - if keys is not None: - if not isinstance(keys, (tuple, list, set)): - LOG.warning( - "Invalid type '%s' detected for" - " 'ssh_authorized_keys', expected list," - " string, dict, or set.", - type(keys), - ) - keys = [] - else: - keys = set(keys) or [] - ssh_util.setup_user_keys(set(keys), name) - if "ssh_redirect_user" in kwargs: - cloud_keys = kwargs.get("cloud_public_ssh_keys", []) - if not cloud_keys: - LOG.warning( - "Unable to disable SSH logins for %s given" - " ssh_redirect_user: %s. No cloud public-keys present.", - name, - kwargs["ssh_redirect_user"], - ) - else: - redirect_user = kwargs["ssh_redirect_user"] - disable_option = ssh_util.DISABLE_USER_OPTS - disable_option = disable_option.replace("$USER", redirect_user) - disable_option = disable_option.replace("$DISABLE_USER", name) - ssh_util.setup_user_keys( - set(cloud_keys), name, options=disable_option - ) - return True - - def lock_passwd(self, name): - """ - Lock the password of a user, i.e., disable password logins - """ - # passwd must use short '-l' due to SLES11 lacking long form '--lock' - lock_tools = (["passwd", "-l", name], ["usermod", "--lock", name]) - try: - cmd = next(tool for tool in lock_tools if subp.which(tool[0])) - except StopIteration as e: - raise RuntimeError( - "Unable to lock user account '%s'. No tools available. " - " Tried: %s." 
% (name, [c[0] for c in lock_tools]) - ) from e - try: - subp.subp(cmd) - except Exception as e: - util.logexc(LOG, "Failed to disable password for user %s", name) - raise e - - def expire_passwd(self, user): - try: - subp.subp(["passwd", "--expire", user]) - except Exception as e: - util.logexc(LOG, "Failed to set 'expire' for %s", user) - raise e - - def set_passwd(self, user, passwd, hashed=False): - pass_string = "%s:%s" % (user, passwd) - cmd = ["chpasswd"] - - if hashed: - # Need to use the short option name '-e' instead of '--encrypted' - # (which would be more descriptive) since SLES 11 doesn't know - # about long names. - cmd.append("-e") - - try: - subp.subp( - cmd, data=pass_string, logstring="chpasswd for %s" % user - ) - except Exception as e: - util.logexc(LOG, "Failed to set password for %s", user) - raise e - - return True - - def chpasswd(self, plist_in: list, hashed: bool): - payload = ( - "\n".join( - (":".join([name, password]) for name, password in plist_in) - ) - + "\n" - ) - cmd = ["chpasswd"] + (["-e"] if hashed else []) - subp.subp(cmd, data=payload) - - def is_doas_rule_valid(self, user, rule): - rule_pattern = ( - r"^(?:permit|deny)" - r"(?:\s+(?:nolog|nopass|persist|keepenv|setenv \{[^}]+\})+)*" - r"\s+([a-zA-Z0-9_]+)+" - r"(?:\s+as\s+[a-zA-Z0-9_]+)*" - r"(?:\s+cmd\s+[^\s]+(?:\s+args\s+[^\s]+(?:\s*[^\s]+)*)*)*" - r"\s*$" - ) - - LOG.debug( - "Checking if user '%s' is referenced in doas rule %r", user, rule - ) - - valid_match = re.search(rule_pattern, rule) - if valid_match: - LOG.debug( - "User '%s' referenced in doas rule", valid_match.group(1) - ) - if valid_match.group(1) == user: - LOG.debug("Correct user is referenced in doas rule") - return True - else: - LOG.debug( - "Incorrect user '%s' is referenced in doas rule", - valid_match.group(1), - ) - return False - else: - LOG.debug("doas rule does not appear to reference any user") - return False - - def write_doas_rules(self, user, rules, doas_file=None): - if not doas_file: - doas_file = self.doas_fn - - for rule in rules: - if not self.is_doas_rule_valid(user, rule): - msg = ( - "Invalid doas rule %r for user '%s'," - " not writing any doas rules for user!" 
% (rule, user) - ) - LOG.error(msg) - return - - lines = ["", "# cloud-init User rules for %s" % user] - for rule in rules: - lines.append("%s" % rule) - content = "\n".join(lines) - content += "\n" # trailing newline - - if not os.path.exists(doas_file): - contents = [util.make_header(), content] - try: - util.write_file(doas_file, "\n".join(contents), mode=0o440) - except IOError as e: - util.logexc(LOG, "Failed to write doas file %s", doas_file) - raise e - else: - if content not in util.load_text_file(doas_file): - try: - util.append_file(doas_file, content) - except IOError as e: - util.logexc( - LOG, "Failed to append to doas file %s", doas_file - ) - raise e - - def ensure_sudo_dir(self, path, sudo_base="/etc/sudoers"): - # Ensure the dir is included and that - # it actually exists as a directory - sudoers_contents = "" - base_exists = False - if os.path.exists(sudo_base): - sudoers_contents = util.load_text_file(sudo_base) - base_exists = True - found_include = False - for line in sudoers_contents.splitlines(): - line = line.strip() - include_match = re.search(r"^[#|@]includedir\s+(.*)$", line) - if not include_match: - continue - included_dir = include_match.group(1).strip() - if not included_dir: - continue - included_dir = os.path.abspath(included_dir) - if included_dir == path: - found_include = True - break - if not found_include: - try: - if not base_exists: - lines = [ - "# See sudoers(5) for more information" - ' on "#include" directives:', - "", - util.make_header(base="added"), - "#includedir %s" % (path), - "", - ] - sudoers_contents = "\n".join(lines) - util.write_file(sudo_base, sudoers_contents, 0o440) - else: - lines = [ - "", - util.make_header(base="added"), - "#includedir %s" % (path), - "", - ] - sudoers_contents = "\n".join(lines) - util.append_file(sudo_base, sudoers_contents) - LOG.debug("Added '#includedir %s' to %s", path, sudo_base) - except IOError as e: - util.logexc(LOG, "Failed to write %s", sudo_base) - raise e - util.ensure_dir(path, 0o750) - - def write_sudo_rules(self, user, rules, sudo_file=None): - if not sudo_file: - sudo_file = self.ci_sudoers_fn - - lines = [ - "", - "# User rules for %s" % user, - ] - if isinstance(rules, (list, tuple)): - for rule in rules: - lines.append("%s %s" % (user, rule)) - elif isinstance(rules, str): - lines.append("%s %s" % (user, rules)) - else: - msg = "Cannot create sudoers rule addition with type %r" - raise TypeError(msg % (type_utils.obj_name(rules))) - content = "\n".join(lines) - content += "\n" # trailing newline - - self.ensure_sudo_dir(os.path.dirname(sudo_file)) - - if not os.path.exists(sudo_file): - contents = [ - util.make_header(), - content, - ] - try: - util.write_file(sudo_file, "\n".join(contents), 0o440) - except IOError as e: - util.logexc(LOG, "Failed to write sudoers file %s", sudo_file) - raise e - else: - if content not in util.load_text_file(sudo_file): - try: - util.append_file(sudo_file, content) - except IOError as e: - util.logexc( - LOG, "Failed to append to sudoers file %s", sudo_file - ) - raise e - - def create_group(self, name, members=None): - group_add_cmd = ["groupadd", name] - if util.system_is_snappy(): - group_add_cmd.append("--extrausers") - if not members: - members = [] - - # Check if the group exists, and add it if it doesn't - if util.is_group(name): - LOG.warning("Skipping creation of existing group '%s'", name) - else: - try: - subp.subp(group_add_cmd) - LOG.info("Created new group %s", name) - except Exception: - util.logexc(LOG, "Failed to create group %s", name) - - # 
Add members to the group, if so defined - if len(members) > 0: - for member in members: - if not util.is_user(member): - LOG.warning( - "Unable to add group member '%s' to group '%s'" - "; user does not exist.", - member, - name, - ) - continue - - subp.subp(["usermod", "-a", "-G", name, member]) - LOG.info("Added user '%s' to group '%s'", member, name) - - def shutdown_command(self, *, mode, delay, message): - # called from cc_power_state_change.load_power_state - command = ["shutdown", self.shutdown_options_map[mode]] - try: - if delay != "now": - delay = "+%d" % int(delay) - except ValueError as e: - raise TypeError( - "power_state[delay] must be 'now' or '+m' (minutes)." - " found '%s'." % (delay,) - ) from e - args = command + [delay] - if message: - args.append(message) - return args - - @classmethod - def reload_init(cls, rcs=None): - """ - Reload systemd startup daemon. - May raise ProcessExecutionError - """ - init_cmd = cls.init_cmd - if cls.uses_systemd() or "systemctl" in init_cmd: - cmd = [init_cmd, "daemon-reload"] - return subp.subp(cmd, capture=True, rcs=rcs) - - @classmethod - def manage_service( - cls, action: str, service: str, *extra_args: str, rcs=None - ): - """ - Perform the requested action on a service. This handles the common - 'systemctl' and 'service' cases and may be overridden in subclasses - as necessary. - May raise ProcessExecutionError - """ - init_cmd = cls.init_cmd - if cls.uses_systemd() or "systemctl" in init_cmd: - init_cmd = ["systemctl"] - cmds = { - "stop": ["stop", service], - "start": ["start", service], - "enable": ["enable", service], - "disable": ["disable", service], - "restart": ["restart", service], - "reload": ["reload-or-restart", service], - "try-reload": ["reload-or-try-restart", service], - "status": ["status", service], - } - else: - cmds = { - "stop": [service, "stop"], - "start": [service, "start"], - "enable": [service, "start"], - "disable": [service, "stop"], - "restart": [service, "restart"], - "reload": [service, "restart"], - "try-reload": [service, "restart"], - "status": [service, "status"], - } - cmd = list(init_cmd) + list(cmds[action]) - return subp.subp(cmd, capture=True, rcs=rcs) - - def set_keymap(self, layout: str, model: str, variant: str, options: str): - if self.uses_systemd(): - subp.subp( - [ - "localectl", - "set-x11-keymap", - layout, - model, - variant, - options, - ] - ) - else: - raise NotImplementedError() - - def get_tmp_exec_path(self) -> str: - tmp_dir = temp_utils.get_tmp_ancestor(needs_exe=True) - if not util.has_mount_opt(tmp_dir, "noexec"): - return tmp_dir - return os.path.join(self.usr_lib_exec, "cloud-init", "clouddir") - - def do_as(self, command: list, user: str, cwd: str = "", **kwargs): - """ - Perform a command as the requested user. Behaves like subp() - - Note: We pass `PATH` to the user env by using `env`. 
This could probably be - simplified after bionic EOL by using - `su --whitelist-environment=PATH ...`, more info on: - https://lore.kernel.org/all/20180815110445.4qefy5zx5gfgbqly@ws.net.home/T/ - """ - directory = f"cd {cwd} && " if cwd else "" - return subp.subp( - [ - "su", - "-", - user, - "-c", - directory + "env PATH=$PATH " + " ".join(command), - ], - **kwargs, - ) - - @staticmethod - def build_dhclient_cmd( - path: str, - lease_file: str, - pid_file: str, - interface: str, - config_file: str, - ) -> list: - return [ - path, - "-1", - "-v", - "-lf", - lease_file, - "-pf", - pid_file, - "-sf", - "/bin/true", - ] + (["-cf", config_file, interface] if config_file else [interface]) - - @property - def fallback_interface(self): - """Determine the network interface used during local network config.""" - if self._fallback_interface is None: - self._fallback_interface = net.find_fallback_nic() - if not self._fallback_interface: - LOG.warning( - "Did not find a fallback interface on distro: %s.", - self.name, - ) - return self._fallback_interface - - @fallback_interface.setter - def fallback_interface(self, value): - self._fallback_interface = value - - @staticmethod - def get_proc_ppid(pid: int) -> Optional[int]: - """Return the parent pid of a process by parsing /proc/$pid/stat""" - match = Distro._get_proc_stat_by_index(pid, 4) - if match is not None: - with suppress(ValueError): - return int(match) - LOG.warning("/proc/%s/stat has an invalid ppid [%s]", pid, match) - return None - - @staticmethod - def get_proc_pgid(pid: int) -> Optional[int]: - """Return the process group id of a process by parsing /proc/$pid/stat""" - match = Distro._get_proc_stat_by_index(pid, 5) - if match is not None: - with suppress(ValueError): - return int(match) - LOG.warning("/proc/%s/stat has an invalid pgid [%s]", pid, match) - return None - - @staticmethod - def _get_proc_stat_by_index(pid: int, field: int) -> Optional[int]: - """ - parse /proc/$pid/stat for a specific field as numbered in man:proc(5) - - :param pid: integer to query /proc/$pid/stat for - :param field: field number within /proc/$pid/stat to return - """ - try: - content: str = util.load_text_file( - "/proc/%s/stat" % pid, quiet=True - ).strip() # pyright: ignore - match = re.search( - r"^(\d+) (\(.+\)) ([RSDZTtWXxKPI]) (\d+) (\d+)", content - ) - if not match: - LOG.warning( - "/proc/%s/stat has invalid contents [%s]", pid, content - ) - return None - return int(match.group(field)) - except IOError as e: - LOG.warning("Failed to load /proc/%s/stat. %s", pid, e) - except IndexError: - LOG.warning( - "Unable to match field %s of process pid=%s (%s) (%s)", - field, - pid, - content, # pyright: ignore - match, # pyright: ignore - ) - return None - - @staticmethod - def eject_media(device: str) -> None: - cmd = None - if subp.which("eject"): - cmd = ["eject", device] - elif subp.which("/lib/udev/cdrom_id"): - cmd = ["/lib/udev/cdrom_id", "--eject-media", device] - else: - raise subp.ProcessExecutionError( - cmd="eject_media_cmd", - description="eject command not found", - reason="neither eject nor /lib/udev/cdrom_id are found", - ) - subp.subp(cmd) - - -def _apply_hostname_transformations_to_url(url: str, transformations: list): - """ - Apply transformations to a URL's hostname, return transformed URL. - - This is a separate function because unwrapping and rewrapping only the - hostname portion of a URL is complex. - - :param url: - The URL to operate on.
- :param transformations: - A list of ``(str) -> Optional[str]`` functions, which will be applied - in order to the hostname portion of the URL. If any function - (regardless of ordering) returns None, ``url`` will be returned without - any modification. - - :return: - A string whose value is ``url`` with the hostname ``transformations`` - applied, or ``None`` if ``url`` is unparsable. - """ - try: - parts = urllib.parse.urlsplit(url) - except ValueError: - # If we can't even parse the URL, we shouldn't use it for anything - return None - new_hostname = parts.hostname - if new_hostname is None: - # The URL given doesn't have a hostname component, so (a) we can't - # transform it, and (b) it won't work as a mirror; return None. - return None - - for transformation in transformations: - new_hostname = transformation(new_hostname) - if new_hostname is None: - # If a transformation returns None, that indicates we should abort - # processing and return `url` unmodified - return url - - new_netloc = new_hostname - if parts.port is not None: - new_netloc = "{}:{}".format(new_netloc, parts.port) - return urllib.parse.urlunsplit(parts._replace(netloc=new_netloc)) - - -def _sanitize_mirror_url(url: str): - """ - Given a mirror URL, replace or remove any invalid URI characters. - - This performs the following actions on the URL's hostname: - * Checks if it is an IP address, returning the URL immediately if it is - * Converts it to its IDN form (see below for details) - * Replaces any non-Letters/Digits/Hyphen (LDH) characters in it with - hyphens - * Removes any leading/trailing hyphens from each domain name label - - Before we replace any invalid domain name characters, we first need to - ensure that any valid non-ASCII characters in the hostname will not be - replaced, by ensuring the hostname is in its Internationalized domain name - (IDN) representation (see RFC 5890). This conversion has to be applied to - the whole hostname (rather than just the substitution variables), because - the Punycode algorithm used by IDNA transcodes each part of the hostname as - a whole string (rather than encoding individual characters). It cannot be - applied to the whole URL, because (a) the Punycode algorithm expects to - operate on domain names so doesn't output a valid URL, and (b) non-ASCII - characters in non-hostname parts of the URL aren't encoded via Punycode. - - To put this in RFC 5890's terminology: before we remove or replace any - characters from our domain name (which we do to ensure that each label is a - valid LDH Label), we first ensure each label is in its A-label form. - - (Note that Python's builtin idna encoding is actually IDNA2003, not - IDNA2008. This changes the specifics of how some characters are encoded to - ASCII, but doesn't affect the logic here.) - - :param url: - The URL to operate on. - - :return: - A sanitized version of the URL, which will have been IDNA encoded if - necessary, or ``None`` if the generated string is not a parseable URL. - """ - # Acceptable characters are LDH characters, plus "." to separate each label - acceptable_chars = LDH_ASCII_CHARS + "." 
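To make the docstring's steps concrete, here is roughly what the hostname half of the pipeline does (a standalone sketch; it omits the IP-address pass-through and, as noted above, Python's built-in "idna" codec implements IDNA2003):

import string

LDH = string.ascii_letters + string.digits + "-"

def sanitize_hostname(hostname: str) -> str:
    # 1. Encode to A-labels (Punycode) so valid non-ASCII survives.
    hostname = hostname.encode("idna").decode("ascii")
    # 2. Replace anything that is not a letter, digit, hyphen, or dot.
    hostname = "".join(c if c in LDH + "." else "-" for c in hostname)
    # 3. Strip leading/trailing hyphens from each label.
    return ".".join(part.strip("-") for part in hostname.split("."))

assert sanitize_hostname("münchen.example.com") == "xn--mnchen-3ya.example.com"
assert sanitize_hostname("-bad_label-.example.com") == "bad-label.example.com"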
- transformations = [ - # This is an IP address, not a hostname, so no need to apply the - # transformations - lambda hostname: None if net.is_ip_address(hostname) else hostname, - # Encode with IDNA to get the correct characters (as `bytes`), then - # decode with ASCII so we return a `str` - lambda hostname: hostname.encode("idna").decode("ascii"), - # Replace any unacceptable characters with "-" - lambda hostname: "".join( - c if c in acceptable_chars else "-" for c in hostname - ), - # Drop leading/trailing hyphens from each part of the hostname - lambda hostname: ".".join( - part.strip("-") for part in hostname.split(".") - ), - ] - - return _apply_hostname_transformations_to_url(url, transformations) - - -def _get_package_mirror_info( - mirror_info, data_source=None, mirror_filter=util.search_for_mirror -): - # given an arch-specific 'mirror_info' entry (from package_mirrors), - # search through the 'search' entries and fall back appropriately; - # return a dict with only {name: mirror} entries. - if not mirror_info: - mirror_info = {} - - subst = {} - if data_source and data_source.availability_zone: - subst["availability_zone"] = data_source.availability_zone - - # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) - # the region is us-east-1. so region = az[0:-1] - if _EC2_AZ_RE.match(data_source.availability_zone): - ec2_region = data_source.availability_zone[0:-1] - - if ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES: - subst["ec2_region"] = "%s" % ec2_region - elif data_source.platform_type == "ec2": - subst["ec2_region"] = "%s" % ec2_region - - if data_source and data_source.region: - subst["region"] = data_source.region - - results = {} - for name, mirror in mirror_info.get("failsafe", {}).items(): - results[name] = mirror - - for name, searchlist in mirror_info.get("search", {}).items(): - mirrors = [] - for tmpl in searchlist: - try: - mirror = tmpl % subst - except KeyError: - continue - - mirror = _sanitize_mirror_url(mirror) - if mirror is not None: - mirrors.append(mirror) - - found = mirror_filter(mirrors) - if found: - results[name] = found - - LOG.debug("filtered distro mirror info: %s", results) - - return results - - -def _get_arch_package_mirror_info(package_mirrors, arch): - # pull out the specific arch from a 'package_mirrors' config option - default = None - for item in package_mirrors: - arches = item.get("arches") - if arch in arches: - return item - if "default" in arches: - default = item - return default - - -def fetch(name: str) -> Type[Distro]: - locs, looked_locs = importer.find_module(name, ["", __name__], ["Distro"]) - if not locs: - raise ImportError( - "No distribution found for distro %s (searched %s)" - % (name, looked_locs) - ) - mod = importer.import_module(locs[0]) - cls = getattr(mod, "Distro") - return cls - - -def set_etc_timezone( - tz, tz_file=None, tz_conf="/etc/timezone", tz_local="/etc/localtime" -): - util.write_file(tz_conf, str(tz).rstrip() + "\n") - # This ensures that the correct tz will be used for the system - if tz_local and tz_file: - # use a symlink if there exists a symlink or tz_local is not present - islink = os.path.islink(tz_local) - if islink or not os.path.exists(tz_local): - if islink: - util.del_file(tz_local) - os.symlink(tz_file, tz_local) - else: - util.copy(tz_file, tz_local) - return - - -def uses_systemd(): - try: - res = os.lstat("/run/systemd/system") - return stat.S_ISDIR(res.st_mode) - except Exception: - return False diff --git 
a/.pc/cpick-a6f7577d-bug-package_update-avoid-snap-refresh-in-images-without/tests/unittests/distros/test_init.py b/.pc/cpick-a6f7577d-bug-package_update-avoid-snap-refresh-in-images-without/tests/unittests/distros/test_init.py deleted file mode 100644 index 986ccafcb..000000000 --- a/.pc/cpick-a6f7577d-bug-package_update-avoid-snap-refresh-in-images-without/tests/unittests/distros/test_init.py +++ /dev/null @@ -1,469 +0,0 @@ -# Copyright (C) 2020 Canonical Ltd. -# -# Author: Daniel Watkins -# -# This file is part of cloud-init. See LICENSE file for license information. -"""Tests for cloudinit/distros/__init__.py""" - -from unittest import mock - -import pytest - -from cloudinit.distros import ( - LDH_ASCII_CHARS, - PackageInstallerError, - _get_package_mirror_info, -) -from tests.unittests.distros import _get_distro - -# In newer versions of Python, these characters will be omitted instead -# of substituted because of security concerns. -# See https://bugs.python.org/issue43882 -SECURITY_URL_CHARS = "\n\r\t" - -# Define a set of characters we would expect to be replaced -INVALID_URL_CHARS = [ - chr(x) - for x in range(127) - if chr(x) not in LDH_ASCII_CHARS + SECURITY_URL_CHARS -] -for separator in [":", ".", "/", "#", "?", "@", "[", "]"]: - # Remove from the set characters that either separate hostname parts (":", - # "."), terminate hostnames ("/", "#", "?", "@"), or cause Python to be - # unable to parse URLs ("[", "]"). - INVALID_URL_CHARS.remove(separator) - - -class TestGetPackageMirrorInfo: - """ - Tests for cloudinit.distros._get_package_mirror_info. - - These supplement the tests in tests/unittests/test_distros/test_generic.py - which are more focused on testing a single production-like configuration. - These tests are more focused on specific aspects of the unit under test. - """ - - @pytest.mark.parametrize( - "mirror_info,expected", - [ - # Empty info gives empty return - ({}, {}), - # failsafe values used if present - ( - { - "failsafe": { - "primary": "http://value", - "security": "http://other", - } - }, - {"primary": "http://value", "security": "http://other"}, - ), - # search values used if present - ( - { - "search": { - "primary": ["http://value"], - "security": ["http://other"], - } - }, - {"primary": ["http://value"], "security": ["http://other"]}, - ), - # failsafe values used if search value not present - ( - { - "search": {"primary": ["http://value"]}, - "failsafe": {"security": "http://other"}, - }, - {"primary": ["http://value"], "security": "http://other"}, - ), - ], - ) - def test_get_package_mirror_info_failsafe(self, mirror_info, expected): - """ - Test the interaction between search and failsafe inputs - - (This doesn't test the case where the mirror_filter removes all search - options; test_failsafe_used_if_all_search_results_filtered_out covers - that.) 
- """ - assert expected == _get_package_mirror_info( - mirror_info, mirror_filter=lambda x: x - ) - - def test_failsafe_used_if_all_search_results_filtered_out(self): - """Test the failsafe option used if all search options eliminated.""" - mirror_info = { - "search": {"primary": ["http://value"]}, - "failsafe": {"primary": "http://other"}, - } - assert {"primary": "http://other"} == _get_package_mirror_info( - mirror_info, mirror_filter=lambda x: False - ) - - @pytest.mark.parametrize( - "allow_ec2_mirror, platform_type", [(True, "ec2")] - ) - @pytest.mark.parametrize( - "availability_zone,region,patterns,expected", - ( - # Test ec2_region alone - ( - "fk-fake-1f", - None, - ["http://EC2-%(ec2_region)s/ubuntu"], - ["http://ec2-fk-fake-1/ubuntu"], - ), - # Test availability_zone alone - ( - "fk-fake-1f", - None, - ["http://AZ-%(availability_zone)s/ubuntu"], - ["http://az-fk-fake-1f/ubuntu"], - ), - # Test region alone - ( - None, - "fk-fake-1", - ["http://RG-%(region)s/ubuntu"], - ["http://rg-fk-fake-1/ubuntu"], - ), - # Test that ec2_region is not available for non-matching AZs - ( - "fake-fake-1f", - None, - [ - "http://EC2-%(ec2_region)s/ubuntu", - "http://AZ-%(availability_zone)s/ubuntu", - ], - ["http://az-fake-fake-1f/ubuntu"], - ), - # Test that template order maintained - ( - None, - "fake-region", - [ - "http://RG-%(region)s-2/ubuntu", - "http://RG-%(region)s-1/ubuntu", - ], - [ - "http://rg-fake-region-2/ubuntu", - "http://rg-fake-region-1/ubuntu", - ], - ), - # Test that non-ASCII hostnames are IDNA encoded; - # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" - ( - None, - "ТεЅТ̣", - ["http://www.IDNA-%(region)s.com/ubuntu"], - ["http://www.xn--idna--4kd53hh6aba3q.com/ubuntu"], - ), - # Test that non-ASCII hostnames with a port are IDNA encoded; - # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" - ( - None, - "ТεЅТ̣", - ["http://www.IDNA-%(region)s.com:8080/ubuntu"], - ["http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu"], - ), - # Test that non-ASCII non-hostname parts of URLs are unchanged - ( - None, - "ТεЅТ̣", - ["http://www.example.com/%(region)s/ubuntu"], - ["http://www.example.com/ТεЅТ̣/ubuntu"], - ), - # Test that IPv4 addresses are unchanged - ( - None, - "fk-fake-1", - ["http://192.168.1.1:8080/%(region)s/ubuntu"], - ["http://192.168.1.1:8080/fk-fake-1/ubuntu"], - ), - # Test that IPv6 addresses are unchanged - ( - None, - "fk-fake-1", - ["http://[2001:67c:1360:8001::23]/%(region)s/ubuntu"], - ["http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu"], - ), - # Test that unparseable URLs are filtered out of the mirror list - ( - None, - "inv[lid", - [ - "http://%(region)s.in.hostname/should/be/filtered", - "http://but.not.in.the.path/%(region)s", - ], - ["http://but.not.in.the.path/inv[lid"], - ), - ( - None, - "-some-region-", - ["http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu"], - ["http://lead-ing.some-region.trail-ing.example.com/ubuntu"], - ), - ) - + tuple( - # Dynamically generate a test case for each non-LDH - # (Letters/Digits/Hyphen) ASCII character, testing that it is - # substituted with a hyphen - ( - None, - "fk{0}fake{0}1".format(invalid_char), - ["http://%(region)s/ubuntu"], - ["http://fk-fake-1/ubuntu"], - ) - for invalid_char in INVALID_URL_CHARS - ), - ) - def test_valid_substitution( - self, - allow_ec2_mirror, - platform_type, - availability_zone, - region, - patterns, - expected, - ): - """Test substitution works as expected.""" - flag_path = ( - "cloudinit.distros.ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES" - ) - - 
m_data_source = mock.Mock( - availability_zone=availability_zone, - region=region, - platform_type=platform_type, - ) - mirror_info = {"search": {"primary": patterns}} - - with mock.patch(flag_path, allow_ec2_mirror): - ret = _get_package_mirror_info( - mirror_info, - data_source=m_data_source, - mirror_filter=lambda x: x, - ) - print(allow_ec2_mirror) - print(platform_type) - print(availability_zone) - print(region) - print(patterns) - print(expected) - assert {"primary": expected} == ret - - -class TestInstall: - """Tests for cloudinit.distros.Distro.install_packages.""" - - @pytest.fixture(autouse=True) - def ensure_available(self, mocker): - mocker.patch( - "cloudinit.distros.package_management.apt.Apt.available", - return_value=True, - ) - mocker.patch( - "cloudinit.distros.package_management.snap.Snap.available", - return_value=True, - ) - - @pytest.fixture - def m_apt_install(self, mocker): - return mocker.patch( - "cloudinit.distros.package_management.apt.Apt.install_packages", - return_value=[], - ) - - @pytest.fixture - def m_snap_install(self, mocker): - return mocker.patch( - "cloudinit.distros.package_management.snap.Snap.install_packages", - return_value=[], - ) - - @pytest.fixture - def m_subp(self, mocker): - return mocker.patch( - "cloudinit.distros.bsd.subp.subp", return_value=("", "") - ) - - def test_invalid_yaml(self, m_apt_install): - """Test that an invalid YAML raises an exception.""" - with pytest.raises(ValueError): - _get_distro("debian").install_packages([["invalid"]]) - m_apt_install.assert_not_called() - - def test_unknown_package_manager(self, m_apt_install, caplog): - """Test that an unknown package manager raises an exception.""" - _get_distro("debian").install_packages( - [{"apt": ["pkg1"]}, "pkg2", {"invalid": ["pkg3"]}] - ) - assert ( - "Cannot install packages under 'invalid' as it is not a supported " - "package manager!" 
in caplog.text - ) - install_args = m_apt_install.call_args_list[0][0][0] - assert "pkg1" in install_args - assert "pkg2" in install_args - assert "pkg3" not in install_args - - def test_non_default_package_manager(self, m_apt_install, m_snap_install): - """Test success from package manager not supported by distro.""" - _get_distro("debian").install_packages( - [{"apt": ["pkg1"]}, "pkg2", {"snap": ["pkg3"]}] - ) - apt_install_args = m_apt_install.call_args_list[0][0][0] - assert "pkg1" in apt_install_args - assert "pkg2" in apt_install_args - assert "pkg3" not in apt_install_args - - assert "pkg3" in m_snap_install.call_args_list[0][1]["pkglist"] - - def test_non_default_package_manager_fail( - self, m_apt_install, mocker, caplog - ): - """Test fail from package manager not supported by distro.""" - m_snap_install = mocker.patch( - "cloudinit.distros.package_management.snap.Snap.install_packages", - return_value=["pkg3"], - ) - with pytest.raises( - PackageInstallerError, - match="Failed to install the following packages: {'pkg3'}", - ): - _get_distro("debian").install_packages( - [{"apt": ["pkg1"]}, "pkg2", {"snap": ["pkg3"]}] - ) - - assert "pkg3" in m_snap_install.call_args_list[0][1]["pkglist"] - - def test_default_and_specific_package_manager( - self, m_apt_install, m_snap_install - ): - """Test success using the default and an explicitly specified package manager.""" - _get_distro("ubuntu").install_packages( - ["pkg1", ["pkg3", "ver3"], {"apt": [["pkg2", "ver2"]]}] - ) - apt_install_args = m_apt_install.call_args_list[0][0][0] - assert "pkg1" in apt_install_args - assert ("pkg2", "ver2") in apt_install_args - assert "pkg3" not in apt_install_args - - m_snap_install.assert_not_called() - - def test_specific_package_manager_fail_doesnt_retry( - self, mocker, m_snap_install - ): - """Test fail from package manager doesn't retry as generic.""" - m_apt_install = mocker.patch( - "cloudinit.distros.package_management.apt.Apt.install_packages", - return_value=["pkg1"], - ) - with pytest.raises(PackageInstallerError): - _get_distro("ubuntu").install_packages([{"apt": ["pkg1"]}]) - apt_install_args = m_apt_install.call_args_list[0][0][0] - assert "pkg1" in apt_install_args - m_snap_install.assert_not_called() - - def test_no_attempt_if_no_package_manager( - self, mocker, m_apt_install, m_snap_install, caplog - ): - """Test that no attempt is made if no package managers are available.""" - mocker.patch( - "cloudinit.distros.package_management.apt.Apt.available", - return_value=False, - ) - mocker.patch( - "cloudinit.distros.package_management.snap.Snap.available", - return_value=False, - ) - with pytest.raises(PackageInstallerError): - _get_distro("ubuntu").install_packages( - ["pkg1", "pkg2", {"other": "pkg3"}] - ) - m_apt_install.assert_not_called() - m_snap_install.assert_not_called() - - assert "Package manager 'apt' not available" in caplog.text - assert "Package manager 'snap' not available" in caplog.text - - @pytest.mark.parametrize( - "distro,pkg_list,apt_available,apt_failed,snap_failed,total_failed", - [ - pytest.param( - "debian", - ["pkg1", {"apt": ["pkg2"], "snap": ["pkg3"]}], - False, - [], - ["pkg1", "pkg3"], - ["pkg1", "pkg2", "pkg3"], - id="debian_no_apt", - ), - pytest.param( - "debian", - ["pkg1", {"apt": ["pkg2"], "snap": ["pkg3"]}], - True, - ["pkg2"], - ["pkg3"], - ["pkg2", "pkg3"], - id="debian_with_apt", - ), - pytest.param( - "ubuntu", - ["pkg1", {"apt": ["pkg2"], "snap": ["pkg3"]}], - False, - [], - [], - ["pkg2"], - id="ubuntu_no_apt", - ), - pytest.param( - "ubuntu", - ["pkg1", {"apt": 
["pkg2"], "snap": ["pkg3"]}], - True, - ["pkg1"], - ["pkg3"], - ["pkg3"], - id="ubuntu_with_apt", - ), - ], - ) - def test_failed( - self, - distro, - pkg_list, - apt_available, - apt_failed, - snap_failed, - total_failed, - mocker, - m_apt_install, - m_snap_install, - ): - """Test that failed packages are properly tracked. - - We need to ensure that the failed packages are properly tracked: - 1. When package install fails normally - 2. When package manager is not available - 3. When package manager is not explicitly supported by the distro - - So test various combinations of these scenarios. - """ - mocker.patch( - "cloudinit.distros.package_management.apt.Apt.available", - return_value=apt_available, - ) - mocker.patch( - "cloudinit.distros.package_management.apt.Apt.install_packages", - return_value=apt_failed, - ) - mocker.patch( - "cloudinit.distros.package_management.snap.Snap.install_packages", - return_value=snap_failed, - ) - with pytest.raises(PackageInstallerError) as exc: - _get_distro(distro).install_packages(pkg_list) - message = exc.value.args[0] - assert "Failed to install the following packages" in message - for pkg in total_failed: - assert pkg in message diff --git a/.pc/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321/cloudinit/distros/__init__.py b/.pc/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321/cloudinit/distros/__init__.py deleted file mode 100644 index e3239daae..000000000 --- a/.pc/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321/cloudinit/distros/__init__.py +++ /dev/null @@ -1,1546 +0,0 @@ -# Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Scott Moser -# Author: Juerg Haefliger -# Author: Joshua Harlow -# Author: Ben Howard -# -# This file is part of cloud-init. See LICENSE file for license information. - -import abc -import logging -import os -import re -import stat -import string -import urllib.parse -from collections import defaultdict -from contextlib import suppress -from io import StringIO -from typing import ( - Any, - Dict, - List, - Mapping, - MutableMapping, - Optional, - Set, - Tuple, - Type, - Union, -) - -import cloudinit.net.netops.iproute2 as iproute2 -from cloudinit import ( - helpers, - importer, - net, - persistence, - ssh_util, - subp, - temp_utils, - type_utils, - util, -) -from cloudinit.distros.networking import LinuxNetworking, Networking -from cloudinit.distros.package_management.package_manager import PackageManager -from cloudinit.distros.package_management.utils import known_package_managers -from cloudinit.distros.parsers import hosts -from cloudinit.features import ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES -from cloudinit.net import activators, dhcp, renderers -from cloudinit.net.network_state import parse_net_config_data -from cloudinit.net.renderer import Renderer - -# Used when a cloud-config module can be run on all cloud-init distributions. -# The value 'all' is surfaced in module documentation for distro support. 
-ALL_DISTROS = "all" - -OSFAMILIES = { - "alpine": ["alpine"], - "arch": ["arch"], - "debian": ["debian", "ubuntu"], - "freebsd": ["freebsd", "dragonfly"], - "gentoo": ["gentoo", "cos"], - "netbsd": ["netbsd"], - "openbsd": ["openbsd"], - "redhat": [ - "almalinux", - "amazon", - "centos", - "cloudlinux", - "eurolinux", - "fedora", - "mariner", - "miraclelinux", - "openmandriva", - "photon", - "rhel", - "rocky", - "virtuozzo", - ], - "suse": [ - "opensuse", - "opensuse-leap", - "opensuse-microos", - "opensuse-tumbleweed", - "sle_hpc", - "sle-micro", - "sles", - "suse", - ], - "openeuler": ["openeuler"], - "OpenCloudOS": ["OpenCloudOS", "TencentOS"], -} - -LOG = logging.getLogger(__name__) - -# This is a best guess regex, based on current EC2 AZs on 2017-12-11. -# It could break when Amazon adds new regions and new AZs. -_EC2_AZ_RE = re.compile("^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$") - -# Default NTP Client Configurations -PREFERRED_NTP_CLIENTS = ["chrony", "systemd-timesyncd", "ntp", "ntpdate"] - -# Letters/Digits/Hyphen characters, for use in domain name validation -LDH_ASCII_CHARS = string.ascii_letters + string.digits + "-" - -# Before you try to go rewriting this better using Unions, read -# https://github.com/microsoft/pyright/blob/main/docs/type-concepts.md#generic-types # noqa: E501 -# The Immutable types mentioned there won't work for us because -# we need to distinguish between a str and a Sequence[str] -# This also isn't exhaustive. If you have a unique case that adheres to -# the `packages` schema, you can add it here. -PackageList = Union[ - List[str], - List[Mapping], - List[List[str]], - List[Union[str, List[str]]], - List[Union[str, List[str], Mapping]], -] - - -class PackageInstallerError(Exception): - pass - - -class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): - pip_package_name = "python3-pip" - usr_lib_exec = "/usr/lib" - hosts_fn = "/etc/hosts" - doas_fn = "/etc/doas.conf" - ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users" - hostname_conf_fn = "/etc/hostname" - tz_zone_dir = "/usr/share/zoneinfo" - default_owner = "root:root" - init_cmd = ["service"] # systemctl, service etc - renderer_configs: Mapping[str, MutableMapping[str, Any]] = {} - _preferred_ntp_clients = None - networking_cls: Type[Networking] = LinuxNetworking - # This is used by self.shutdown_command(), and can be overridden in - # subclasses - shutdown_options_map = {"halt": "-H", "poweroff": "-P", "reboot": "-r"} - net_ops = iproute2.Iproute2 - - _ci_pkl_version = 1 - prefer_fqdn = False - resolve_conf_fn = "/etc/resolv.conf" - - osfamily: str - # Directory where the distro stores their DHCP leases. 
- # The children classes should override this with their dhcp leases - # directory - dhclient_lease_directory: Optional[str] = None - # A regex to match DHCP lease file(s) - # The children classes should override this with a regex matching - # their lease file name format - dhclient_lease_file_regex: Optional[str] = None - - def __init__(self, name, cfg, paths): - self._paths = paths - self._cfg = cfg - self.name = name - self.networking: Networking = self.networking_cls() - self.dhcp_client_priority = dhcp.ALL_DHCP_CLIENTS - self.net_ops = iproute2.Iproute2 - self._runner = helpers.Runners(paths) - self.package_managers: List[PackageManager] = [] - self._dhcp_client = None - self._fallback_interface = None - - def _unpickle(self, ci_pkl_version: int) -> None: - """Perform deserialization fixes for Distro.""" - if "networking" not in self.__dict__ or not self.networking.__dict__: - # This is either a Distro pickle with no networking attribute OR - # this is a Distro pickle with a networking attribute but from - # before ``Networking`` had any state (meaning that - # Networking.__setstate__ will not be called). In either case, we - # want to ensure that `self.networking` is freshly-instantiated: - # either because it isn't present at all, or because it will be - # missing expected instance state otherwise. - self.networking = self.networking_cls() - if not hasattr(self, "_dhcp_client"): - self._dhcp_client = None - if not hasattr(self, "_fallback_interface"): - self._fallback_interface = None - - def _validate_entry(self, entry): - if isinstance(entry, str): - return entry - elif isinstance(entry, (list, tuple)): - if len(entry) == 2: - return tuple(entry) - raise ValueError( - "Invalid 'packages' yaml specification. " - "Check schema definition." - ) - - def _extract_package_by_manager( - self, pkglist: PackageList - ) -> Tuple[Dict[Type[PackageManager], Set], Set]: - """Transform the generic package list to package by package manager. - - Additionally, include list of generic packages - """ - packages_by_manager = defaultdict(set) - generic_packages: Set = set() - for entry in pkglist: - if isinstance(entry, dict): - for package_manager, package_list in entry.items(): - for definition in package_list: - definition = self._validate_entry(definition) - try: - packages_by_manager[ - known_package_managers[package_manager] - ].add(definition) - except KeyError: - LOG.error( - "Cannot install packages under '%s' as it is " - "not a supported package manager!", - package_manager, - ) - else: - generic_packages.add(self._validate_entry(entry)) - return dict(packages_by_manager), generic_packages - - def install_packages(self, pkglist: PackageList): - error_message = ( - "Failed to install the following packages: %s. " - "See associated package manager logs for more details." 
- ) - # If an entry hasn't been included with an explicit package name, - # add it to a 'generic' list of packages - ( - packages_by_manager, - generic_packages, - ) = self._extract_package_by_manager(pkglist) - - # First install packages using package manager(s) - # supported by the distro - total_failed: Set[str] = set() - for manager in self.package_managers: - - manager_packages = packages_by_manager.get( - manager.__class__, set() - ) - - to_try = manager_packages | generic_packages - # Remove any failed we will try for this package manager - total_failed.difference_update(to_try) - if not manager.available(): - LOG.debug("Package manager '%s' not available", manager.name) - total_failed.update(to_try) - continue - if not to_try: - continue - failed = manager.install_packages(to_try) - total_failed.update(failed) - if failed: - LOG.info(error_message, failed) - # Ensure we don't attempt to install packages specific to - # one particular package manager using another package manager - generic_packages = set(failed) - manager_packages - - # Now attempt any specified package managers not explicitly supported - # by distro - for manager_type, packages in packages_by_manager.items(): - if manager_type.name in [p.name for p in self.package_managers]: - # We already installed/attempted these; don't try again - continue - total_failed.update( - manager_type.from_config( - self._runner, self._cfg - ).install_packages(pkglist=packages) - ) - - if total_failed: - raise PackageInstallerError(error_message % total_failed) - - @property - def dhcp_client(self) -> dhcp.DhcpClient: - """access the distro's preferred dhcp client - - if no client has been selected yet select one - uses - self.dhcp_client_priority, which may be overridden in each distro's - object to eliminate checking for clients which will not be provided - by the distro - """ - if self._dhcp_client: - return self._dhcp_client - - # no client has been selected yet, so pick one - # - # set the default priority list to the distro-defined priority list - dhcp_client_priority = self.dhcp_client_priority - - # if the configuration includes a network.dhcp_client_priority list - # then attempt to use it - config_priority = util.get_cfg_by_path( - self._cfg, ("network", "dhcp_client_priority"), [] - ) - - if config_priority: - # user or image builder configured a custom dhcp client priority - # list - found_clients = [] - LOG.debug( - "Using configured dhcp client priority list: %s", - config_priority, - ) - for client_configured in config_priority: - for client_class in dhcp.ALL_DHCP_CLIENTS: - if client_configured == client_class.client_name: - found_clients.append(client_class) - break - else: - LOG.warning( - "Configured dhcp client %s is not supported, skipping", - client_configured, - ) - # If dhcp_client_priority is defined in the configuration, but none - # of the defined clients are supported by cloud-init, then we don't - # override the distro default. If at least one client in the - # configured list exists, then we use that for our list of clients - # to check. 
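Schematically, the narrowing described in that comment works like the following sketch (the helper name is hypothetical; the real code matches configured names against `dhcp.ALL_DHCP_CLIENTS` rather than only the distro default):

from typing import List

def effective_dhcp_priority(
    distro_default: List[type], configured: List[str]
) -> List[type]:
    # Resolve configured names against the known client classes,
    # preserving the configured order.
    known = {cls.client_name: cls for cls in distro_default}
    found = [known[name] for name in configured if name in known]
    # If nothing in the configured list resolves, leave the distro
    # default untouched.
    return found or distro_default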
- if found_clients: - dhcp_client_priority = found_clients - - # iterate through our priority list and use the first client that is - # installed on the system - for client in dhcp_client_priority: - try: - self._dhcp_client = client() - LOG.debug("DHCP client selected: %s", client.client_name) - return self._dhcp_client - except (dhcp.NoDHCPLeaseMissingDhclientError,): - LOG.debug("DHCP client not found: %s", client.client_name) - raise dhcp.NoDHCPLeaseMissingDhclientError() - - @property - def network_activator(self) -> Optional[Type[activators.NetworkActivator]]: - """Return the configured network activator for this environment.""" - priority = util.get_cfg_by_path( - self._cfg, ("network", "activators"), None - ) - try: - return activators.select_activator(priority=priority) - except activators.NoActivatorException: - return None - - def _get_renderer(self) -> Renderer: - priority = util.get_cfg_by_path( - self._cfg, ("network", "renderers"), None - ) - - name, render_cls = renderers.select(priority=priority) - LOG.debug( - "Selected renderer '%s' from priority list: %s", name, priority - ) - renderer = render_cls(config=self.renderer_configs.get(name)) - return renderer - - def _write_network_state(self, network_state, renderer: Renderer): - renderer.render_network_state(network_state) - - def _find_tz_file(self, tz): - tz_file = os.path.join(self.tz_zone_dir, str(tz)) - if not os.path.isfile(tz_file): - raise IOError( - "Invalid timezone %s, no file found at %s" % (tz, tz_file) - ) - return tz_file - - def get_option(self, opt_name, default=None): - return self._cfg.get(opt_name, default) - - def set_option(self, opt_name, value=None): - self._cfg[opt_name] = value - - def set_hostname(self, hostname, fqdn=None): - writeable_hostname = self._select_hostname(hostname, fqdn) - self._write_hostname(writeable_hostname, self.hostname_conf_fn) - self._apply_hostname(writeable_hostname) - - @staticmethod - def uses_systemd(): - """Wrapper to report whether this distro uses systemd or sysvinit.""" - return uses_systemd() - - @abc.abstractmethod - def package_command(self, command, args=None, pkgs=None): - # Long-term, this method should be removed and callers refactored. - # Very few commands are going to be consistent across all package - # managers. - raise NotImplementedError() - - def update_package_sources(self): - for manager in self.package_managers: - if not manager.available(): - LOG.debug( - "Skipping update for package manager '%s': not available.", - manager.name, - ) - continue - try: - manager.update_package_sources() - except Exception as e: - LOG.error( - "Failed to update package using %s: %s", manager.name, e - ) - - def get_primary_arch(self): - arch = os.uname()[4] - if arch in ("i386", "i486", "i586", "i686"): - return "i386" - return arch - - def _get_arch_package_mirror_info(self, arch=None): - mirror_info = self.get_option("package_mirrors", []) - if not arch: - arch = self.get_primary_arch() - return _get_arch_package_mirror_info(mirror_info, arch) - - def get_package_mirror_info(self, arch=None, data_source=None): - # This resolves the package_mirrors config option - # down to a single dict of {mirror_name: mirror_url} - arch_info = self._get_arch_package_mirror_info(arch) - return _get_package_mirror_info( - data_source=data_source, mirror_info=arch_info - ) - - def generate_fallback_config(self): - return net.generate_fallback_config() - - def apply_network_config(self, netconfig, bring_up=False) -> bool: - """Apply the network config. 
- - If bring_up is True, attempt to bring up the passed in devices. If - devices is None, attempt to bring up devices returned by - _write_network_config. - - Returns True if any devices failed to come up, otherwise False. - """ - renderer = self._get_renderer() - network_state = parse_net_config_data(netconfig, renderer=renderer) - self._write_network_state(network_state, renderer) - - # Now try to bring them up - if bring_up: - LOG.debug("Bringing up newly configured network interfaces") - network_activator = self.network_activator - if not network_activator: - LOG.warning( - "No network activator found, not bringing up " - "network interfaces" - ) - return True - network_activator.bring_up_all_interfaces(network_state) - else: - LOG.debug("Not bringing up newly configured network interfaces") - return False - - @abc.abstractmethod - def apply_locale(self, locale, out_fn=None): - raise NotImplementedError() - - @abc.abstractmethod - def set_timezone(self, tz): - raise NotImplementedError() - - def _get_localhost_ip(self): - return "127.0.0.1" - - def get_locale(self): - raise NotImplementedError() - - @abc.abstractmethod - def _read_hostname(self, filename, default=None): - raise NotImplementedError() - - @abc.abstractmethod - def _write_hostname(self, hostname, filename): - raise NotImplementedError() - - @abc.abstractmethod - def _read_system_hostname(self): - raise NotImplementedError() - - def _apply_hostname(self, hostname): - # This really only sets the hostname - # temporarily (until reboot so it should - # not be depended on). Use the write - # hostname functions for 'permanent' adjustments. - LOG.debug( - "Non-persistently setting the system hostname to %s", hostname - ) - try: - subp.subp(["hostname", hostname]) - except subp.ProcessExecutionError: - util.logexc( - LOG, - "Failed to non-persistently adjust the system hostname to %s", - hostname, - ) - - def _select_hostname(self, hostname, fqdn): - # Prefer the short hostname over the long - # fully qualified domain name - if ( - util.get_cfg_option_bool( - self._cfg, "prefer_fqdn_over_hostname", self.prefer_fqdn - ) - and fqdn - ): - return fqdn - if not hostname: - return fqdn - return hostname - - @staticmethod - def expand_osfamily(family_list): - distros = [] - for family in family_list: - if family not in OSFAMILIES: - raise ValueError( - "No distributions found for osfamily {}".format(family) - ) - distros.extend(OSFAMILIES[family]) - return distros - - def update_hostname(self, hostname, fqdn, prev_hostname_fn): - applying_hostname = hostname - - # Determine what the actual written hostname should be - hostname = self._select_hostname(hostname, fqdn) - - # If the previous hostname file exists lets see if we - # can get a hostname from it - if prev_hostname_fn and os.path.exists(prev_hostname_fn): - prev_hostname = self._read_hostname(prev_hostname_fn) - else: - prev_hostname = None - - # Lets get where we should write the system hostname - # and what the system hostname is - (sys_fn, sys_hostname) = self._read_system_hostname() - update_files = [] - - # If there is no previous hostname or it differs - # from what we want, lets update it or create the - # file in the first place - if not prev_hostname or prev_hostname != hostname: - update_files.append(prev_hostname_fn) - - # If the system hostname is different than the previous - # one or the desired one lets update it as well - if (not sys_hostname) or ( - sys_hostname == prev_hostname and sys_hostname != hostname - ): - update_files.append(sys_fn) - - # If something 
else has changed the hostname after we set it - # initially, we should not overwrite those changes (we should - # only be setting the hostname once per instance) - if sys_hostname and prev_hostname and sys_hostname != prev_hostname: - LOG.info( - "%s differs from %s, assuming user maintained hostname.", - prev_hostname_fn, - sys_fn, - ) - return - - # Remove duplicates (incase the previous config filename) - # is the same as the system config filename, don't bother - # doing it twice - update_files = set([f for f in update_files if f]) - LOG.debug( - "Attempting to update hostname to %s in %s files", - hostname, - len(update_files), - ) - - for fn in update_files: - try: - self._write_hostname(hostname, fn) - except IOError: - util.logexc( - LOG, "Failed to write hostname %s to %s", hostname, fn - ) - - # If the system hostname file name was provided set the - # non-fqdn as the transient hostname. - if sys_fn in update_files: - self._apply_hostname(applying_hostname) - - def update_etc_hosts(self, hostname, fqdn): - header = "" - if os.path.exists(self.hosts_fn): - eh = hosts.HostsConf(util.load_text_file(self.hosts_fn)) - else: - eh = hosts.HostsConf("") - header = util.make_header(base="added") - local_ip = self._get_localhost_ip() - prev_info = eh.get_entry(local_ip) - need_change = False - if not prev_info: - eh.add_entry(local_ip, fqdn, hostname) - need_change = True - else: - need_change = True - for entry in prev_info: - entry_fqdn = None - entry_aliases = [] - if len(entry) >= 1: - entry_fqdn = entry[0] - if len(entry) >= 2: - entry_aliases = entry[1:] - if entry_fqdn is not None and entry_fqdn == fqdn: - if hostname in entry_aliases: - # Exists already, leave it be - need_change = False - if need_change: - # Doesn't exist, add that entry in... - new_entries = list(prev_info) - new_entries.append([fqdn, hostname]) - eh.del_entries(local_ip) - for entry in new_entries: - if len(entry) == 1: - eh.add_entry(local_ip, entry[0]) - elif len(entry) >= 2: - eh.add_entry(local_ip, *entry) - if need_change: - contents = StringIO() - if header: - contents.write("%s\n" % (header)) - contents.write("%s\n" % (eh)) - util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644) - - @property - def preferred_ntp_clients(self): - """Allow distro to determine the preferred ntp client list""" - if not self._preferred_ntp_clients: - self._preferred_ntp_clients = list(PREFERRED_NTP_CLIENTS) - - return self._preferred_ntp_clients - - def get_default_user(self): - return self.get_option("default_user") - - def add_user(self, name, **kwargs): - """ - Add a user to the system using standard GNU tools - - This should be overridden on distros where useradd is not desirable or - not available. - """ - # XXX need to make add_user idempotent somehow as we - # still want to add groups or modify SSH keys on pre-existing - # users in the image. - if util.is_user(name): - LOG.info("User %s already exists, skipping.", name) - return - - if "create_groups" in kwargs: - create_groups = kwargs.pop("create_groups") - else: - create_groups = True - - useradd_cmd = ["useradd", name] - log_useradd_cmd = ["useradd", name] - if util.system_is_snappy(): - useradd_cmd.append("--extrausers") - log_useradd_cmd.append("--extrausers") - - # Since we are creating users, we want to carefully validate the - # inputs. If something goes wrong, we can end up with a system - # that nobody can login to. 
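As an aside, a deliberately simplified standalone sketch of the /etc/hosts merge that `update_etc_hosts` above performs (the real method preserves existing aliases and writes a generated header; `ensure_localhost_entry` is a hypothetical name used only for illustration):

```python
# Simplified sketch: make sure `ip` maps to the FQDN with the short
# hostname as an alias; an already-correct entry is left untouched.
def ensure_localhost_entry(lines, fqdn, hostname, ip="127.0.0.1"):
    out, found = [], False
    for line in lines:
        fields = line.split()
        if fields and fields[0] == ip:
            found = True
            if fqdn in fields[1:] and hostname in fields[1:]:
                out.append(line)  # entry already correct
            else:
                out.append(f"{ip} {fqdn} {hostname}")  # rewrite the entry
        else:
            out.append(line)
    if not found:
        out.append(f"{ip} {fqdn} {hostname}")  # no entry yet; add one
    return out

print(ensure_localhost_entry(["127.0.0.1 localhost"], "node1.example.com", "node1"))
# ['127.0.0.1 node1.example.com node1']
```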
- useradd_opts = { - "gecos": "--comment", - "homedir": "--home", - "primary_group": "--gid", - "uid": "--uid", - "groups": "--groups", - "passwd": "--password", - "shell": "--shell", - "expiredate": "--expiredate", - "inactive": "--inactive", - "selinux_user": "--selinux-user", - } - - useradd_flags = { - "no_user_group": "--no-user-group", - "system": "--system", - "no_log_init": "--no-log-init", - } - - redact_opts = ["passwd"] - - # support kwargs having groups=[list] or groups="g1,g2" - groups = kwargs.get("groups") - if groups: - if isinstance(groups, str): - groups = groups.split(",") - - if isinstance(groups, dict): - util.deprecate( - deprecated=f"The user {name} has a 'groups' config value " - "of type dict", - deprecated_version="22.3", - extra_message="Use a comma-delimited string or " - "array instead: group1,group2.", - ) - - # remove any white spaces in group names, most likely - # that came in as a string like: groups: group1, group2 - groups = [g.strip() for g in groups] - - # kwargs.items loop below wants a comma delimited string - # that can go right through to the command. - kwargs["groups"] = ",".join(groups) - - primary_group = kwargs.get("primary_group") - if primary_group: - groups.append(primary_group) - - if create_groups and groups: - for group in groups: - if not util.is_group(group): - self.create_group(group) - LOG.debug("created group '%s' for user '%s'", group, name) - if "uid" in kwargs.keys(): - kwargs["uid"] = str(kwargs["uid"]) - - # Check the values and create the command - for key, val in sorted(kwargs.items()): - if key in useradd_opts and val and isinstance(val, str): - useradd_cmd.extend([useradd_opts[key], val]) - - # Redact certain fields from the logs - if key in redact_opts: - log_useradd_cmd.extend([useradd_opts[key], "REDACTED"]) - else: - log_useradd_cmd.extend([useradd_opts[key], val]) - - elif key in useradd_flags and val: - useradd_cmd.append(useradd_flags[key]) - log_useradd_cmd.append(useradd_flags[key]) - - # Don't create the home directory if directed so or if the user is a - # system user - if kwargs.get("no_create_home") or kwargs.get("system"): - useradd_cmd.append("-M") - log_useradd_cmd.append("-M") - else: - useradd_cmd.append("-m") - log_useradd_cmd.append("-m") - - # Run the command - LOG.debug("Adding user %s", name) - try: - subp.subp(useradd_cmd, logstring=log_useradd_cmd) - except Exception as e: - util.logexc(LOG, "Failed to create user %s", name) - raise e - - def add_snap_user(self, name, **kwargs): - """ - Add a snappy user to the system using snappy tools - """ - - snapuser = kwargs.get("snapuser") - known = kwargs.get("known", False) - create_user_cmd = ["snap", "create-user", "--sudoer", "--json"] - if known: - create_user_cmd.append("--known") - create_user_cmd.append(snapuser) - - # Run the command - LOG.debug("Adding snap user %s", name) - try: - (out, err) = subp.subp( - create_user_cmd, logstring=create_user_cmd, capture=True - ) - LOG.debug("snap create-user returned: %s:%s", out, err) - jobj = util.load_json(out) - username = jobj.get("username", None) - except Exception as e: - util.logexc(LOG, "Failed to create snap user %s", name) - raise e - - return username - - def create_user(self, name, **kwargs): - """ - Creates or partially updates the ``name`` user in the system. - - This defers the actual user creation to ``self.add_user`` or - ``self.add_snap_user``, and most of the keys in ``kwargs`` will be - processed there if and only if the user does not already exist. 
- - Once the existence of the ``name`` user has been ensured, this method - then processes these keys (for both just-created and pre-existing - users): - - * ``plain_text_passwd`` - * ``hashed_passwd`` - * ``lock_passwd`` - * ``doas`` - * ``sudo`` - * ``ssh_authorized_keys`` - * ``ssh_redirect_user`` - """ - - # Add a snap user, if requested - if "snapuser" in kwargs: - return self.add_snap_user(name, **kwargs) - - # Add the user - self.add_user(name, **kwargs) - - # Set password if plain-text password provided and non-empty - if "plain_text_passwd" in kwargs and kwargs["plain_text_passwd"]: - self.set_passwd(name, kwargs["plain_text_passwd"]) - - # Set password if hashed password is provided and non-empty - if "hashed_passwd" in kwargs and kwargs["hashed_passwd"]: - self.set_passwd(name, kwargs["hashed_passwd"], hashed=True) - - # Default locking down the account. 'lock_passwd' defaults to True. - # lock account unless lock_password is False. - if kwargs.get("lock_passwd", True): - self.lock_passwd(name) - - # Configure doas access - if "doas" in kwargs: - if kwargs["doas"]: - self.write_doas_rules(name, kwargs["doas"]) - - # Configure sudo access - if "sudo" in kwargs: - if kwargs["sudo"]: - self.write_sudo_rules(name, kwargs["sudo"]) - elif kwargs["sudo"] is False: - util.deprecate( - deprecated=f"The value of 'false' in user {name}'s " - "'sudo' config", - deprecated_version="22.3", - extra_message="Use 'null' instead.", - ) - - # Import SSH keys - if "ssh_authorized_keys" in kwargs: - # Try to handle this in a smart manner. - keys = kwargs["ssh_authorized_keys"] - if isinstance(keys, str): - keys = [keys] - elif isinstance(keys, dict): - keys = list(keys.values()) - if keys is not None: - if not isinstance(keys, (tuple, list, set)): - LOG.warning( - "Invalid type '%s' detected for" - " 'ssh_authorized_keys', expected list," - " string, dict, or set.", - type(keys), - ) - keys = [] - else: - keys = set(keys) or [] - ssh_util.setup_user_keys(set(keys), name) - if "ssh_redirect_user" in kwargs: - cloud_keys = kwargs.get("cloud_public_ssh_keys", []) - if not cloud_keys: - LOG.warning( - "Unable to disable SSH logins for %s given" - " ssh_redirect_user: %s. No cloud public-keys present.", - name, - kwargs["ssh_redirect_user"], - ) - else: - redirect_user = kwargs["ssh_redirect_user"] - disable_option = ssh_util.DISABLE_USER_OPTS - disable_option = disable_option.replace("$USER", redirect_user) - disable_option = disable_option.replace("$DISABLE_USER", name) - ssh_util.setup_user_keys( - set(cloud_keys), name, options=disable_option - ) - return True - - def lock_passwd(self, name): - """ - Lock the password of a user, i.e., disable password logins - """ - # passwd must use short '-l' due to SLES11 lacking long form '--lock' - lock_tools = (["passwd", "-l", name], ["usermod", "--lock", name]) - try: - cmd = next(tool for tool in lock_tools if subp.which(tool[0])) - except StopIteration as e: - raise RuntimeError( - "Unable to lock user account '%s'. No tools available. " - " Tried: %s." 
% (name, [c[0] for c in lock_tools]) - ) from e - try: - subp.subp(cmd) - except Exception as e: - util.logexc(LOG, "Failed to disable password for user %s", name) - raise e - - def expire_passwd(self, user): - try: - subp.subp(["passwd", "--expire", user]) - except Exception as e: - util.logexc(LOG, "Failed to set 'expire' for %s", user) - raise e - - def set_passwd(self, user, passwd, hashed=False): - pass_string = "%s:%s" % (user, passwd) - cmd = ["chpasswd"] - - if hashed: - # Need to use the short option name '-e' instead of '--encrypted' - # (which would be more descriptive) since SLES 11 doesn't know - # about long names. - cmd.append("-e") - - try: - subp.subp( - cmd, data=pass_string, logstring="chpasswd for %s" % user - ) - except Exception as e: - util.logexc(LOG, "Failed to set password for %s", user) - raise e - - return True - - def chpasswd(self, plist_in: list, hashed: bool): - payload = ( - "\n".join( - (":".join([name, password]) for name, password in plist_in) - ) - + "\n" - ) - cmd = ["chpasswd"] + (["-e"] if hashed else []) - subp.subp(cmd, data=payload) - - def is_doas_rule_valid(self, user, rule): - rule_pattern = ( - r"^(?:permit|deny)" - r"(?:\s+(?:nolog|nopass|persist|keepenv|setenv \{[^}]+\})+)*" - r"\s+([a-zA-Z0-9_]+)+" - r"(?:\s+as\s+[a-zA-Z0-9_]+)*" - r"(?:\s+cmd\s+[^\s]+(?:\s+args\s+[^\s]+(?:\s*[^\s]+)*)*)*" - r"\s*$" - ) - - LOG.debug( - "Checking if user '%s' is referenced in doas rule %r", user, rule - ) - - valid_match = re.search(rule_pattern, rule) - if valid_match: - LOG.debug( - "User '%s' referenced in doas rule", valid_match.group(1) - ) - if valid_match.group(1) == user: - LOG.debug("Correct user is referenced in doas rule") - return True - else: - LOG.debug( - "Incorrect user '%s' is referenced in doas rule", - valid_match.group(1), - ) - return False - else: - LOG.debug("doas rule does not appear to reference any user") - return False - - def write_doas_rules(self, user, rules, doas_file=None): - if not doas_file: - doas_file = self.doas_fn - - for rule in rules: - if not self.is_doas_rule_valid(user, rule): - msg = ( - "Invalid doas rule %r for user '%s'," - " not writing any doas rules for user!" 
% (rule, user) - ) - LOG.error(msg) - return - - lines = ["", "# cloud-init User rules for %s" % user] - for rule in rules: - lines.append("%s" % rule) - content = "\n".join(lines) - content += "\n" # trailing newline - - if not os.path.exists(doas_file): - contents = [util.make_header(), content] - try: - util.write_file(doas_file, "\n".join(contents), mode=0o440) - except IOError as e: - util.logexc(LOG, "Failed to write doas file %s", doas_file) - raise e - else: - if content not in util.load_text_file(doas_file): - try: - util.append_file(doas_file, content) - except IOError as e: - util.logexc( - LOG, "Failed to append to doas file %s", doas_file - ) - raise e - - def ensure_sudo_dir(self, path, sudo_base="/etc/sudoers"): - # Ensure the dir is included and that - # it actually exists as a directory - sudoers_contents = "" - base_exists = False - if os.path.exists(sudo_base): - sudoers_contents = util.load_text_file(sudo_base) - base_exists = True - found_include = False - for line in sudoers_contents.splitlines(): - line = line.strip() - include_match = re.search(r"^[#|@]includedir\s+(.*)$", line) - if not include_match: - continue - included_dir = include_match.group(1).strip() - if not included_dir: - continue - included_dir = os.path.abspath(included_dir) - if included_dir == path: - found_include = True - break - if not found_include: - try: - if not base_exists: - lines = [ - "# See sudoers(5) for more information" - ' on "#include" directives:', - "", - util.make_header(base="added"), - "#includedir %s" % (path), - "", - ] - sudoers_contents = "\n".join(lines) - util.write_file(sudo_base, sudoers_contents, 0o440) - else: - lines = [ - "", - util.make_header(base="added"), - "#includedir %s" % (path), - "", - ] - sudoers_contents = "\n".join(lines) - util.append_file(sudo_base, sudoers_contents) - LOG.debug("Added '#includedir %s' to %s", path, sudo_base) - except IOError as e: - util.logexc(LOG, "Failed to write %s", sudo_base) - raise e - util.ensure_dir(path, 0o750) - - def write_sudo_rules(self, user, rules, sudo_file=None): - if not sudo_file: - sudo_file = self.ci_sudoers_fn - - lines = [ - "", - "# User rules for %s" % user, - ] - if isinstance(rules, (list, tuple)): - for rule in rules: - lines.append("%s %s" % (user, rule)) - elif isinstance(rules, str): - lines.append("%s %s" % (user, rules)) - else: - msg = "Can not create sudoers rule addition with type %r" - raise TypeError(msg % (type_utils.obj_name(rules))) - content = "\n".join(lines) - content += "\n" # trailing newline - - self.ensure_sudo_dir(os.path.dirname(sudo_file)) - - if not os.path.exists(sudo_file): - contents = [ - util.make_header(), - content, - ] - try: - util.write_file(sudo_file, "\n".join(contents), 0o440) - except IOError as e: - util.logexc(LOG, "Failed to write sudoers file %s", sudo_file) - raise e - else: - if content not in util.load_text_file(sudo_file): - try: - util.append_file(sudo_file, content) - except IOError as e: - util.logexc( - LOG, "Failed to append to sudoers file %s", sudo_file - ) - raise e - - def create_group(self, name, members=None): - group_add_cmd = ["groupadd", name] - if util.system_is_snappy(): - group_add_cmd.append("--extrausers") - if not members: - members = [] - - # Check if group exists, and then add it doesn't - if util.is_group(name): - LOG.warning("Skipping creation of existing group '%s'", name) - else: - try: - subp.subp(group_add_cmd) - LOG.info("Created new group %s", name) - except Exception: - util.logexc(LOG, "Failed to create group %s", name) - - # 
Add members to the group, if so defined - if len(members) > 0: - for member in members: - if not util.is_user(member): - LOG.warning( - "Unable to add group member '%s' to group '%s'" - "; user does not exist.", - member, - name, - ) - continue - - subp.subp(["usermod", "-a", "-G", name, member]) - LOG.info("Added user '%s' to group '%s'", member, name) - - def shutdown_command(self, *, mode, delay, message): - # called from cc_power_state_change.load_power_state - command = ["shutdown", self.shutdown_options_map[mode]] - try: - if delay != "now": - delay = "+%d" % int(delay) - except ValueError as e: - raise TypeError( - "power_state[delay] must be 'now' or '+m' (minutes)." - " found '%s'." % (delay,) - ) from e - args = command + [delay] - if message: - args.append(message) - return args - - @classmethod - def reload_init(cls, rcs=None): - """ - Reload systemd startup daemon. - May raise ProcessExecutionError - """ - init_cmd = cls.init_cmd - if cls.uses_systemd() or "systemctl" in init_cmd: - cmd = [init_cmd, "daemon-reload"] - return subp.subp(cmd, capture=True, rcs=rcs) - - @classmethod - def manage_service( - cls, action: str, service: str, *extra_args: str, rcs=None - ): - """ - Perform the requested action on a service. This handles the common - 'systemctl' and 'service' cases and may be overridden in subclasses - as necessary. - May raise ProcessExecutionError - """ - init_cmd = cls.init_cmd - if cls.uses_systemd() or "systemctl" in init_cmd: - init_cmd = ["systemctl"] - cmds = { - "stop": ["stop", service], - "start": ["start", service], - "enable": ["enable", service], - "disable": ["disable", service], - "restart": ["restart", service], - "reload": ["reload-or-restart", service], - "try-reload": ["reload-or-try-restart", service], - "status": ["status", service], - } - else: - cmds = { - "stop": [service, "stop"], - "start": [service, "start"], - "enable": [service, "start"], - "disable": [service, "stop"], - "restart": [service, "restart"], - "reload": [service, "restart"], - "try-reload": [service, "restart"], - "status": [service, "status"], - } - cmd = list(init_cmd) + list(cmds[action]) - return subp.subp(cmd, capture=True, rcs=rcs) - - def set_keymap(self, layout: str, model: str, variant: str, options: str): - if self.uses_systemd(): - subp.subp( - [ - "localectl", - "set-x11-keymap", - layout, - model, - variant, - options, - ] - ) - else: - raise NotImplementedError() - - def get_tmp_exec_path(self) -> str: - tmp_dir = temp_utils.get_tmp_ancestor(needs_exe=True) - if not util.has_mount_opt(tmp_dir, "noexec"): - return tmp_dir - return os.path.join(self.usr_lib_exec, "cloud-init", "clouddir") - - def do_as(self, command: list, user: str, cwd: str = "", **kwargs): - """ - Perform a command as the requested user. Behaves like subp() - - Note: We pass `PATH` to the user env by using `env`. 
This could be - probably simplified after bionic EOL by using - `su --whitelist-environment=PATH ...`, more info on: - https://lore.kernel.org/all/20180815110445.4qefy5zx5gfgbqly@ws.net.home/T/ - """ - directory = f"cd {cwd} && " if cwd else "" - return subp.subp( - [ - "su", - "-", - user, - "-c", - directory + "env PATH=$PATH " + " ".join(command), - ], - **kwargs, - ) - - @staticmethod - def build_dhclient_cmd( - path: str, - lease_file: str, - pid_file: str, - interface: str, - config_file: str, - ) -> list: - return [ - path, - "-1", - "-v", - "-lf", - lease_file, - "-pf", - pid_file, - "-sf", - "/bin/true", - ] + (["-cf", config_file, interface] if config_file else [interface]) - - @property - def fallback_interface(self): - """Determine the network interface used during local network config.""" - if self._fallback_interface is None: - self._fallback_interface = net.find_fallback_nic() - if not self._fallback_interface: - LOG.warning( - "Did not find a fallback interface on distro: %s.", - self.name, - ) - return self._fallback_interface - - @fallback_interface.setter - def fallback_interface(self, value): - self._fallback_interface = value - - @staticmethod - def get_proc_ppid(pid: int) -> Optional[int]: - """Return the parent pid of a process by parsing /proc/$pid/stat""" - match = Distro._get_proc_stat_by_index(pid, 4) - if match is not None: - with suppress(ValueError): - return int(match) - LOG.warning("/proc/%s/stat has an invalid ppid [%s]", pid, match) - return None - - @staticmethod - def get_proc_pgid(pid: int) -> Optional[int]: - """Return the parent pid of a process by parsing /proc/$pid/stat""" - match = Distro._get_proc_stat_by_index(pid, 5) - if match is not None: - with suppress(ValueError): - return int(match) - LOG.warning("/proc/%s/stat has an invalid pgid [%s]", pid, match) - return None - - @staticmethod - def _get_proc_stat_by_index(pid: int, field: int) -> Optional[int]: - """ - parse /proc/$pid/stat for a specific field as numbered in man:proc(5) - - param pid: integer to query /proc/$pid/stat for - param field: field number within /proc/$pid/stat to return - """ - try: - content: str = util.load_text_file( - "/proc/%s/stat" % pid, quiet=True - ).strip() # pyright: ignore - match = re.search( - r"^(\d+) (\(.+\)) ([RSDZTtWXxKPI]) (\d+) (\d+)", content - ) - if not match: - LOG.warning( - "/proc/%s/stat has an invalid contents [%s]", pid, content - ) - return None - return int(match.group(field)) - except IOError as e: - LOG.warning("Failed to load /proc/%s/stat. %s", pid, e) - except IndexError: - LOG.warning( - "Unable to match field %s of process pid=%s (%s) (%s)", - field, - pid, - content, # pyright: ignore - match, # pyright: ignore - ) - return None - - @staticmethod - def eject_media(device: str) -> None: - cmd = None - if subp.which("eject"): - cmd = ["eject", device] - elif subp.which("/lib/udev/cdrom_id"): - cmd = ["/lib/udev/cdrom_id", "--eject-media", device] - else: - raise subp.ProcessExecutionError( - cmd="eject_media_cmd", - description="eject command not found", - reason="neither eject nor /lib/udev/cdrom_id are found", - ) - subp.subp(cmd) - - -def _apply_hostname_transformations_to_url(url: str, transformations: list): - """ - Apply transformations to a URL's hostname, return transformed URL. - - This is a separate function because unwrapping and rewrapping only the - hostname portion of a URL is complex. - - :param url: - The URL to operate on. 
- :param transformations: - A list of ``(str) -> Optional[str]`` functions, which will be applied - in order to the hostname portion of the URL. If any function - (regardless of ordering) returns None, ``url`` will be returned without - any modification. - - :return: - A string whose value is ``url`` with the hostname ``transformations`` - applied, or ``None`` if ``url`` is unparsable. - """ - try: - parts = urllib.parse.urlsplit(url) - except ValueError: - # If we can't even parse the URL, we shouldn't use it for anything - return None - new_hostname = parts.hostname - if new_hostname is None: - # The URL given doesn't have a hostname component, so (a) we can't - # transform it, and (b) it won't work as a mirror; return None. - return None - - for transformation in transformations: - new_hostname = transformation(new_hostname) - if new_hostname is None: - # If a transformation returns None, that indicates we should abort - # processing and return `url` unmodified - return url - - new_netloc = new_hostname - if parts.port is not None: - new_netloc = "{}:{}".format(new_netloc, parts.port) - return urllib.parse.urlunsplit(parts._replace(netloc=new_netloc)) - - -def _sanitize_mirror_url(url: str): - """ - Given a mirror URL, replace or remove any invalid URI characters. - - This performs the following actions on the URL's hostname: - * Checks if it is an IP address, returning the URL immediately if it is - * Converts it to its IDN form (see below for details) - * Replaces any non-Letters/Digits/Hyphen (LDH) characters in it with - hyphens - * Removes any leading/trailing hyphens from each domain name label - - Before we replace any invalid domain name characters, we first need to - ensure that any valid non-ASCII characters in the hostname will not be - replaced, by ensuring the hostname is in its Internationalized domain name - (IDN) representation (see RFC 5890). This conversion has to be applied to - the whole hostname (rather than just the substitution variables), because - the Punycode algorithm used by IDNA transcodes each part of the hostname as - a whole string (rather than encoding individual characters). It cannot be - applied to the whole URL, because (a) the Punycode algorithm expects to - operate on domain names so doesn't output a valid URL, and (b) non-ASCII - characters in non-hostname parts of the URL aren't encoded via Punycode. - - To put this in RFC 5890's terminology: before we remove or replace any - characters from our domain name (which we do to ensure that each label is a - valid LDH Label), we first ensure each label is in its A-label form. - - (Note that Python's builtin idna encoding is actually IDNA2003, not - IDNA2008. This changes the specifics of how some characters are encoded to - ASCII, but doesn't affect the logic here.) - - :param url: - The URL to operate on. - - :return: - A sanitized version of the URL, which will have been IDNA encoded if - necessary, or ``None`` if the generated string is not a parseable URL. - """ - # Acceptable characters are LDH characters, plus "." to separate each label - acceptable_chars = LDH_ASCII_CHARS + "." 
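Before the transformation list that follows, a quick standalone illustration of the IDNA step just described (not cloud-init code): Python's built-in `idna` codec, which implements IDNA2003 as noted above, Punycode-encodes each label of the hostname so that non-ASCII labels become valid LDH labels.

```python
# Each dot-separated label is transcoded independently by the codec.
hostname = "münchen.example.com"
print(hostname.encode("idna").decode("ascii"))
# xn--mnchen-3ya.example.com
```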
- transformations = [ - # This is an IP address, not a hostname, so no need to apply the - # transformations - lambda hostname: None if net.is_ip_address(hostname) else hostname, - # Encode with IDNA to get the correct characters (as `bytes`), then - # decode with ASCII so we return a `str` - lambda hostname: hostname.encode("idna").decode("ascii"), - # Replace any unacceptable characters with "-" - lambda hostname: "".join( - c if c in acceptable_chars else "-" for c in hostname - ), - # Drop leading/trailing hyphens from each part of the hostname - lambda hostname: ".".join( - part.strip("-") for part in hostname.split(".") - ), - ] - - return _apply_hostname_transformations_to_url(url, transformations) - - -def _get_package_mirror_info( - mirror_info, data_source=None, mirror_filter=util.search_for_mirror -): - # given a arch specific 'mirror_info' entry (from package_mirrors) - # search through the 'search' entries, and fallback appropriately - # return a dict with only {name: mirror} entries. - if not mirror_info: - mirror_info = {} - - subst = {} - if data_source and data_source.availability_zone: - subst["availability_zone"] = data_source.availability_zone - - # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) - # the region is us-east-1. so region = az[0:-1] - if _EC2_AZ_RE.match(data_source.availability_zone): - ec2_region = data_source.availability_zone[0:-1] - - if ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES: - subst["ec2_region"] = "%s" % ec2_region - elif data_source.platform_type == "ec2": - subst["ec2_region"] = "%s" % ec2_region - - if data_source and data_source.region: - subst["region"] = data_source.region - - results = {} - for name, mirror in mirror_info.get("failsafe", {}).items(): - results[name] = mirror - - for name, searchlist in mirror_info.get("search", {}).items(): - mirrors = [] - for tmpl in searchlist: - try: - mirror = tmpl % subst - except KeyError: - continue - - mirror = _sanitize_mirror_url(mirror) - if mirror is not None: - mirrors.append(mirror) - - found = mirror_filter(mirrors) - if found: - results[name] = found - - LOG.debug("filtered distro mirror info: %s", results) - - return results - - -def _get_arch_package_mirror_info(package_mirrors, arch): - # pull out the specific arch from a 'package_mirrors' config option - default = None - for item in package_mirrors: - arches = item.get("arches") - if arch in arches: - return item - if "default" in arches: - default = item - return default - - -def fetch(name: str) -> Type[Distro]: - locs, looked_locs = importer.find_module(name, ["", __name__], ["Distro"]) - if not locs: - raise ImportError( - "No distribution found for distro %s (searched %s)" - % (name, looked_locs) - ) - mod = importer.import_module(locs[0]) - cls = getattr(mod, "Distro") - return cls - - -def set_etc_timezone( - tz, tz_file=None, tz_conf="/etc/timezone", tz_local="/etc/localtime" -): - util.write_file(tz_conf, str(tz).rstrip() + "\n") - # This ensures that the correct tz will be used for the system - if tz_local and tz_file: - # use a symlink if there exists a symlink or tz_local is not present - islink = os.path.islink(tz_local) - if islink or not os.path.exists(tz_local): - if islink: - util.del_file(tz_local) - os.symlink(tz_file, tz_local) - else: - util.copy(tz_file, tz_local) - return - - -def uses_systemd(): - try: - res = os.lstat("/run/systemd/system") - return stat.S_ISDIR(res.st_mode) - except Exception: - return False diff --git 
a/.pc/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321/cloudinit/sources/DataSourceEc2.py b/.pc/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321/cloudinit/sources/DataSourceEc2.py deleted file mode 100644 index 70d5c1e43..000000000 --- a/.pc/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321/cloudinit/sources/DataSourceEc2.py +++ /dev/null @@ -1,1214 +0,0 @@ -# Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Scott Moser -# Author: Juerg Hafliger -# Author: Joshua Harlow -# -# This file is part of cloud-init. See LICENSE file for license information. - -import copy -import logging -import os -import time -from typing import Dict, List - -from cloudinit import dmi, net, sources -from cloudinit import url_helper as uhelp -from cloudinit import util, warnings -from cloudinit.distros import Distro -from cloudinit.event import EventScope, EventType -from cloudinit.net import activators -from cloudinit.net.dhcp import NoDHCPLeaseError -from cloudinit.net.ephemeral import EphemeralIPNetwork -from cloudinit.sources.helpers import ec2 - -LOG = logging.getLogger(__name__) - -SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) - -STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") -STRICT_ID_DEFAULT = "warn" - - -class CloudNames: - ALIYUN = "aliyun" - AWS = "aws" - BRIGHTBOX = "brightbox" - ZSTACK = "zstack" - E24CLOUD = "e24cloud" - OUTSCALE = "outscale" - # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', - # then an attempt at the Ec2 Metadata service will be made. - UNKNOWN = "unknown" - # NO_EC2_METADATA indicates this platform does not have a Ec2 metadata - # service available. No attempt at the Ec2 Metadata service will be made. - NO_EC2_METADATA = "no-ec2-metadata" - - -# Drop when LP: #1988157 tag handling is fixed -def skip_404_tag_errors(exception): - return exception.code == 404 and "meta-data/tags/" in exception.url - - -# Cloud platforms that support IMDSv2 style metadata server -IDMSV2_SUPPORTED_CLOUD_PLATFORMS = [CloudNames.AWS, CloudNames.ALIYUN] - -# Only trigger hook-hotplug on NICs with Ec2 drivers. Avoid triggering -# it on docker virtual NICs and the like. LP: #1946003 -_EXTRA_HOTPLUG_UDEV_RULES = """ -ENV{ID_NET_DRIVER}=="vif|ena|ixgbevf", GOTO="cloudinit_hook" -GOTO="cloudinit_end" -""" - - -class DataSourceEc2(sources.DataSource): - dsname = "Ec2" - # Default metadata urls that will be used if none are provided - # They will be checked for 'resolveability' and some of the - # following may be discarded if they do not resolve - metadata_urls = [ - "http://169.254.169.254", - "http://[fd00:ec2::254]", - "http://instance-data.:8773", - ] - - # The minimum supported metadata_version from the ec2 metadata apis - min_metadata_version = "2009-04-04" - - # Priority ordered list of additional metadata versions which will be tried - # for extended metadata content. IPv6 support comes in 2016-09-02. - # Tags support comes in 2021-03-23. - extended_metadata_versions: List[str] = [ - "2021-03-23", - "2018-09-24", - "2016-09-02", - ] - - # Setup read_url parameters per get_url_params. - url_max_wait = 120 - url_timeout = 50 - - _api_token = None # API token for accessing the metadata service - _network_config = sources.UNSET # Used to cache calculated network cfg v1 - - # Whether we want to get network configuration from the metadata service. 
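An aside on the caching convention above: `_network_config` is initialized to `sources.UNSET` rather than `None` because `None` is itself a legitimate computed result. A generic sketch of that sentinel pattern (names here are illustrative, not cloud-init's):

```python
UNSET = object()  # stand-in sentinel; cloud-init defines its own in cloudinit.sources

class CachedConfig:
    """Distinguish 'never computed' from 'computed as None'."""

    def __init__(self):
        self._network_config = UNSET

    @property
    def network_config(self):
        if self._network_config is not UNSET:
            return self._network_config  # may be a legitimately cached None
        self._network_config = self._compute()
        return self._network_config

    def _compute(self):
        return None  # placeholder for the real derivation
```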
- perform_dhcp_setup = False - - supported_update_events = { - EventScope.NETWORK: { - EventType.BOOT_NEW_INSTANCE, - EventType.BOOT, - EventType.BOOT_LEGACY, - EventType.HOTPLUG, - } - } - - extra_hotplug_udev_rules = _EXTRA_HOTPLUG_UDEV_RULES - - def __init__(self, sys_cfg, distro, paths): - super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) - self.metadata_address = None - - def _unpickle(self, ci_pkl_version: int) -> None: - super()._unpickle(ci_pkl_version) - self.extra_hotplug_udev_rules = _EXTRA_HOTPLUG_UDEV_RULES - - def _get_cloud_name(self): - """Return the cloud name as identified during _get_data.""" - return identify_platform() - - def _get_data(self): - strict_mode, _sleep = read_strict_mode( - util.get_cfg_by_path( - self.sys_cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT - ), - ("warn", None), - ) - - LOG.debug( - "strict_mode: %s, cloud_name=%s cloud_platform=%s", - strict_mode, - self.cloud_name, - self.platform, - ) - if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN: - return False - elif self.cloud_name == CloudNames.NO_EC2_METADATA: - return False - - if self.perform_dhcp_setup: # Setup networking in init-local stage. - if util.is_FreeBSD(): - LOG.debug("FreeBSD doesn't support running dhclient with -sf") - return False - try: - with EphemeralIPNetwork( - self.distro, - self.distro.fallback_interface, - ipv4=True, - ipv6=True, - ) as netw: - state_msg = f" {netw.state_msg}" if netw.state_msg else "" - self._crawled_metadata = util.log_time( - logfunc=LOG.debug, - msg=f"Crawl of metadata service{state_msg}", - func=self.crawl_metadata, - ) - - except NoDHCPLeaseError: - return False - else: - self._crawled_metadata = util.log_time( - logfunc=LOG.debug, - msg="Crawl of metadata service", - func=self.crawl_metadata, - ) - if not self._crawled_metadata: - return False - self.metadata = self._crawled_metadata.get("meta-data", None) - self.userdata_raw = self._crawled_metadata.get("user-data", None) - self.identity = ( - self._crawled_metadata.get("dynamic", {}) - .get("instance-identity", {}) - .get("document", {}) - ) - return True - - def is_classic_instance(self): - """Report if this instance type is Ec2 Classic (non-vpc).""" - if not self.metadata: - # Can return False on inconclusive as we are also called in - # network_config where metadata will be present. - # Secondary call site is in packaging postinst script. 
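The loop just below implements the Classic-vs-VPC test; condensed into a standalone illustration (ignoring the inconclusive no-metadata case handled above, and with a hypothetical function name):

```python
# An instance is EC2 Classic exactly when no interface in the
# metadata reports a vpc-id.
def is_classic(metadata: dict) -> bool:
    macs = metadata.get("network", {}).get("interfaces", {}).get("macs", {})
    return not any("vpc-id" in mac_data for mac_data in macs.values())
```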
- return False - ifaces_md = self.metadata.get("network", {}).get("interfaces", {}) - for _mac, mac_data in ifaces_md.get("macs", {}).items(): - if "vpc-id" in mac_data: - return False - return True - - @property - def launch_index(self): - if not self.metadata: - return None - return self.metadata.get("ami-launch-index") - - @property - def platform(self): - # Handle upgrade path of pickled ds - if not hasattr(self, "_platform_type"): - self._platform_type = DataSourceEc2.dsname.lower() - if not self._platform_type: - self._platform_type = DataSourceEc2.dsname.lower() - return self._platform_type - - # IMDSv2 related parameters from the ec2 metadata api document - @property - def api_token_route(self): - return "latest/api/token" - - @property - def imdsv2_token_ttl_seconds(self): - return "21600" - - @property - def imdsv2_token_put_header(self): - return "X-aws-ec2-metadata-token" - - @property - def imdsv2_token_req_header(self): - return self.imdsv2_token_put_header + "-ttl-seconds" - - @property - def imdsv2_token_redact(self): - return [self.imdsv2_token_put_header, self.imdsv2_token_req_header] - - def get_metadata_api_version(self): - """Get the best supported api version from the metadata service. - - Loop through all extended support metadata versions in order and - return the most-fully featured metadata api version discovered. - - If extended_metadata_versions aren't present, return the datasource's - min_metadata_version. - """ - # Assumes metadata service is already up - url_tmpl = "{0}/{1}/meta-data/instance-id" - headers = self._get_headers() - for api_ver in self.extended_metadata_versions: - url = url_tmpl.format(self.metadata_address, api_ver) - try: - resp = uhelp.readurl( - url=url, - headers=headers, - headers_redact=self.imdsv2_token_redact, - ) - except uhelp.UrlError as e: - LOG.debug("url %s raised exception %s", url, e) - else: - if resp.code == 200: - LOG.debug("Found preferred metadata version %s", api_ver) - return api_ver - elif resp.code == 404: - msg = "Metadata api version %s not present. Headers: %s" - LOG.debug(msg, api_ver, resp.headers) - return self.min_metadata_version - - def get_instance_id(self): - if self.cloud_name == CloudNames.AWS: - # Prefer the ID from the instance identity document, but fall back - if not getattr(self, "identity", None): - # If re-using cached datasource, it's get_data run didn't - # setup self.identity. So we need to do that now. - api_version = self.get_metadata_api_version() - self.identity = ec2.get_instance_identity( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=self.imdsv2_token_redact, - exception_cb=self._refresh_stale_aws_token_cb, - ).get("document", {}) - return self.identity.get( - "instanceId", self.metadata["instance-id"] - ) - else: - return self.metadata["instance-id"] - - def _maybe_fetch_api_token(self, mdurls): - """Get an API token for EC2 Instance Metadata Service. - - On EC2. IMDS will always answer an API token, unless - the instance owner has disabled the IMDS HTTP endpoint or - the network topology conflicts with the configured hop-limit. 
- """ - if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - return - - urls = [] - url2base = {} - url_path = self.api_token_route - request_method = "PUT" - for url in mdurls: - cur = "{0}/{1}".format(url, url_path) - urls.append(cur) - url2base[cur] = url - - # use the self._imds_exception_cb to check for Read errors - LOG.debug("Fetching Ec2 IMDSv2 API Token") - - response = None - url = None - url_params = self.get_url_params() - try: - url, response = uhelp.wait_for_url( - urls=urls, - max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, - status_cb=LOG.warning, - headers_cb=self._get_headers, - exception_cb=self._imds_exception_cb, - request_method=request_method, - headers_redact=self.imdsv2_token_redact, - connect_synchronously=False, - ) - except uhelp.UrlError: - # We use the raised exception to interrupt the retry loop. - # Nothing else to do here. - pass - - if url and response: - self._api_token = response - return url2base[url] - - # If we get here, then wait_for_url timed out, waiting for IMDS - # or the IMDS HTTP endpoint is disabled - return None - - def wait_for_metadata_service(self): - mcfg = self.ds_cfg - - url_params = self.get_url_params() - if url_params.max_wait_seconds <= 0: - return False - - # Remove addresses from the list that wont resolve. - mdurls = mcfg.get("metadata_urls", self.metadata_urls) - filtered = [x for x in mdurls if util.is_resolvable_url(x)] - - if set(filtered) != set(mdurls): - LOG.debug( - "Removed the following from metadata urls: %s", - list((set(mdurls) - set(filtered))), - ) - - if len(filtered): - mdurls = filtered - else: - LOG.warning("Empty metadata url list! using default list") - mdurls = self.metadata_urls - - # try the api token path first - metadata_address = self._maybe_fetch_api_token(mdurls) - # When running on EC2, we always access IMDS with an API token. - # If we could not get an API token, then we assume the IMDS - # endpoint was disabled and we move on without a data source. 
- # Fallback to IMDSv1 if not running on EC2 - if ( - not metadata_address - and self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS - ): - # if we can't get a token, use instance-id path - urls = [] - url2base = {} - url_path = "{ver}/meta-data/instance-id".format( - ver=self.min_metadata_version - ) - request_method = "GET" - for url in mdurls: - cur = "{0}/{1}".format(url, url_path) - urls.append(cur) - url2base[cur] = url - - start_time = time.time() - url, _ = uhelp.wait_for_url( - urls=urls, - max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, - status_cb=LOG.warning, - headers_redact=self.imdsv2_token_redact, - headers_cb=self._get_headers, - request_method=request_method, - ) - - if url: - metadata_address = url2base[url] - - if metadata_address: - self.metadata_address = metadata_address - LOG.debug("Using metadata source: '%s'", self.metadata_address) - elif self.cloud_name in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - LOG.warning("IMDS's HTTP endpoint is probably disabled") - else: - LOG.critical( - "Giving up on md from %s after %s seconds", - urls, - int(time.time() - start_time), - ) - - return bool(metadata_address) - - def device_name_to_device(self, name): - # Consult metadata service, that has - # ephemeral0: sdb - # and return 'sdb' for input 'ephemeral0' - if "block-device-mapping" not in self.metadata: - return None - - # Example: - # 'block-device-mapping': - # {'ami': '/dev/sda1', - # 'ephemeral0': '/dev/sdb', - # 'root': '/dev/sda1'} - found = None - bdm = self.metadata["block-device-mapping"] - if not isinstance(bdm, dict): - LOG.debug("block-device-mapping not a dictionary: '%s'", bdm) - return None - - for entname, device in bdm.items(): - if entname == name: - found = device - break - # LP: #513842 mapping in Euca has 'ephemeral' not 'ephemeral0' - if entname == "ephemeral" and name == "ephemeral0": - found = device - - if found is None: - LOG.debug("Unable to convert %s to a device", name) - return None - - ofound = found - if not found.startswith("/"): - found = "/dev/%s" % found - - if os.path.exists(found): - return found - - remapped = self._remap_device(os.path.basename(found)) - if remapped: - LOG.debug("Remapped device name %s => %s", found, remapped) - return remapped - - # On t1.micro, ephemeral0 will appear in block-device-mapping from - # metadata, but it will not exist on disk (and never will) - # at this point, we've verified that the path did not exist - # in the special case of 'ephemeral0' return None to avoid bogus - # fstab entry (LP: #744019) - if name == "ephemeral0": - return None - return ofound - - @property - def availability_zone(self): - try: - if self.cloud_name == CloudNames.AWS: - return self.identity.get( - "availabilityZone", - self.metadata["placement"]["availability-zone"], - ) - else: - return self.metadata["placement"]["availability-zone"] - except KeyError: - return None - - @property - def region(self): - if self.cloud_name == CloudNames.AWS: - region = self.identity.get("region") - # Fallback to trimming the availability zone if region is missing - if self.availability_zone and not region: - region = self.availability_zone[:-1] - return region - else: - az = self.availability_zone - if az is not None: - return az[:-1] - return None - - def activate(self, cfg, is_new_instance): - if not is_new_instance: - return - if self.cloud_name == CloudNames.UNKNOWN: - warn_if_necessary( - util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT), - cfg, - ) - - @property - def network_config(self): - """Return 
a network config dict for rendering ENI or netplan files.""" - if self._network_config != sources.UNSET: - return self._network_config - - if self.metadata is None: - # this would happen if get_data hadn't been called. leave as UNSET - LOG.warning( - "Unexpected call to network_config when metadata is None." - ) - return None - - result = None - no_network_metadata_on_aws = bool( - "network" not in self.metadata - and self.cloud_name == CloudNames.AWS - ) - if no_network_metadata_on_aws: - LOG.debug( - "Metadata 'network' not present:" - " Refreshing stale metadata from prior to upgrade." - ) - util.log_time( - logfunc=LOG.debug, - msg="Re-crawl of metadata service", - func=self.get_data, - ) - - iface = self.distro.fallback_interface - net_md = self.metadata.get("network") - if isinstance(net_md, dict): - # SRU_BLOCKER: xenial, bionic and eoan should default - # apply_full_imds_network_config to False to retain original - # behavior on those releases. - result = convert_ec2_metadata_network_config( - net_md, - self.distro, - fallback_nic=iface, - full_network_config=util.get_cfg_option_bool( - self.ds_cfg, "apply_full_imds_network_config", True - ), - ) - - # Non-VPC (aka Classic) Ec2 instances need to rewrite the - # network config file every boot due to MAC address change. - if self.is_classic_instance(): - self.default_update_events = copy.deepcopy( - self.default_update_events - ) - self.default_update_events[EventScope.NETWORK].add( - EventType.BOOT - ) - self.default_update_events[EventScope.NETWORK].add( - EventType.BOOT_LEGACY - ) - else: - LOG.warning("Metadata 'network' key not valid: %s.", net_md) - self._network_config = result - - return self._network_config - - def crawl_metadata(self): - """Crawl metadata service when available. - - @returns: Dictionary of crawled metadata content containing the keys: - meta-data, user-data and dynamic. - """ - if not self.wait_for_metadata_service(): - return {} - api_version = self.get_metadata_api_version() - redact = self.imdsv2_token_redact - crawled_metadata = {} - if self.cloud_name in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - exc_cb = self._refresh_stale_aws_token_cb - exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb - skip_cb = None - elif self.cloud_name == CloudNames.OUTSCALE: - exc_cb = exc_cb_ud = None - skip_cb = skip_404_tag_errors - else: - exc_cb = exc_cb_ud = skip_cb = None - try: - raw_userdata = ec2.get_instance_userdata( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=redact, - exception_cb=exc_cb_ud, - ) - crawled_metadata["user-data"] = util.maybe_b64decode(raw_userdata) - crawled_metadata["meta-data"] = ec2.get_instance_metadata( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=redact, - exception_cb=exc_cb, - retrieval_exception_ignore_cb=skip_cb, - ) - if self.cloud_name == CloudNames.AWS: - identity = ec2.get_instance_identity( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=redact, - exception_cb=exc_cb, - ) - crawled_metadata["dynamic"] = {"instance-identity": identity} - except Exception: - util.logexc( - LOG, - "Failed reading from metadata address %s", - self.metadata_address, - ) - return {} - crawled_metadata["_metadata_api_version"] = api_version - return crawled_metadata - - def _refresh_api_token(self, seconds=None): - """Request new metadata API token. - @param seconds: The lifetime of the token in seconds - - @return: The API token or None if unavailable. 
- """ - if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - return None - - if seconds is None: - seconds = self.imdsv2_token_ttl_seconds - - LOG.debug("Refreshing Ec2 metadata API token") - request_header = {self.imdsv2_token_req_header: seconds} - token_url = "{}/{}".format(self.metadata_address, self.api_token_route) - try: - response = uhelp.readurl( - token_url, - headers=request_header, - headers_redact=self.imdsv2_token_redact, - request_method="PUT", - ) - except uhelp.UrlError as e: - LOG.warning( - "Unable to get API token: %s raised exception %s", token_url, e - ) - return None - return response.contents - - def _skip_or_refresh_stale_aws_token_cb(self, msg, exception): - """Callback will not retry on SKIP_USERDATA_CODES or if no token - is available.""" - retry = ec2.skip_retry_on_codes( - ec2.SKIP_USERDATA_CODES, msg, exception - ) - if not retry: - return False # False raises exception - return self._refresh_stale_aws_token_cb(msg, exception) - - def _refresh_stale_aws_token_cb(self, msg, exception): - """Exception handler for Ec2 to refresh token if token is stale.""" - if isinstance(exception, uhelp.UrlError) and exception.code == 401: - # With _api_token as None, _get_headers will _refresh_api_token. - LOG.debug("Clearing cached Ec2 API token due to expiry") - self._api_token = None - return True # always retry - - def _imds_exception_cb(self, msg, exception=None): - """Fail quickly on proper AWS if IMDSv2 rejects API token request - - Guidance from Amazon is that if IMDSv2 had disabled token requests - by returning a 403, or cloud-init malformed requests resulting in - other 40X errors, we want the datasource detection to fail quickly - without retries as those symptoms will likely not be resolved by - retries. - - Exceptions such as requests.ConnectionError due to IMDS being - temporarily unroutable or unavailable will still retry due to the - callsite wait_for_url. - """ - if isinstance(exception, uhelp.UrlError): - # requests.ConnectionError will have exception.code == None - if exception.code and exception.code >= 400: - if exception.code == 403: - LOG.warning( - "Ec2 IMDS endpoint returned a 403 error. " - "HTTP endpoint is disabled. Aborting." - ) - else: - LOG.warning( - "Fatal error while requesting Ec2 IMDSv2 API tokens" - ) - raise exception - - def _get_headers(self, url=""): - """Return a dict of headers for accessing a url. - - If _api_token is unset on AWS, attempt to refresh the token via a PUT - and then return the updated token header. - """ - if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - return {} - # Request a 6 hour token if URL is api_token_route - request_token_header = { - self.imdsv2_token_req_header: self.imdsv2_token_ttl_seconds - } - if self.api_token_route in url: - return request_token_header - if not self._api_token: - # If we don't yet have an API token, get one via a PUT against - # api_token_route. This _api_token may get unset by a 403 due - # to an invalid or expired token - self._api_token = self._refresh_api_token() - if not self._api_token: - return {} - return {self.imdsv2_token_put_header: self._api_token} - - -class DataSourceEc2Local(DataSourceEc2): - """Datasource run at init-local which sets up network to query metadata. - - In init-local, no network is available. This subclass sets up minimal - networking with dhclient on a viable nic so that it can talk to the - metadata service. 
If the metadata service provides network configuration - then render the network configuration for that instance based on metadata. - """ - - perform_dhcp_setup = True # Use dhcp before querying metadata - - def get_data(self): - supported_platforms = (CloudNames.AWS, CloudNames.OUTSCALE) - if self.cloud_name not in supported_platforms: - LOG.debug( - "Local Ec2 mode only supported on %s, not %s", - supported_platforms, - self.cloud_name, - ) - return False - return super(DataSourceEc2Local, self).get_data() - - -def read_strict_mode(cfgval, default): - try: - return parse_strict_mode(cfgval) - except ValueError as e: - LOG.warning(e) - return default - - -def parse_strict_mode(cfgval): - # given a mode like: - # true, false, warn,[sleep] - # return tuple with string mode (true|false|warn) and sleep. - if cfgval is True: - return "true", None - if cfgval is False: - return "false", None - - if not cfgval: - return "warn", 0 - - mode, _, sleep = cfgval.partition(",") - if mode not in ("true", "false", "warn"): - raise ValueError( - "Invalid mode '%s' in strict_id setting '%s': " - "Expected one of 'true', 'false', 'warn'." % (mode, cfgval) - ) - - if sleep: - try: - sleep = int(sleep) - except ValueError as e: - raise ValueError( - "Invalid sleep '%s' in strict_id setting '%s': not an integer" - % (sleep, cfgval) - ) from e - else: - sleep = None - - return mode, sleep - - -def warn_if_necessary(cfgval, cfg): - try: - mode, sleep = parse_strict_mode(cfgval) - except ValueError as e: - LOG.warning(e) - return - - if mode == "false": - return - - warnings.show_warning("non_ec2_md", cfg, mode=True, sleep=sleep) - - -def identify_aliyun(data): - if data["product_name"] == "Alibaba Cloud ECS": - return CloudNames.ALIYUN - - -def identify_aws(data): - # data is a dictionary returned by _collect_platform_data. - if data["uuid"].startswith("ec2") and ( - data["uuid_source"] == "hypervisor" or data["uuid"] == data["serial"] - ): - return CloudNames.AWS - - return None - - -def identify_brightbox(data): - if data["serial"].endswith(".brightbox.com"): - return CloudNames.BRIGHTBOX - - -def identify_zstack(data): - if data["asset_tag"].endswith(".zstack.io"): - return CloudNames.ZSTACK - - -def identify_e24cloud(data): - if data["vendor"] == "e24cloud": - return CloudNames.E24CLOUD - - -def identify_outscale(data): - if ( - data["product_name"] == "3DS Outscale VM".lower() - and data["vendor"] == "3DS Outscale".lower() - ): - return CloudNames.OUTSCALE - - -def identify_platform(): - # identify the platform and return an entry in CloudNames. - data = _collect_platform_data() - checks = ( - identify_aws, - identify_brightbox, - identify_zstack, - identify_e24cloud, - identify_outscale, - identify_aliyun, - lambda x: CloudNames.UNKNOWN, - ) - for checker in checks: - try: - result = checker(data) - if result: - return result - except Exception as e: - LOG.warning( - "calling %s with %s raised exception: %s", checker, data, e - ) - - -def _collect_platform_data(): - """Returns a dictionary of platform info from dmi or /sys/hypervisor. 
- - Keys in the dictionary are as follows: - uuid: system-uuid from dmi or /sys/hypervisor - uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' - serial: dmi 'system-serial-number' (/sys/.../product_serial) - asset_tag: 'dmidecode -s chassis-asset-tag' - vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor) - product_name: dmi 'system-product-name' (/sys/.../system-manufacturer) - - On Ec2 instances experimentation is that product_serial is upper case, - and product_uuid is lower case. This returns lower case values for both. - """ - data = {} - try: - uuid = util.load_text_file("/sys/hypervisor/uuid").strip() - data["uuid_source"] = "hypervisor" - except Exception: - uuid = dmi.read_dmi_data("system-uuid") - data["uuid_source"] = "dmi" - - if uuid is None: - uuid = "" - data["uuid"] = uuid.lower() - - serial = dmi.read_dmi_data("system-serial-number") - if serial is None: - serial = "" - - data["serial"] = serial.lower() - - asset_tag = dmi.read_dmi_data("chassis-asset-tag") - if asset_tag is None: - asset_tag = "" - - data["asset_tag"] = asset_tag.lower() - - vendor = dmi.read_dmi_data("system-manufacturer") - data["vendor"] = (vendor if vendor else "").lower() - - product_name = dmi.read_dmi_data("system-product-name") - data["product_name"] = (product_name if product_name else "").lower() - - return data - - -def _build_nic_order( - macs_metadata: Dict[str, Dict], macs: List[str] -) -> Dict[str, int]: - """ - Builds a dictionary containing macs as keys nad nic orders as values, - taking into account `network-card` and `device-number` if present. - - Note that the first NIC will be the primary NIC as it will be the one with - [network-card] == 0 and device-number == 0 if present. - - @param macs_metadata: dictionary with mac address as key and contents like: - {"device-number": "0", "interface-id": "...", "local-ipv4s": ...} - @macs: list of macs to consider - - @return: Dictionary with macs as keys and nic orders as values. - """ - nic_order: Dict[str, int] = {} - if len(macs) == 0 or len(macs_metadata) == 0: - return nic_order - - valid_macs_metadata = filter( - # filter out nics without metadata (not a physical nic) - lambda mmd: mmd[1] is not None, - # filter by macs - map(lambda mac: (mac, macs_metadata.get(mac)), macs), - ) - - def _get_key_as_int_or(dikt, key, alt_value): - value = dikt.get(key, None) - if value is not None: - return int(value) - return alt_value - - # Sort by (network_card, device_index) as some instances could have - # multiple network cards with repeated device indexes. - # - # On platforms where network-card and device-number are not present, - # as AliYun, the order will be by mac, as before the introduction of this - # function. - return { - mac: i - for i, (mac, _mac_metadata) in enumerate( - sorted( - valid_macs_metadata, - key=lambda mmd: ( - _get_key_as_int_or( - mmd[1], "network-card", float("infinity") - ), - _get_key_as_int_or( - mmd[1], "device-number", float("infinity") - ), - ), - ) - ) - } - - -def _configure_policy_routing( - dev_config: dict, - *, - nic_name: str, - nic_metadata: dict, - distro: Distro, - is_ipv4: bool, - table: int, -) -> None: - """ - Configure policy-based routing on secondary NICs / secondary IPs to - ensure outgoing packets are routed via the correct interface. - - @param: dev_config: network cfg v2 to be updated inplace. - @param: nic_name: nic name. Only used if ipv4. - @param: nic_metadata: nic metadata from IMDS. - @param: distro: Instance of Distro. Only used if ipv4. 
- @param: is_ipv4: Boolean indicating if we are acting over ipv4 or not. - @param: table: Routing table id. - """ - if is_ipv4: - subnet_prefix_routes = nic_metadata.get("subnet-ipv4-cidr-block") - ips = nic_metadata.get("local-ipv4s") - else: - subnet_prefix_routes = nic_metadata.get("subnet-ipv6-cidr-blocks") - ips = nic_metadata.get("ipv6s") - if not (subnet_prefix_routes and ips): - LOG.debug( - "Not enough IMDS information to configure policy routing " - "for IPv%s", - "4" if is_ipv4 else "6", - ) - return - - if not dev_config.get("routes"): - dev_config["routes"] = [] - if is_ipv4: - try: - lease = distro.dhcp_client.dhcp_discovery(nic_name, distro=distro) - gateway = lease["routers"] - except NoDHCPLeaseError as e: - LOG.warning( - "Could not perform dhcp discovery on %s to find its " - "gateway. Not adding default route via the gateway. " - "Error: %s", - nic_name, - e, - ) - else: - # Add default route via the NIC's gateway - dev_config["routes"].append( - { - "to": "0.0.0.0/0", - "via": gateway, - "table": table, - }, - ) - - subnet_prefix_routes = ( - [subnet_prefix_routes] - if isinstance(subnet_prefix_routes, str) - else subnet_prefix_routes - ) - for prefix_route in subnet_prefix_routes: - dev_config["routes"].append( - { - "to": prefix_route, - "table": table, - }, - ) - - if not dev_config.get("routing-policy"): - dev_config["routing-policy"] = [] - # Packets coming from any IP associated with the current NIC - # will be routed using `table` routing table - ips = [ips] if isinstance(ips, str) else ips - for ip in ips: - dev_config["routing-policy"].append( - { - "from": ip, - "table": table, - }, - ) - - -def convert_ec2_metadata_network_config( - network_md, - distro, - macs_to_nics=None, - fallback_nic=None, - full_network_config=True, -): - """Convert ec2 metadata to network config version 2 data dict. - - @param: network_md: 'network' portion of EC2 metadata. - generally formed as {"interfaces": {"macs": {}} where - 'macs' is a dictionary with mac address as key and contents like: - {"device-number": "0", "interface-id": "...", "local-ipv4s": ...} - @param: distro: instance of Distro. - @param: macs_to_nics: Optional dict of mac addresses and nic names. If - not provided, get_interfaces_by_mac is called to get it from the OS. - @param: fallback_nic: Optionally provide the primary nic interface name. - This nic will be guaranteed to minimally have a dhcp4 configuration. - @param: full_network_config: Boolean set True to configure all networking - presented by IMDS. This includes rendering secondary IPv4 and IPv6 - addresses on all NICs and rendering network config on secondary NICs. - If False, only the primary nic will be configured and only with dhcp - (IPv4/IPv6). - - @return A dict of network config version 2 based on the metadata and macs. 
- """ - netcfg = {"version": 2, "ethernets": {}} - if not macs_to_nics: - macs_to_nics = net.get_interfaces_by_mac() - macs_metadata = network_md["interfaces"]["macs"] - - if not full_network_config: - for mac, nic_name in macs_to_nics.items(): - if nic_name == fallback_nic: - break - dev_config = { - "dhcp4": True, - "dhcp6": False, - "match": {"macaddress": mac.lower()}, - "set-name": nic_name, - } - nic_metadata = macs_metadata.get(mac) - if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured - dev_config["dhcp6"] = True - netcfg["ethernets"][nic_name] = dev_config - return netcfg - # Apply network config for all nics and any secondary IPv4/v6 addresses - is_netplan = distro.network_activator == activators.NetplanActivator - macs = sorted(macs_to_nics.keys()) - nic_order = _build_nic_order(macs_metadata, macs) - for mac in macs: - nic_name = macs_to_nics[mac] - nic_metadata = macs_metadata.get(mac) - if not nic_metadata: - continue # Not a physical nic represented in metadata - nic_idx = nic_order[mac] - is_primary_nic = nic_idx == 0 - # nic_idx + 1 to start route_metric at 100 (nic_idx is 0-indexed) - dhcp_override = {"route-metric": (nic_idx + 1) * 100} - dev_config = { - "dhcp4": True, - "dhcp4-overrides": dhcp_override, - "dhcp6": False, - "match": {"macaddress": mac.lower()}, - "set-name": nic_name, - } - # This config only works on systems using Netplan because Networking - # config V2 does not support `routing-policy`, but this config is - # passed through on systems using Netplan. - # See: https://github.com/canonical/cloud-init/issues/4862 - # - # If device-number is not present (AliYun or other ec2-like platforms), - # do not configure source-routing as we cannot determine which is the - # primary NIC. - table = 100 + nic_idx - if ( - is_netplan - and nic_metadata.get("device-number") - and not is_primary_nic - ): - dhcp_override["use-routes"] = True - _configure_policy_routing( - dev_config, - distro=distro, - nic_name=nic_name, - nic_metadata=nic_metadata, - is_ipv4=True, - table=table, - ) - if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured - dev_config["dhcp6"] = True - dev_config["dhcp6-overrides"] = dhcp_override - if ( - is_netplan - and nic_metadata.get("device-number") - and not is_primary_nic - ): - _configure_policy_routing( - dev_config, - distro=distro, - nic_name=nic_name, - nic_metadata=nic_metadata, - is_ipv4=False, - table=table, - ) - dev_config["addresses"] = get_secondary_addresses(nic_metadata, mac) - if not dev_config["addresses"]: - dev_config.pop("addresses") # Since we found none configured - - netcfg["ethernets"][nic_name] = dev_config - # Remove route-metric dhcp overrides and routes / routing-policy if only - # one nic configured - if len(netcfg["ethernets"]) == 1: - for nic_name in netcfg["ethernets"].keys(): - netcfg["ethernets"][nic_name].pop("dhcp4-overrides") - netcfg["ethernets"][nic_name].pop("dhcp6-overrides", None) - netcfg["ethernets"][nic_name].pop("routes", None) - netcfg["ethernets"][nic_name].pop("routing-policy", None) - return netcfg - - -def get_secondary_addresses(nic_metadata, mac): - """Parse interface-specific nic metadata and return any secondary IPs - - :return: List of secondary IPv4 or IPv6 addresses to configure on the - interface - """ - ipv4s = nic_metadata.get("local-ipv4s") - ipv6s = nic_metadata.get("ipv6s") - addresses = [] - # In version < 2018-09-24 local_ipv4s or ipv6s is a str with one IP - if bool(isinstance(ipv4s, list) and len(ipv4s) > 1): - addresses.extend( - _get_secondary_addresses( - 
nic_metadata, "subnet-ipv4-cidr-block", mac, ipv4s, "24" - ) - ) - if bool(isinstance(ipv6s, list) and len(ipv6s) > 1): - addresses.extend( - _get_secondary_addresses( - nic_metadata, "subnet-ipv6-cidr-block", mac, ipv6s, "128" - ) - ) - return sorted(addresses) - - -def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix): - """Return list of IP addresses as CIDRs for secondary IPs - - The CIDR prefix will be default_prefix if cidr_key is absent or not - parseable in nic_metadata. - """ - addresses = [] - cidr = nic_metadata.get(cidr_key) - prefix = default_prefix - if not cidr or len(cidr.split("/")) != 2: - ip_type = "ipv4" if "ipv4" in cidr_key else "ipv6" - LOG.warning( - "Could not parse %s %s for mac %s. %s network" - " config prefix defaults to /%s", - cidr_key, - cidr, - mac, - ip_type, - prefix, - ) - else: - prefix = cidr.split("/")[1] - # We know we have > 1 ips for in metadata for this IP type - for ip in ips[1:]: - addresses.append("{ip}/{prefix}".format(ip=ip, prefix=prefix)) - return addresses - - -# Used to match classes to dependencies -datasources = [ - (DataSourceEc2Local, (sources.DEP_FILESYSTEM,)), # Run at init-local - (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), -] - - -# Return a list of data sources that match this set of dependencies -def get_datasource_list(depends): - return sources.list_from_depends(depends, datasources) diff --git a/.pc/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321/tests/unittests/sources/test_ec2.py b/.pc/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321/tests/unittests/sources/test_ec2.py deleted file mode 100644 index 4a2b57608..000000000 --- a/.pc/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321/tests/unittests/sources/test_ec2.py +++ /dev/null @@ -1,1615 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -import copy -import json -import threading -from unittest import mock - -import pytest -import requests -import responses - -from cloudinit import helpers -from cloudinit.net import activators -from cloudinit.sources import DataSourceEc2 as ec2 -from tests.unittests import helpers as test_helpers -from tests.unittests.util import MockDistro - -DYNAMIC_METADATA = { - "instance-identity": { - "document": json.dumps( - { - "devpayProductCodes": None, - "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], - "availabilityZone": "us-west-2b", - "privateIp": "10.158.112.84", - "version": "2017-09-30", - "instanceId": "my-identity-id", - "billingProducts": None, - "instanceType": "t2.micro", - "accountId": "123456789012", - "imageId": "ami-5fb8c835", - "pendingTime": "2016-11-19T16:32:11Z", - "architecture": "x86_64", - "kernelId": None, - "ramdiskId": None, - "region": "us-west-2", - } - ) - } -} - - -# collected from api version 2016-09-02/ with -# python3 -c 'import json -# from cloudinit.sources.helpers.ec2 import get_instance_metadata as gm -# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))' -# Note that the MAC addresses have been modified to sort in the opposite order -# to the device-number attribute, to test LP: #1876312 -DEFAULT_METADATA = { - "ami-id": "ami-8b92b4ee", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, - "hostname": "ip-172-31-31-158.us-east-2.compute.internal", - "instance-action": "none", - "instance-id": "i-0a33f80f09c96477f", - "instance-type": "t2.small", - "local-hostname": "ip-172-3-3-15.us-east-2.compute.internal", - "local-ipv4": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "metrics": {"vhostmd": ''}, - "network": { - "interfaces": { - "macs": { - "06:17:04:d7:26:09": { - "device-number": "0", - "interface-id": "eni-e44ef49e", - "ipv4-associations": {"13.59.77.202": "172.3.3.15"}, - "ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc", - "local-hostname": ( - "ip-172-3-3-15.us-east-2.compute.internal" - ), - "local-ipv4s": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "owner-id": "950047163771", - "public-hostname": ( - "ec2-13-59-77-202.us-east-2.compute.amazonaws.com" - ), - "public-ipv4s": "13.59.77.202", - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "2600:1f16:aeb:b20b::/64", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56", - }, - "06:17:04:d7:26:08": { - "device-number": "1", # Only IPv4 local config - "interface-id": "eni-e44ef49f", - "ipv4-associations": {"": "172.3.3.16"}, - "ipv6s": "", # No IPv6 config - "local-hostname": ( - "ip-172-3-3-16.us-east-2.compute.internal" - ), - "local-ipv4s": "172.3.3.16", - "mac": "06:17:04:d7:26:08", - "owner-id": "950047163771", - "public-hostname": ( - "ec2-172-3-3-16.us-east-2.compute.amazonaws.com" - ), - "public-ipv4s": "", # No public ipv4 config - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "", - }, - } - } - }, - "placement": {"availability-zone": "us-east-2b"}, - "profile": "default-hvm", - 
"public-hostname": "ec2-13-59-77-202.us-east-2.compute.amazonaws.com", - "public-ipv4": "13.59.77.202", - "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, - "reservation-id": "r-01efbc9996bac1bd6", - "security-groups": "my-wide-open", - "services": {"domain": "amazonaws.com", "partition": "aws"}, -} - -# collected from api version 2018-09-24/ with -# python3 -c 'import json -# from cloudinit.sources.helpers.ec2 import get_instance_metadata as gm -# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))' - -NIC1_MD_IPV4_IPV6_MULTI_IP = { - "device-number": "0", - "interface-id": "eni-0d6335689899ce9cc", - "ipv4-associations": {"18.218.219.181": "172.31.44.13"}, - "ipv6s": [ - "2600:1f16:292:100:c187:593c:4349:136", - "2600:1f16:292:100:f153:12a3:c37c:11f9", - "2600:1f16:292:100:f152:2222:3333:4444", - ], - "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "local-ipv4s": ["172.31.44.13", "172.31.45.70"], - "mac": "0a:07:84:3d:6e:38", - "owner-id": "329910648901", - "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com", - "public-ipv4s": "18.218.219.181", - "security-group-ids": "sg-0c387755222ba8d2e", - "security-groups": "launch-wizard-4", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "subnet_ipv6_cidr_blocks": "2600:1f16:292:100::/64", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56", -} - -NIC2_MD = { - "device-number": "1", - "interface-id": "eni-043cdce36ded5e79f", - "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", - "local-ipv4s": "172.31.47.221", - "mac": "0a:75:69:92:e2:16", - "owner-id": "329910648901", - "security-group-ids": "sg-0d68fef37d8cc9b77", - "security-groups": "launch-wizard-17", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", -} - -NIC2_MD_IPV4_IPV6_MULTI_IP = { - "device-number": "1", - "interface-id": "eni-043cdce36ded5e79f", - "ipv6s": [ - "2600:1f16:292:100:c187:593c:4349:136", - "2600:1f16:292:100:f153:12a3:c37c:11f9", - ], - "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", - "local-ipv4s": "172.31.47.221", - "mac": "0a:75:69:92:e2:16", - "owner-id": "329910648901", - "security-group-ids": "sg-0d68fef37d8cc9b77", - "security-groups": "launch-wizard-17", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "subnet-ipv6-cidr-blocks": "2600:1f16:292:100::/64", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:292:100::/56", -} - -MULTI_NIC_V6_ONLY_MD = { - "macs": { - "02:6b:df:a2:4b:2b": { - "device-number": "1", - "interface-id": "eni-0669816d0cf606123", - "ipv6s": "2600:1f16:67f:f201:8d2e:4d1f:9e80:4ab9", - "local-hostname": "i-0951b6d0b66337123.us-east-2.compute.internal", - "mac": "02:6b:df:a2:4b:2b", - "owner-id": "483410185123", - "security-group-ids": "sg-0bf34e5c3cde1d123", - "security-groups": "default", - "subnet-id": "subnet-0903f279682c66123", - "subnet-ipv6-cidr-blocks": "2600:1f16:67f:f201:0:0:0:0/64", - "vpc-id": "vpc-0ac1befb8c824a123", - "vpc-ipv4-cidr-block": "192.168.0.0/20", - "vpc-ipv4-cidr-blocks": "192.168.0.0/20", - "vpc-ipv6-cidr-blocks": "2600:1f16:67f:f200:0:0:0:0/56", - }, - "02:7c:03:b8:5c:af": { - "device-number": "0", - 
"interface-id": "eni-0f3cddb84c16e1123", - "ipv6s": "2600:1f16:67f:f201:6613:29a2:dbf7:2f1f", - "local-hostname": "i-0951b6d0b66337123.us-east-2.compute.internal", - "mac": "02:7c:03:b8:5c:af", - "owner-id": "483410185123", - "security-group-ids": "sg-0bf34e5c3cde1d123", - "security-groups": "default", - "subnet-id": "subnet-0903f279682c66123", - "subnet-ipv6-cidr-blocks": "2600:1f16:67f:f201:0:0:0:0/64", - "vpc-id": "vpc-0ac1befb8c824a123", - "vpc-ipv4-cidr-block": "192.168.0.0/20", - "vpc-ipv4-cidr-blocks": "192.168.0.0/20", - "vpc-ipv6-cidr-blocks": "2600:1f16:67f:f200:0:0:0:0/56", - }, - } -} - -SECONDARY_IP_METADATA_2018_09_24 = { - "ami-id": "ami-0986c2ac728528ac2", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, - "events": {"maintenance": {"history": "[]", "scheduled": "[]"}}, - "hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "identity-credentials": { - "ec2": { - "info": { - "AccountId": "329910648901", - "Code": "Success", - "LastUpdated": "2019-07-06T14:22:56Z", - } - } - }, - "instance-action": "none", - "instance-id": "i-069e01e8cc43732f8", - "instance-type": "t2.micro", - "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "local-ipv4": "172.31.44.13", - "mac": "0a:07:84:3d:6e:38", - "metrics": {"vhostmd": ''}, - "network": { - "interfaces": { - "macs": { - "0a:07:84:3d:6e:38": NIC1_MD_IPV4_IPV6_MULTI_IP, - } - } - }, - "placement": {"availability-zone": "us-east-2c"}, - "profile": "default-hvm", - "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com", - "public-ipv4": "18.218.219.181", - "public-keys": {"yourkeyname,e": ["ssh-rsa AAAAW...DZ yourkeyname"]}, - "reservation-id": "r-09b4917135cdd33be", - "security-groups": "launch-wizard-4", - "services": {"domain": "amazonaws.com", "partition": "aws"}, -} - -M_PATH = "cloudinit.sources.DataSourceEc2." -M_PATH_NET = "cloudinit.sources.DataSourceEc2.net." - -TAGS_METADATA_2021_03_23: dict = { - **DEFAULT_METADATA, - "tags": { - "instance": { - "Environment": "production", - "Application": "test", - "TagWithoutValue": "", - } - }, -} - - -@pytest.fixture(autouse=True) -def disable_is_resolvable(): - with mock.patch("cloudinit.sources.DataSourceEc2.util.is_resolvable"): - yield - - -def _register_ssh_keys(rfunc, base_url, keys_data): - """handle ssh key inconsistencies. - - public-keys in the ec2 metadata is inconsistently formated compared - to other entries. - Given keys_data of {name1: pubkey1, name2: pubkey2} - - This registers the following urls: - base_url 0={name1}\n1={name2} # (for each name) - base_url/ 0={name1}\n1={name2} # (for each name) - base_url/0 openssh-key - base_url/0/ openssh-key - base_url/0/openssh-key {pubkey1} - base_url/0/openssh-key/ {pubkey1} - ... - """ - - base_url = base_url.rstrip("/") - odd_index = "\n".join( - ["{0}={1}".format(n, name) for n, name in enumerate(sorted(keys_data))] - ) - - rfunc(base_url, odd_index) - rfunc(base_url + "/", odd_index) - - for n, name in enumerate(sorted(keys_data)): - val = keys_data[name] - if isinstance(val, list): - val = "\n".join(val) - burl = base_url + "/%s" % n - rfunc(burl, "openssh-key") - rfunc(burl + "/", "openssh-key") - rfunc(burl + "/%s/openssh-key" % name, val) - rfunc(burl + "/%s/openssh-key/" % name, val) - - -def register_mock_metaserver(base_url, data, responses_mock=None): - """Register with responses a ec2 metadata like service serving 'data'. 
- - If given a dictionary, it will populate urls under base_url for - that dictionary. For example, input of - {"instance-id": "i-abc", "mac": "00:16:3e:00:00:00"} - populates - base_url with 'instance-id\nmac' - base_url/ with 'instance-id\nmac' - base_url/instance-id with i-abc - base_url/mac with 00:16:3e:00:00:00 - In the index, references to lists or dictionaries have a trailing /. - """ - responses_mock = responses_mock or responses - - def register_helper(register, base_url, body): - if not isinstance(base_url, str): - register(base_url, body) - return - base_url = base_url.rstrip("/") - if isinstance(body, str): - register(base_url, body) - elif isinstance(body, list): - register(base_url, "\n".join(body) + "\n") - register(base_url + "/", "\n".join(body) + "\n") - elif isinstance(body, dict): - vals = [] - for k, v in body.items(): - if k == "public-keys": - _register_ssh_keys(register, base_url + "/public-keys/", v) - continue - suffix = k.rstrip("/") - if not isinstance(v, (str, list)): - suffix += "/" - vals.append(suffix) - url = base_url + "/" + suffix - register_helper(register, url, v) - register(base_url, "\n".join(vals) + "\n") - register(base_url + "/", "\n".join(vals) + "\n") - elif body is None: - register(base_url, "not found", status=404) - - def myreg(*argc, **kwargs): - url, body = argc - method = responses.PUT if "latest/api/token" in url else responses.GET - status = kwargs.get("status", 200) - return responses_mock.add(method, url, body, status=status) - - register_helper(myreg, base_url, data) - - -class TestEc2(test_helpers.ResponsesTestCase): - with_logs = True - maxDiff = None - - valid_platform_data = { - "uuid": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", - "uuid_source": "dmi", - "serial": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", - } - - def setUp(self): - super(TestEc2, self).setUp() - self.datasource = ec2.DataSourceEc2 - self.metadata_addr = self.datasource.metadata_urls[0] - self.tmp = self.tmp_dir() - - def data_url(self, version, data_item="meta-data"): - """Return a metadata url based on the version provided.""" - return "/".join([self.metadata_addr, version, data_item]) - - def _patch_add_cleanup(self, mpath, *args, **kwargs): - p = mock.patch(mpath, *args, **kwargs) - p.start() - self.addCleanup(p.stop) - - def _setup_ds( - self, sys_cfg, platform_data, md, md_version=None, distro=None - ): - self.uris = [] - distro = distro or mock.MagicMock() - distro.get_tmp_exec_path = self.tmp_dir - paths = helpers.Paths({"run_dir": self.tmp}) - if sys_cfg is None: - sys_cfg = {} - ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) - event = threading.Event() - p = mock.patch("time.sleep", event.wait) - p.start() - - def _mock_sleep(): - event.set() - p.stop() - - self.addCleanup(_mock_sleep) - if not md_version: - md_version = ds.min_metadata_version - if platform_data is not None: - self._patch_add_cleanup( - "cloudinit.sources.DataSourceEc2._collect_platform_data", - return_value=platform_data, - ) - - if md: - all_versions = [ - ds.min_metadata_version - ] + ds.extended_metadata_versions - token_url = self.data_url("latest", data_item="api/token") - register_mock_metaserver(token_url, "API-TOKEN", self.responses) - for version in all_versions: - metadata_url = self.data_url(version) + "/" - if version == md_version: - # Register all metadata for desired version - register_mock_metaserver( - metadata_url, - md.get("md", DEFAULT_METADATA), - self.responses, - ) - userdata_url = self.data_url( - version, data_item="user-data" - ) - 
register_mock_metaserver( - userdata_url, md.get("ud", ""), self.responses - ) - identity_url = self.data_url( - version, data_item="dynamic/instance-identity" - ) - register_mock_metaserver( - identity_url, - md.get("id", DYNAMIC_METADATA), - self.responses, - ) - else: - instance_id_url = metadata_url + "instance-id" - if version == ds.min_metadata_version: - # Add min_metadata_version service availability check - register_mock_metaserver( - instance_id_url, - DEFAULT_METADATA["instance-id"], - self.responses, - ) - else: - # Register 404s for all unrequested extended versions - register_mock_metaserver( - instance_id_url, None, self.responses - ) - return ds - - def test_network_config_property_returns_version_2_network_data(self): - """network_config property returns network version 2 for metadata""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - find_fallback_path = M_PATH_NET + "find_fallback_nic" - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = "eth9" - ds.get_data() - - mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": "06:17:04:d7:26:09"}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - get_interface_mac_path = M_PATH_NET + "get_interface_mac" - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - m_find_fallback.return_value = "eth9" - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_set_dhcp4(self): - """network_config property configures dhcp4 on nics with local-ipv4s. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. - """ - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - find_fallback_path = M_PATH_NET + "find_fallback_nic" - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = "eth9" - ds.get_data() - - mac1 = "06:17:04:d7:26:08" # IPv4 only in DEFAULT_METADATA - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": mac1.lower()}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - get_interface_mac_path = M_PATH_NET + "get_interface_mac" - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - dhcp_client = ds.distro.dhcp_client - dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - m_find_fallback.return_value = "eth9" - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_secondary_private_ips(self): - """network_config property configures any secondary ipv4 addresses. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. 
- """ - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": SECONDARY_IP_METADATA_2018_09_24}, - ) - find_fallback_path = M_PATH_NET + "find_fallback_nic" - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = "eth9" - ds.get_data() - - mac1 = "0a:07:84:3d:6e:38" # 1 secondary IPv4 and 2 secondary IPv6 - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": mac1}, - "set-name": "eth9", - "addresses": [ - "172.31.45.70/20", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - "dhcp4": True, - "dhcp6": True, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - get_interface_mac_path = M_PATH_NET + "get_interface_mac" - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - m_find_fallback.return_value = "eth9" - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_is_cached_in_datasource(self): - """network_config property is cached in DataSourceEc2.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - ds._network_config = {"cached": "data"} - self.assertEqual({"cached": "data"}, ds.network_config) - - @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") - def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): - """Refresh the network_config Ec2 cache if network key is absent. - - This catches an upgrade issue where obj.pkl contained stale metadata - which lacked newly required network key. 
- """ - old_metadata = copy.deepcopy(DEFAULT_METADATA) - old_metadata.pop("network") - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": old_metadata}, - ) - self.assertTrue(ds.get_data()) - - # Workaround https://github.com/getsentry/responses/issues/212 - if hasattr(self.responses, "_urls"): - # Can be removed when Bionic is EOL - for index, url in enumerate(self.responses._urls): - if url["url"].startswith( - "http://169.254.169.254/2009-04-04/meta-data/" - ): - del self.responses._urls[index] - elif hasattr(self.responses, "_matches"): - # Can be removed when Focal is EOL - for index, response in enumerate(self.responses._matches): - if response.url.startswith( - "http://169.254.169.254/2009-04-04/meta-data/" - ): - del self.responses._matches[index] - - # Provide new revision of metadata that contains network data - register_mock_metaserver( - "http://169.254.169.254/2009-04-04/meta-data/", - DEFAULT_METADATA, - self.responses, - ) - mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA - get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac" - ds.distro.fallback_nic = "eth9" - with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - nc = ds.network_config # Will re-crawl network metadata - self.assertIsNotNone(nc) - self.assertIn( - "Refreshing stale metadata from prior to upgrade", - self.logs.getvalue(), - ) - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - self.assertEqual(expected, ds.network_config) - - def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): - """get_instance-id gets DataSourceEc2Local.identity if not present. - - This handles an upgrade case where the old pickled datasource didn't - set up self.identity, but 'systemctl cloud-init init' runs - get_instance_id which traces on missing self.identity. lp:1748354. - """ - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - # Mock 404s on all versions except latest - all_versions = [ - ds.min_metadata_version - ] + ds.extended_metadata_versions - for ver in all_versions[:-1]: - register_mock_metaserver( - "http://[fd00:ec2::254]/{0}/meta-data/instance-id".format(ver), - None, - self.responses, - ) - - ds.metadata_address = "http://[fd00:ec2::254]" - register_mock_metaserver( - "{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]), - DEFAULT_METADATA, - self.responses, - ) - # Register dynamic/instance-identity document which we now read. 
- register_mock_metaserver( - "{0}/{1}/dynamic/".format(ds.metadata_address, all_versions[-1]), - DYNAMIC_METADATA, - self.responses, - ) - ds._cloud_name = ec2.CloudNames.AWS - # Setup cached metadata on the Datasource - ds.metadata = DEFAULT_METADATA - self.assertEqual("my-identity-id", ds.get_instance_id()) - - def test_classic_instance_true(self): - """If no vpc-id in metadata, is_classic_instance must return true.""" - md_copy = copy.deepcopy(DEFAULT_METADATA) - ifaces_md = md_copy.get("network", {}).get("interfaces", {}) - for _mac, mac_data in ifaces_md.get("macs", {}).items(): - if "vpc-id" in mac_data: - del mac_data["vpc-id"] - - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": md_copy}, - ) - self.assertTrue(ds.get_data()) - self.assertTrue(ds.is_classic_instance()) - - def test_classic_instance_false(self): - """If vpc-id in metadata, is_classic_instance must return false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - self.assertTrue(ds.get_data()) - self.assertFalse(ds.is_classic_instance()) - - def test_aws_inaccessible_imds_service_fails_with_retries(self): - """Inaccessibility of http://169.254.169.254 are retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md=None, - ) - - conn_error = requests.exceptions.ConnectionError( - "[Errno 113] no route to host" - ) - - mock_success = mock.MagicMock(contents=b"fakesuccess") - mock_success.ok.return_value = True - - with mock.patch("cloudinit.url_helper.readurl") as m_readurl: - # yikes, this endpoint needs help - m_readurl.side_effect = ( - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - mock_success, - ) - with mock.patch("cloudinit.url_helper.time.sleep"): - self.assertTrue(ds.wait_for_metadata_service()) - - # Just one /latest/api/token request - self.assertEqual(19, len(m_readurl.call_args_list)) - for readurl_call in m_readurl.call_args_list: - self.assertIn("latest/api/token", readurl_call[0][0]) - - def test_aws_token_403_fails_without_retries(self): - """Verify that 403s fetching AWS tokens are not retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md=None, - ) - - token_url = self.data_url("latest", data_item="api/token") - self.responses.add(responses.PUT, token_url, status=403) - self.assertFalse(ds.get_data()) - # Just one /latest/api/token request - logs = self.logs.getvalue() - expected_logs = [ - "WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is" - " disabled. 
Aborting.", - "WARNING: IMDS's HTTP endpoint is probably disabled", - ] - for log in expected_logs: - self.assertIn(log, logs) - - def test_aws_token_redacted(self): - """Verify that aws tokens are redacted when logged.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - self.assertTrue(ds.get_data()) - all_logs = self.logs.getvalue().splitlines() - REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" - REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" - logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] - logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] - logs_with_token = [log for log in all_logs if "API-TOKEN" in log] - self.assertEqual(1, len(logs_with_redacted_ttl)) - self.assertEqual(83, len(logs_with_redacted)) - self.assertEqual(0, len(logs_with_token)) - - @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") - def test_valid_platform_with_strict_true(self, m_dhcp): - """Valid platform data should return true with strict_id true.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - self.assertEqual(0, m_dhcp.call_count) - self.assertEqual("aws", ds.cloud_name) - self.assertEqual("ec2", ds.platform_type) - self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform) - - def test_valid_platform_with_strict_false(self): - """Valid platform data should return true with strict_id false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - - def test_unknown_platform_with_strict_true(self): - """Unknown platform data with strict_id true should return False.""" - uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" - ds = self._setup_ds( - platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertFalse(ret) - - def test_unknown_platform_with_strict_false(self): - """Unknown platform data with strict_id false should return True.""" - uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" - ds = self._setup_ds( - platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - - def test_ec2_local_returns_false_on_non_aws(self): - """DataSourceEc2Local returns False when platform is not AWS.""" - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - platform_attrs = [ - attr - for attr in ec2.CloudNames.__dict__.keys() - if not attr.startswith("__") - ] - for attr_name in platform_attrs: - platform_name = getattr(ec2.CloudNames, attr_name) - if platform_name not in ["aws", "outscale"]: - ds._cloud_name = platform_name - ret = ds.get_data() - self.assertEqual("ec2", ds.platform_type) - self.assertFalse(ret) - message = ( - "Local Ec2 mode only supported on ('aws', 'outscale')," - " not {0}".format(platform_name) - ) - self.assertIn(message, self.logs.getvalue()) - - 
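The multi-NIC expectations asserted in the tests that follow all derive from the numbering scheme in convert_ec2_metadata_network_config above: a NIC's zero-based order determines both its DHCP route metric and its policy-routing table. A minimal standalone sketch of that scheme (assumed for illustration only):

    def nic_numbering(nic_idx: int) -> dict:
        return {
            # nic_idx is 0-based, so metrics start at 100 and the primary
            # NIC always wins default-route selection.
            "route-metric": (nic_idx + 1) * 100,
            # Each NIC gets its own policy-routing table: 100, 101, ...
            "table": 100 + nic_idx,
        }

    assert nic_numbering(0) == {"route-metric": 100, "table": 100}
    # Matches eth10 in the multi-NIC tests below: metric 200, table 101.
    assert nic_numbering(1) == {"route-metric": 200, "table": 101}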
@mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") - def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): - """DataSourceEc2Local returns False on BSD. - - FreeBSD dhclient doesn't support dhclient -sf to run in a sandbox. - """ - m_is_freebsd.return_value = True - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertFalse(ret) - self.assertIn( - "FreeBSD doesn't support running dhclient with -sf", - self.logs.getvalue(), - ) - - @mock.patch("cloudinit.net.ephemeral.EphemeralIPv6Network") - @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network") - @mock.patch("cloudinit.distros.net.find_fallback_nic") - @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") - @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") - def test_ec2_local_performs_dhcp_on_non_bsd( - self, m_is_bsd, m_dhcp, m_fallback_nic, m_net4, m_net6 - ): - """Ec2Local returns True for valid platform data on non-BSD with dhcp. - - DataSourceEc2Local will setup initial IPv4 network via dhcp discovery. - Then the metadata services is crawled for more network config info. - When the platform data is valid, return True. - """ - - m_fallback_nic.return_value = "eth9" - m_is_bsd.return_value = False - m_dhcp.return_value = { - "interface": "eth9", - "fixed-address": "192.168.2.9", - "routers": "192.168.2.1", - "subnet-mask": "255.255.255.0", - "broadcast-address": "192.168.2.255", - } - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - distro=MockDistro("", {}, {}), - ) - - ret = ds.get_data() - self.assertTrue(ret) - m_dhcp.assert_called_once_with(ds.distro, "eth9", None) - m_net4.assert_called_once_with( - ds.distro, - broadcast="192.168.2.255", - interface="eth9", - ip="192.168.2.9", - prefix_or_mask="255.255.255.0", - router="192.168.2.1", - static_routes=None, - ) - self.assertIn("Crawl of metadata service ", self.logs.getvalue()) - - def test_get_instance_tags(self): - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": TAGS_METADATA_2021_03_23}, - ) - self.assertTrue(ds.get_data()) - self.assertIn("tags", ds.metadata) - self.assertIn("instance", ds.metadata["tags"]) - instance_tags = ds.metadata["tags"]["instance"] - self.assertEqual(instance_tags["Application"], "test") - self.assertEqual(instance_tags["Environment"], "production") - - -class TestGetSecondaryAddresses(test_helpers.CiTestCase): - mac = "06:17:04:d7:26:ff" - with_logs = True - - def test_md_with_no_secondary_addresses(self): - """Empty list is returned when nic metadata contains no secondary ip""" - self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac)) - - def test_md_with_secondary_v4_and_v6_addresses(self): - """All secondary addresses are returned from nic metadata""" - self.assertEqual( - [ - "172.31.45.70/20", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac), - ) - - def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self): - """Any invalid subnet-ipv(4|6)-cidr-block values use defaults""" - invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP) - 
invalid_cidr_md["subnet-ipv4-cidr-block"] = "something-unexpected" - invalid_cidr_md["subnet-ipv6-cidr-block"] = "not/sure/what/this/is" - self.assertEqual( - [ - "172.31.45.70/24", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - ec2.get_secondary_addresses(invalid_cidr_md, self.mac), - ) - expected_logs = [ - "WARNING: Could not parse subnet-ipv4-cidr-block" - " something-unexpected for mac 06:17:04:d7:26:ff." - " ipv4 network config prefix defaults to /24", - "WARNING: Could not parse subnet-ipv6-cidr-block" - " not/sure/what/this/is for mac 06:17:04:d7:26:ff." - " ipv6 network config prefix defaults to /128", - ] - logs = self.logs.getvalue() - for log in expected_logs: - self.assertIn(log, logs) - - -class TestBuildNicOrder: - @pytest.mark.parametrize( - ["macs_metadata", "macs", "expected"], - [ - pytest.param({}, [], {}, id="all_empty"), - pytest.param( - {}, ["0a:f7:8d:96:f2:a1"], {}, id="empty_macs_metadata" - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - } - }, - [], - {}, - id="empty_macs", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], - {"0a:f7:8d:96:f2:a1": 0, "0a:0d:dd:44:cd:7b": 1}, - id="no-device-number-info", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1"], - {"0a:f7:8d:96:f2:a1": 0}, - id="no-device-number-info-subset", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], - {"0a:0d:dd:44:cd:7b": 0, "0a:f7:8d:96:f2:a1": 1}, - id="device-numbers", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "network-card": "0", - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "network-card": "1", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - "0a:f7:8d:96:f2:a2": { - "network-card": "2", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - [ - "0a:f7:8d:96:f2:a1", - "0a:0d:dd:44:cd:7b", - "0a:f7:8d:96:f2:a2", - ], - { - "0a:0d:dd:44:cd:7b": 0, - "0a:f7:8d:96:f2:a1": 1, - "0a:f7:8d:96:f2:a2": 2, - }, - id="network-cardes", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "network-card": "0", - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "network-card": "1", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - "0a:f7:8d:96:f2:a2": { - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - [ - "0a:f7:8d:96:f2:a1", - "0a:0d:dd:44:cd:7b", - "0a:f7:8d:96:f2:a2", - ], - { - "0a:0d:dd:44:cd:7b": 0, - "0a:f7:8d:96:f2:a1": 1, - "0a:f7:8d:96:f2:a2": 2, - }, - id="network-card-partially-missing", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a9"], - {}, - id="macs-not-in-md", - ), - ], - ) - def test_build_nic_order(self, macs_metadata, macs, expected): - assert expected == ec2._build_nic_order(macs_metadata, macs) - - -class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): - def setUp(self): - super(TestConvertEc2MetadataNetworkConfig, self).setUp() - self.mac1 = 
"06:17:04:d7:26:09" - interface_dict = copy.deepcopy( - DEFAULT_METADATA["network"]["interfaces"]["macs"][self.mac1] - ) - # These tests are written assuming the base interface doesn't have IPv6 - interface_dict.pop("ipv6s") - self.network_metadata = { - "interfaces": {"macs": {self.mac1: interface_dict}} - } - - def test_convert_ec2_metadata_network_config_skips_absent_macs(self): - """Any mac absent from metadata is skipped by network config.""" - macs_to_nics = {self.mac1: "eth9", "DE:AD:BE:EF:FF:FF": "vitualnic2"} - - # DE:AD:BE:EF:FF:FF represented by OS but not in metadata - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - self.network_metadata, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self): - """Config dhcp6 when ipv6s is in metadata for a mac.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_local_only_dhcp4(self): - """Config dhcp4 when there are no public addresses in public-ipv4s.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["local-ipv4s"] = "172.3.3.15" - nic1_metadata.pop("public-ipv4s") - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): - """Config dhcp4 on fallback_nic when there are no ipv4 addresses.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["public-ipv4s"] = "" - - # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config. 
- expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): - """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_multi_nics_ipv4(self): - """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. - Source-routing configured for secondary NICs (routing-policy and extra - routing table).""" - mac2 = "06:17:04:d7:26:08" - macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} - network_metadata_both = copy.deepcopy(self.network_metadata) - # Add 2nd nic info - network_metadata_both["interfaces"]["macs"][mac2] = NIC2_MD - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": True, - "dhcp6-overrides": {"route-metric": 100}, - }, - "eth10": { - "match": {"macaddress": mac2}, - "set-name": "eth10", - "dhcp4": True, - "dhcp4-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "dhcp6": False, - "routes": [ - # via DHCP gateway - {"to": "0.0.0.0/0", "via": "172.31.1.0", "table": 101}, - # to NIC2_MD["subnet-ipv4-cidr-block"] - {"to": "172.31.32.0/20", "table": 101}, - ], - "routing-policy": [ - # NIC2_MD["local-ipv4s"] - {"from": "172.31.47.221", "table": 101} - ], - }, - }, - } - distro = mock.Mock() - distro.network_activator = activators.NetplanActivator - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_multi_nics_ipv4_ipv6_multi_ip( - self, - ): - """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. 
- Source-routing configured for secondary NICs (routing-policy and extra - routing table).""" - mac2 = "06:17:04:d7:26:08" - macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} - network_metadata_both = copy.deepcopy(self.network_metadata) - # Add 2nd nic info - network_metadata_both["interfaces"]["macs"][ - mac2 - ] = NIC2_MD_IPV4_IPV6_MULTI_IP - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": True, - "match": {"macaddress": "06:17:04:d7:26:09"}, - "set-name": "eth9", - "dhcp6-overrides": {"route-metric": 100}, - }, - "eth10": { - "dhcp4": True, - "dhcp4-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "dhcp6": True, - "match": {"macaddress": "06:17:04:d7:26:08"}, - "set-name": "eth10", - "routes": [ - # via DHCP gateway - {"to": "0.0.0.0/0", "via": "172.31.1.0", "table": 101}, - # to NIC2_MD["subnet-ipv4-cidr-block"] - {"to": "172.31.32.0/20", "table": 101}, - # to NIC2_MD["subnet-ipv6-cidr-blocks"] - {"to": "2600:1f16:292:100::/64", "table": 101}, - ], - "routing-policy": [ - # NIC2_MD["local-ipv4s"] - {"from": "172.31.47.221", "table": 101}, - { - "from": "2600:1f16:292:100:c187:593c:4349:136", - "table": 101, - }, - { - "from": "2600:1f16:292:100:f153:12a3:c37c:11f9", - "table": 101, - }, - ], - "dhcp6-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "addresses": ["2600:1f16:292:100:f153:12a3:c37c:11f9/128"], - }, - }, - } - distro = mock.Mock() - distro.network_activator = activators.NetplanActivator - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_multi_nics_ipv6_only(self): - """Like above, but only ipv6s are present in metadata.""" - macs_to_nics = { - "02:7c:03:b8:5c:af": "eth0", - "02:6b:df:a2:4b:2b": "eth1", - } - mac_data = copy.deepcopy(MULTI_NIC_V6_ONLY_MD) - network_metadata = {"interfaces": mac_data} - expected = { - "version": 2, - "ethernets": { - "eth0": { - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": True, - "match": {"macaddress": "02:7c:03:b8:5c:af"}, - "set-name": "eth0", - "dhcp6-overrides": {"route-metric": 100}, - }, - "eth1": { - "dhcp4": True, - "dhcp4-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "dhcp6": True, - "match": {"macaddress": "02:6b:df:a2:4b:2b"}, - "set-name": "eth1", - "routes": [ - {"to": "2600:1f16:67f:f201:0:0:0:0/64", "table": 101}, - ], - "routing-policy": [ - { - "from": "2600:1f16:67f:f201:8d2e:4d1f:9e80:4ab9", - "table": 101, - }, - ], - "dhcp6-overrides": { - "route-metric": 200, - "use-routes": True, - }, - }, - }, - } - distro = mock.Mock() - distro.network_activator = activators.NetplanActivator - assert expected == ec2.convert_ec2_metadata_network_config( - network_metadata, distro, macs_to_nics - ) - distro.dhcp_client.dhcp_discovery.assert_not_called() - - def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): - """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_both = 
copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self): - """Convert Ec2 Metadata calls get_interfaces_by_mac by default.""" - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - distro = mock.Mock() - with mock.patch(patch_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {self.mac1: "eth9"} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - self.network_metadata, distro - ), - ) - - -class TesIdentifyPlatform(test_helpers.CiTestCase): - def collmock(self, **kwargs): - """return non-special _collect_platform_data updated with changes.""" - unspecial = { - "asset_tag": "3857-0037-2746-7462-1818-3997-77", - "serial": "H23-C4J3JV-R6", - "uuid": "81c7e555-6471-4833-9551-1ab366c4cfd2", - "uuid_source": "dmi", - "vendor": "tothecloud", - "product_name": "cloudproduct", - } - unspecial.update(**kwargs) - return unspecial - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_aliyun(self, m_collect): - """aliyun should be identified if product name equals to - Alibaba Cloud ECS - """ - m_collect.return_value = self.collmock( - product_name="Alibaba Cloud ECS" - ) - self.assertEqual(ec2.CloudNames.ALIYUN, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_zstack(self, m_collect): - """zstack should be identified if chassis-asset-tag - ends in .zstack.io - """ - m_collect.return_value = self.collmock(asset_tag="123456.zstack.io") - self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_zstack_full_domain_only(self, m_collect): - """zstack asset-tag matching should match only on - full domain boundary. 
- """ - m_collect.return_value = self.collmock(asset_tag="123456.buzzstack.io") - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_e24cloud(self, m_collect): - """e24cloud identified if vendor is e24cloud""" - m_collect.return_value = self.collmock(vendor="e24cloud") - self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_e24cloud_negative(self, m_collect): - """e24cloud identified if vendor is e24cloud""" - m_collect.return_value = self.collmock(vendor="e24cloudyday") - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - - # Outscale - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_outscale(self, m_collect): - """Should return true if the dmi product data has expected value.""" - m_collect.return_value = self.collmock( - vendor="3DS Outscale".lower(), - product_name="3DS Outscale VM".lower(), - ) - self.assertEqual(ec2.CloudNames.OUTSCALE, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_false_on_wrong_sys_vendor(self, m_collect): - """Should return false on empty value returned.""" - m_collect.return_value = self.collmock( - vendor="Not 3DS Outscale".lower(), - product_name="3DS Outscale VM".lower(), - ) - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_false_on_wrong_product_name(self, m_collect): - """Should return false on an unrelated string.""" - m_collect.return_value = self.collmock( - vendor="3DS Outscale".lower(), - product_name="Not 3DS Outscale VM".lower(), - ) - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) diff --git a/.pc/cpick-d771d1f4-fix-ec2-Correctly-identify-netplan-renderer-5361/cloudinit/sources/DataSourceEc2.py b/.pc/cpick-d771d1f4-fix-ec2-Correctly-identify-netplan-renderer-5361/cloudinit/sources/DataSourceEc2.py deleted file mode 100644 index 77c2d98b2..000000000 --- a/.pc/cpick-d771d1f4-fix-ec2-Correctly-identify-netplan-renderer-5361/cloudinit/sources/DataSourceEc2.py +++ /dev/null @@ -1,1214 +0,0 @@ -# Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Scott Moser -# Author: Juerg Hafliger -# Author: Joshua Harlow -# -# This file is part of cloud-init. See LICENSE file for license information. - -import copy -import logging -import os -import time -from typing import Dict, List - -from cloudinit import dmi, net, sources -from cloudinit import url_helper as uhelp -from cloudinit import util, warnings -from cloudinit.distros import Distro -from cloudinit.event import EventScope, EventType -from cloudinit.net import netplan -from cloudinit.net.dhcp import NoDHCPLeaseError -from cloudinit.net.ephemeral import EphemeralIPNetwork -from cloudinit.sources.helpers import ec2 - -LOG = logging.getLogger(__name__) - -SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) - -STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") -STRICT_ID_DEFAULT = "warn" - - -class CloudNames: - ALIYUN = "aliyun" - AWS = "aws" - BRIGHTBOX = "brightbox" - ZSTACK = "zstack" - E24CLOUD = "e24cloud" - OUTSCALE = "outscale" - # UNKNOWN indicates no positive id. 
If strict_id is 'warn' or 'false', - # then an attempt at the Ec2 Metadata service will be made. - UNKNOWN = "unknown" - # NO_EC2_METADATA indicates this platform does not have a Ec2 metadata - # service available. No attempt at the Ec2 Metadata service will be made. - NO_EC2_METADATA = "no-ec2-metadata" - - -# Drop when LP: #1988157 tag handling is fixed -def skip_404_tag_errors(exception): - return exception.code == 404 and "meta-data/tags/" in exception.url - - -# Cloud platforms that support IMDSv2 style metadata server -IDMSV2_SUPPORTED_CLOUD_PLATFORMS = [CloudNames.AWS, CloudNames.ALIYUN] - -# Only trigger hook-hotplug on NICs with Ec2 drivers. Avoid triggering -# it on docker virtual NICs and the like. LP: #1946003 -_EXTRA_HOTPLUG_UDEV_RULES = """ -ENV{ID_NET_DRIVER}=="vif|ena|ixgbevf", GOTO="cloudinit_hook" -GOTO="cloudinit_end" -""" - - -class DataSourceEc2(sources.DataSource): - dsname = "Ec2" - # Default metadata urls that will be used if none are provided - # They will be checked for 'resolveability' and some of the - # following may be discarded if they do not resolve - metadata_urls = [ - "http://169.254.169.254", - "http://[fd00:ec2::254]", - "http://instance-data.:8773", - ] - - # The minimum supported metadata_version from the ec2 metadata apis - min_metadata_version = "2009-04-04" - - # Priority ordered list of additional metadata versions which will be tried - # for extended metadata content. IPv6 support comes in 2016-09-02. - # Tags support comes in 2021-03-23. - extended_metadata_versions: List[str] = [ - "2021-03-23", - "2018-09-24", - "2016-09-02", - ] - - # Setup read_url parameters per get_url_params. - url_max_wait = 120 - url_timeout = 50 - - _api_token = None # API token for accessing the metadata service - _network_config = sources.UNSET # Used to cache calculated network cfg v1 - - # Whether we want to get network configuration from the metadata service. - perform_dhcp_setup = False - - supported_update_events = { - EventScope.NETWORK: { - EventType.BOOT_NEW_INSTANCE, - EventType.BOOT, - EventType.BOOT_LEGACY, - EventType.HOTPLUG, - } - } - - extra_hotplug_udev_rules = _EXTRA_HOTPLUG_UDEV_RULES - - def __init__(self, sys_cfg, distro, paths): - super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) - self.metadata_address = None - - def _unpickle(self, ci_pkl_version: int) -> None: - super()._unpickle(ci_pkl_version) - self.extra_hotplug_udev_rules = _EXTRA_HOTPLUG_UDEV_RULES - - def _get_cloud_name(self): - """Return the cloud name as identified during _get_data.""" - return identify_platform() - - def _get_data(self): - strict_mode, _sleep = read_strict_mode( - util.get_cfg_by_path( - self.sys_cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT - ), - ("warn", None), - ) - - LOG.debug( - "strict_mode: %s, cloud_name=%s cloud_platform=%s", - strict_mode, - self.cloud_name, - self.platform, - ) - if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN: - return False - elif self.cloud_name == CloudNames.NO_EC2_METADATA: - return False - - if self.perform_dhcp_setup: # Setup networking in init-local stage. 
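            # init-local runs before networking is configured: FreeBSD bails
            # out because its dhclient cannot run sandboxed, and other
            # systems bring up an EphemeralIPNetwork (IPv4 and IPv6) just
            # long enough to crawl IMDS, returning False if no DHCP lease
            # can be obtained.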
- if util.is_FreeBSD(): - LOG.debug("FreeBSD doesn't support running dhclient with -sf") - return False - try: - with EphemeralIPNetwork( - self.distro, - self.distro.fallback_interface, - ipv4=True, - ipv6=True, - ) as netw: - state_msg = f" {netw.state_msg}" if netw.state_msg else "" - self._crawled_metadata = util.log_time( - logfunc=LOG.debug, - msg=f"Crawl of metadata service{state_msg}", - func=self.crawl_metadata, - ) - - except NoDHCPLeaseError: - return False - else: - self._crawled_metadata = util.log_time( - logfunc=LOG.debug, - msg="Crawl of metadata service", - func=self.crawl_metadata, - ) - if not self._crawled_metadata: - return False - self.metadata = self._crawled_metadata.get("meta-data", None) - self.userdata_raw = self._crawled_metadata.get("user-data", None) - self.identity = ( - self._crawled_metadata.get("dynamic", {}) - .get("instance-identity", {}) - .get("document", {}) - ) - return True - - def is_classic_instance(self): - """Report if this instance type is Ec2 Classic (non-vpc).""" - if not self.metadata: - # Can return False on inconclusive as we are also called in - # network_config where metadata will be present. - # Secondary call site is in packaging postinst script. - return False - ifaces_md = self.metadata.get("network", {}).get("interfaces", {}) - for _mac, mac_data in ifaces_md.get("macs", {}).items(): - if "vpc-id" in mac_data: - return False - return True - - @property - def launch_index(self): - if not self.metadata: - return None - return self.metadata.get("ami-launch-index") - - @property - def platform(self): - # Handle upgrade path of pickled ds - if not hasattr(self, "_platform_type"): - self._platform_type = DataSourceEc2.dsname.lower() - if not self._platform_type: - self._platform_type = DataSourceEc2.dsname.lower() - return self._platform_type - - # IMDSv2 related parameters from the ec2 metadata api document - @property - def api_token_route(self): - return "latest/api/token" - - @property - def imdsv2_token_ttl_seconds(self): - return "21600" - - @property - def imdsv2_token_put_header(self): - return "X-aws-ec2-metadata-token" - - @property - def imdsv2_token_req_header(self): - return self.imdsv2_token_put_header + "-ttl-seconds" - - @property - def imdsv2_token_redact(self): - return [self.imdsv2_token_put_header, self.imdsv2_token_req_header] - - def get_metadata_api_version(self): - """Get the best supported api version from the metadata service. - - Loop through all extended support metadata versions in order and - return the most-fully featured metadata api version discovered. - - If extended_metadata_versions aren't present, return the datasource's - min_metadata_version. - """ - # Assumes metadata service is already up - url_tmpl = "{0}/{1}/meta-data/instance-id" - headers = self._get_headers() - for api_ver in self.extended_metadata_versions: - url = url_tmpl.format(self.metadata_address, api_ver) - try: - resp = uhelp.readurl( - url=url, - headers=headers, - headers_redact=self.imdsv2_token_redact, - ) - except uhelp.UrlError as e: - LOG.debug("url %s raised exception %s", url, e) - else: - if resp.code == 200: - LOG.debug("Found preferred metadata version %s", api_ver) - return api_ver - elif resp.code == 404: - msg = "Metadata api version %s not present. 
Headers: %s" - LOG.debug(msg, api_ver, resp.headers) - return self.min_metadata_version - - def get_instance_id(self): - if self.cloud_name == CloudNames.AWS: - # Prefer the ID from the instance identity document, but fall back - if not getattr(self, "identity", None): - # If re-using cached datasource, it's get_data run didn't - # setup self.identity. So we need to do that now. - api_version = self.get_metadata_api_version() - self.identity = ec2.get_instance_identity( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=self.imdsv2_token_redact, - exception_cb=self._refresh_stale_aws_token_cb, - ).get("document", {}) - return self.identity.get( - "instanceId", self.metadata["instance-id"] - ) - else: - return self.metadata["instance-id"] - - def _maybe_fetch_api_token(self, mdurls): - """Get an API token for EC2 Instance Metadata Service. - - On EC2. IMDS will always answer an API token, unless - the instance owner has disabled the IMDS HTTP endpoint or - the network topology conflicts with the configured hop-limit. - """ - if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - return - - urls = [] - url2base = {} - url_path = self.api_token_route - request_method = "PUT" - for url in mdurls: - cur = "{0}/{1}".format(url, url_path) - urls.append(cur) - url2base[cur] = url - - # use the self._imds_exception_cb to check for Read errors - LOG.debug("Fetching Ec2 IMDSv2 API Token") - - response = None - url = None - url_params = self.get_url_params() - try: - url, response = uhelp.wait_for_url( - urls=urls, - max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, - status_cb=LOG.warning, - headers_cb=self._get_headers, - exception_cb=self._imds_exception_cb, - request_method=request_method, - headers_redact=self.imdsv2_token_redact, - connect_synchronously=False, - ) - except uhelp.UrlError: - # We use the raised exception to interrupt the retry loop. - # Nothing else to do here. - pass - - if url and response: - self._api_token = response - return url2base[url] - - # If we get here, then wait_for_url timed out, waiting for IMDS - # or the IMDS HTTP endpoint is disabled - return None - - def wait_for_metadata_service(self): - mcfg = self.ds_cfg - - url_params = self.get_url_params() - if url_params.max_wait_seconds <= 0: - return False - - # Remove addresses from the list that wont resolve. - mdurls = mcfg.get("metadata_urls", self.metadata_urls) - filtered = [x for x in mdurls if util.is_resolvable_url(x)] - - if set(filtered) != set(mdurls): - LOG.debug( - "Removed the following from metadata urls: %s", - list((set(mdurls) - set(filtered))), - ) - - if len(filtered): - mdurls = filtered - else: - LOG.warning("Empty metadata url list! using default list") - mdurls = self.metadata_urls - - # try the api token path first - metadata_address = self._maybe_fetch_api_token(mdurls) - # When running on EC2, we always access IMDS with an API token. - # If we could not get an API token, then we assume the IMDS - # endpoint was disabled and we move on without a data source. 
- # Fallback to IMDSv1 if not running on EC2 - if ( - not metadata_address - and self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS - ): - # if we can't get a token, use instance-id path - urls = [] - url2base = {} - url_path = "{ver}/meta-data/instance-id".format( - ver=self.min_metadata_version - ) - request_method = "GET" - for url in mdurls: - cur = "{0}/{1}".format(url, url_path) - urls.append(cur) - url2base[cur] = url - - start_time = time.time() - url, _ = uhelp.wait_for_url( - urls=urls, - max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, - status_cb=LOG.warning, - headers_redact=self.imdsv2_token_redact, - headers_cb=self._get_headers, - request_method=request_method, - ) - - if url: - metadata_address = url2base[url] - - if metadata_address: - self.metadata_address = metadata_address - LOG.debug("Using metadata source: '%s'", self.metadata_address) - elif self.cloud_name in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - LOG.warning("IMDS's HTTP endpoint is probably disabled") - else: - LOG.critical( - "Giving up on md from %s after %s seconds", - urls, - int(time.time() - start_time), - ) - - return bool(metadata_address) - - def device_name_to_device(self, name): - # Consult metadata service, that has - # ephemeral0: sdb - # and return 'sdb' for input 'ephemeral0' - if "block-device-mapping" not in self.metadata: - return None - - # Example: - # 'block-device-mapping': - # {'ami': '/dev/sda1', - # 'ephemeral0': '/dev/sdb', - # 'root': '/dev/sda1'} - found = None - bdm = self.metadata["block-device-mapping"] - if not isinstance(bdm, dict): - LOG.debug("block-device-mapping not a dictionary: '%s'", bdm) - return None - - for entname, device in bdm.items(): - if entname == name: - found = device - break - # LP: #513842 mapping in Euca has 'ephemeral' not 'ephemeral0' - if entname == "ephemeral" and name == "ephemeral0": - found = device - - if found is None: - LOG.debug("Unable to convert %s to a device", name) - return None - - ofound = found - if not found.startswith("/"): - found = "/dev/%s" % found - - if os.path.exists(found): - return found - - remapped = self._remap_device(os.path.basename(found)) - if remapped: - LOG.debug("Remapped device name %s => %s", found, remapped) - return remapped - - # On t1.micro, ephemeral0 will appear in block-device-mapping from - # metadata, but it will not exist on disk (and never will) - # at this point, we've verified that the path did not exist - # in the special case of 'ephemeral0' return None to avoid bogus - # fstab entry (LP: #744019) - if name == "ephemeral0": - return None - return ofound - - @property - def availability_zone(self): - try: - if self.cloud_name == CloudNames.AWS: - return self.identity.get( - "availabilityZone", - self.metadata["placement"]["availability-zone"], - ) - else: - return self.metadata["placement"]["availability-zone"] - except KeyError: - return None - - @property - def region(self): - if self.cloud_name == CloudNames.AWS: - region = self.identity.get("region") - # Fallback to trimming the availability zone if region is missing - if self.availability_zone and not region: - region = self.availability_zone[:-1] - return region - else: - az = self.availability_zone - if az is not None: - return az[:-1] - return None - - def activate(self, cfg, is_new_instance): - if not is_new_instance: - return - if self.cloud_name == CloudNames.UNKNOWN: - warn_if_necessary( - util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT), - cfg, - ) - - @property - def network_config(self): - """Return 
a network config dict for rendering ENI or netplan files.""" - if self._network_config != sources.UNSET: - return self._network_config - - if self.metadata is None: - # this would happen if get_data hadn't been called. leave as UNSET - LOG.warning( - "Unexpected call to network_config when metadata is None." - ) - return None - - result = None - no_network_metadata_on_aws = bool( - "network" not in self.metadata - and self.cloud_name == CloudNames.AWS - ) - if no_network_metadata_on_aws: - LOG.debug( - "Metadata 'network' not present:" - " Refreshing stale metadata from prior to upgrade." - ) - util.log_time( - logfunc=LOG.debug, - msg="Re-crawl of metadata service", - func=self.get_data, - ) - - iface = self.distro.fallback_interface - net_md = self.metadata.get("network") - if isinstance(net_md, dict): - # SRU_BLOCKER: xenial, bionic and eoan should default - # apply_full_imds_network_config to False to retain original - # behavior on those releases. - result = convert_ec2_metadata_network_config( - net_md, - self.distro, - fallback_nic=iface, - full_network_config=util.get_cfg_option_bool( - self.ds_cfg, "apply_full_imds_network_config", True - ), - ) - - # Non-VPC (aka Classic) Ec2 instances need to rewrite the - # network config file every boot due to MAC address change. - if self.is_classic_instance(): - self.default_update_events = copy.deepcopy( - self.default_update_events - ) - self.default_update_events[EventScope.NETWORK].add( - EventType.BOOT - ) - self.default_update_events[EventScope.NETWORK].add( - EventType.BOOT_LEGACY - ) - else: - LOG.warning("Metadata 'network' key not valid: %s.", net_md) - self._network_config = result - - return self._network_config - - def crawl_metadata(self): - """Crawl metadata service when available. - - @returns: Dictionary of crawled metadata content containing the keys: - meta-data, user-data and dynamic. - """ - if not self.wait_for_metadata_service(): - return {} - api_version = self.get_metadata_api_version() - redact = self.imdsv2_token_redact - crawled_metadata = {} - if self.cloud_name in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - exc_cb = self._refresh_stale_aws_token_cb - exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb - skip_cb = None - elif self.cloud_name == CloudNames.OUTSCALE: - exc_cb = exc_cb_ud = None - skip_cb = skip_404_tag_errors - else: - exc_cb = exc_cb_ud = skip_cb = None - try: - raw_userdata = ec2.get_instance_userdata( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=redact, - exception_cb=exc_cb_ud, - ) - crawled_metadata["user-data"] = util.maybe_b64decode(raw_userdata) - crawled_metadata["meta-data"] = ec2.get_instance_metadata( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=redact, - exception_cb=exc_cb, - retrieval_exception_ignore_cb=skip_cb, - ) - if self.cloud_name == CloudNames.AWS: - identity = ec2.get_instance_identity( - api_version, - self.metadata_address, - headers_cb=self._get_headers, - headers_redact=redact, - exception_cb=exc_cb, - ) - crawled_metadata["dynamic"] = {"instance-identity": identity} - except Exception: - util.logexc( - LOG, - "Failed reading from metadata address %s", - self.metadata_address, - ) - return {} - crawled_metadata["_metadata_api_version"] = api_version - return crawled_metadata - - def _refresh_api_token(self, seconds=None): - """Request new metadata API token. - @param seconds: The lifetime of the token in seconds - - @return: The API token or None if unavailable. 
- """ - if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - return None - - if seconds is None: - seconds = self.imdsv2_token_ttl_seconds - - LOG.debug("Refreshing Ec2 metadata API token") - request_header = {self.imdsv2_token_req_header: seconds} - token_url = "{}/{}".format(self.metadata_address, self.api_token_route) - try: - response = uhelp.readurl( - token_url, - headers=request_header, - headers_redact=self.imdsv2_token_redact, - request_method="PUT", - ) - except uhelp.UrlError as e: - LOG.warning( - "Unable to get API token: %s raised exception %s", token_url, e - ) - return None - return response.contents - - def _skip_or_refresh_stale_aws_token_cb(self, msg, exception): - """Callback will not retry on SKIP_USERDATA_CODES or if no token - is available.""" - retry = ec2.skip_retry_on_codes( - ec2.SKIP_USERDATA_CODES, msg, exception - ) - if not retry: - return False # False raises exception - return self._refresh_stale_aws_token_cb(msg, exception) - - def _refresh_stale_aws_token_cb(self, msg, exception): - """Exception handler for Ec2 to refresh token if token is stale.""" - if isinstance(exception, uhelp.UrlError) and exception.code == 401: - # With _api_token as None, _get_headers will _refresh_api_token. - LOG.debug("Clearing cached Ec2 API token due to expiry") - self._api_token = None - return True # always retry - - def _imds_exception_cb(self, msg, exception=None): - """Fail quickly on proper AWS if IMDSv2 rejects API token request - - Guidance from Amazon is that if IMDSv2 had disabled token requests - by returning a 403, or cloud-init malformed requests resulting in - other 40X errors, we want the datasource detection to fail quickly - without retries as those symptoms will likely not be resolved by - retries. - - Exceptions such as requests.ConnectionError due to IMDS being - temporarily unroutable or unavailable will still retry due to the - callsite wait_for_url. - """ - if isinstance(exception, uhelp.UrlError): - # requests.ConnectionError will have exception.code == None - if exception.code and exception.code >= 400: - if exception.code == 403: - LOG.warning( - "Ec2 IMDS endpoint returned a 403 error. " - "HTTP endpoint is disabled. Aborting." - ) - else: - LOG.warning( - "Fatal error while requesting Ec2 IMDSv2 API tokens" - ) - raise exception - - def _get_headers(self, url=""): - """Return a dict of headers for accessing a url. - - If _api_token is unset on AWS, attempt to refresh the token via a PUT - and then return the updated token header. - """ - if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS: - return {} - # Request a 6 hour token if URL is api_token_route - request_token_header = { - self.imdsv2_token_req_header: self.imdsv2_token_ttl_seconds - } - if self.api_token_route in url: - return request_token_header - if not self._api_token: - # If we don't yet have an API token, get one via a PUT against - # api_token_route. This _api_token may get unset by a 403 due - # to an invalid or expired token - self._api_token = self._refresh_api_token() - if not self._api_token: - return {} - return {self.imdsv2_token_put_header: self._api_token} - - -class DataSourceEc2Local(DataSourceEc2): - """Datasource run at init-local which sets up network to query metadata. - - In init-local, no network is available. This subclass sets up minimal - networking with dhclient on a viable nic so that it can talk to the - metadata service. 
If the metadata service provides network configuration - then render the network configuration for that instance based on metadata. - """ - - perform_dhcp_setup = True # Use dhcp before querying metadata - - def get_data(self): - supported_platforms = (CloudNames.AWS, CloudNames.OUTSCALE) - if self.cloud_name not in supported_platforms: - LOG.debug( - "Local Ec2 mode only supported on %s, not %s", - supported_platforms, - self.cloud_name, - ) - return False - return super(DataSourceEc2Local, self).get_data() - - -def read_strict_mode(cfgval, default): - try: - return parse_strict_mode(cfgval) - except ValueError as e: - LOG.warning(e) - return default - - -def parse_strict_mode(cfgval): - # given a mode like: - # true, false, warn,[sleep] - # return tuple with string mode (true|false|warn) and sleep. - if cfgval is True: - return "true", None - if cfgval is False: - return "false", None - - if not cfgval: - return "warn", 0 - - mode, _, sleep = cfgval.partition(",") - if mode not in ("true", "false", "warn"): - raise ValueError( - "Invalid mode '%s' in strict_id setting '%s': " - "Expected one of 'true', 'false', 'warn'." % (mode, cfgval) - ) - - if sleep: - try: - sleep = int(sleep) - except ValueError as e: - raise ValueError( - "Invalid sleep '%s' in strict_id setting '%s': not an integer" - % (sleep, cfgval) - ) from e - else: - sleep = None - - return mode, sleep - - -def warn_if_necessary(cfgval, cfg): - try: - mode, sleep = parse_strict_mode(cfgval) - except ValueError as e: - LOG.warning(e) - return - - if mode == "false": - return - - warnings.show_warning("non_ec2_md", cfg, mode=True, sleep=sleep) - - -def identify_aliyun(data): - if data["product_name"] == "Alibaba Cloud ECS": - return CloudNames.ALIYUN - - -def identify_aws(data): - # data is a dictionary returned by _collect_platform_data. - if data["uuid"].startswith("ec2") and ( - data["uuid_source"] == "hypervisor" or data["uuid"] == data["serial"] - ): - return CloudNames.AWS - - return None - - -def identify_brightbox(data): - if data["serial"].endswith(".brightbox.com"): - return CloudNames.BRIGHTBOX - - -def identify_zstack(data): - if data["asset_tag"].endswith(".zstack.io"): - return CloudNames.ZSTACK - - -def identify_e24cloud(data): - if data["vendor"] == "e24cloud": - return CloudNames.E24CLOUD - - -def identify_outscale(data): - if ( - data["product_name"] == "3DS Outscale VM".lower() - and data["vendor"] == "3DS Outscale".lower() - ): - return CloudNames.OUTSCALE - - -def identify_platform(): - # identify the platform and return an entry in CloudNames. - data = _collect_platform_data() - checks = ( - identify_aws, - identify_brightbox, - identify_zstack, - identify_e24cloud, - identify_outscale, - identify_aliyun, - lambda x: CloudNames.UNKNOWN, - ) - for checker in checks: - try: - result = checker(data) - if result: - return result - except Exception as e: - LOG.warning( - "calling %s with %s raised exception: %s", checker, data, e - ) - - -def _collect_platform_data(): - """Returns a dictionary of platform info from dmi or /sys/hypervisor. 
-
-    Keys in the dictionary are as follows:
-       uuid: system-uuid from dmi or /sys/hypervisor
-       uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
-       serial: dmi 'system-serial-number' (/sys/.../product_serial)
-       asset_tag: 'dmidecode -s chassis-asset-tag'
-       vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor)
-       product_name: dmi 'system-product-name' (/sys/.../product_name)
-
-    On Ec2 instances, experimentation shows that product_serial is upper case,
-    and product_uuid is lower case. This returns lower case values for both.
-    """
-    data = {}
-    try:
-        uuid = util.load_text_file("/sys/hypervisor/uuid").strip()
-        data["uuid_source"] = "hypervisor"
-    except Exception:
-        uuid = dmi.read_dmi_data("system-uuid")
-        data["uuid_source"] = "dmi"
-
-    if uuid is None:
-        uuid = ""
-    data["uuid"] = uuid.lower()
-
-    serial = dmi.read_dmi_data("system-serial-number")
-    if serial is None:
-        serial = ""
-
-    data["serial"] = serial.lower()
-
-    asset_tag = dmi.read_dmi_data("chassis-asset-tag")
-    if asset_tag is None:
-        asset_tag = ""
-
-    data["asset_tag"] = asset_tag.lower()
-
-    vendor = dmi.read_dmi_data("system-manufacturer")
-    data["vendor"] = (vendor if vendor else "").lower()
-
-    product_name = dmi.read_dmi_data("system-product-name")
-    data["product_name"] = (product_name if product_name else "").lower()
-
-    return data
-
-
-def _build_nic_order(
-    macs_metadata: Dict[str, Dict], macs: List[str]
-) -> Dict[str, int]:
-    """
-    Builds a dictionary containing macs as keys and nic orders as values,
-    taking into account `network-card` and `device-number` if present.
-
-    Note that the first NIC will be the primary NIC as it will be the one with
-    `network-card` == 0 and `device-number` == 0 if present.
-
-    @param macs_metadata: dictionary with mac address as key and contents like:
-    {"device-number": "0", "interface-id": "...", "local-ipv4s": ...}
-    @param macs: list of macs to consider
-
-    @return: Dictionary with macs as keys and nic orders as values.
-    """
-    nic_order: Dict[str, int] = {}
-    if len(macs) == 0 or len(macs_metadata) == 0:
-        return nic_order
-
-    valid_macs_metadata = filter(
-        # filter out nics without metadata (not a physical nic)
-        lambda mmd: mmd[1] is not None,
-        # filter by macs
-        map(lambda mac: (mac, macs_metadata.get(mac)), macs),
-    )
-
-    def _get_key_as_int_or(dikt, key, alt_value):
-        value = dikt.get(key, None)
-        if value is not None:
-            return int(value)
-        return alt_value
-
-    # Sort by (network_card, device_index) as some instances could have
-    # multiple network cards with repeated device indexes.
-    #
-    # On platforms where network-card and device-number are not present,
-    # such as AliYun, the order will be by mac, as it was before the
-    # introduction of this function.
-    return {
-        mac: i
-        for i, (mac, _mac_metadata) in enumerate(
-            sorted(
-                valid_macs_metadata,
-                key=lambda mmd: (
-                    _get_key_as_int_or(
-                        mmd[1], "network-card", float("infinity")
-                    ),
-                    _get_key_as_int_or(
-                        mmd[1], "device-number", float("infinity")
-                    ),
-                ),
-            )
-        )
-    }
-
-
-def _configure_policy_routing(
-    dev_config: dict,
-    *,
-    nic_name: str,
-    nic_metadata: dict,
-    distro: Distro,
-    is_ipv4: bool,
-    table: int,
-) -> None:
-    """
-    Configure policy-based routing on secondary NICs / secondary IPs to
-    ensure outgoing packets are routed via the correct interface.
-
-    @param: dev_config: network cfg v2 to be updated in place.
-    @param: nic_name: nic name. Only used if ipv4.
-    @param: nic_metadata: nic metadata from IMDS.
-    @param: distro: Instance of Distro. Only used if ipv4.
- @param: is_ipv4: Boolean indicating if we are acting over ipv4 or not. - @param: table: Routing table id. - """ - if is_ipv4: - subnet_prefix_routes = nic_metadata.get("subnet-ipv4-cidr-block") - ips = nic_metadata.get("local-ipv4s") - else: - subnet_prefix_routes = nic_metadata.get("subnet-ipv6-cidr-blocks") - ips = nic_metadata.get("ipv6s") - if not (subnet_prefix_routes and ips): - LOG.debug( - "Not enough IMDS information to configure policy routing " - "for IPv%s", - "4" if is_ipv4 else "6", - ) - return - - if not dev_config.get("routes"): - dev_config["routes"] = [] - if is_ipv4: - try: - lease = distro.dhcp_client.dhcp_discovery(nic_name, distro=distro) - gateway = lease["routers"] - except NoDHCPLeaseError as e: - LOG.warning( - "Could not perform dhcp discovery on %s to find its " - "gateway. Not adding default route via the gateway. " - "Error: %s", - nic_name, - e, - ) - else: - # Add default route via the NIC's gateway - dev_config["routes"].append( - { - "to": "0.0.0.0/0", - "via": gateway, - "table": table, - }, - ) - - subnet_prefix_routes = ( - [subnet_prefix_routes] - if isinstance(subnet_prefix_routes, str) - else subnet_prefix_routes - ) - for prefix_route in subnet_prefix_routes: - dev_config["routes"].append( - { - "to": prefix_route, - "table": table, - }, - ) - - if not dev_config.get("routing-policy"): - dev_config["routing-policy"] = [] - # Packets coming from any IP associated with the current NIC - # will be routed using `table` routing table - ips = [ips] if isinstance(ips, str) else ips - for ip in ips: - dev_config["routing-policy"].append( - { - "from": ip, - "table": table, - }, - ) - - -def convert_ec2_metadata_network_config( - network_md, - distro, - macs_to_nics=None, - fallback_nic=None, - full_network_config=True, -): - """Convert ec2 metadata to network config version 2 data dict. - - @param: network_md: 'network' portion of EC2 metadata. - generally formed as {"interfaces": {"macs": {}} where - 'macs' is a dictionary with mac address as key and contents like: - {"device-number": "0", "interface-id": "...", "local-ipv4s": ...} - @param: distro: instance of Distro. - @param: macs_to_nics: Optional dict of mac addresses and nic names. If - not provided, get_interfaces_by_mac is called to get it from the OS. - @param: fallback_nic: Optionally provide the primary nic interface name. - This nic will be guaranteed to minimally have a dhcp4 configuration. - @param: full_network_config: Boolean set True to configure all networking - presented by IMDS. This includes rendering secondary IPv4 and IPv6 - addresses on all NICs and rendering network config on secondary NICs. - If False, only the primary nic will be configured and only with dhcp - (IPv4/IPv6). - - @return A dict of network config version 2 based on the metadata and macs. 
- """ - netcfg = {"version": 2, "ethernets": {}} - if not macs_to_nics: - macs_to_nics = net.get_interfaces_by_mac() - macs_metadata = network_md["interfaces"]["macs"] - - if not full_network_config: - for mac, nic_name in macs_to_nics.items(): - if nic_name == fallback_nic: - break - dev_config = { - "dhcp4": True, - "dhcp6": False, - "match": {"macaddress": mac.lower()}, - "set-name": nic_name, - } - nic_metadata = macs_metadata.get(mac) - if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured - dev_config["dhcp6"] = True - netcfg["ethernets"][nic_name] = dev_config - return netcfg - # Apply network config for all nics and any secondary IPv4/v6 addresses - is_netplan = distro.network_renderer == netplan.Renderer - macs = sorted(macs_to_nics.keys()) - nic_order = _build_nic_order(macs_metadata, macs) - for mac in macs: - nic_name = macs_to_nics[mac] - nic_metadata = macs_metadata.get(mac) - if not nic_metadata: - continue # Not a physical nic represented in metadata - nic_idx = nic_order[mac] - is_primary_nic = nic_idx == 0 - # nic_idx + 1 to start route_metric at 100 (nic_idx is 0-indexed) - dhcp_override = {"route-metric": (nic_idx + 1) * 100} - dev_config = { - "dhcp4": True, - "dhcp4-overrides": dhcp_override, - "dhcp6": False, - "match": {"macaddress": mac.lower()}, - "set-name": nic_name, - } - # This config only works on systems using Netplan because Networking - # config V2 does not support `routing-policy`, but this config is - # passed through on systems using Netplan. - # See: https://github.com/canonical/cloud-init/issues/4862 - # - # If device-number is not present (AliYun or other ec2-like platforms), - # do not configure source-routing as we cannot determine which is the - # primary NIC. - table = 100 + nic_idx - if ( - is_netplan - and nic_metadata.get("device-number") - and not is_primary_nic - ): - dhcp_override["use-routes"] = True - _configure_policy_routing( - dev_config, - distro=distro, - nic_name=nic_name, - nic_metadata=nic_metadata, - is_ipv4=True, - table=table, - ) - if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured - dev_config["dhcp6"] = True - dev_config["dhcp6-overrides"] = dhcp_override - if ( - is_netplan - and nic_metadata.get("device-number") - and not is_primary_nic - ): - _configure_policy_routing( - dev_config, - distro=distro, - nic_name=nic_name, - nic_metadata=nic_metadata, - is_ipv4=False, - table=table, - ) - dev_config["addresses"] = get_secondary_addresses(nic_metadata, mac) - if not dev_config["addresses"]: - dev_config.pop("addresses") # Since we found none configured - - netcfg["ethernets"][nic_name] = dev_config - # Remove route-metric dhcp overrides and routes / routing-policy if only - # one nic configured - if len(netcfg["ethernets"]) == 1: - for nic_name in netcfg["ethernets"].keys(): - netcfg["ethernets"][nic_name].pop("dhcp4-overrides") - netcfg["ethernets"][nic_name].pop("dhcp6-overrides", None) - netcfg["ethernets"][nic_name].pop("routes", None) - netcfg["ethernets"][nic_name].pop("routing-policy", None) - return netcfg - - -def get_secondary_addresses(nic_metadata, mac): - """Parse interface-specific nic metadata and return any secondary IPs - - :return: List of secondary IPv4 or IPv6 addresses to configure on the - interface - """ - ipv4s = nic_metadata.get("local-ipv4s") - ipv6s = nic_metadata.get("ipv6s") - addresses = [] - # In version < 2018-09-24 local_ipv4s or ipv6s is a str with one IP - if bool(isinstance(ipv4s, list) and len(ipv4s) > 1): - addresses.extend( - _get_secondary_addresses( - nic_metadata, 
"subnet-ipv4-cidr-block", mac, ipv4s, "24" - ) - ) - if bool(isinstance(ipv6s, list) and len(ipv6s) > 1): - addresses.extend( - _get_secondary_addresses( - nic_metadata, "subnet-ipv6-cidr-block", mac, ipv6s, "128" - ) - ) - return sorted(addresses) - - -def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix): - """Return list of IP addresses as CIDRs for secondary IPs - - The CIDR prefix will be default_prefix if cidr_key is absent or not - parseable in nic_metadata. - """ - addresses = [] - cidr = nic_metadata.get(cidr_key) - prefix = default_prefix - if not cidr or len(cidr.split("/")) != 2: - ip_type = "ipv4" if "ipv4" in cidr_key else "ipv6" - LOG.warning( - "Could not parse %s %s for mac %s. %s network" - " config prefix defaults to /%s", - cidr_key, - cidr, - mac, - ip_type, - prefix, - ) - else: - prefix = cidr.split("/")[1] - # We know we have > 1 ips for in metadata for this IP type - for ip in ips[1:]: - addresses.append("{ip}/{prefix}".format(ip=ip, prefix=prefix)) - return addresses - - -# Used to match classes to dependencies -datasources = [ - (DataSourceEc2Local, (sources.DEP_FILESYSTEM,)), # Run at init-local - (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), -] - - -# Return a list of data sources that match this set of dependencies -def get_datasource_list(depends): - return sources.list_from_depends(depends, datasources) diff --git a/.pc/cpick-d771d1f4-fix-ec2-Correctly-identify-netplan-renderer-5361/tests/unittests/sources/test_ec2.py b/.pc/cpick-d771d1f4-fix-ec2-Correctly-identify-netplan-renderer-5361/tests/unittests/sources/test_ec2.py deleted file mode 100644 index 378c57a14..000000000 --- a/.pc/cpick-d771d1f4-fix-ec2-Correctly-identify-netplan-renderer-5361/tests/unittests/sources/test_ec2.py +++ /dev/null @@ -1,1615 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -import copy -import json -import threading -from unittest import mock - -import pytest -import requests -import responses - -from cloudinit import helpers -from cloudinit.net import netplan -from cloudinit.sources import DataSourceEc2 as ec2 -from tests.unittests import helpers as test_helpers -from tests.unittests.util import MockDistro - -DYNAMIC_METADATA = { - "instance-identity": { - "document": json.dumps( - { - "devpayProductCodes": None, - "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], - "availabilityZone": "us-west-2b", - "privateIp": "10.158.112.84", - "version": "2017-09-30", - "instanceId": "my-identity-id", - "billingProducts": None, - "instanceType": "t2.micro", - "accountId": "123456789012", - "imageId": "ami-5fb8c835", - "pendingTime": "2016-11-19T16:32:11Z", - "architecture": "x86_64", - "kernelId": None, - "ramdiskId": None, - "region": "us-west-2", - } - ) - } -} - - -# collected from api version 2016-09-02/ with -# python3 -c 'import json -# from cloudinit.sources.helpers.ec2 import get_instance_metadata as gm -# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))' -# Note that the MAC addresses have been modified to sort in the opposite order -# to the device-number attribute, to test LP: #1876312 -DEFAULT_METADATA = { - "ami-id": "ami-8b92b4ee", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, - "hostname": "ip-172-31-31-158.us-east-2.compute.internal", - "instance-action": "none", - "instance-id": "i-0a33f80f09c96477f", - "instance-type": "t2.small", - "local-hostname": "ip-172-3-3-15.us-east-2.compute.internal", - "local-ipv4": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "metrics": {"vhostmd": ''}, - "network": { - "interfaces": { - "macs": { - "06:17:04:d7:26:09": { - "device-number": "0", - "interface-id": "eni-e44ef49e", - "ipv4-associations": {"13.59.77.202": "172.3.3.15"}, - "ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc", - "local-hostname": ( - "ip-172-3-3-15.us-east-2.compute.internal" - ), - "local-ipv4s": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "owner-id": "950047163771", - "public-hostname": ( - "ec2-13-59-77-202.us-east-2.compute.amazonaws.com" - ), - "public-ipv4s": "13.59.77.202", - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "2600:1f16:aeb:b20b::/64", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56", - }, - "06:17:04:d7:26:08": { - "device-number": "1", # Only IPv4 local config - "interface-id": "eni-e44ef49f", - "ipv4-associations": {"": "172.3.3.16"}, - "ipv6s": "", # No IPv6 config - "local-hostname": ( - "ip-172-3-3-16.us-east-2.compute.internal" - ), - "local-ipv4s": "172.3.3.16", - "mac": "06:17:04:d7:26:08", - "owner-id": "950047163771", - "public-hostname": ( - "ec2-172-3-3-16.us-east-2.compute.amazonaws.com" - ), - "public-ipv4s": "", # No public ipv4 config - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "", - }, - } - } - }, - "placement": {"availability-zone": "us-east-2b"}, - "profile": "default-hvm", - 
"public-hostname": "ec2-13-59-77-202.us-east-2.compute.amazonaws.com", - "public-ipv4": "13.59.77.202", - "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, - "reservation-id": "r-01efbc9996bac1bd6", - "security-groups": "my-wide-open", - "services": {"domain": "amazonaws.com", "partition": "aws"}, -} - -# collected from api version 2018-09-24/ with -# python3 -c 'import json -# from cloudinit.sources.helpers.ec2 import get_instance_metadata as gm -# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))' - -NIC1_MD_IPV4_IPV6_MULTI_IP = { - "device-number": "0", - "interface-id": "eni-0d6335689899ce9cc", - "ipv4-associations": {"18.218.219.181": "172.31.44.13"}, - "ipv6s": [ - "2600:1f16:292:100:c187:593c:4349:136", - "2600:1f16:292:100:f153:12a3:c37c:11f9", - "2600:1f16:292:100:f152:2222:3333:4444", - ], - "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "local-ipv4s": ["172.31.44.13", "172.31.45.70"], - "mac": "0a:07:84:3d:6e:38", - "owner-id": "329910648901", - "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com", - "public-ipv4s": "18.218.219.181", - "security-group-ids": "sg-0c387755222ba8d2e", - "security-groups": "launch-wizard-4", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "subnet_ipv6_cidr_blocks": "2600:1f16:292:100::/64", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56", -} - -NIC2_MD = { - "device-number": "1", - "interface-id": "eni-043cdce36ded5e79f", - "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", - "local-ipv4s": "172.31.47.221", - "mac": "0a:75:69:92:e2:16", - "owner-id": "329910648901", - "security-group-ids": "sg-0d68fef37d8cc9b77", - "security-groups": "launch-wizard-17", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", -} - -NIC2_MD_IPV4_IPV6_MULTI_IP = { - "device-number": "1", - "interface-id": "eni-043cdce36ded5e79f", - "ipv6s": [ - "2600:1f16:292:100:c187:593c:4349:136", - "2600:1f16:292:100:f153:12a3:c37c:11f9", - ], - "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", - "local-ipv4s": "172.31.47.221", - "mac": "0a:75:69:92:e2:16", - "owner-id": "329910648901", - "security-group-ids": "sg-0d68fef37d8cc9b77", - "security-groups": "launch-wizard-17", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "subnet-ipv6-cidr-blocks": "2600:1f16:292:100::/64", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:292:100::/56", -} - -MULTI_NIC_V6_ONLY_MD = { - "macs": { - "02:6b:df:a2:4b:2b": { - "device-number": "1", - "interface-id": "eni-0669816d0cf606123", - "ipv6s": "2600:1f16:67f:f201:8d2e:4d1f:9e80:4ab9", - "local-hostname": "i-0951b6d0b66337123.us-east-2.compute.internal", - "mac": "02:6b:df:a2:4b:2b", - "owner-id": "483410185123", - "security-group-ids": "sg-0bf34e5c3cde1d123", - "security-groups": "default", - "subnet-id": "subnet-0903f279682c66123", - "subnet-ipv6-cidr-blocks": "2600:1f16:67f:f201:0:0:0:0/64", - "vpc-id": "vpc-0ac1befb8c824a123", - "vpc-ipv4-cidr-block": "192.168.0.0/20", - "vpc-ipv4-cidr-blocks": "192.168.0.0/20", - "vpc-ipv6-cidr-blocks": "2600:1f16:67f:f200:0:0:0:0/56", - }, - "02:7c:03:b8:5c:af": { - "device-number": "0", - 
"interface-id": "eni-0f3cddb84c16e1123", - "ipv6s": "2600:1f16:67f:f201:6613:29a2:dbf7:2f1f", - "local-hostname": "i-0951b6d0b66337123.us-east-2.compute.internal", - "mac": "02:7c:03:b8:5c:af", - "owner-id": "483410185123", - "security-group-ids": "sg-0bf34e5c3cde1d123", - "security-groups": "default", - "subnet-id": "subnet-0903f279682c66123", - "subnet-ipv6-cidr-blocks": "2600:1f16:67f:f201:0:0:0:0/64", - "vpc-id": "vpc-0ac1befb8c824a123", - "vpc-ipv4-cidr-block": "192.168.0.0/20", - "vpc-ipv4-cidr-blocks": "192.168.0.0/20", - "vpc-ipv6-cidr-blocks": "2600:1f16:67f:f200:0:0:0:0/56", - }, - } -} - -SECONDARY_IP_METADATA_2018_09_24 = { - "ami-id": "ami-0986c2ac728528ac2", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, - "events": {"maintenance": {"history": "[]", "scheduled": "[]"}}, - "hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "identity-credentials": { - "ec2": { - "info": { - "AccountId": "329910648901", - "Code": "Success", - "LastUpdated": "2019-07-06T14:22:56Z", - } - } - }, - "instance-action": "none", - "instance-id": "i-069e01e8cc43732f8", - "instance-type": "t2.micro", - "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "local-ipv4": "172.31.44.13", - "mac": "0a:07:84:3d:6e:38", - "metrics": {"vhostmd": ''}, - "network": { - "interfaces": { - "macs": { - "0a:07:84:3d:6e:38": NIC1_MD_IPV4_IPV6_MULTI_IP, - } - } - }, - "placement": {"availability-zone": "us-east-2c"}, - "profile": "default-hvm", - "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com", - "public-ipv4": "18.218.219.181", - "public-keys": {"yourkeyname,e": ["ssh-rsa AAAAW...DZ yourkeyname"]}, - "reservation-id": "r-09b4917135cdd33be", - "security-groups": "launch-wizard-4", - "services": {"domain": "amazonaws.com", "partition": "aws"}, -} - -M_PATH = "cloudinit.sources.DataSourceEc2." -M_PATH_NET = "cloudinit.sources.DataSourceEc2.net." - -TAGS_METADATA_2021_03_23: dict = { - **DEFAULT_METADATA, - "tags": { - "instance": { - "Environment": "production", - "Application": "test", - "TagWithoutValue": "", - } - }, -} - - -@pytest.fixture(autouse=True) -def disable_is_resolvable(): - with mock.patch("cloudinit.sources.DataSourceEc2.util.is_resolvable"): - yield - - -def _register_ssh_keys(rfunc, base_url, keys_data): - """handle ssh key inconsistencies. - - public-keys in the ec2 metadata is inconsistently formated compared - to other entries. - Given keys_data of {name1: pubkey1, name2: pubkey2} - - This registers the following urls: - base_url 0={name1}\n1={name2} # (for each name) - base_url/ 0={name1}\n1={name2} # (for each name) - base_url/0 openssh-key - base_url/0/ openssh-key - base_url/0/openssh-key {pubkey1} - base_url/0/openssh-key/ {pubkey1} - ... - """ - - base_url = base_url.rstrip("/") - odd_index = "\n".join( - ["{0}={1}".format(n, name) for n, name in enumerate(sorted(keys_data))] - ) - - rfunc(base_url, odd_index) - rfunc(base_url + "/", odd_index) - - for n, name in enumerate(sorted(keys_data)): - val = keys_data[name] - if isinstance(val, list): - val = "\n".join(val) - burl = base_url + "/%s" % n - rfunc(burl, "openssh-key") - rfunc(burl + "/", "openssh-key") - rfunc(burl + "/%s/openssh-key" % name, val) - rfunc(burl + "/%s/openssh-key/" % name, val) - - -def register_mock_metaserver(base_url, data, responses_mock=None): - """Register with responses a ec2 metadata like service serving 'data'. 
- - If given a dictionary, it will populate urls under base_url for - that dictionary. For example, input of - {"instance-id": "i-abc", "mac": "00:16:3e:00:00:00"} - populates - base_url with 'instance-id\nmac' - base_url/ with 'instance-id\nmac' - base_url/instance-id with i-abc - base_url/mac with 00:16:3e:00:00:00 - In the index, references to lists or dictionaries have a trailing /. - """ - responses_mock = responses_mock or responses - - def register_helper(register, base_url, body): - if not isinstance(base_url, str): - register(base_url, body) - return - base_url = base_url.rstrip("/") - if isinstance(body, str): - register(base_url, body) - elif isinstance(body, list): - register(base_url, "\n".join(body) + "\n") - register(base_url + "/", "\n".join(body) + "\n") - elif isinstance(body, dict): - vals = [] - for k, v in body.items(): - if k == "public-keys": - _register_ssh_keys(register, base_url + "/public-keys/", v) - continue - suffix = k.rstrip("/") - if not isinstance(v, (str, list)): - suffix += "/" - vals.append(suffix) - url = base_url + "/" + suffix - register_helper(register, url, v) - register(base_url, "\n".join(vals) + "\n") - register(base_url + "/", "\n".join(vals) + "\n") - elif body is None: - register(base_url, "not found", status=404) - - def myreg(*argc, **kwargs): - url, body = argc - method = responses.PUT if "latest/api/token" in url else responses.GET - status = kwargs.get("status", 200) - return responses_mock.add(method, url, body, status=status) - - register_helper(myreg, base_url, data) - - -class TestEc2(test_helpers.ResponsesTestCase): - with_logs = True - maxDiff = None - - valid_platform_data = { - "uuid": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", - "uuid_source": "dmi", - "serial": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", - } - - def setUp(self): - super(TestEc2, self).setUp() - self.datasource = ec2.DataSourceEc2 - self.metadata_addr = self.datasource.metadata_urls[0] - self.tmp = self.tmp_dir() - - def data_url(self, version, data_item="meta-data"): - """Return a metadata url based on the version provided.""" - return "/".join([self.metadata_addr, version, data_item]) - - def _patch_add_cleanup(self, mpath, *args, **kwargs): - p = mock.patch(mpath, *args, **kwargs) - p.start() - self.addCleanup(p.stop) - - def _setup_ds( - self, sys_cfg, platform_data, md, md_version=None, distro=None - ): - self.uris = [] - distro = distro or mock.MagicMock() - distro.get_tmp_exec_path = self.tmp_dir - paths = helpers.Paths({"run_dir": self.tmp}) - if sys_cfg is None: - sys_cfg = {} - ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) - event = threading.Event() - p = mock.patch("time.sleep", event.wait) - p.start() - - def _mock_sleep(): - event.set() - p.stop() - - self.addCleanup(_mock_sleep) - if not md_version: - md_version = ds.min_metadata_version - if platform_data is not None: - self._patch_add_cleanup( - "cloudinit.sources.DataSourceEc2._collect_platform_data", - return_value=platform_data, - ) - - if md: - all_versions = [ - ds.min_metadata_version - ] + ds.extended_metadata_versions - token_url = self.data_url("latest", data_item="api/token") - register_mock_metaserver(token_url, "API-TOKEN", self.responses) - for version in all_versions: - metadata_url = self.data_url(version) + "/" - if version == md_version: - # Register all metadata for desired version - register_mock_metaserver( - metadata_url, - md.get("md", DEFAULT_METADATA), - self.responses, - ) - userdata_url = self.data_url( - version, data_item="user-data" - ) - 
register_mock_metaserver( - userdata_url, md.get("ud", ""), self.responses - ) - identity_url = self.data_url( - version, data_item="dynamic/instance-identity" - ) - register_mock_metaserver( - identity_url, - md.get("id", DYNAMIC_METADATA), - self.responses, - ) - else: - instance_id_url = metadata_url + "instance-id" - if version == ds.min_metadata_version: - # Add min_metadata_version service availability check - register_mock_metaserver( - instance_id_url, - DEFAULT_METADATA["instance-id"], - self.responses, - ) - else: - # Register 404s for all unrequested extended versions - register_mock_metaserver( - instance_id_url, None, self.responses - ) - return ds - - def test_network_config_property_returns_version_2_network_data(self): - """network_config property returns network version 2 for metadata""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - find_fallback_path = M_PATH_NET + "find_fallback_nic" - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = "eth9" - ds.get_data() - - mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": "06:17:04:d7:26:09"}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - get_interface_mac_path = M_PATH_NET + "get_interface_mac" - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - m_find_fallback.return_value = "eth9" - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_set_dhcp4(self): - """network_config property configures dhcp4 on nics with local-ipv4s. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. - """ - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - find_fallback_path = M_PATH_NET + "find_fallback_nic" - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = "eth9" - ds.get_data() - - mac1 = "06:17:04:d7:26:08" # IPv4 only in DEFAULT_METADATA - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": mac1.lower()}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - get_interface_mac_path = M_PATH_NET + "get_interface_mac" - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - dhcp_client = ds.distro.dhcp_client - dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - m_find_fallback.return_value = "eth9" - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_secondary_private_ips(self): - """network_config property configures any secondary ipv4 addresses. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. 
- """ - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": SECONDARY_IP_METADATA_2018_09_24}, - ) - find_fallback_path = M_PATH_NET + "find_fallback_nic" - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = "eth9" - ds.get_data() - - mac1 = "0a:07:84:3d:6e:38" # 1 secondary IPv4 and 2 secondary IPv6 - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": mac1}, - "set-name": "eth9", - "addresses": [ - "172.31.45.70/20", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - "dhcp4": True, - "dhcp6": True, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - get_interface_mac_path = M_PATH_NET + "get_interface_mac" - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - m_find_fallback.return_value = "eth9" - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_is_cached_in_datasource(self): - """network_config property is cached in DataSourceEc2.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - ds._network_config = {"cached": "data"} - self.assertEqual({"cached": "data"}, ds.network_config) - - @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") - def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): - """Refresh the network_config Ec2 cache if network key is absent. - - This catches an upgrade issue where obj.pkl contained stale metadata - which lacked newly required network key. 
- """ - old_metadata = copy.deepcopy(DEFAULT_METADATA) - old_metadata.pop("network") - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": old_metadata}, - ) - self.assertTrue(ds.get_data()) - - # Workaround https://github.com/getsentry/responses/issues/212 - if hasattr(self.responses, "_urls"): - # Can be removed when Bionic is EOL - for index, url in enumerate(self.responses._urls): - if url["url"].startswith( - "http://169.254.169.254/2009-04-04/meta-data/" - ): - del self.responses._urls[index] - elif hasattr(self.responses, "_matches"): - # Can be removed when Focal is EOL - for index, response in enumerate(self.responses._matches): - if response.url.startswith( - "http://169.254.169.254/2009-04-04/meta-data/" - ): - del self.responses._matches[index] - - # Provide new revision of metadata that contains network data - register_mock_metaserver( - "http://169.254.169.254/2009-04-04/meta-data/", - DEFAULT_METADATA, - self.responses, - ) - mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA - get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac" - ds.distro.fallback_nic = "eth9" - with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {mac1: "eth9"} - nc = ds.network_config # Will re-crawl network metadata - self.assertIsNotNone(nc) - self.assertIn( - "Refreshing stale metadata from prior to upgrade", - self.logs.getvalue(), - ) - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - self.assertEqual(expected, ds.network_config) - - def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): - """get_instance-id gets DataSourceEc2Local.identity if not present. - - This handles an upgrade case where the old pickled datasource didn't - set up self.identity, but 'systemctl cloud-init init' runs - get_instance_id which traces on missing self.identity. lp:1748354. - """ - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - # Mock 404s on all versions except latest - all_versions = [ - ds.min_metadata_version - ] + ds.extended_metadata_versions - for ver in all_versions[:-1]: - register_mock_metaserver( - "http://[fd00:ec2::254]/{0}/meta-data/instance-id".format(ver), - None, - self.responses, - ) - - ds.metadata_address = "http://[fd00:ec2::254]" - register_mock_metaserver( - "{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]), - DEFAULT_METADATA, - self.responses, - ) - # Register dynamic/instance-identity document which we now read. 
- register_mock_metaserver( - "{0}/{1}/dynamic/".format(ds.metadata_address, all_versions[-1]), - DYNAMIC_METADATA, - self.responses, - ) - ds._cloud_name = ec2.CloudNames.AWS - # Setup cached metadata on the Datasource - ds.metadata = DEFAULT_METADATA - self.assertEqual("my-identity-id", ds.get_instance_id()) - - def test_classic_instance_true(self): - """If no vpc-id in metadata, is_classic_instance must return true.""" - md_copy = copy.deepcopy(DEFAULT_METADATA) - ifaces_md = md_copy.get("network", {}).get("interfaces", {}) - for _mac, mac_data in ifaces_md.get("macs", {}).items(): - if "vpc-id" in mac_data: - del mac_data["vpc-id"] - - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": md_copy}, - ) - self.assertTrue(ds.get_data()) - self.assertTrue(ds.is_classic_instance()) - - def test_classic_instance_false(self): - """If vpc-id in metadata, is_classic_instance must return false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - self.assertTrue(ds.get_data()) - self.assertFalse(ds.is_classic_instance()) - - def test_aws_inaccessible_imds_service_fails_with_retries(self): - """Inaccessibility of http://169.254.169.254 are retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md=None, - ) - - conn_error = requests.exceptions.ConnectionError( - "[Errno 113] no route to host" - ) - - mock_success = mock.MagicMock(contents=b"fakesuccess") - mock_success.ok.return_value = True - - with mock.patch("cloudinit.url_helper.readurl") as m_readurl: - # yikes, this endpoint needs help - m_readurl.side_effect = ( - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - mock_success, - ) - with mock.patch("cloudinit.url_helper.time.sleep"): - self.assertTrue(ds.wait_for_metadata_service()) - - # Just one /latest/api/token request - self.assertEqual(19, len(m_readurl.call_args_list)) - for readurl_call in m_readurl.call_args_list: - self.assertIn("latest/api/token", readurl_call[0][0]) - - def test_aws_token_403_fails_without_retries(self): - """Verify that 403s fetching AWS tokens are not retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md=None, - ) - - token_url = self.data_url("latest", data_item="api/token") - self.responses.add(responses.PUT, token_url, status=403) - self.assertFalse(ds.get_data()) - # Just one /latest/api/token request - logs = self.logs.getvalue() - expected_logs = [ - "WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is" - " disabled. 
Aborting.", - "WARNING: IMDS's HTTP endpoint is probably disabled", - ] - for log in expected_logs: - self.assertIn(log, logs) - - def test_aws_token_redacted(self): - """Verify that aws tokens are redacted when logged.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - self.assertTrue(ds.get_data()) - all_logs = self.logs.getvalue().splitlines() - REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" - REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" - logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] - logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] - logs_with_token = [log for log in all_logs if "API-TOKEN" in log] - self.assertEqual(1, len(logs_with_redacted_ttl)) - self.assertEqual(83, len(logs_with_redacted)) - self.assertEqual(0, len(logs_with_token)) - - @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") - def test_valid_platform_with_strict_true(self, m_dhcp): - """Valid platform data should return true with strict_id true.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - self.assertEqual(0, m_dhcp.call_count) - self.assertEqual("aws", ds.cloud_name) - self.assertEqual("ec2", ds.platform_type) - self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform) - - def test_valid_platform_with_strict_false(self): - """Valid platform data should return true with strict_id false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - - def test_unknown_platform_with_strict_true(self): - """Unknown platform data with strict_id true should return False.""" - uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" - ds = self._setup_ds( - platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, - sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertFalse(ret) - - def test_unknown_platform_with_strict_false(self): - """Unknown platform data with strict_id false should return True.""" - uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" - ds = self._setup_ds( - platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertTrue(ret) - - def test_ec2_local_returns_false_on_non_aws(self): - """DataSourceEc2Local returns False when platform is not AWS.""" - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - platform_attrs = [ - attr - for attr in ec2.CloudNames.__dict__.keys() - if not attr.startswith("__") - ] - for attr_name in platform_attrs: - platform_name = getattr(ec2.CloudNames, attr_name) - if platform_name not in ["aws", "outscale"]: - ds._cloud_name = platform_name - ret = ds.get_data() - self.assertEqual("ec2", ds.platform_type) - self.assertFalse(ret) - message = ( - "Local Ec2 mode only supported on ('aws', 'outscale')," - " not {0}".format(platform_name) - ) - self.assertIn(message, self.logs.getvalue()) - - 
@mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") - def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): - """DataSourceEc2Local returns False on BSD. - - FreeBSD dhclient doesn't support dhclient -sf to run in a sandbox. - """ - m_is_freebsd.return_value = True - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - ) - ret = ds.get_data() - self.assertFalse(ret) - self.assertIn( - "FreeBSD doesn't support running dhclient with -sf", - self.logs.getvalue(), - ) - - @mock.patch("cloudinit.net.ephemeral.EphemeralIPv6Network") - @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network") - @mock.patch("cloudinit.distros.net.find_fallback_nic") - @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") - @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") - def test_ec2_local_performs_dhcp_on_non_bsd( - self, m_is_bsd, m_dhcp, m_fallback_nic, m_net4, m_net6 - ): - """Ec2Local returns True for valid platform data on non-BSD with dhcp. - - DataSourceEc2Local will setup initial IPv4 network via dhcp discovery. - Then the metadata services is crawled for more network config info. - When the platform data is valid, return True. - """ - - m_fallback_nic.return_value = "eth9" - m_is_bsd.return_value = False - m_dhcp.return_value = { - "interface": "eth9", - "fixed-address": "192.168.2.9", - "routers": "192.168.2.1", - "subnet-mask": "255.255.255.0", - "broadcast-address": "192.168.2.255", - } - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": DEFAULT_METADATA}, - distro=MockDistro("", {}, {}), - ) - - ret = ds.get_data() - self.assertTrue(ret) - m_dhcp.assert_called_once_with(ds.distro, "eth9", None) - m_net4.assert_called_once_with( - ds.distro, - broadcast="192.168.2.255", - interface="eth9", - ip="192.168.2.9", - prefix_or_mask="255.255.255.0", - router="192.168.2.1", - static_routes=None, - ) - self.assertIn("Crawl of metadata service ", self.logs.getvalue()) - - def test_get_instance_tags(self): - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, - md={"md": TAGS_METADATA_2021_03_23}, - ) - self.assertTrue(ds.get_data()) - self.assertIn("tags", ds.metadata) - self.assertIn("instance", ds.metadata["tags"]) - instance_tags = ds.metadata["tags"]["instance"] - self.assertEqual(instance_tags["Application"], "test") - self.assertEqual(instance_tags["Environment"], "production") - - -class TestGetSecondaryAddresses(test_helpers.CiTestCase): - mac = "06:17:04:d7:26:ff" - with_logs = True - - def test_md_with_no_secondary_addresses(self): - """Empty list is returned when nic metadata contains no secondary ip""" - self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac)) - - def test_md_with_secondary_v4_and_v6_addresses(self): - """All secondary addresses are returned from nic metadata""" - self.assertEqual( - [ - "172.31.45.70/20", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac), - ) - - def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self): - """Any invalid subnet-ipv(4|6)-cidr-block values use defaults""" - invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP) - 
invalid_cidr_md["subnet-ipv4-cidr-block"] = "something-unexpected" - invalid_cidr_md["subnet-ipv6-cidr-block"] = "not/sure/what/this/is" - self.assertEqual( - [ - "172.31.45.70/24", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - ec2.get_secondary_addresses(invalid_cidr_md, self.mac), - ) - expected_logs = [ - "WARNING: Could not parse subnet-ipv4-cidr-block" - " something-unexpected for mac 06:17:04:d7:26:ff." - " ipv4 network config prefix defaults to /24", - "WARNING: Could not parse subnet-ipv6-cidr-block" - " not/sure/what/this/is for mac 06:17:04:d7:26:ff." - " ipv6 network config prefix defaults to /128", - ] - logs = self.logs.getvalue() - for log in expected_logs: - self.assertIn(log, logs) - - -class TestBuildNicOrder: - @pytest.mark.parametrize( - ["macs_metadata", "macs", "expected"], - [ - pytest.param({}, [], {}, id="all_empty"), - pytest.param( - {}, ["0a:f7:8d:96:f2:a1"], {}, id="empty_macs_metadata" - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - } - }, - [], - {}, - id="empty_macs", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], - {"0a:f7:8d:96:f2:a1": 0, "0a:0d:dd:44:cd:7b": 1}, - id="no-device-number-info", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1"], - {"0a:f7:8d:96:f2:a1": 0}, - id="no-device-number-info-subset", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], - {"0a:0d:dd:44:cd:7b": 0, "0a:f7:8d:96:f2:a1": 1}, - id="device-numbers", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "network-card": "0", - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "network-card": "1", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - "0a:f7:8d:96:f2:a2": { - "network-card": "2", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - [ - "0a:f7:8d:96:f2:a1", - "0a:0d:dd:44:cd:7b", - "0a:f7:8d:96:f2:a2", - ], - { - "0a:0d:dd:44:cd:7b": 0, - "0a:f7:8d:96:f2:a1": 1, - "0a:f7:8d:96:f2:a2": 2, - }, - id="network-cardes", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "network-card": "0", - "device-number": "0", - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "network-card": "1", - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - "0a:f7:8d:96:f2:a2": { - "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - [ - "0a:f7:8d:96:f2:a1", - "0a:0d:dd:44:cd:7b", - "0a:f7:8d:96:f2:a2", - ], - { - "0a:0d:dd:44:cd:7b": 0, - "0a:f7:8d:96:f2:a1": 1, - "0a:f7:8d:96:f2:a2": 2, - }, - id="network-card-partially-missing", - ), - pytest.param( - { - "0a:0d:dd:44:cd:7b": { - "mac": "0a:0d:dd:44:cd:7b", - }, - "0a:f7:8d:96:f2:a1": { - "mac": "0a:f7:8d:96:f2:a1", - }, - }, - ["0a:f7:8d:96:f2:a9"], - {}, - id="macs-not-in-md", - ), - ], - ) - def test_build_nic_order(self, macs_metadata, macs, expected): - assert expected == ec2._build_nic_order(macs_metadata, macs) - - -class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): - def setUp(self): - super(TestConvertEc2MetadataNetworkConfig, self).setUp() - self.mac1 = 
"06:17:04:d7:26:09" - interface_dict = copy.deepcopy( - DEFAULT_METADATA["network"]["interfaces"]["macs"][self.mac1] - ) - # These tests are written assuming the base interface doesn't have IPv6 - interface_dict.pop("ipv6s") - self.network_metadata = { - "interfaces": {"macs": {self.mac1: interface_dict}} - } - - def test_convert_ec2_metadata_network_config_skips_absent_macs(self): - """Any mac absent from metadata is skipped by network config.""" - macs_to_nics = {self.mac1: "eth9", "DE:AD:BE:EF:FF:FF": "vitualnic2"} - - # DE:AD:BE:EF:FF:FF represented by OS but not in metadata - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - self.network_metadata, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self): - """Config dhcp6 when ipv6s is in metadata for a mac.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_local_only_dhcp4(self): - """Config dhcp4 when there are no public addresses in public-ipv4s.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["local-ipv4s"] = "172.3.3.15" - nic1_metadata.pop("public-ipv4s") - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): - """Config dhcp4 on fallback_nic when there are no ipv4 addresses.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] - nic1_metadata["public-ipv4s"] = "" - - # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config. 
- expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): - """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_multi_nics_ipv4(self): - """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. - Source-routing configured for secondary NICs (routing-policy and extra - routing table).""" - mac2 = "06:17:04:d7:26:08" - macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} - network_metadata_both = copy.deepcopy(self.network_metadata) - # Add 2nd nic info - network_metadata_both["interfaces"]["macs"][mac2] = NIC2_MD - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": True, - "dhcp6-overrides": {"route-metric": 100}, - }, - "eth10": { - "match": {"macaddress": mac2}, - "set-name": "eth10", - "dhcp4": True, - "dhcp4-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "dhcp6": False, - "routes": [ - # via DHCP gateway - {"to": "0.0.0.0/0", "via": "172.31.1.0", "table": 101}, - # to NIC2_MD["subnet-ipv4-cidr-block"] - {"to": "172.31.32.0/20", "table": 101}, - ], - "routing-policy": [ - # NIC2_MD["local-ipv4s"] - {"from": "172.31.47.221", "table": 101} - ], - }, - }, - } - distro = mock.Mock() - distro.network_renderer = netplan.Renderer - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_multi_nics_ipv4_ipv6_multi_ip( - self, - ): - """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. 
- Source-routing configured for secondary NICs (routing-policy and extra - routing table).""" - mac2 = "06:17:04:d7:26:08" - macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} - network_metadata_both = copy.deepcopy(self.network_metadata) - # Add 2nd nic info - network_metadata_both["interfaces"]["macs"][ - mac2 - ] = NIC2_MD_IPV4_IPV6_MULTI_IP - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg - nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc - expected = { - "version": 2, - "ethernets": { - "eth9": { - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": True, - "match": {"macaddress": "06:17:04:d7:26:09"}, - "set-name": "eth9", - "dhcp6-overrides": {"route-metric": 100}, - }, - "eth10": { - "dhcp4": True, - "dhcp4-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "dhcp6": True, - "match": {"macaddress": "06:17:04:d7:26:08"}, - "set-name": "eth10", - "routes": [ - # via DHCP gateway - {"to": "0.0.0.0/0", "via": "172.31.1.0", "table": 101}, - # to NIC2_MD["subnet-ipv4-cidr-block"] - {"to": "172.31.32.0/20", "table": 101}, - # to NIC2_MD["subnet-ipv6-cidr-blocks"] - {"to": "2600:1f16:292:100::/64", "table": 101}, - ], - "routing-policy": [ - # NIC2_MD["local-ipv4s"] - {"from": "172.31.47.221", "table": 101}, - { - "from": "2600:1f16:292:100:c187:593c:4349:136", - "table": 101, - }, - { - "from": "2600:1f16:292:100:f153:12a3:c37c:11f9", - "table": 101, - }, - ], - "dhcp6-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "addresses": ["2600:1f16:292:100:f153:12a3:c37c:11f9/128"], - }, - }, - } - distro = mock.Mock() - distro.network_renderer = netplan.Renderer - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_network_config_multi_nics_ipv6_only(self): - """Like above, but only ipv6s are present in metadata.""" - macs_to_nics = { - "02:7c:03:b8:5c:af": "eth0", - "02:6b:df:a2:4b:2b": "eth1", - } - mac_data = copy.deepcopy(MULTI_NIC_V6_ONLY_MD) - network_metadata = {"interfaces": mac_data} - expected = { - "version": 2, - "ethernets": { - "eth0": { - "dhcp4": True, - "dhcp4-overrides": {"route-metric": 100}, - "dhcp6": True, - "match": {"macaddress": "02:7c:03:b8:5c:af"}, - "set-name": "eth0", - "dhcp6-overrides": {"route-metric": 100}, - }, - "eth1": { - "dhcp4": True, - "dhcp4-overrides": { - "route-metric": 200, - "use-routes": True, - }, - "dhcp6": True, - "match": {"macaddress": "02:6b:df:a2:4b:2b"}, - "set-name": "eth1", - "routes": [ - {"to": "2600:1f16:67f:f201:0:0:0:0/64", "table": 101}, - ], - "routing-policy": [ - { - "from": "2600:1f16:67f:f201:8d2e:4d1f:9e80:4ab9", - "table": 101, - }, - ], - "dhcp6-overrides": { - "route-metric": 200, - "use-routes": True, - }, - }, - }, - } - distro = mock.Mock() - distro.network_renderer = netplan.Renderer - assert expected == ec2.convert_ec2_metadata_network_config( - network_metadata, distro, macs_to_nics - ) - distro.dhcp_client.dhcp_discovery.assert_not_called() - - def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): - """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = 
network_metadata_both["interfaces"]["macs"][self.mac1] - nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": True, - } - }, - } - distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), - ) - - def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self): - """Convert Ec2 Metadata calls get_interfaces_by_mac by default.""" - expected = { - "version": 2, - "ethernets": { - "eth9": { - "match": {"macaddress": self.mac1}, - "set-name": "eth9", - "dhcp4": True, - "dhcp6": False, - } - }, - } - patch_path = M_PATH_NET + "get_interfaces_by_mac" - distro = mock.Mock() - with mock.patch(patch_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {self.mac1: "eth9"} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - self.network_metadata, distro - ), - ) - - -class TesIdentifyPlatform(test_helpers.CiTestCase): - def collmock(self, **kwargs): - """return non-special _collect_platform_data updated with changes.""" - unspecial = { - "asset_tag": "3857-0037-2746-7462-1818-3997-77", - "serial": "H23-C4J3JV-R6", - "uuid": "81c7e555-6471-4833-9551-1ab366c4cfd2", - "uuid_source": "dmi", - "vendor": "tothecloud", - "product_name": "cloudproduct", - } - unspecial.update(**kwargs) - return unspecial - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_aliyun(self, m_collect): - """aliyun should be identified if product name equals to - Alibaba Cloud ECS - """ - m_collect.return_value = self.collmock( - product_name="Alibaba Cloud ECS" - ) - self.assertEqual(ec2.CloudNames.ALIYUN, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_zstack(self, m_collect): - """zstack should be identified if chassis-asset-tag - ends in .zstack.io - """ - m_collect.return_value = self.collmock(asset_tag="123456.zstack.io") - self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_zstack_full_domain_only(self, m_collect): - """zstack asset-tag matching should match only on - full domain boundary. 
- """ - m_collect.return_value = self.collmock(asset_tag="123456.buzzstack.io") - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_e24cloud(self, m_collect): - """e24cloud identified if vendor is e24cloud""" - m_collect.return_value = self.collmock(vendor="e24cloud") - self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_e24cloud_negative(self, m_collect): - """e24cloud identified if vendor is e24cloud""" - m_collect.return_value = self.collmock(vendor="e24cloudyday") - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - - # Outscale - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_identify_outscale(self, m_collect): - """Should return true if the dmi product data has expected value.""" - m_collect.return_value = self.collmock( - vendor="3DS Outscale".lower(), - product_name="3DS Outscale VM".lower(), - ) - self.assertEqual(ec2.CloudNames.OUTSCALE, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_false_on_wrong_sys_vendor(self, m_collect): - """Should return false on empty value returned.""" - m_collect.return_value = self.collmock( - vendor="Not 3DS Outscale".lower(), - product_name="3DS Outscale VM".lower(), - ) - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - - @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") - def test_false_on_wrong_product_name(self, m_collect): - """Should return false on an unrelated string.""" - m_collect.return_value = self.collmock( - vendor="3DS Outscale".lower(), - product_name="Not 3DS Outscale VM".lower(), - ) - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) diff --git a/.pc/deprecation-version-boundary.patch/cloudinit/features.py b/.pc/deprecation-version-boundary.patch/cloudinit/features.py new file mode 100644 index 000000000..a0d0c072f --- /dev/null +++ b/.pc/deprecation-version-boundary.patch/cloudinit/features.py @@ -0,0 +1,126 @@ +# This file is part of cloud-init. See LICENSE file for license information. +""" +Feature flags are used as a way to easily toggle configuration +**at build time**. They are provided to accommodate feature deprecation and +downstream configuration changes. + +Currently used upstream values for feature flags are set in +``cloudinit/features.py``. Overrides to these values should be +patched directly (e.g., via quilt patch) by downstreams. + +Each flag should include a short comment regarding the reason for +the flag and intended lifetime. + +Tests are required for new feature flags, and tests must verify +all valid states of a flag, not just the default state. +""" +import re +import sys +from typing import Dict + +ERROR_ON_USER_DATA_FAILURE = True +""" +If there is a failure in obtaining user data (i.e., #include or +decompress fails) and ``ERROR_ON_USER_DATA_FAILURE`` is ``False``, +cloud-init will log a warning and proceed. If it is ``True``, +cloud-init will instead raise an exception. + +As of 20.3, ``ERROR_ON_USER_DATA_FAILURE`` is ``True``. + +(This flag can be removed after Focal is no longer supported.) 
+""" + + +ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES = False +""" +When configuring apt mirrors, if +``ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES`` is ``True`` cloud-init +will detect that a datasource's ``availability_zone`` property looks +like an EC2 availability zone and set the ``ec2_region`` variable when +generating mirror URLs; this can lead to incorrect mirrors being +configured in clouds whose AZs follow EC2's naming pattern. + +As of 20.3, ``ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES`` is ``False`` +so we no longer include ``ec2_region`` in mirror determination on +non-AWS cloud platforms. + +If the old behavior is desired, users can provide the appropriate +mirrors via :py:mod:`apt: ` +directives in cloud-config. +""" + + +EXPIRE_APPLIES_TO_HASHED_USERS = False +""" +If ``EXPIRE_APPLIES_TO_HASHED_USERS`` is True, then when expire is set true +in cc_set_passwords, hashed passwords will be expired. Previous to 22.3, +only non-hashed passwords were expired. + +(This flag can be removed after Jammy is no longer supported.) +""" + +NETPLAN_CONFIG_ROOT_READ_ONLY = False +""" +If ``NETPLAN_CONFIG_ROOT_READ_ONLY`` is True, then netplan configuration will +be written as a single root read-only file /etc/netplan/50-cloud-init.yaml. +This prevents wifi passwords in network v2 configuration from being +world-readable. Prior to 23.1, netplan configuration is world-readable. + +(This flag can be removed after Jammy is no longer supported.) +""" + + +NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH = True +""" +Append a forward slash '/' if NoCloud seedurl does not end with either +a querystring or forward slash. Prior to 23.1, nocloud seedurl would be used +unaltered, appending meta-data, user-data and vendor-data to without URL path +separators. + +(This flag can be removed when Jammy is no longer supported.) +""" + +APT_DEB822_SOURCE_LIST_FILE = False +""" +On Debian and Ubuntu systems, cc_apt_configure will write a deb822 compatible +/etc/apt/sources.list.d/(debian|ubuntu).sources file. When set False, continue +to write /etc/apt/sources.list directly. +""" + +DEPRECATION_INFO_BOUNDARY = "devel" +""" +DEPRECATION_INFO_BOUNDARY is used by distros to configure at which upstream +version to start logging deprecations at a level higher than INFO. + +The default value "devel" tells cloud-init to log all deprecations higher +than INFO. This value may be overriden by downstreams in order to maintain +stable behavior across releases. + +Jsonschema key deprecations and inline logger deprecations include a +deprecated_version key. When the variable below is set to a version, +cloud-init will use that version as a demarcation point. Deprecations which +are added after this version will be logged as at an INFO level. Deprecations +which predate this version will be logged at the higher DEPRECATED level. +Downstreams that want stable log behavior may set the variable below to the +first version released in their stable distro. By doing this, they can expect +that newly added deprecations will be logged at INFO level. The implication of +the different log levels is that logs at DEPRECATED level result in a return +code of 2 from `cloud-init status`. + +format: + + :: = | + ::= "devel" + ::= "." ["." 
diff --git a/.pc/do-not-block-user-login.patch/systemd/cloud-config.service.tmpl b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-config.service.tmpl
similarity index 72%
rename from .pc/do-not-block-user-login.patch/systemd/cloud-config.service.tmpl
rename to .pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-config.service.tmpl
index 31d9d983e..b77eeef84 100644
--- a/.pc/do-not-block-user-login.patch/systemd/cloud-config.service.tmpl
+++ b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-config.service.tmpl
@@ -1,9 +1,8 @@
 ## template:jinja
 [Unit]
-Description=Apply the settings specified in cloud-config
-After=network-online.target cloud-config.target
-After=snapd.seeded.service
-Before=systemd-user-sessions.service
+# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html
+Description=Cloud-init: Config Stage
+After=network-online.target cloud-config.target snapd.seeded.service
 Wants=network-online.target cloud-config.target
 ConditionPathExists=!/etc/cloud/cloud-init.disabled
 ConditionKernelCommandLine=!cloud-init=disabled
diff --git a/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-final.service.tmpl b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-final.service.tmpl
new file mode 100644
index 000000000..b66533643
--- /dev/null
+++ b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-final.service.tmpl
@@ -0,0 +1,37 @@
+## template:jinja
+[Unit]
+# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html
+Description=Cloud-init: Final Stage
+After=network-online.target time-sync.target cloud-config.service rc-local.service
+{% if variant in ["ubuntu", "unknown", "debian"] %}
+After=multi-user.target
+Before=apt-daily.service
+{% endif %}
+Wants=network-online.target cloud-config.service
+ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ConditionKernelCommandLine=!cloud-init=disabled
+ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
+
+
+[Service]
+Type=oneshot
+ExecStart=/usr/bin/cloud-init modules --mode=final
+RemainAfterExit=yes
+TimeoutSec=0
+KillMode=process
+{% if variant in ["almalinux", "cloudlinux", "rhel"] %}
+# Restart NetworkManager if it is present and running.
+ExecStartPost=/bin/sh -c 'u=NetworkManager.service; \
+    out=$(systemctl show --property=SubState $u) || exit; \
+    [ "$out" = "SubState=running" ] || exit 0; \
+    systemctl reload-or-try-restart $u'
+{% else %}
+TasksMax=infinity
+{% endif %}
+
+
+# Output needs to appear in instance console output
+StandardOutput=journal+console
+
+[Install]
+WantedBy=cloud-init.target
diff --git a/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init-hotplugd.service b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init-hotplugd.service
new file mode 100644
index 000000000..2e552a0a0
--- /dev/null
+++ b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init-hotplugd.service
@@ -0,0 +1,27 @@
+# Paired with cloud-init-hotplugd.socket to read from the FIFO
+# /run/cloud-init/hook-hotplug-cmd which is created during a udev network
+# add or remove event as processed by 90-cloud-init-hook-hotplug.rules.
+
+# On start, read args from the FIFO, process and provide structured arguments
+# to `cloud-init devel hotplug-hook` which will setup or teardown network
+# devices as configured by user-data.
+
+# Known bug with an enforcing SELinux policy: LP: #1936229
+# cloud-init-hotplugd.service will read args from file descriptor 3
+
+[Unit]
+Description=Cloud-init: Hotplug Hook
+After=cloud-init-hotplugd.socket
+After=cloud-init.target
+Requires=cloud-init-hotplugd.socket
+ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ConditionKernelCommandLine=!cloud-init=disabled
+ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
+
+[Service]
+Type=oneshot
+ExecStart=/bin/bash -c 'read args <&3; echo "args=$args"; \
+    exec /usr/bin/cloud-init devel hotplug-hook $args; \
+    exit 0'
+SyslogIdentifier=cloud-init-hotplugd
+TimeoutStopSec=5
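The unit comments above describe a one-line handshake over a FIFO: the udev hook writes an argument string, and this oneshot service reads it on file descriptor 3 and hands it to `cloud-init devel hotplug-hook`. A sketch of the writer side, assuming only the FIFO path taken from the unit file; the argument string itself is illustrative, not the hook's exact format:

    # Emulate the udev hook's side of the handshake. Opening a FIFO for
    # writing may block until a reader (the socket-activated service) is up.
    fifo_path = "/run/cloud-init/hook-hotplug-cmd"
    args = "--devpath=/devices/virtual/net/eth1 --subsystem=net --udevaction=add"
    with open(fifo_path, "w") as fifo:
        fifo.write(args + "\n")  # unblocks the service's `read args <&3`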
diff --git a/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init-hotplugd.socket b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init-hotplugd.socket
new file mode 100644
index 000000000..8300e717e
--- /dev/null
+++ b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init-hotplugd.socket
@@ -0,0 +1,17 @@
+# cloud-init-hotplugd.socket listens on the FIFO file
+# /run/cloud-init/hook-hotplug-cmd which is created during a udev network
+# add or remove event as processed by 90-cloud-init-hook-hotplug.rules.
+
+# Known bug with an enforcing SELinux policy: LP: #1936229
+[Unit]
+Description=cloud-init hotplug hook socket
+After=cloud-config.target
+ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ConditionKernelCommandLine=!cloud-init=disabled
+ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
+
+[Socket]
+ListenFIFO=/run/cloud-init/hook-hotplug-cmd
+
+[Install]
+WantedBy=cloud-config.target
diff --git a/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init-local.service.tmpl b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init-local.service.tmpl
new file mode 100644
index 000000000..0da2d8337
--- /dev/null
+++ b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init-local.service.tmpl
@@ -0,0 +1,49 @@
+## template:jinja
+[Unit]
+# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html
+Description=Cloud-init: Local Stage (pre-network)
+{% if variant in ["almalinux", "cloudlinux", "ubuntu", "unknown", "debian", "rhel"] %}
+DefaultDependencies=no
+{% endif %}
+Wants=network-pre.target
+After=hv_kvp_daemon.service
+After=systemd-remount-fs.service
+{% if variant in ["almalinux", "cloudlinux", "rhel"] %}
+Requires=dbus.socket
+After=dbus.socket
+{% endif %}
+Before=NetworkManager.service
+{% if variant in ["almalinux", "cloudlinux", "rhel"] %}
+Before=network.service
+{% endif %}
+Before=network-pre.target
+Before=shutdown.target
+{% if variant in ["almalinux", "cloudlinux", "rhel"] %}
+Before=firewalld.target
+Conflicts=shutdown.target
+{% endif %}
+{% if variant in ["ubuntu", "unknown", "debian"] %}
+Before=sysinit.target
+Conflicts=shutdown.target
+{% endif %}
+RequiresMountsFor=/var/lib/cloud
+ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ConditionKernelCommandLine=!cloud-init=disabled
+ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
+
+[Service]
+Type=oneshot
+{% if variant in ["almalinux", "cloudlinux", "rhel"] %}
+ExecStartPre=/bin/mkdir -p /run/cloud-init
+ExecStartPre=/sbin/restorecon /run/cloud-init
+ExecStartPre=/usr/bin/touch /run/cloud-init/enabled
+{% endif %}
+ExecStart=/usr/bin/cloud-init init --local
+RemainAfterExit=yes
+TimeoutSec=0
+
+# Output needs to appear in instance console output
+StandardOutput=journal+console
+
+[Install]
+WantedBy=cloud-init.target
diff --git a/.pc/do-not-block-user-login.patch/systemd/cloud-init.service.tmpl b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init.service.tmpl
similarity index 87%
rename from .pc/do-not-block-user-login.patch/systemd/cloud-init.service.tmpl
rename to .pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init.service.tmpl
index bf91164a4..58031cc43 100644
--- a/.pc/do-not-block-user-login.patch/systemd/cloud-init.service.tmpl
+++ b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init.service.tmpl
@@ -1,7 +1,8 @@
 ## template:jinja
 [Unit]
-Description=Initial cloud-init job (metadata service crawler)
-{% if variant not in ["photon", "rhel"] %}
+# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html
+Description=Cloud-init: Network Stage
+{% if variant not in ["almalinux", "cloudlinux", "photon", "rhel"] %}
 DefaultDependencies=no
 {% endif %}
 Wants=cloud-init-local.service
@@ -29,6 +30,7 @@ After=dbus.service
 Before=network-online.target
 Before=sshd-keygen.service
 Before=sshd.service
+Before=systemd-user-sessions.service
 {% if variant in ["ubuntu", "unknown", "debian"] %}
 Before=sysinit.target
 Before=shutdown.target
diff --git 
a/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init.target b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init.target
new file mode 100644
index 000000000..30450f7ff
--- /dev/null
+++ b/.pc/drop-unsupported-systemd-condition-environment.patch/systemd/cloud-init.target
@@ -0,0 +1,15 @@
+# cloud-init.target is enabled by cloud-init-generator
+# To disable it you can either:
+#  a.) boot with kernel cmdline of 'cloud-init=disabled'
+#  b.) touch a file /etc/cloud/cloud-init.disabled
+#
+# cloud-init.target is a synchronization point when all cloud-init's initial
+# system configuration tasks have completed. To order a service after cloud-init
+# is done, add the directives as applicable:
+#   After=cloud-init.target and Wants=cloud-init.target
+[Unit]
+Description=Cloud-init target
+After=multi-user.target
+ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ConditionKernelCommandLine=!cloud-init=disabled
+ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
diff --git a/.pc/expire-on-hashed-users.patch/cloudinit/features.py b/.pc/expire-on-hashed-users.patch/cloudinit/features.py
index d661b940b..c3fdae186 100644
--- a/.pc/expire-on-hashed-users.patch/cloudinit/features.py
+++ b/.pc/expire-on-hashed-users.patch/cloudinit/features.py
@@ -87,6 +87,35 @@ to write /etc/apt/sources.list directly.
 """
 
+DEPRECATION_INFO_BOUNDARY = "devel"
+"""
+DEPRECATION_INFO_BOUNDARY is used by distros to configure at which upstream
+version to start logging deprecations at a level higher than INFO.
+
+The default value "devel" tells cloud-init to log all deprecations higher
+than INFO. This value may be overridden by downstreams in order to maintain
+stable behavior across releases.
+
+Jsonschema key deprecations and inline logger deprecations include a
+deprecated_version key. When the variable below is set to a version,
+cloud-init will use that version as a demarcation point. Deprecations which
+are added after this version will be logged at INFO level. Deprecations
+which predate this version will be logged at the higher DEPRECATED level.
+Downstreams that want stable log behavior may set the variable below to the
+first version released in their stable distro. By doing this, they can expect
+that newly added deprecations will be logged at INFO level. The implication of
+the different log levels is that logs at DEPRECATED level result in a return
+code of 2 from `cloud-init status`.
+
+format:
+
+    <value> ::= <default> | <version>
+    <default> ::= "devel"
+    <version> ::= <major> "." <minor> ["." <patch>]
+
+where <major>, <minor>, and <patch> are positive integers
+"""
+
 
 def get_features() -> Dict[str, bool]:
     """Return a dict of applicable features/overrides and their values."""
diff --git a/.pc/expire-on-hashed-users.patch/tests/unittests/config/test_cc_set_passwords.py b/.pc/expire-on-hashed-users.patch/tests/unittests/config/test_cc_set_passwords.py
index ef34a8c60..d37faedd4 100644
--- a/.pc/expire-on-hashed-users.patch/tests/unittests/config/test_cc_set_passwords.py
+++ b/.pc/expire-on-hashed-users.patch/tests/unittests/config/test_cc_set_passwords.py
@@ -1,5 +1,6 @@
 # This file is part of cloud-init. See LICENSE file for license information.
+import copy import logging from unittest import mock @@ -508,6 +509,7 @@ def test_chpasswd_parity(self, list_def, users_def): class TestExpire: @pytest.mark.parametrize("cfg", expire_cases) def test_expire(self, cfg, mocker, caplog): + cfg = copy.deepcopy(cfg) cloud = get_cloud() mocker.patch(f"{MODPATH}subp.subp") mocker.patch.object(cloud.distro, "chpasswd") @@ -533,7 +535,9 @@ def test_expire(self, cfg, mocker, caplog): def test_expire_old_behavior(self, cfg, mocker, caplog): # Previously expire didn't apply to hashed passwords. # Ensure we can preserve that case on older releases - features.EXPIRE_APPLIES_TO_HASHED_USERS = False + mocker.patch.object(features, "EXPIRE_APPLIES_TO_HASHED_USERS", False) + + cfg = copy.deepcopy(cfg) cloud = get_cloud() mocker.patch(f"{MODPATH}subp.subp") mocker.patch.object(cloud.distro, "chpasswd") diff --git a/.pc/keep-dhclient-as-priority-client.patch/config/cloud.cfg.tmpl b/.pc/keep-dhclient-as-priority-client.patch/config/cloud.cfg.tmpl index 00e3e5dc0..61346f474 100644 --- a/.pc/keep-dhclient-as-priority-client.patch/config/cloud.cfg.tmpl +++ b/.pc/keep-dhclient-as-priority-client.patch/config/cloud.cfg.tmpl @@ -4,14 +4,15 @@ {% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %} {% set is_rhel = variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "miraclelinux", "rhel", "rocky", "virtuozzo"] %} -{% set gecos = ({"amazon": "EC2 Default User", "centos": "Cloud User", - "debian": "Debian", "dragonfly": "DragonFly", - "freebsd": "FreeBSD", "mariner": "MarinerOS", - "rhel": "Cloud User", "netbsd": "NetBSD", - "openbsd": "openBSD", "openmandriva": "OpenMandriva admin", - "photon": "PhotonOS", "ubuntu": "Ubuntu", - "unknown": "Ubuntu"}) %} +{% set gecos = ({"amazon": "EC2 Default User", "azurelinux": "Azure Linux", + "centos": "Cloud User", "debian": "Debian", + "dragonfly": "DragonFly", "freebsd": "FreeBSD", + "mariner": "MarinerOS", "rhel": "Cloud User", + "netbsd": "NetBSD", "openbsd": "openBSD", + "openmandriva": "OpenMandriva admin", "photon": "PhotonOS", + "ubuntu": "Ubuntu", "unknown": "Ubuntu"}) %} {% set groups = ({"alpine": "adm, wheel", "arch": "wheel, users", + "azurelinux": "wheel", "debian": "adm, audio, cdrom, dialout, dip, floppy, netdev, plugdev, sudo, video", "gentoo": "users, wheel", "mariner": "wheel", "photon": "wheel", @@ -61,7 +62,7 @@ disable_root: true "openmandriva", "photon", "TencentOS"] or is_rhel %} {% if is_rhel %} -mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2'] +mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.after=cloud-init.service,_netdev', '0', '2'] {% else %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% endif %} @@ -136,6 +137,7 @@ cloud_init_modules: - rsyslog - users_groups - ssh + - set_passwords # The modules that run in the 'config' stage cloud_config_modules: @@ -155,7 +157,6 @@ cloud_config_modules: {% endif %} - locale {% endif %} - - set_passwords {% if variant == "alpine" %} - apk_configure {% elif variant in ["debian", "ubuntu", "unknown"] %} @@ -165,8 +166,8 @@ cloud_config_modules: {% if variant == "ubuntu" %} - ubuntu_pro {% endif %} -{% elif variant in ["fedora", "mariner", "openeuler", "openmandriva", - "photon"] or is_rhel %} +{% elif variant in ["azurelinux", "fedora", "mariner", "openeuler", + "openmandriva", "photon"] or is_rhel %} {% if is_rhel %} - rh_subscription {% endif %} @@ -219,10 +220,10 @@ cloud_final_modules: # (not accessible to handlers/transforms) 
system_info: # This will affect which distro class gets used -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "freebsd", - "gentoo", "mariner", "netbsd", "openbsd", "OpenCloudOS", - "openeuler", "openmandriva", "photon", "suse", "TencentOS", - "ubuntu"] or is_rhel %} +{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", + "freebsd", "gentoo", "mariner", "netbsd", "openbsd", + "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", + "TencentOS", "ubuntu"] or is_rhel %} distro: {{ variant }} {% elif variant == "dragonfly" %} distro: dragonflybsd @@ -237,9 +238,10 @@ system_info: {% else %} name: {{ variant }} {% endif %} -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS", "ubuntu", "unknown"] +{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS", "ubuntu", + "unknown"] or is_bsd or is_rhel %} lock_passwd: True {% endif %} @@ -292,7 +294,7 @@ system_info: {% elif variant in ["freebsd", "netbsd", "openbsd"] %} network: renderers: ['{{ variant }}'] -{% elif variant in ["mariner", "photon"] %} +{% elif variant in ["azurelinux", "mariner", "photon"] %} network: renderers: ['networkd'] {% elif variant == "openmandriva" %} @@ -306,7 +308,7 @@ system_info: activators: ['netplan', 'eni', 'network-manager', 'networkd'] {% elif is_rhel %} network: - renderers: ['sysconfig', 'eni', 'netplan', 'network-manager', 'networkd'] + renderers: ['eni', 'netplan', 'network-manager', 'sysconfig', 'networkd'] {% endif %} {% if variant == "photon" %} # If set to true, cloud-init will not use fallback network config. 
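Both renderers lists above are consumed in priority order: cloud-init activates the first renderer whose availability check passes on the running system. A minimal sketch of that lookup, assuming cloudinit.net.renderers exposes a select() helper behaving as described:

    # Resolve the active network renderer from a priority list; select() is
    # expected to return the first available backend as a (name, class)
    # pair, or raise if none of the named renderers is usable here.
    from cloudinit.net import renderers

    name, render_cls = renderers.select(
        priority=["eni", "netplan", "network-manager", "sysconfig", "networkd"]
    )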
@@ -318,9 +320,10 @@ system_info: # Automatically discover the best ntp_client ntp_client: auto {% endif %} -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS", "ubuntu", "unknown"] +{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS", "ubuntu", + "unknown"] or is_rhel %} # Other config here will be given to the distro class and/or path classes paths: @@ -328,7 +331,7 @@ system_info: templates_dir: /etc/cloud/templates/ {% elif is_bsd %} paths: - run_dir: /var/run/ + run_dir: /var/run/cloud-init/ {% endif %} {% if variant == "debian" %} package_mirrors: @@ -365,8 +368,9 @@ system_info: {% endif %} {% if variant in ["debian", "ubuntu", "unknown"] %} ssh_svcname: ssh -{% elif variant in ["alpine", "amazon", "arch", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS"] or is_rhel %} +{% elif variant in ["alpine", "amazon", "arch", "azurelinux", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS"] + or is_rhel %} ssh_svcname: sshd {% endif %} diff --git a/.pc/netplan99-cannot-use-default.patch/cloudinit/net/netplan.py b/.pc/netplan99-cannot-use-default.patch/cloudinit/net/netplan.py index 9e36fe16a..532442dcb 100644 --- a/.pc/netplan99-cannot-use-default.patch/cloudinit/net/netplan.py +++ b/.pc/netplan99-cannot-use-default.patch/cloudinit/net/netplan.py @@ -2,11 +2,11 @@ import copy import io -import ipaddress import logging import os import textwrap -from typing import Optional, cast +from tempfile import SpooledTemporaryFile +from typing import Callable, List, Optional from cloudinit import features, safeyaml, subp, util from cloudinit.net import ( @@ -14,6 +14,7 @@ SYS_CLASS_NET, get_devicelist, renderer, + should_add_gateway_onlink_flag, subnet_is_ipv6, ) from cloudinit.net.network_state import NET_CONFIG_TO_V2, NetworkState @@ -47,7 +48,7 @@ def _get_params_dict_by_match(config, match): ) -def _extract_addresses(config: dict, entry: dict, ifname, features=None): +def _extract_addresses(config: dict, entry: dict, ifname, features: Callable): """This method parse a cloudinit.net.network_state dictionary (config) and maps netstate keys/values into a dictionary (entry) to represent netplan yaml. (config v1 -> netplan) @@ -97,8 +98,6 @@ def _listify(obj, token=" "): obj, ] - if features is None: - features = [] addresses = [] routes = [] nameservers = [] @@ -123,28 +122,17 @@ def _listify(obj, token=" "): "via": subnet.get("gateway"), "to": "default", } - try: - subnet_gateway = ipaddress.ip_address(subnet["gateway"]) - subnet_network = ipaddress.ip_network(addr, strict=False) - # If the gateway is not contained within the subnet's - # network, mark it as on-link so that it can still be - # reached. - if subnet_gateway not in subnet_network: - LOG.debug( - "Gateway %s is not contained within subnet %s," - " adding on-link flag", - subnet["gateway"], - addr, - ) - new_route["on-link"] = True - except ValueError as e: - LOG.warning( - "Failed to check whether gateway %s" - " is contained within subnet %s: %s", + # If the gateway is not contained within the subnet's + # network, mark it as on-link so that it can still be + # reached. 
+            if should_add_gateway_onlink_flag(subnet["gateway"], addr):
+                LOG.debug(
+                    "Gateway %s is not contained within subnet %s,"
+                    " adding on-link flag",
                     subnet["gateway"],
                     addr,
-                    e,
                 )
+                new_route["on-link"] = True
                 routes.append(new_route)
             if "dns_nameservers" in subnet:
                 nameservers += _listify(subnet.get("dns_nameservers", []))
@@ -152,7 +140,7 @@ def _listify(obj, token=" "):
                 searchdomains += _listify(subnet.get("dns_search", []))
             if "mtu" in subnet:
                 mtukey = "mtu"
-                if subnet_is_ipv6(subnet) and "ipv6-mtu" in features:
+                if subnet_is_ipv6(subnet) and "ipv6-mtu" in features():
                     mtukey = "ipv6-mtu"
                 entry.update({mtukey: subnet.get("mtu")})
             for route in subnet.get("routes", []):
@@ -235,6 +223,79 @@ def _clean_default(target=None):
             os.unlink(f)
 
 
+def netplan_api_write_yaml_file(net_config_content: str) -> bool:
+    """Use netplan.State._write_yaml_file to write netplan config
+
+    Where the netplan python API exists, prefer use of the private
+    _write_yaml_file to ensure proper permissions and file locations
+    are chosen by the netplan python bindings in the environment.
+
+    By calling the netplan API, allow netplan versions to change behavior
+    related to file permissions and treatment of sensitive configuration
+    under the API call to _write_yaml_file.
+
+    In future netplan releases, security-sensitive config may be written to
+    separate file or directory paths from world-readable configuration parts.
+    """
+    try:
+        from netplan.parser import Parser  # type: ignore
+        from netplan.state import State  # type: ignore
+    except ImportError:
+        LOG.debug(
+            "No netplan python module. Fallback to write %s",
+            CLOUDINIT_NETPLAN_FILE,
+        )
+        return False
+    try:
+        with SpooledTemporaryFile(mode="w") as f:
+            f.write(net_config_content)
+            f.flush()
+            f.seek(0, io.SEEK_SET)
+            parser = Parser()
+            parser.load_yaml(f)
+            state_output_file = State()
+            state_output_file.import_parser_results(parser)
+
+            # Write our desired basename 50-cloud-init.yaml, allow netplan to
+            # determine default root-dir /etc/netplan and/or specialized
+            # filenames or read permissions based on whether this config
+            # contains secrets.
+            state_output_file._write_yaml_file(
+                os.path.basename(CLOUDINIT_NETPLAN_FILE)
+            )
+    except Exception as e:
+        LOG.warning(
+            "Unable to render network config using netplan python module."
+            " Fallback to write %s. %s",
+            CLOUDINIT_NETPLAN_FILE,
+            e,
+        )
+        return False
+    LOG.debug("Rendered netplan config using netplan python API")
+    return True
+
+
+def has_netplan_config_changed(cfg_file: str, content: str) -> bool:
+    """Return True when new netplan config has changed vs previous."""
+    if not os.path.exists(cfg_file):
+        # This is our first write of netplan's cfg_file, representing change.
+        return True
+    # Check prev cfg vs current cfg. Ignore comments
+    prior_cfg = util.load_yaml(util.load_text_file(cfg_file))
+    return prior_cfg != util.load_yaml(content)
+
+
+def fallback_write_netplan_yaml(cfg_file: str, content: str):
+    """Write netplan config to cfg_file because python API was unavailable."""
+    mode = 0o600 if features.NETPLAN_CONFIG_ROOT_READ_ONLY else 0o644
+    if os.path.exists(cfg_file):
+        current_mode = util.get_permissions(cfg_file)
+        if current_mode & mode == current_mode:
+            # preserve mode if existing perms are more strict
+            mode = current_mode
+    util.write_file(cfg_file, content, mode=mode)
+
+
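Note that has_netplan_config_changed compares parsed YAML rather than raw file bytes, so comment and whitespace churn alone does not count as a config change (and therefore does not trigger `netplan generate`). A small sketch of that property using cloud-init's own YAML loader:

    # Two renderings that differ only in comments/whitespace parse to equal
    # structures, so the netplan config is treated as unchanged.
    from cloudinit import util

    prior = "network:\n  version: 2  # written by cloud-init\n"
    fresh = "# regenerated\nnetwork:\n  version: 2\n"
    assert util.load_yaml(prior) == util.load_yaml(fresh)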
 class Renderer(renderer.Renderer):
     """Renders network information in a /etc/netplan/network.yaml format."""
 
@@ -248,11 +309,10 @@ def __init__(self, config=None):
         self.netplan_header = config.get("netplan_header", None)
         self._postcmds = config.get("postcmds", False)
         self.clean_default = config.get("clean_default", True)
-        self._features = config.get("features", None)
+        self._features = config.get("features") or []
 
-    @property
-    def features(self):
-        if self._features is None:
+    def features(self) -> List[str]:
+        if not self._features:
             try:
                 info_blob, _err = subp.subp(self.NETPLAN_INFO, capture=True)
                 info = util.load_yaml(info_blob)
@@ -287,33 +347,22 @@ def render_network_state(
             header += "\n"
         content = header + content
 
-        # determine if existing config files have the same content
-        same_content = False
-        if os.path.exists(fpnplan):
-            hashed_content = util.hash_buffer(io.BytesIO(content.encode()))
-            with open(fpnplan, "rb") as f:
-                hashed_original_content = util.hash_buffer(f)
-            if hashed_content == hashed_original_content:
-                same_content = True
-
-        mode = 0o600 if features.NETPLAN_CONFIG_ROOT_READ_ONLY else 0o644
-        if not same_content and os.path.exists(fpnplan):
-            current_mode = util.get_permissions(fpnplan)
-            if current_mode & mode == current_mode:
-                # preserve mode if existing perms are more strict than default
-                mode = current_mode
-        util.write_file(fpnplan, content, mode=mode)
+        netplan_config_changed = has_netplan_config_changed(fpnplan, content)
+        if not netplan_api_write_yaml_file(content):
+            fallback_write_netplan_yaml(fpnplan, content)
 
         if self.clean_default:
             _clean_default(target=target)
-        self._netplan_generate(run=self._postcmds, same_content=same_content)
+        self._netplan_generate(
+            run=self._postcmds, config_changed=netplan_config_changed
+        )
         self._net_setup_link(run=self._postcmds)
 
-    def _netplan_generate(self, run: bool = False, same_content: bool = False):
+    def _netplan_generate(self, run: bool, config_changed: bool):
         if not run:
-            LOG.debug("netplan generate postcmd disabled")
+            LOG.debug("netplan generate postcmds disabled")
             return
-        if same_content:
+        if not config_changed:
             LOG.debug(
                 "skipping call to `netplan generate`."
" reason: identical netplan config" @@ -329,6 +378,9 @@ def _net_setup_link(self, run=False): if not run: LOG.debug("netplan net_setup_link postcmd disabled") return + elif "net.ifnames=0" in util.get_cmdline(): + LOG.debug("Predictable interface names disabled.") + return setup_lnk = ["udevadm", "test-builtin", "net_setup_link"] # It's possible we can race a udev rename and attempt to run @@ -353,7 +405,6 @@ def _net_setup_link(self, run=False): ) from last_exception def _render_content(self, network_state: NetworkState) -> str: - # if content already in netplan format, pass it back if network_state.version == 2: LOG.debug("V2 to V2 passthrough") @@ -403,13 +454,10 @@ def _render_content(self, network_state: NetworkState) -> str: bond_config = {} # extract bond params and drop the bond_ prefix as it's # redundant in v2 yaml format - v2_bond_map = cast(dict, NET_CONFIG_TO_V2.get("bond")) - # Previous cast is needed to help mypy to know that the key is - # present in `NET_CONFIG_TO_V2`. This could probably be removed - # by using `Literal` when supported. + v2_bond_map = NET_CONFIG_TO_V2["bond"] for match in ["bond_", "bond-"]: bond_params = _get_params_dict_by_match(ifcfg, match) - for (param, value) in bond_params.items(): + for param, value in bond_params.items(): newname = v2_bond_map.get(param.replace("_", "-")) if newname is None: continue @@ -427,9 +475,18 @@ def _render_content(self, network_state: NetworkState) -> str: elif if_type == "bridge": # required_keys = ['name', 'bridge_ports'] + # + # Rather than raise an exception on `sorted(None)`, log a + # warning and skip this interface when invalid configuration is + # received. bridge_ports = ifcfg.get("bridge_ports") - # mypy wrong error. `copy(None)` is supported: - ports = sorted(copy.copy(bridge_ports)) # type: ignore + if bridge_ports is None: + LOG.warning( + "Invalid config. The key", + f"'bridge_ports' is required in {config}.", + ) + continue + ports = sorted(copy.copy(bridge_ports)) bridge: dict = { "interfaces": ports, } @@ -441,11 +498,8 @@ def _render_content(self, network_state: NetworkState) -> str: # v2 yaml uses different names for the keys # and at least one value format change - v2_bridge_map = cast(dict, NET_CONFIG_TO_V2.get("bridge")) - # Previous cast is needed to help mypy to know that the key is - # present in `NET_CONFIG_TO_V2`. This could probably be removed - # by using `Literal` when supported. 
- for (param, value) in params.items(): + v2_bridge_map = NET_CONFIG_TO_V2["bridge"] + for param, value in params.items(): newname = v2_bridge_map.get(param) if newname is None: continue diff --git a/.pc/netplan99-cannot-use-default.patch/tests/unittests/distros/test_netconfig.py b/.pc/netplan99-cannot-use-default.patch/tests/unittests/distros/test_netconfig.py index 358f60dd0..b9cab17f6 100644 --- a/.pc/netplan99-cannot-use-default.patch/tests/unittests/distros/test_netconfig.py +++ b/.pc/netplan99-cannot-use-default.patch/tests/unittests/distros/test_netconfig.py @@ -7,15 +7,9 @@ from textwrap import dedent from unittest import mock -from cloudinit import ( - distros, - features, - helpers, - safeyaml, - settings, - subp, - util, -) +import yaml + +from cloudinit import distros, features, helpers, settings, subp, util from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit.net.activators import IfUpDownActivator from tests.unittests.helpers import ( @@ -303,7 +297,7 @@ def setUp(self): def _get_distro(self, dname, renderers=None, activators=None): cls = distros.fetch(dname) - cfg = settings.CFG_BUILTIN + cfg = copy.deepcopy(settings.CFG_BUILTIN) cfg["system_info"]["distro"] = dname system_info_network_cfg = {} if renderers: @@ -735,7 +729,6 @@ def test_apply_network_config_rh(self): GATEWAY=192.168.1.254 IPADDR=192.168.1.5 NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -745,7 +738,6 @@ def test_apply_network_config_rh(self): """\ BOOTPROTO=dhcp DEVICE=eth1 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -776,7 +768,6 @@ def test_apply_network_config_ipv6_rh(self): IPV6_AUTOCONF=no IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 IPV6_FORCE_ACCEPT_RA=no - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -786,7 +777,6 @@ def test_apply_network_config_ipv6_rh(self): """\ BOOTPROTO=dhcp DEVICE=eth1 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -833,7 +823,6 @@ def test_vlan_render_unsupported(self): HWADDR=00:16:3e:60:7c:df IPADDR=192.10.1.2 NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -845,7 +834,6 @@ def test_vlan_render_unsupported(self): DEVICE=infra0 IPADDR=10.0.1.2 NETMASK=255.255.0.0 - NM_CONTROLLED=no ONBOOT=yes PHYSDEV=eth0 USERCTL=no @@ -881,7 +869,6 @@ def test_vlan_render(self): DEVICE=eth0 IPADDR=192.10.1.2 NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -893,7 +880,6 @@ def test_vlan_render(self): DEVICE=eth0.1001 IPADDR=10.0.1.2 NETMASK=255.255.0.0 - NM_CONTROLLED=no ONBOOT=yes PHYSDEV=eth0 USERCTL=no @@ -1172,7 +1158,7 @@ def test_photon_network_config_v1_with_duplicates(self): [Address] Address=192.168.0.102/24""" - net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS) + net_cfg = yaml.safe_load(V1_NET_CFG_WITH_DUPS) expected = self.create_conf_dict(expected.splitlines()) expected_cfgs = { @@ -1297,7 +1283,132 @@ def test_mariner_network_config_v1_with_duplicates(self): [Address] Address=192.168.0.102/24""" - net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS) + net_cfg = yaml.safe_load(V1_NET_CFG_WITH_DUPS) + + expected = self.create_conf_dict(expected.splitlines()) + expected_cfgs = { + self.nwk_file_path("eth0"): expected, + } + + self._apply_and_verify( + self.distro.apply_network_config, net_cfg, expected_cfgs.copy() + ) + + +class TestNetCfgDistroAzureLinux(TestNetCfgDistroBase): + def setUp(self): + super().setUp() + self.distro = self._get_distro("azurelinux", renderers=["networkd"]) + + def create_conf_dict(self, contents): + content_dict = {} + for 
line in contents: + if line: + line = line.strip() + if line and re.search(r"^\[(.+)\]$", line): + content_dict[line] = [] + key = line + elif line: + assert key + content_dict[key].append(line) + + return content_dict + + def compare_dicts(self, actual, expected): + for k, v in actual.items(): + self.assertEqual(sorted(expected[k]), sorted(v)) + + def _apply_and_verify( + self, apply_fn, config, expected_cfgs=None, bringup=False + ): + if not expected_cfgs: + raise ValueError("expected_cfg must not be None") + + tmpd = None + with mock.patch("cloudinit.net.networkd.available") as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + actual = self.create_conf_dict(results[cfgpath].splitlines()) + self.compare_dicts(actual, expected) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def nwk_file_path(self, ifname): + return "/etc/systemd/network/10-cloud-init-%s.network" % ifname + + def net_cfg_1(self, ifname): + ret = ( + """\ + [Match] + Name=%s + [Network] + DHCP=no + [Address] + Address=192.168.1.5/24 + [Route] + Gateway=192.168.1.254""" + % ifname + ) + return ret + + def net_cfg_2(self, ifname): + ret = ( + """\ + [Match] + Name=%s + [Network] + DHCP=ipv4""" + % ifname + ) + return ret + + def test_azurelinux_network_config_v1(self): + tmp = self.net_cfg_1("eth0").splitlines() + expected_eth0 = self.create_conf_dict(tmp) + + tmp = self.net_cfg_2("eth1").splitlines() + expected_eth1 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path("eth0"): expected_eth0, + self.nwk_file_path("eth1"): expected_eth1, + } + + self._apply_and_verify( + self.distro.apply_network_config, V1_NET_CFG, expected_cfgs.copy() + ) + + def test_azurelinux_network_config_v2(self): + tmp = self.net_cfg_1("eth7").splitlines() + expected_eth7 = self.create_conf_dict(tmp) + + tmp = self.net_cfg_2("eth9").splitlines() + expected_eth9 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path("eth7"): expected_eth7, + self.nwk_file_path("eth9"): expected_eth9, + } + + self._apply_and_verify( + self.distro.apply_network_config, V2_NET_CFG, expected_cfgs.copy() + ) + + def test_azurelinux_network_config_v1_with_duplicates(self): + expected = """\ + [Match] + Name=eth0 + [Network] + DHCP=no + DNS=1.2.3.4 + Domains=test.com + [Address] + Address=192.168.0.102/24""" + + net_cfg = yaml.safe_load(V1_NET_CFG_WITH_DUPS) expected = self.create_conf_dict(expected.splitlines()) expected_cfgs = { diff --git a/.pc/netplan99-cannot-use-default.patch/tests/unittests/net/network_configs.py b/.pc/netplan99-cannot-use-default.patch/tests/unittests/net/network_configs.py new file mode 100644 index 000000000..b68319cc8 --- /dev/null +++ b/.pc/netplan99-cannot-use-default.patch/tests/unittests/net/network_configs.py @@ -0,0 +1,4925 @@ +"""A (hopefully) temporary home for network config test data.""" + +import textwrap + +NETWORK_CONFIGS = { + "small_suse_dhcp6": { + "expected_sysconfig_opensuse": { + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=cf:d6:af:48:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DHCLIENT6_MODE=managed + LLADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + STARTMODE=auto""" + ), + }, + "yaml_v1": textwrap.dedent( + """ + version: 1 + config: + # Physical interfaces. 
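+                # eth99 stacks dhcp4, dhcp6 and a static subnet on a
+                # single NIC; eth1 is matched by MAC address only and
+                # carries no subnet configuration.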
+ - type: physical + name: eth99 + mac_address: c0:d6:9f:2c:e8:80 + subnets: + - type: dhcp4 + - type: dhcp6 + - type: static + address: 192.168.21.3/24 + dns_nameservers: + - 8.8.8.8 + - 8.8.4.4 + dns_search: barley.maas sach.maas + routes: + - gateway: 65.61.151.37 + netmask: 0.0.0.0 + network: 0.0.0.0 + metric: 10000 + - type: physical + name: eth1 + mac_address: cf:d6:af:48:e8:80 + - type: nameserver + address: + - 1.2.3.4 + - 5.6.7.8 + search: + - wark.maas + """ + ), + "yaml_v2": textwrap.dedent( + """ + version: 2 + ethernets: + eth1: + match: + macaddress: cf:d6:af:48:e8:80 + set-name: eth1 + eth99: + dhcp4: true + dhcp6: true + addresses: + - 192.168.21.3/24 + match: + macaddress: c0:d6:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 8.8.4.4 + search: + - barley.maas + - sach.maas + routes: + - metric: 10000 + to: 0.0.0.0/0 + via: 65.61.151.37 + set-name: eth99 + """ + ), + }, + "small_v1": { + "expected_networkd_eth99": textwrap.dedent( + """\ + [Match] + Name=eth99 + MACAddress=c0:d6:9f:2c:e8:80 + [Address] + Address=192.168.21.3/24 + [Network] + DHCP=ipv4 + Domains=barley.maas sach.maas + Domains=wark.maas + DNS=1.2.3.4 5.6.7.8 + DNS=8.8.8.8 8.8.4.4 + [Route] + Gateway=65.61.151.37 + Destination=0.0.0.0/0 + Metric=10000 + """ + ).rstrip(" "), + "expected_networkd_eth1": textwrap.dedent( + """\ + [Match] + Name=eth1 + MACAddress=cf:d6:af:48:e8:80 + [Network] + DHCP=no + Domains=wark.maas + DNS=1.2.3.4 5.6.7.8 + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + dns-nameservers 1.2.3.4 5.6.7.8 + dns-search wark.maas + + iface eth1 inet manual + + auto eth99 + iface eth99 inet dhcp + + # control-alias eth99 + iface eth99 inet static + address 192.168.21.3/24 + dns-nameservers 8.8.8.8 8.8.4.4 + dns-search barley.maas sach.maas + post-up route add default gw 65.61.151.37 metric 10000 || true + pre-down route del default gw 65.61.151.37 metric 10000 || true + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + eth1: + match: + macaddress: cf:d6:af:48:e8:80 + set-name: eth1 + eth99: + addresses: + - 192.168.21.3/24 + dhcp4: true + match: + macaddress: c0:d6:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 8.8.4.4 + search: + - barley.maas + - sach.maas + routes: + - metric: 10000 + to: 0.0.0.0/0 + via: 65.61.151.37 + set-name: eth99 + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=cf:d6:af:48:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + LLADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + STARTMODE=auto""" + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth1 + HWADDR=cf:d6:af:48:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEFROUTE=yes + DEVICE=eth99 + DHCLIENT_SET_DEFAULT_ROUTE=yes + DNS1=8.8.8.8 + DNS2=8.8.4.4 + DOMAIN="barley.maas sach.maas" + GATEWAY=65.61.151.37 + HWADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + METRIC=10000 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + }, + "expected_network_manager": { + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=CF:D6:AF:48:E8:80 + + """ + ), + "cloud-init-eth99.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth99 + uuid=b1b88000-1f03-5360-8377-1a2205efffb4 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:D6:9F:2C:E8:80 + + [ipv4] + method=auto + may-fail=false + address1=192.168.21.3/24 + route1=0.0.0.0/0,65.61.151.37 + dns=8.8.8.8;8.8.4.4; + dns-search=barley.maas;sach.maas; + + """ + ), + }, + "yaml": textwrap.dedent( + """ + version: 1 + config: + # Physical interfaces. + - type: physical + name: eth99 + mac_address: c0:d6:9f:2c:e8:80 + subnets: + - type: dhcp4 + - type: static + address: 192.168.21.3/24 + dns_nameservers: + - 8.8.8.8 + - 8.8.4.4 + dns_search: barley.maas sach.maas + routes: + - gateway: 65.61.151.37 + netmask: 0.0.0.0 + network: 0.0.0.0 + metric: 10000 + - type: physical + name: eth1 + mac_address: cf:d6:af:48:e8:80 + - type: nameserver + address: + - 1.2.3.4 + - 5.6.7.8 + search: + - wark.maas + """ + ), + }, + # We test a separate set of configs here because v2 doesn't support + # generic nameservers, so that aspect needs to be modified + "small_v2": { + "expected_networkd_eth99": textwrap.dedent( + """\ + [Match] + Name=eth99 + MACAddress=c0:d6:9f:2c:e8:80 + [Address] + Address=192.168.21.3/24 + [Network] + DHCP=ipv4 + Domains=barley.maas sach.maas + DNS=8.8.8.8 8.8.4.4 + [Route] + Gateway=65.61.151.37 + Destination=0.0.0.0/0 + Metric=10000 + """ + ).rstrip(" "), + "expected_networkd_eth1": textwrap.dedent( + """\ + [Match] + Name=eth1 + MACAddress=cf:d6:af:48:e8:80 + [Network] + DHCP=no + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + iface eth1 inet manual + + auto eth99 + iface eth99 inet dhcp + + # control-alias eth99 + iface eth99 inet static + address 192.168.21.3/24 + dns-nameservers 8.8.8.8 8.8.4.4 + dns-search barley.maas sach.maas + post-up route add default gw 65.61.151.37 metric 10000 || true + pre-down route del default gw 65.61.151.37 metric 10000 || true + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=cf:d6:af:48:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + LLADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + STARTMODE=auto""" + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth1 + HWADDR=cf:d6:af:48:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEFROUTE=yes + DEVICE=eth99 + DHCLIENT_SET_DEFAULT_ROUTE=yes + DNS1=8.8.8.8 + DNS2=8.8.4.4 + DOMAIN="barley.maas sach.maas" + GATEWAY=65.61.151.37 + HWADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + METRIC=10000 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + }, + "expected_network_manager": { + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=CF:D6:AF:48:E8:80 + + """ + ), + "cloud-init-eth99.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth99 + uuid=b1b88000-1f03-5360-8377-1a2205efffb4 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:D6:9F:2C:E8:80 + + [ipv4] + method=auto + may-fail=false + route1=0.0.0.0/0,65.61.151.37 + address1=192.168.21.3/24 + dns=8.8.8.8;8.8.4.4; + dns-search=barley.maas;sach.maas; + + """ + ), + }, + "yaml": textwrap.dedent( + """ + version: 2 + ethernets: + eth1: + match: + macaddress: cf:d6:af:48:e8:80 + set-name: eth1 + eth99: + addresses: + - 192.168.21.3/24 + dhcp4: true + match: + macaddress: c0:d6:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 8.8.4.4 + search: + - barley.maas + - sach.maas + routes: + - metric: 10000 + to: 0.0.0.0/0 + via: 65.61.151.37 + set-name: eth99 + """ + ), + }, + "v4_and_v6": { + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Network] + DHCP=yes + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet dhcp + + # control-alias iface0 + iface iface0 inet6 dhcp + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + dhcp4: true + dhcp6: true + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DHCLIENT6_MODE=managed + STARTMODE=auto""" + ) + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=auto + may-fail=true + + [ipv6] + method=auto + may-fail=true + + """ + ), + }, + "yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp4'} + - {'type': 'dhcp6'} + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp4: true + dhcp6: true + """ + ), + }, + "v1_ipv4_and_ipv6_static": { + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Link] + MTUBytes=8999 + [Network] + DHCP=no + [Address] + Address=192.168.14.2/24 + [Address] + Address=2001:1::1/64 + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet static + address 192.168.14.2/24 + mtu 9000 + + # control-alias iface0 + iface iface0 inet6 static + address 2001:1::1/64 + mtu 1500 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + ipv6-mtu: 1500 + mtu: 9000 + """ + ).rstrip(" "), + "yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + mtu: 8999 + subnets: + - type: static + address: 192.168.14.2/24 + mtu: 9000 + - type: static + address: 2001:1::1/64 + mtu: 1500 + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 + NETMASK=255.255.255.0 + STARTMODE=auto + MTU=9000 + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + IPADDR=192.168.14.2 + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + NETMASK=255.255.255.0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + MTU=9000 + IPV6_MTU=1500 + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=9000 + + [ipv4] + method=manual + may-fail=false + address1=192.168.14.2/24 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::1/64 + + """ + ), + }, + }, + "v2_ipv4_and_ipv6_static": { + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + mtu: 9000 + """ + ).rstrip(" "), + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Link] + MTUBytes=9000 + [Network] + DHCP=no + [Address] + Address=192.168.14.2/24 + [Address] + Address=2001:1::1/64 + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet static + address 192.168.14.2/24 + mtu 9000 + + # control-alias iface0 + iface iface0 inet6 static + address 2001:1::1/64 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + mtu: 9000 + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 + NETMASK=255.255.255.0 + STARTMODE=auto + MTU=9000 + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + IPADDR=192.168.14.2 + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + NETMASK=255.255.255.0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + MTU=9000 + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=9000 + + [ipv4] + method=manual + may-fail=false + address1=192.168.14.2/24 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::1/64 + + """ + ), + }, + }, + "v6_and_v4": { + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DHCLIENT6_MODE=managed + STARTMODE=auto""" + ) + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv6] + method=auto + may-fail=true + + [ipv4] + method=auto + may-fail=true + + """ + ), + }, + "yaml": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - type: dhcp6 + - type: dhcp4 + """ + ).rstrip(" "), + # Do not include a yaml_v2 here as it renders exactly the same as + # the v4_and_v6 case, and that's fine + }, + "dhcpv6_only": { + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Network] + DHCP=ipv6 + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """ + ).rstrip(" "), + "yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp6: true + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv6] + method=auto + may-fail=false + + """ + ), + }, + }, + "dhcpv6_accept_ra": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 1 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + accept-ra: true + dhcp6: true + """ + ).rstrip(" "), + "yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: true + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: true + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=yes + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Network] + DHCP=ipv6 + IPv6AcceptRA=True + """ + ).rstrip(" "), + }, + "dhcpv6_reject_ra": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 0 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + accept-ra: false + dhcp6: true + """ + ).rstrip(" "), + 
"yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: false + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: false + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=no + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Network] + DHCP=ipv6 + IPv6AcceptRA=False + """ + ).rstrip(" "), + }, + "ipv6_slaac": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + dhcp 0 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """ + ).rstrip(" "), + "yaml": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_slaac'} + """ + ).rstrip(" "), + # A yaml_v2 doesn't make sense here as the configuration looks exactly + # the same as the dhcpv6_only test. + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=info + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv6] + method=auto + may-fail=false + + [ipv4] + method=disabled + + """ + ), + }, + }, + "static6": { + "yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + accept-ra: 'no' + subnets: + - type: 'static6' + address: 2001:1::1/64 + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + accept-ra: false + addresses: + - 2001:1::1/64 + """ + ), + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + }, + "dhcpv6_stateless": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + dhcp 1 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """ + ).rstrip(" "), + "yaml": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateless'} + """ + ).rstrip(" "), + # yaml_v2 makes no sense here as it would be the exact same + # configuration as the dhcpv6_only test + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=info + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + DHCPV6C_OPTIONS=-S + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv6] + method=auto + may-fail=false + + [ipv4] + method=disabled + + """ + ), + }, + }, + "dhcpv6_stateful": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + accept-ra: true + dhcp6: true + """ + ).rstrip(" "), + "yaml": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateful'} + accept-ra: true + """ + ).rstrip(" "), + # yaml_v2 makes no sense here as it would be the exact same + # configuration as the dhcpv6_only test + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FAILURE_FATAL=yes + IPV6_FORCE_ACCEPT_RA=yes + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + }, + "wakeonlan_disabled": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet dhcp + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + ethernets: + iface0: + dhcp4: true + wakeonlan: false + version: 2 + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=auto + may-fail=false + + """ + ), + }, + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp4: true + wakeonlan: false + """ + ).rstrip(" "), + }, + "wakeonlan_enabled": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet dhcp + ethernet-wol g + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + ethernets: + iface0: + dhcp4: true + wakeonlan: true + version: 2 + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + ETHTOOL_OPTS="wol g" + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=iface0 + ETHTOOL_OPTS="wol g" + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + wake-on-lan=64 + + [ipv4] + method=auto + may-fail=false + + """ + ), + }, + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp4: true + wakeonlan: true + """ + ).rstrip(" "), + }, + "large_v1": { + "expected_eni": """\ +auto lo +iface lo inet loopback + dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4 + dns-search barley.maas wark.maas foobar.maas + +iface eth0 inet manual + +auto eth1 +iface eth1 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto eth2 +iface eth2 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +iface eth3 inet manual + +iface eth4 inet manual + +# control-manual eth5 +iface eth5 inet dhcp + +auto ib0 +iface ib0 inet static + address 192.168.200.7/24 + mtu 9000 + hwaddress a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + +auto bond0 +iface bond0 inet6 dhcp + bond-mode active-backup + bond-slaves none + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + hwaddress aa:bb:cc:dd:ee:ff + +auto br0 +iface br0 inet static + address 192.168.14.2/24 + bridge_ageing 250 + bridge_bridgeprio 22 + bridge_fd 1 + bridge_gcint 2 + bridge_hello 1 + bridge_maxage 10 + bridge_pathcost eth3 50 + bridge_pathcost eth4 75 + bridge_portprio eth3 28 + bridge_portprio eth4 14 + bridge_ports eth3 eth4 + bridge_stp off + bridge_waitport 1 eth3 + bridge_waitport 2 eth4 + hwaddress bb:bb:bb:bb:bb:aa + +# control-alias br0 +iface br0 inet6 static + address 2001:1::1/64 + post-up route add -A inet6 default gw 2001:4800:78ff:1b::1 || true + pre-down route del -A inet6 default gw 2001:4800:78ff:1b::1 || true + +auto bond0.200 +iface bond0.200 inet dhcp + vlan-raw-device bond0 + vlan_id 200 + +auto eth0.101 +iface eth0.101 inet static + address 192.168.0.2/24 + dns-nameservers 192.168.0.10 10.23.23.134 + dns-search barley.maas sacchromyces.maas brettanomyces.maas + gateway 192.168.0.1 + mtu 1500 + hwaddress aa:bb:cc:dd:ee:11 + vlan-raw-device eth0 + vlan_id 101 + +# control-alias eth0.101 +iface eth0.101 inet static + address 192.168.2.10/24 + +post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +""", + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + eth0: + match: + macaddress: c0:d6:9f:2c:e8:80 + set-name: eth0 + eth1: + match: + macaddress: aa:d6:9f:2c:e8:80 + set-name: eth1 + eth2: + match: + macaddress: c0:bb:9f:2c:e8:80 + set-name: eth2 + eth3: + match: + macaddress: 66:bb:9f:2c:e8:80 + set-name: eth3 + eth4: + match: + macaddress: 98:bb:9f:2c:e8:80 + set-name: eth4 + eth5: + dhcp4: true + match: + macaddress: 98:bb:9f:2c:e8:8a + set-name: eth5 + bonds: + bond0: + dhcp6: true + interfaces: + - eth1 + - eth2 + macaddress: aa:bb:cc:dd:ee:ff + parameters: + mii-monitor-interval: 100 + mode: active-backup + transmit-hash-policy: layer3+4 + bridges: + br0: + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + interfaces: + - eth3 + - eth4 + macaddress: bb:bb:bb:bb:bb:aa + nameservers: + addresses: + - 8.8.8.8 + - 4.4.4.4 + - 8.8.4.4 + search: + - barley.maas + - wark.maas + - foobar.maas + parameters: + ageing-time: 250 + forward-delay: 1 + hello-time: 1 + max-age: 10 + path-cost: + eth3: 50 + eth4: 75 + 
port-priority: + eth3: 28 + eth4: 14 + priority: 22 + stp: false + routes: + - to: ::/0 + via: 2001:4800:78ff:1b::1 + vlans: + bond0.200: + dhcp4: true + id: 200 + link: bond0 + eth0.101: + addresses: + - 192.168.0.2/24 + - 192.168.2.10/24 + id: 101 + link: eth0 + macaddress: aa:bb:cc:dd:ee:11 + mtu: 1500 + nameservers: + addresses: + - 192.168.0.10 + - 10.23.23.134 + search: + - barley.maas + - sacchromyces.maas + - brettanomyces.maas + routes: + - to: default + via: 192.168.0.1 + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_MODULE_OPTS="mode=active-backup """ + """xmit_hash_policy=layer3+4 """ + """miimon=100" + BONDING_SLAVE_0=eth1 + BONDING_SLAVE_1=eth2 + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + LLADDR=aa:bb:cc:dd:ee:ff + STARTMODE=auto""" + ), + "ifcfg-bond0.200": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + ETHERDEVICE=bond0 + STARTMODE=auto + VLAN_ID=200""" + ), + "ifcfg-br0": textwrap.dedent( + """\ + BRIDGE_AGEINGTIME=250 + BOOTPROTO=static + IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 + LLADDRESS=bb:bb:bb:bb:bb:aa + NETMASK=255.255.255.0 + BRIDGE_PRIORITY=22 + BRIDGE_PORTS='eth3 eth4' + STARTMODE=auto + BRIDGE_STP=off""" + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=c0:d6:9f:2c:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth0.101": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.0.2 + IPADDR1=192.168.2.10 + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ETHERDEVICE=eth0 + STARTMODE=auto + VLAN_ID=101""" + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:d6:9f:2c:e8:80 + STARTMODE=hotplug""" + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=c0:bb:9f:2c:e8:80 + STARTMODE=hotplug""" + ), + "ifcfg-eth3": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=66:bb:9f:2c:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth4": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=98:bb:9f:2c:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth5": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + LLADDR=98:bb:9f:2c:e8:8a + STARTMODE=manual""" + ), + "ifcfg-ib0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + IPADDR=192.168.200.7 + MTU=9000 + NETMASK=255.255.255.0 + STARTMODE=auto + TYPE=InfiniBand""" + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup """ + """xmit_hash_policy=layer3+4 """ + """miimon=100" + BONDING_SLAVE0=eth1 + BONDING_SLAVE1=eth2 + BOOTPROTO=none + DEVICE=bond0 + DHCPV6C=yes + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff + ONBOOT=yes + TYPE=Bond + USERCTL=no""" + ), + "ifcfg-bond0.200": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=bond0.200 + DHCLIENT_SET_DEFAULT_ROUTE=no + ONBOOT=yes + PHYSDEV=bond0 + USERCTL=no + VLAN=yes""" + ), + "ifcfg-br0": textwrap.dedent( + """\ + AGEING=250 + BOOTPROTO=none + DEFROUTE=yes + DEVICE=br0 + IPADDR=192.168.14.2 + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + IPV6_DEFAULTGW=2001:4800:78ff:1b::1 + MACADDR=bb:bb:bb:bb:bb:aa + NETMASK=255.255.255.0 + ONBOOT=yes + PRIO=22 + STP=no + TYPE=Bridge + USERCTL=no""" + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth0 + HWADDR=c0:d6:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth0.101": textwrap.dedent( + """\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0.101 + DNS1=192.168.0.10 + 
DNS2=10.23.23.134 + DOMAIN="barley.maas sacchromyces.maas brettanomyces.maas" + GATEWAY=192.168.0.1 + IPADDR=192.168.0.2 + IPADDR1=192.168.2.10 + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + PHYSDEV=eth0 + USERCTL=no + VLAN=yes""" + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth1 + HWADDR=aa:d6:9f:2c:e8:80 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth2 + HWADDR=c0:bb:9f:2c:e8:80 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth3": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth3 + HWADDR=66:bb:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth4": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth4 + HWADDR=98:bb:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth5": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=eth5 + DHCLIENT_SET_DEFAULT_ROUTE=no + HWADDR=98:bb:9f:2c:e8:8a + ONBOOT=no + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-ib0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=ib0 + HWADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + IPADDR=192.168.200.7 + MTU=9000 + NETMASK=255.255.255.0 + ONBOOT=yes + TYPE=InfiniBand + USERCTL=no""" + ), + }, + "expected_network_manager": { + "cloud-init-eth3.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth3 + uuid=b7e95dda-7746-5bf8-bf33-6e5f3c926790 + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=66:BB:9F:2C:E8:80 + + """ + ), + "cloud-init-eth5.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth5 + uuid=5fda13c7-9942-5e90-a41b-1d043bd725dc + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=98:BB:9F:2C:E8:8A + + [ipv4] + method=auto + may-fail=false + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-ib0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init ib0 + uuid=11a1dda7-78b4-5529-beba-d9b5f549ad7b + autoconnect-priority=120 + type=infiniband + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [infiniband] + transport-mode=datagram + mtu=9000 + mac-address=A0:00:02:20:FE:80:00:00:00:00:00:00:EC:0D:9A:03:00:15:E2:C1 + + [ipv4] + method=manual + may-fail=false + address1=192.168.200.7/24 + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-bond0.200.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0.200 + uuid=88984a9c-ff22-5233-9267-86315e0acaa7 + autoconnect-priority=120 + type=vlan + interface-name=bond0.200 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=200 + parent=54317911-f840-516b-a10d-82cb4c1f075c + + [ipv4] + method=auto + may-fail=false + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:D6:9F:2C:E8:80 + + """ + ), + "cloud-init-eth4.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth4 + uuid=e27e4959-fb50-5580-b9a4-2073554627b9 + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=98:BB:9F:2C:E8:80 + + """ + ), + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:D6:9F:2C:E8:80 + + """ + ), + "cloud-init-br0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init br0 + uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + autoconnect-priority=120 + type=bridge + interface-name=br0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bridge] + stp=false + priority=22 + mac-address=BB:BB:BB:BB:BB:AA + + [ipv4] + method=manual + may-fail=false + address1=192.168.14.2/24 + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + [ipv6] + method=manual + may-fail=false + address1=2001:1::1/64 + route1=::/0,2001:4800:78ff:1b::1 + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth0.101.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0.101 + uuid=b5acec5e-db80-5935-8b02-0d5619fc42bf + autoconnect-priority=120 + type=vlan + interface-name=eth0.101 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=101 + parent=1dd9a779-d327-56e1-8454-c65e2556c12c + + [ipv4] + method=manual + may-fail=false + address1=192.168.0.2/24 + gateway=192.168.0.1 + address2=192.168.2.10/24 + dns=192.168.0.10;10.23.23.134; + dns-search=barley.maas;sacchromyces.maas;brettanomyces.maas; + + """ + ), + "cloud-init-bond0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0 + uuid=54317911-f840-516b-a10d-82cb4c1f075c + autoconnect-priority=120 + type=bond + interface-name=bond0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bond] + mode=active-backup + miimon=100 + xmit_hash_policy=layer3+4 + + [ipv6] + method=auto + may-fail=false + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth2.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth2 + uuid=5559a242-3421-5fdd-896e-9cb8313d5804 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:BB:9F:2C:E8:80 + + """ + ), + }, + "yaml": textwrap.dedent( + """ + version: 1 + config: + # Physical interfaces. 
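+                # This config exercises most v1 types at once: six
+                # physical NICs, a bond, VLANs on both a NIC and the
+                # bond, an infiniband interface, and a dual-stack
+                # bridge.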
+ - type: physical + name: eth0 + mac_address: c0:d6:9f:2c:e8:80 + - type: physical + name: eth1 + mac_address: aa:d6:9f:2c:e8:80 + - type: physical + name: eth2 + mac_address: c0:bb:9f:2c:e8:80 + - type: physical + name: eth3 + mac_address: 66:bb:9f:2c:e8:80 + - type: physical + name: eth4 + mac_address: 98:bb:9f:2c:e8:80 + # specify how ifupdown should treat iface + # control is one of ['auto', 'hotplug', 'manual'] + # with manual meaning ifup/ifdown should not affect the iface + # useful for things like iscsi root + dhcp + - type: physical + name: eth5 + mac_address: 98:bb:9f:2c:e8:8a + subnets: + - type: dhcp + control: manual + # VLAN interface. + - type: vlan + name: eth0.101 + vlan_link: eth0 + vlan_id: 101 + mac_address: aa:bb:cc:dd:ee:11 + mtu: 1500 + subnets: + - type: static + # When 'mtu' matches device-level mtu, no warnings + mtu: 1500 + address: 192.168.0.2/24 + gateway: 192.168.0.1 + dns_nameservers: + - 192.168.0.10 + - 10.23.23.134 + dns_search: + - barley.maas + - sacchromyces.maas + - brettanomyces.maas + - type: static + address: 192.168.2.10/24 + # Bond. + - type: bond + name: bond0 + # if 'mac_address' is omitted, the MAC is taken from + # the first slave. + mac_address: aa:bb:cc:dd:ee:ff + bond_interfaces: + - eth1 + - eth2 + params: + bond-mode: active-backup + bond_miimon: 100 + bond-xmit-hash-policy: "layer3+4" + subnets: + - type: dhcp6 + # A Bond VLAN. + - type: vlan + name: bond0.200 + vlan_link: bond0 + vlan_id: 200 + subnets: + - type: dhcp4 + # An infiniband + - type: infiniband + name: ib0 + mac_address: >- + a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + subnets: + - type: static + address: 192.168.200.7/24 + mtu: 9000 + # A bridge. + - type: bridge + name: br0 + bridge_interfaces: + - eth3 + - eth4 + ipv4_conf: + rp_filter: 1 + proxy_arp: 0 + forwarding: 1 + ipv6_conf: + autoconf: 1 + disable_ipv6: 1 + use_tempaddr: 1 + forwarding: 1 + # basically anything in /proc/sys/net/ipv6/conf/.../ + mac_address: bb:bb:bb:bb:bb:aa + params: + bridge_ageing: 250 + bridge_bridgeprio: 22 + bridge_fd: 1 + bridge_gcint: 2 + bridge_hello: 1 + bridge_maxage: 10 + bridge_maxwait: 0 + bridge_pathcost: + - eth3 50 + - eth4 75 + bridge_portprio: + - eth3 28 + - eth4 14 + bridge_stp: 'off' + bridge_waitport: + - 1 eth3 + - 2 eth4 + subnets: + - type: static + address: 192.168.14.2/24 + - type: static + address: 2001:1::1/64 # default to /64 + routes: + - gateway: 2001:4800:78ff:1b::1 + netmask: '::' + network: '::' + # A global nameserver. + - type: nameserver + address: 8.8.8.8 + search: barley.maas + # global nameservers and search in list form + - type: nameserver + address: + - 4.4.4.4 + - 8.8.4.4 + search: + - wark.maas + - foobar.maas + # A global route. 
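+                # Not bound to any interface; the ENI renderer emits
+                # this as post-up/pre-down route commands (see
+                # expected_eni above).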
+ - type: route + destination: 10.0.0.0/8 + gateway: 11.0.0.1 + metric: 3 + """ + ).lstrip(), + }, + "large_v2": { + "expected_eni": """\ +auto lo +iface lo inet loopback + dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4 + dns-search barley.maas wark.maas foobar.maas + +iface eth0 inet manual + +auto eth1 +iface eth1 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto eth2 +iface eth2 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +iface eth3 inet manual + +iface eth4 inet manual + +# control-manual eth5 +iface eth5 inet dhcp + +auto ib0 +iface ib0 inet static + address 192.168.200.7/24 + mtu 9000 + hwaddress a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + +auto bond0 +iface bond0 inet6 dhcp + bond-mode active-backup + bond-slaves none + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + hwaddress aa:bb:cc:dd:ee:ff + +auto br0 +iface br0 inet static + address 192.168.14.2/24 + bridge_ageing 250 + bridge_bridgeprio 22 + bridge_fd 1 + bridge_gcint 2 + bridge_hello 1 + bridge_maxage 10 + bridge_pathcost eth3 50 + bridge_pathcost eth4 75 + bridge_portprio eth3 28 + bridge_portprio eth4 14 + bridge_ports eth3 eth4 + bridge_stp off + bridge_waitport 1 eth3 + bridge_waitport 2 eth4 + hwaddress bb:bb:bb:bb:bb:aa + +# control-alias br0 +iface br0 inet6 static + address 2001:1::1/64 + post-up route add -A inet6 default gw 2001:4800:78ff:1b::1 || true + pre-down route del -A inet6 default gw 2001:4800:78ff:1b::1 || true + +auto bond0.200 +iface bond0.200 inet dhcp + vlan-raw-device bond0 + vlan_id 200 + +auto eth0.101 +iface eth0.101 inet static + address 192.168.0.2/24 + dns-nameservers 192.168.0.10 10.23.23.134 + dns-search barley.maas sacchromyces.maas brettanomyces.maas + gateway 192.168.0.1 + mtu 1500 + hwaddress aa:bb:cc:dd:ee:11 + vlan-raw-device eth0 + vlan_id 101 + +# control-alias eth0.101 +iface eth0.101 inet static + address 192.168.2.10/24 + +post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +""", + "expected_sysconfig_opensuse": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_MODULE_OPTS="mode=active-backup """ + """xmit_hash_policy=layer3+4 """ + """miimon=100" + BONDING_SLAVE_0=eth1 + BONDING_SLAVE_1=eth2 + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + LLADDR=aa:bb:cc:dd:ee:ff + STARTMODE=auto""" + ), + "ifcfg-bond0.200": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + ETHERDEVICE=bond0 + STARTMODE=auto + VLAN_ID=200""" + ), + "ifcfg-br0": textwrap.dedent( + """\ + BRIDGE_AGEINGTIME=250 + BOOTPROTO=static + IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 + LLADDRESS=bb:bb:bb:bb:bb:aa + NETMASK=255.255.255.0 + BRIDGE_PRIORITY=22 + BRIDGE_PORTS='eth3 eth4' + STARTMODE=auto + BRIDGE_STP=off""" + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=c0:d6:9f:2c:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth0.101": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.0.2 + IPADDR1=192.168.2.10 + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ETHERDEVICE=eth0 + STARTMODE=auto + VLAN_ID=101""" + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:d6:9f:2c:e8:80 + STARTMODE=hotplug""" + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=c0:bb:9f:2c:e8:80 + STARTMODE=hotplug""" + ), + "ifcfg-eth3": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=66:bb:9f:2c:e8:80 + STARTMODE=auto""" 
+ ), + "ifcfg-eth4": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=98:bb:9f:2c:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth5": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + LLADDR=98:bb:9f:2c:e8:8a + STARTMODE=manual""" + ), + "ifcfg-ib0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + IPADDR=192.168.200.7 + MTU=9000 + NETMASK=255.255.255.0 + STARTMODE=auto + TYPE=InfiniBand""" + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup """ + """xmit_hash_policy=layer3+4 """ + """miimon=100" + BONDING_SLAVE0=eth1 + BONDING_SLAVE1=eth2 + BOOTPROTO=none + DEVICE=bond0 + DHCPV6C=yes + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff + ONBOOT=yes + TYPE=Bond + USERCTL=no""" + ), + "ifcfg-bond0.200": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=bond0.200 + DHCLIENT_SET_DEFAULT_ROUTE=no + ONBOOT=yes + PHYSDEV=bond0 + USERCTL=no + VLAN=yes""" + ), + "ifcfg-br0": textwrap.dedent( + """\ + AGEING=250 + BOOTPROTO=none + DEFROUTE=yes + DEVICE=br0 + IPADDR=192.168.14.2 + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + IPV6_DEFAULTGW=2001:4800:78ff:1b::1 + MACADDR=bb:bb:bb:bb:bb:aa + NETMASK=255.255.255.0 + ONBOOT=yes + PRIO=22 + STP=no + TYPE=Bridge + USERCTL=no""" + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth0 + HWADDR=c0:d6:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth0.101": textwrap.dedent( + """\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0.101 + DNS1=192.168.0.10 + DNS2=10.23.23.134 + DOMAIN="barley.maas sacchromyces.maas brettanomyces.maas" + GATEWAY=192.168.0.1 + IPADDR=192.168.0.2 + IPADDR1=192.168.2.10 + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + PHYSDEV=eth0 + USERCTL=no + VLAN=yes""" + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth1 + HWADDR=aa:d6:9f:2c:e8:80 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth2 + HWADDR=c0:bb:9f:2c:e8:80 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth3": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth3 + HWADDR=66:bb:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth4": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth4 + HWADDR=98:bb:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth5": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=eth5 + DHCLIENT_SET_DEFAULT_ROUTE=no + HWADDR=98:bb:9f:2c:e8:8a + ONBOOT=no + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-ib0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=ib0 + HWADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + IPADDR=192.168.200.7 + MTU=9000 + NETMASK=255.255.255.0 + ONBOOT=yes + TYPE=InfiniBand + USERCTL=no""" + ), + }, + "expected_network_manager": { + "cloud-init-eth3.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth3 + uuid=b7e95dda-7746-5bf8-bf33-6e5f3c926790 + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=66:BB:9F:2C:E8:80 + + """ + ), + "cloud-init-eth5.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. 
Changes will be lost. + + [connection] + id=cloud-init eth5 + uuid=5fda13c7-9942-5e90-a41b-1d043bd725dc + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=98:BB:9F:2C:E8:8A + + [ipv4] + method=auto + may-fail=false + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-ib0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init ib0 + uuid=11a1dda7-78b4-5529-beba-d9b5f549ad7b + autoconnect-priority=120 + type=infiniband + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [infiniband] + transport-mode=datagram + mtu=9000 + mac-address=A0:00:02:20:FE:80:00:00:00:00:00:00:EC:0D:9A:03:00:15:E2:C1 + + [ipv4] + method=manual + may-fail=false + address1=192.168.200.7/24 + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-bond0.200.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0.200 + uuid=88984a9c-ff22-5233-9267-86315e0acaa7 + autoconnect-priority=120 + type=vlan + interface-name=bond0.200 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=200 + parent=54317911-f840-516b-a10d-82cb4c1f075c + + [ipv4] + method=auto + may-fail=false + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:D6:9F:2C:E8:80 + + """ + ), + "cloud-init-eth4.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth4 + uuid=e27e4959-fb50-5580-b9a4-2073554627b9 + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=98:BB:9F:2C:E8:80 + + """ + ), + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:D6:9F:2C:E8:80 + + """ + ), + "cloud-init-br0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init br0 + uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + autoconnect-priority=120 + type=bridge + interface-name=br0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bridge] + stp=false + priority=22 + mac-address=BB:BB:BB:BB:BB:AA + + [ipv4] + method=manual + may-fail=false + address1=192.168.14.2/24 + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + [ipv6] + route1=::/0,2001:4800:78ff:1b::1 + method=manual + may-fail=false + address1=2001:1::1/64 + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth0.101.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth0.101 + uuid=b5acec5e-db80-5935-8b02-0d5619fc42bf + autoconnect-priority=120 + type=vlan + interface-name=eth0.101 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=101 + parent=1dd9a779-d327-56e1-8454-c65e2556c12c + + [ipv4] + method=manual + may-fail=false + address1=192.168.0.2/24 + route1=0.0.0.0/0,192.168.0.1 + address2=192.168.2.10/24 + dns=192.168.0.10;10.23.23.134; + dns-search=barley.maas;sacchromyces.maas;brettanomyces.maas; + + """ + ), + "cloud-init-bond0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0 + uuid=54317911-f840-516b-a10d-82cb4c1f075c + autoconnect-priority=120 + type=bond + interface-name=bond0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bond] + mode=active-backup + miimon=100 + xmit_hash_policy=layer3+4 + + [ipv6] + method=auto + may-fail=false + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth2.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth2 + uuid=5559a242-3421-5fdd-896e-9cb8313d5804 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:BB:9F:2C:E8:80 + + """ + ), + }, + "yaml": textwrap.dedent( + """ + version: 2 + ethernets: + eth0: + match: + macaddress: c0:d6:9f:2c:e8:80 + set-name: eth0 + eth1: + match: + macaddress: aa:d6:9f:2c:e8:80 + set-name: eth1 + eth2: + match: + macaddress: c0:bb:9f:2c:e8:80 + set-name: eth2 + eth3: + match: + macaddress: 66:bb:9f:2c:e8:80 + set-name: eth3 + eth4: + match: + macaddress: 98:bb:9f:2c:e8:80 + set-name: eth4 + eth5: + dhcp4: true + match: + macaddress: 98:bb:9f:2c:e8:8a + set-name: eth5 + bonds: + bond0: + dhcp6: true + interfaces: + - eth1 + - eth2 + macaddress: aa:bb:cc:dd:ee:ff + parameters: + mii-monitor-interval: 100 + mode: active-backup + transmit-hash-policy: layer3+4 + bridges: + br0: + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + interfaces: + - eth3 + - eth4 + macaddress: bb:bb:bb:bb:bb:aa + nameservers: + addresses: + - 8.8.8.8 + - 4.4.4.4 + - 8.8.4.4 + search: + - barley.maas + - wark.maas + - foobar.maas + parameters: + ageing-time: 250 + forward-delay: 1 + hello-time: 1 + max-age: 10 + path-cost: + eth3: 50 + eth4: 75 + port-priority: + eth3: 28 + eth4: 14 + priority: 22 + stp: false + routes: + - to: ::/0 + via: 2001:4800:78ff:1b::1 + vlans: + bond0.200: + dhcp4: true + id: 200 + link: bond0 + eth0.101: + addresses: + - 192.168.0.2/24 + - 192.168.2.10/24 + id: 101 + link: eth0 + macaddress: aa:bb:cc:dd:ee:11 + mtu: 1500 + nameservers: + addresses: + - 192.168.0.10 + - 10.23.23.134 + search: + - barley.maas + - sacchromyces.maas + - brettanomyces.maas + routes: + - to: 0.0.0.0/0 + via: 192.168.0.1 + """ + ), + }, + "bond_v1": { + "yaml": textwrap.dedent( + """ + version: 1 + config: + - type: physical + name: bond0s0 + mac_address: aa:bb:cc:dd:e8:00 + - type: physical + name: bond0s1 + mac_address: aa:bb:cc:dd:e8:01 + - type: bond + name: bond0 + mac_address: aa:bb:cc:dd:e8:ff + mtu: 9000 + bond_interfaces: + - bond0s0 + - bond0s1 + params: + bond-mode: active-backup + bond_miimon: 100 + bond-xmit-hash-policy: "layer3+4" + bond-num-grat-arp: 5 + bond-downdelay: 10 + bond-updelay: 20 + bond-fail-over-mac: active + bond-primary: bond0s0 + bond-primary-reselect: always + subnets: + - type: 
static + address: 192.168.0.2/24 + gateway: 192.168.0.1 + routes: + - gateway: 192.168.0.3 + netmask: 255.255.255.0 + network: 10.1.3.0 + - type: static + address: 192.168.1.2/24 + - type: static + address: 2001:1::1/92 + routes: + - gateway: 2001:67c:1562::1 + network: "2001:67c::" + netmask: "ffff:ffff::" + - gateway: 3001:67c:15::1 + network: "3001:67c::" + netmask: "ffff:ffff::" + metric: 10000 + """ + ), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + bond0s0: + match: + macaddress: aa:bb:cc:dd:e8:00 + set-name: bond0s0 + bond0s1: + match: + macaddress: aa:bb:cc:dd:e8:01 + set-name: bond0s1 + bonds: + bond0: + addresses: + - 192.168.0.2/24 + - 192.168.1.2/24 + - 2001:1::1/92 + interfaces: + - bond0s0 + - bond0s1 + macaddress: aa:bb:cc:dd:e8:ff + mtu: 9000 + parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitous-arp: 5 + mii-monitor-interval: 100 + mode: active-backup + primary: bond0s0 + primary-reselect-policy: always + transmit-hash-policy: layer3+4 + up-delay: 20 + routes: + - to: default + via: 192.168.0.1 + - to: 10.1.3.0/24 + via: 192.168.0.3 + - to: 2001:67c::/32 + via: 2001:67c:1562::1 + - metric: 10000 + to: 3001:67c::/32 + via: 3001:67c:15::1 + """ + ), + "expected_eni": textwrap.dedent( + """\ +auto lo +iface lo inet loopback + +auto bond0s0 +iface bond0s0 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0s1 +iface bond0s1 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0 +iface bond0 inet static + address 192.168.0.2/24 + gateway 192.168.0.1 + bond-downdelay 10 + bond-fail-over-mac active + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-slaves none + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + hwaddress aa:bb:cc:dd:e8:ff + mtu 9000 + post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true + pre-down route del -net 10.1.3.0/24 gw 192.168.0.3 || true + +# control-alias bond0 +iface bond0 inet static + address 192.168.1.2/24 + +# control-alias bond0 +iface bond0 inet6 static + address 2001:1::1/92 + post-up route add -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true + pre-down route del -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true + post-up route add -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ +|| true + pre-down route del -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ +|| true + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" + BONDING_SLAVE_0=bond0s0 + BONDING_SLAVE_1=bond0s1 + BOOTPROTO=static + LLADDR=aa:bb:cc:dd:e8:ff + IPADDR=192.168.0.2 + IPADDR1=192.168.1.2 + IPADDR6=2001:1::1/92 + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + STARTMODE=auto + """ + ), + "ifcfg-bond0s0": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:bb:cc:dd:e8:00 + STARTMODE=hotplug + """ + ), + "ifcfg-bond0s1": 
textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:bb:cc:dd:e8:01 + STARTMODE=hotplug + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" + BONDING_SLAVE0=bond0s0 + BONDING_SLAVE1=bond0s1 + BOOTPROTO=none + DEFROUTE=yes + DEVICE=bond0 + GATEWAY=192.168.0.1 + MACADDR=aa:bb:cc:dd:e8:ff + IPADDR=192.168.0.2 + IPADDR1=192.168.1.2 + IPV6ADDR=2001:1::1/92 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + TYPE=Bond + USERCTL=no + """ + ), + "ifcfg-bond0s0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=bond0s0 + HWADDR=aa:bb:cc:dd:e8:00 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """ + ), + "route6-bond0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + 2001:67c::/32 via 2001:67c:1562::1 dev bond0 + 3001:67c::/32 via 3001:67c:15::1 metric 10000 dev bond0 + """ + ), + "route-bond0": textwrap.dedent( + """\ + ADDRESS0=10.1.3.0 + GATEWAY0=192.168.0.3 + NETMASK0=255.255.255.0 + """ + ), + "ifcfg-bond0s1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=bond0s1 + HWADDR=aa:bb:cc:dd:e8:01 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-bond0s0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0s0 + uuid=09d0b5b9-67e7-5577-a1af-74d1cf17a71e + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:00 + + """ + ), + "cloud-init-bond0s1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0s1 + uuid=4d9aca96-b515-5630-ad83-d13daac7f9d0 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:01 + + """ + ), + "cloud-init-bond0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
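+ # The mixed-style v1 params (bond-mode, bond_miimon, bond-xmit-hash-policy, ...) surface here under the kernel bonding option names.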
+ + [connection] + id=cloud-init bond0 + uuid=54317911-f840-516b-a10d-82cb4c1f075c + autoconnect-priority=120 + type=bond + interface-name=bond0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bond] + mode=active-backup + miimon=100 + xmit_hash_policy=layer3+4 + num_grat_arp=5 + downdelay=10 + updelay=20 + fail_over_mac=active + primary_reselect=always + primary=bond0s0 + + [ipv4] + method=manual + may-fail=false + address1=192.168.0.2/24 + gateway=192.168.0.1 + route1=10.1.3.0/24,192.168.0.3 + address2=192.168.1.2/24 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::1/92 + route1=2001:67c::/32,2001:67c:1562::1 + route2=3001:67c::/32,3001:67c:15::1 + + """ + ), + }, + }, + "bond_v2": { + "yaml": textwrap.dedent( + """ + version: 2 + ethernets: + bond0s0: + match: + driver: "virtio_net" + macaddress: aa:bb:cc:dd:e8:00 + set-name: bond0s0 + bond0s1: + set-name: bond0s1 + match: + driver: "e1000" + macaddress: aa:bb:cc:dd:e8:01 + bonds: + bond0: + addresses: + - 192.168.0.2/24 + - 192.168.1.2/24 + - 2001:1::1/92 + interfaces: + - bond0s0 + - bond0s1 + macaddress: aa:bb:cc:dd:e8:ff + mtu: 9000 + parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitous-arp: 5 + mii-monitor-interval: 100 + mode: active-backup + primary: bond0s0 + primary-reselect-policy: always + transmit-hash-policy: layer3+4 + up-delay: 20 + routes: + - to: 0.0.0.0/0 + via: 192.168.0.1 + - to: 10.1.3.0/24 + via: 192.168.0.3 + - to: 2001:67c::/32 + via: 2001:67c:1562::1 + - metric: 10000 + to: 3001:67c::/32 + via: 3001:67c:15::1 + """ + ), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + bond0s0: + match: + driver: virtio_net + macaddress: aa:bb:cc:dd:e8:00 + set-name: bond0s0 + bond0s1: + match: + driver: e1000 + macaddress: aa:bb:cc:dd:e8:01 + set-name: bond0s1 + bonds: + bond0: + addresses: + - 192.168.0.2/24 + - 192.168.1.2/24 + - 2001:1::1/92 + interfaces: + - bond0s0 + - bond0s1 + macaddress: aa:bb:cc:dd:e8:ff + mtu: 9000 + parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitous-arp: 5 + mii-monitor-interval: 100 + mode: active-backup + primary: bond0s0 + primary-reselect-policy: always + transmit-hash-policy: layer3+4 + up-delay: 20 + routes: + - to: 0.0.0.0/0 + via: 192.168.0.1 + - to: 10.1.3.0/24 + via: 192.168.0.3 + - to: 2001:67c::/32 + via: 2001:67c:1562::1 + - metric: 10000 + to: 3001:67c::/32 + via: 3001:67c:15::1 + """ + ), + "expected_eni": textwrap.dedent( + """\ +auto lo +iface lo inet loopback + +auto bond0s0 +iface bond0s0 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond_miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + +auto bond0s1 +iface bond0s1 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond_miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + +auto bond0 +iface bond0 inet static + address 192.168.0.2/24 + gateway 192.168.0.1 + bond-downdelay 10 + bond-fail-over-mac active + bond_miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-slaves none + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + hwaddress aa:bb:cc:dd:e8:ff + mtu 9000 + post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true + pre-down route del -net 10.1.3.0/24 gw 
192.168.0.3 || true + +# control-alias bond0 +iface bond0 inet static + address 192.168.1.2/24 + +# control-alias bond0 +iface bond0 inet6 static + address 2001:1::1/92 + post-up route add -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true + pre-down route del -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true + post-up route add -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ +|| true + pre-down route del -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ +|| true + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" + BONDING_SLAVE_0=bond0s0 + BONDING_SLAVE_1=bond0s1 + BOOTPROTO=static + LLADDR=aa:bb:cc:dd:e8:ff + IPADDR=192.168.0.2 + IPADDR1=192.168.1.2 + IPADDR6=2001:1::1/92 + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + STARTMODE=auto + """ + ), + "ifcfg-bond0s0": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:bb:cc:dd:e8:00 + STARTMODE=hotplug + """ + ), + "ifcfg-bond0s1": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:bb:cc:dd:e8:01 + STARTMODE=hotplug + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" + BONDING_SLAVE0=bond0s0 + BONDING_SLAVE1=bond0s1 + BOOTPROTO=none + DEFROUTE=yes + DEVICE=bond0 + GATEWAY=192.168.0.1 + MACADDR=aa:bb:cc:dd:e8:ff + IPADDR=192.168.0.2 + IPADDR1=192.168.1.2 + IPV6ADDR=2001:1::1/92 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + TYPE=Bond + USERCTL=no + """ + ), + "ifcfg-bond0s0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=bond0s0 + HWADDR=aa:bb:cc:dd:e8:00 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """ + ), + "route6-bond0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + 2001:67c::/32 via 2001:67c:1562::1 dev bond0 + 3001:67c::/32 via 3001:67c:15::1 metric 10000 dev bond0 + """ + ), + "route-bond0": textwrap.dedent( + """\ + ADDRESS0=10.1.3.0 + GATEWAY0=192.168.0.3 + NETMASK0=255.255.255.0 + """ + ), + "ifcfg-bond0s1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=bond0s1 + HWADDR=aa:bb:cc:dd:e8:01 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-bond0s0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0s0 + uuid=09d0b5b9-67e7-5577-a1af-74d1cf17a71e + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:00 + + """ + ), + "cloud-init-bond0s1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
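+ # Bond port: slave-type=bond plus master=<UUID of the bond0 connection> attach this NIC to the bond.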
+ + [connection] + id=cloud-init bond0s1 + uuid=4d9aca96-b515-5630-ad83-d13daac7f9d0 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:01 + + """ + ), + "cloud-init-bond0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0 + uuid=54317911-f840-516b-a10d-82cb4c1f075c + autoconnect-priority=120 + type=bond + interface-name=bond0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bond] + mode=active-backup + miimon=100 + xmit_hash_policy=layer3+4 + num_grat_arp=5 + downdelay=10 + updelay=20 + fail_over_mac=active + primary_reselect=always + primary=bond0s0 + + [ipv4] + method=manual + may-fail=false + address1=192.168.0.2/24 + route1=0.0.0.0/0,192.168.0.1 + route2=10.1.3.0/24,192.168.0.3 + address2=192.168.1.2/24 + + [ipv6] + route1=2001:67c::/32,2001:67c:1562::1 + route2=3001:67c::/32,3001:67c:15::1 + method=manual + may-fail=false + address1=2001:1::1/92 + + """ + ), + }, + }, + "vlan_v1": { + "yaml": textwrap.dedent( + """ + version: 1 + config: + - type: physical + name: en0 + mac_address: aa:bb:cc:dd:e8:00 + - type: vlan + mtu: 2222 + name: en0.99 + vlan_link: en0 + vlan_id: 99 + subnets: + - type: static + address: '192.168.2.2/24' + - type: static + address: '192.168.1.2/24' + gateway: 192.168.1.1 + - type: static + address: 2001:1::bbbb/96 + routes: + - gateway: 2001:1::1 + netmask: '::' + network: '::' + """ + ), + "expected_sysconfig_opensuse": { + # TODO RJS: unknown proper BOOTPROTO setting ask Marius + "ifcfg-en0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=aa:bb:cc:dd:e8:00 + STARTMODE=auto""" + ), + "ifcfg-en0.99": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.2.2 + IPADDR1=192.168.1.2 + IPADDR6=2001:1::bbbb/96 + MTU=2222 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + STARTMODE=auto + ETHERDEVICE=en0 + VLAN_ID=99 + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-en0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=en0 + HWADDR=aa:bb:cc:dd:e8:00 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-en0.99": textwrap.dedent( + """\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=en0.99 + GATEWAY=192.168.1.1 + IPADDR=192.168.2.2 + IPADDR1=192.168.1.2 + IPV6ADDR=2001:1::bbbb/96 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + IPV6_DEFAULTGW=2001:1::1 + MTU=2222 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + PHYSDEV=en0 + USERCTL=no + VLAN=yes""" + ), + }, + "expected_network_manager": { + "cloud-init-en0.99.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init en0.99 + uuid=f594e2ed-f107-51df-b225-1dc530a5356b + autoconnect-priority=120 + type=vlan + interface-name=en0.99 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=99 + parent=e0ca478b-8d84-52ab-8fae-628482c629b5 + + [ipv4] + method=manual + may-fail=false + address1=192.168.2.2/24 + address2=192.168.1.2/24 + gateway=192.168.1.1 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::bbbb/96 + route1=::/0,2001:1::1 + + """ + ), + "cloud-init-en0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
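+ # Parent NIC for en0.99, matched by MAC address via the [ethernet] section.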
+ + [connection] + id=cloud-init en0 + uuid=e0ca478b-8d84-52ab-8fae-628482c629b5 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:00 + + """ + ), + }, + }, + "vlan_v2": { + "yaml": textwrap.dedent( + """ + version: 2 + ethernets: + en0: + match: + macaddress: aa:bb:cc:dd:e8:00 + set-name: en0 + vlans: + en0.99: + addresses: + - 192.168.2.2/24 + - 192.168.1.2/24 + - 2001:1::bbbb/96 + id: 99 + link: en0 + mtu: 2222 + routes: + - to: 0.0.0.0/0 + via: 192.168.1.1 + - to: ::/0 + via: 2001:1::1 + + """ + ), + "expected_sysconfig_opensuse": { + # TODO RJS: unknown proper BOOTPROTO setting ask Marius + "ifcfg-en0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=aa:bb:cc:dd:e8:00 + STARTMODE=auto""" + ), + "ifcfg-en0.99": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.2.2 + IPADDR1=192.168.1.2 + IPADDR6=2001:1::bbbb/96 + MTU=2222 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + STARTMODE=auto + ETHERDEVICE=en0 + VLAN_ID=99 + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-en0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=en0 + HWADDR=aa:bb:cc:dd:e8:00 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-en0.99": textwrap.dedent( + """\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=en0.99 + GATEWAY=192.168.1.1 + IPADDR=192.168.2.2 + IPADDR1=192.168.1.2 + IPV6ADDR=2001:1::bbbb/96 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + IPV6_DEFAULTGW=2001:1::1 + MTU=2222 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + PHYSDEV=en0 + USERCTL=no + VLAN=yes""" + ), + }, + "expected_network_manager": { + "cloud-init-en0.99.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init en0.99 + uuid=f594e2ed-f107-51df-b225-1dc530a5356b + autoconnect-priority=120 + type=vlan + interface-name=en0.99 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=99 + parent=e0ca478b-8d84-52ab-8fae-628482c629b5 + + [ipv4] + method=manual + may-fail=false + address1=192.168.2.2/24 + route1=0.0.0.0/0,192.168.1.1 + address2=192.168.1.2/24 + + [ipv6] + route1=::/0,2001:1::1 + method=manual + may-fail=false + address1=2001:1::bbbb/96 + + """ + ), + "cloud-init-en0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init en0 + uuid=e0ca478b-8d84-52ab-8fae-628482c629b5 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:00 + + """ + ), + }, + }, + "bridge": { + "yaml_v1": textwrap.dedent( + """ + version: 1 + config: + - type: physical + name: eth0 + mac_address: '52:54:00:12:34:00' + subnets: + - type: static + address: 2001:1::100/96 + - type: physical + name: eth1 + mac_address: '52:54:00:12:34:01' + subnets: + - type: static + address: 2001:1::101/96 + - type: bridge + name: br0 + bridge_interfaces: + - eth0 + - eth1 + params: + bridge_stp: 0 + bridge_bridgeprio: 22 + subnets: + - type: static + address: 192.168.2.2/24""" + ), + "yaml_v2": textwrap.dedent( + """ + version: 2 + ethernets: + eth0: + addresses: + - 2001:1::100/96 + match: + macaddress: '52:54:00:12:34:00' + set-name: eth0 + eth1: + addresses: + - 2001:1::101/96 + match: + macaddress: '52:54:00:12:34:01' + set-name: eth1 + bridges: + br0: + addresses: + - 192.168.2.2/24 + interfaces: + - eth0 + - eth1 + parameters: + priority: 22 + stp: false + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-br0": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.2.2 + NETMASK=255.255.255.0 + STARTMODE=auto + BRIDGE_STP=off + BRIDGE_PRIORITY=22 + BRIDGE_PORTS='eth0 eth1' + """ + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=52:54:00:12:34:00 + IPADDR6=2001:1::100/96 + STARTMODE=auto + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=52:54:00:12:34:01 + IPADDR6=2001:1::101/96 + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-br0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=br0 + IPADDR=192.168.2.2 + NETMASK=255.255.255.0 + ONBOOT=yes + PRIO=22 + STP=no + TYPE=Bridge + USERCTL=no + """ + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth0 + HWADDR=52:54:00:12:34:00 + IPV6ADDR=2001:1::100/96 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth1 + HWADDR=52:54:00:12:34:01 + IPV6ADDR=2001:1::101/96 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-br0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init br0 + uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + autoconnect-priority=120 + type=bridge + interface-name=br0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bridge] + stp=false + priority=22 + + [ipv4] + method=manual + may-fail=false + address1=192.168.2.2/24 + + """ + ), + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=52:54:00:12:34:00 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::100/96 + + """ + ), + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
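+ # Bridge port: L2-attached to br0 via master/slave-type while keeping its own static IPv6 address.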
+ + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=52:54:00:12:34:01 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::101/96 + + """ + ), + }, + }, + "manual": { + "yaml": textwrap.dedent( + """ + version: 1 + config: + - type: physical + name: eth0 + mac_address: '52:54:00:12:34:00' + subnets: + - type: static + address: 192.168.1.2/24 + control: manual + - type: physical + name: eth1 + mtu: 1480 + mac_address: 52:54:00:12:34:aa + subnets: + - type: manual + - type: physical + name: eth2 + mac_address: 52:54:00:12:34:ff + subnets: + - type: manual + control: manual + """ + ), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + # control-manual eth0 + iface eth0 inet static + address 192.168.1.2/24 + + auto eth1 + iface eth1 inet manual + mtu 1480 + + # control-manual eth2 + iface eth2 inet manual + """ + ), + "expected_netplan": textwrap.dedent( + """\ + + network: + version: 2 + ethernets: + eth0: + addresses: + - 192.168.1.2/24 + match: + macaddress: '52:54:00:12:34:00' + set-name: eth0 + eth1: + match: + macaddress: 52:54:00:12:34:aa + mtu: 1480 + set-name: eth1 + eth2: + match: + macaddress: 52:54:00:12:34:ff + set-name: eth2 + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=52:54:00:12:34:00 + IPADDR=192.168.1.2 + NETMASK=255.255.255.0 + STARTMODE=manual + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=52:54:00:12:34:aa + MTU=1480 + STARTMODE=auto + """ + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=52:54:00:12:34:ff + STARTMODE=manual + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth0 + HWADDR=52:54:00:12:34:00 + IPADDR=192.168.1.2 + NETMASK=255.255.255.0 + ONBOOT=no + TYPE=Ethernet + USERCTL=no + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth1 + HWADDR=52:54:00:12:34:aa + MTU=1480 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth2 + HWADDR=52:54:00:12:34:ff + ONBOOT=no + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=52:54:00:12:34:00 + + [ipv4] + method=manual + may-fail=false + address1=192.168.1.2/24 + + """ + ), + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=1480 + mac-address=52:54:00:12:34:AA + + [ipv4] + method=auto + may-fail=true + + """ + ), + "cloud-init-eth2.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
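+ # A v1 'manual' subnet renders as best-effort DHCP: method=auto with may-fail=true.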
+ + [connection] + id=cloud-init eth2 + uuid=5559a242-3421-5fdd-896e-9cb8313d5804 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=52:54:00:12:34:FF + + [ipv4] + method=auto + may-fail=true + + """ + ), + }, + }, + "v1-dns": { + "expected_networkd": textwrap.dedent( + """\ + [Address] + Address=192.168.1.20/16 + + [Match] + MACAddress=11:22:33:44:55:66 + Name=interface0 + + [Network] + DHCP=no + DNS=1.1.1.1 3.3.3.3 + Domains=aaaa cccc + + [Route] + Gateway=192.168.1.1 + """ + ), + "expected_eni": textwrap.dedent( + """\ + # This file is generated from information provided by the datasource. Changes + # to it will not persist across an instance reboot. To disable cloud-init's + # network configuration capabilities, write a file + # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: + # network: {config: disabled} + auto lo + iface lo inet loopback + dns-nameservers 2.2.2.2 + dns-search bbbb + + iface lo inet6 loopback + dns-nameservers FEDC::1 + dns-search bbbb + + auto interface0 + iface interface0 inet static + address 192.168.1.20/16 + dns-nameservers 1.1.1.1 3.3.3.3 + dns-search aaaa cccc + gateway 192.168.1.1 + """ # noqa: E501 + ), + "expected_netplan": textwrap.dedent( + """\ + # This file is generated from information provided by the datasource. Changes + # to it will not persist across an instance reboot. To disable cloud-init's + # network configuration capabilities, write a file + # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: + # network: {config: disabled} + network: + version: 2 + ethernets: + interface0: + addresses: + - 192.168.1.20/16 + match: + macaddress: 11:22:33:44:55:66 + nameservers: + addresses: + - 1.1.1.1 + - 3.3.3.3 + search: + - aaaa + - cccc + routes: + - to: default + via: 192.168.1.1 + set-name: interface0 + """ # noqa: E501 + ), + "expected_sysconfig_opensuse": { + "ifcfg-interface0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + BOOTPROTO=static + IPADDR=192.168.1.20 + LLADDR=11:22:33:44:55:66 + NETMASK=255.255.0.0 + STARTMODE=auto + """ + ) + }, + "expected_sysconfig_rhel": { + "ifcfg-eth0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + BOOTPROTO=none + DEFROUTE=yes + DEVICE=interface0 + DNS1=1.1.1.1 + DNS2=3.3.3.3 + DOMAIN=aaaa cccc + GATEWAY=192.168.1.1 + HWADDR=11:22:33:44:55:66 + IPADDR=192.168.1.20 + NETMASK=255.255.0.0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-interface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
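+ # Subnet-scoped DNS (3.3.3.3, cccc) sorts ahead of the interface-scoped nameserver (1.1.1.1, aaaa); the global 2.2.2.2/bbbb entry is not bound to this connection.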
+ + [connection] + id=cloud-init interface0 + uuid=8b6862ed-dbd6-5830-93f7-a91451c13828 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=11:22:33:44:55:66 + + [ipv4] + method=manual + may-fail=false + address1=192.168.1.20/16 + gateway=192.168.1.1 + dns=3.3.3.3;1.1.1.1; + dns-search=cccc;aaaa; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 1 + config: + - type: physical + name: interface0 + mac_address: "11:22:33:44:55:66" + subnets: + - type: static + address: 192.168.1.20/16 + gateway: 192.168.1.1 + dns_nameservers: + - 3.3.3.3 + dns_search: + - cccc + - type: nameserver + interface: interface0 + address: + - 1.1.1.1 + search: + - aaaa + - type: nameserver + address: + - 2.2.2.2 + - FEDC::1 + search: + - bbbb + """ + ), + }, + "v2-dev-name-via-mac-lookup": { + "expected_sysconfig_rhel": { + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth0 + HWADDR=cf:d6:af:48:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + nic0: + match: + macaddress: 'cf:d6:af:48:e8:80' + """ + ), + }, + "v2-mixed-routes": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=500 + + [ipv4] + method=auto + may-fail=true + route1=169.254.42.42/32,62.210.0.1 + route1_options=mtu=400 + route2=169.254.42.43/32,62.210.0.2 + route2_options=mtu=200 + address1=192.168.1.20/16 + dns=8.8.8.8; + dns-search=lab;home; + + [ipv6] + route1=::/0,fe80::dc00:ff:fe20:186 + route1_options=mtu=300 + route2=fe80::dc00:ff:fe20:188/64,fe80::dc00:ff:fe20:187 + route2_options=mtu=100 + method=auto + may-fail=true + address1=2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns=FEDC::1; + dns-search=lab;home; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + dhcp4: true + dhcp6: true + mtu: 500 + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + routes: + - to: 169.254.42.42/32 + via: 62.210.0.1 + mtu: 400 + - via: fe80::dc00:ff:fe20:186 + to: ::/0 + mtu: 300 + - to: 169.254.42.43/32 + via: 62.210.0.2 + mtu: 200 + - via: fe80::dc00:ff:fe20:187 + to: fe80::dc00:ff:fe20:188 + mtu: 100 + addresses: + - 192.168.1.20/16 + - 2001:bc8:1210:232:dc00:ff:fe20:185/64 + """ + ), + }, + "v2-dns": { + "expected_networkd": textwrap.dedent( + """\ + [Address] + Address=192.168.1.20/16 + + [Address] + Address=2001:bc8:1210:232:dc00:ff:fe20:185/64 + + [Match] + Name=eth0 + + [Network] + DHCP=no + DNS=8.8.8.8 FEDC::1 + Domains=lab home + """ + ), + "expected_eni": textwrap.dedent( + """\ + # This file is generated from information provided by the datasource. Changes + # to it will not persist across an instance reboot. 
To disable cloud-init's + # network configuration capabilities, write a file + # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: + # network: {config: disabled} + auto lo + iface lo inet loopback + + auto eth0 + iface eth0 inet static + address 192.168.1.20/16 + dns-nameservers 8.8.8.8 + dns-search lab home + + # control-alias eth0 + iface eth0 inet6 static + address 2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns-nameservers FEDC::1 + dns-search lab home + """ # noqa: E501 + ), + "expected_sysconfig_opensuse": { + "ifcfg-eth0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + BOOTPROTO=static + IPADDR=192.168.1.20 + IPADDR6=2001:bc8:1210:232:dc00:ff:fe20:185/64 + NETMASK=255.255.0.0 + STARTMODE=auto + """ + ) + }, + "expected_sysconfig_rhel": { + "ifcfg-eth0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + BOOTPROTO=none + DEVICE=eth0 + DNS1=8.8.8.8 + DNS2=FEDC::1 + DOMAIN="lab home" + IPADDR=192.168.1.20 + IPV6ADDR=2001:bc8:1210:232:dc00:ff:fe20:185/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + NETMASK=255.255.0.0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ) + }, + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=manual + may-fail=false + address1=192.168.1.20/16 + dns=8.8.8.8; + dns-search=lab;home; + + [ipv6] + method=manual + may-fail=false + address1=2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns=FEDC::1; + dns-search=lab;home; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + addresses: + - 192.168.1.20/16 + - 2001:bc8:1210:232:dc00:ff:fe20:185/64 + """ + ), + }, + "v2-dns-no-if-ips": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=auto + may-fail=true + dns=8.8.8.8; + dns-search=lab;home; + + [ipv6] + method=auto + may-fail=true + dns=FEDC::1; + dns-search=lab;home; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + dhcp4: true + dhcp6: true + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + """ + ), + }, + "v2-dns-no-dhcp": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + """ + ), + }, + "v2-route-no-gateway": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
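+ # A route with no 'via' renders as a bare destination: route1=0.0.0.0/0 with no gateway component.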
+ + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=auto + may-fail=false + route1=0.0.0.0/0 + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + dhcp4: true + routes: + - to: "0.0.0.0/0" + """ + ), + }, +} diff --git a/.pc/netplan99-cannot-use-default.patch/tests/unittests/test_net.py b/.pc/netplan99-cannot-use-default.patch/tests/unittests/test_net.py index cb9919388..dbae4f202 100644 --- a/.pc/netplan99-cannot-use-default.patch/tests/unittests/test_net.py +++ b/.pc/netplan99-cannot-use-default.patch/tests/unittests/test_net.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import base64 import copy @@ -11,11 +12,10 @@ from typing import Optional import pytest +import yaml from yaml.serializer import Serializer -from cloudinit import distros, net -from cloudinit import safeyaml as yaml -from cloudinit import subp, temp_utils, util +from cloudinit import distros, net, subp, temp_utils, util from cloudinit.net import ( cmdline, eni, @@ -31,13 +31,12 @@ ) from cloudinit.sources.helpers import openstack from tests.unittests.helpers import ( - CiTestCase, - FilesystemMockingTestCase, dir2dict, does_not_raise, mock, populate_dir, ) +from tests.unittests.net.network_configs import NETWORK_CONFIGS DHCP_CONTENT_1 = """ DEVICE='eth0' @@ -134,6 +133,37 @@ ], } +STATIC_CONTENT_2 = """ +DEVICE='eth1' +PROTO='static' +IPV4ADDR='10.0.0.2' +IPV4BROADCAST='10.0.0.255' +IPV4NETMASK='255.255.255.0' +IPV4GATEWAY='10.0.0.1' +IPV4DNS0='10.0.1.1' +IPV4DNS1='0.0.0.0' +HOSTNAME='foohost' +UPTIME='21' +DHCPLEASETIME='3600' +DOMAINSEARCH='foo.com' +""" + +STATIC_CONTENT_3 = """ +DEVICE='eth1' +PROTO='off' +IPV4ADDR='10.0.0.2' +IPV4BROADCAST='10.0.0.255' +IPV4NETMASK='255.255.255.0' +IPV4GATEWAY='10.0.0.1' +IPV4DNS0='10.0.1.1' +IPV4DNS1='0.0.0.0' +HOSTNAME='foohost' +UPTIME='21' +DHCPLEASETIME='3600' +DOMAINSEARCH='foo.com' +""" + + V1_NAMESERVER_ALIAS = """ config: - id: eno1 @@ -516,6 +546,8 @@ } ], "ip_address": "172.19.1.34", + "dns_search": ["testweb.com"], + "dns_nameservers": ["172.19.0.13"], "id": "network0", } ], @@ -550,7 +582,9 @@ """ ; Created by cloud-init automatically, do not edit. ; +nameserver 172.19.0.13 nameserver 172.19.0.12 +search testweb.com """.lstrip(), ), ( @@ -581,11 +615,12 @@ BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 +DNS1=172.19.0.13 +DOMAIN=testweb.com GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 NETMASK=255.255.252.0 -NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -596,7 +631,9 @@ """ ; Created by cloud-init automatically, do not edit. 
; +nameserver 172.19.0.13 nameserver 172.19.0.12 +search testweb.com """.lstrip(), ), ( @@ -646,7 +683,8 @@ may-fail=false address1=172.19.1.34/22 route1=0.0.0.0/0,172.19.3.254 -dns=172.19.0.12; +dns=172.19.0.13; +dns-search=testweb.com; """.lstrip(), ), @@ -654,13 +692,19 @@ }, { "in_data": { - "services": [{"type": "dns", "address": "172.19.0.12"}], + "services": [ + { + "type": "dns", + "address": "172.19.0.12", + "search": ["example1.com", "example2.com"], + } + ], "networks": [ { - "network_id": "public-ipv4", + "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", "type": "ipv4", "netmask": "255.255.252.0", - "link": "tap1a81968a-79", + "link": "eth0", "routes": [ { "netmask": "0.0.0.0", @@ -669,25 +713,17 @@ } ], "ip_address": "172.19.1.34", + "dns_search": ["example3.com"], + "dns_nameservers": ["172.19.0.12"], "id": "network0", - }, - { - "network_id": "private-ipv4", - "type": "ipv4", - "netmask": "255.255.255.0", - "link": "tap1a81968a-79", - "routes": [], - "ip_address": "10.0.0.10", - "id": "network1", - }, + } ], "links": [ { "ethernet_mac_address": "fa:16:3e:ed:9a:59", "mtu": None, - "type": "bridge", - "id": "tap1a81968a-79", - "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", + "type": "physical", + "id": "eth0", }, ], }, @@ -702,10 +738,8 @@ # BOOTPROTO=static IPADDR=172.19.1.34 -IPADDR1=10.0.0.10 LLADDR=fa:16:3e:ed:9a:59 NETMASK=255.255.252.0 -NETMASK1=255.255.255.0 STARTMODE=auto """.lstrip(), ), @@ -715,6 +749,7 @@ ; Created by cloud-init automatically, do not edit. ; nameserver 172.19.0.12 +search example3.com example1.com example2.com """.lstrip(), ), ( @@ -745,13 +780,12 @@ BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 +DNS1=172.19.0.12 +DOMAIN=example3.com GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 -IPADDR1=10.0.0.10 NETMASK=255.255.252.0 -NETMASK1=255.255.255.0 -NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -763,6 +797,7 @@ ; Created by cloud-init automatically, do not edit. ; nameserver 172.19.0.12 +search example3.com example1.com example2.com """.lstrip(), ), ( @@ -784,10 +819,50 @@ ), ), ], + "expected_network_manager": [ + ( + "".join( + [ + "etc/NetworkManager/system-connections", + "/cloud-init-eth0.nmconnection", + ] + ), + """ +# Generated by cloud-init. Changes will be lost. 
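+# Only the network-level dns_search (example3.com) is bound to this connection; the service-level search domains appear in resolv.conf instead.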
+ +[connection] +id=cloud-init eth0 +uuid=1dd9a779-d327-56e1-8454-c65e2556c12c +autoconnect-priority=120 +type=ethernet + +[user] +org.freedesktop.NetworkManager.origin=cloud-init + +[ethernet] +mac-address=FA:16:3E:ED:9A:59 + +[ipv4] +method=manual +may-fail=false +address1=172.19.1.34/22 +route1=0.0.0.0/0,172.19.3.254 +dns=172.19.0.12; +dns-search=example3.com; + +""".lstrip(), + ), + ], }, { "in_data": { - "services": [{"type": "dns", "address": "172.19.0.12"}], + "services": [ + { + "type": "dns", + "address": "172.19.0.12", + "search": "example.com", + } + ], "networks": [ { "network_id": "public-ipv4", @@ -805,37 +880,13 @@ "id": "network0", }, { - "network_id": "public-ipv6-a", - "type": "ipv6", - "netmask": "", - "link": "tap1a81968a-79", - "routes": [ - { - "gateway": "2001:DB8::1", - "netmask": "::", - "network": "::", - } - ], - "ip_address": "2001:DB8::10", - "id": "network1", - }, - { - "network_id": "public-ipv6-b", - "type": "ipv6", - "netmask": "64", - "link": "tap1a81968a-79", - "routes": [], - "ip_address": "2001:DB9::10", - "id": "network2", - }, - { - "network_id": "public-ipv6-c", - "type": "ipv6", - "netmask": "64", + "network_id": "private-ipv4", + "type": "ipv4", + "netmask": "255.255.255.0", "link": "tap1a81968a-79", "routes": [], - "ip_address": "2001:DB10::10", - "id": "network3", + "ip_address": "10.0.0.10", + "id": "network1", }, ], "links": [ @@ -859,11 +910,10 @@ # BOOTPROTO=static IPADDR=172.19.1.34 -IPADDR6=2001:DB8::10/64 -IPADDR6_1=2001:DB9::10/64 -IPADDR6_2=2001:DB10::10/64 +IPADDR1=10.0.0.10 LLADDR=fa:16:3e:ed:9a:59 NETMASK=255.255.252.0 +NETMASK1=255.255.255.0 STARTMODE=auto """.lstrip(), ), @@ -873,6 +923,7 @@ ; Created by cloud-init automatically, do not edit. ; nameserver 172.19.0.12 +search example.com """.lstrip(), ), ( @@ -906,14 +957,9 @@ GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 -IPV6ADDR=2001:DB8::10/64 -IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" -IPV6INIT=yes -IPV6_AUTOCONF=no -IPV6_DEFAULTGW=2001:DB8::1 -IPV6_FORCE_ACCEPT_RA=no +IPADDR1=10.0.0.10 NETMASK=255.255.252.0 -NM_CONTROLLED=no +NETMASK1=255.255.255.0 ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -925,6 +971,7 @@ ; Created by cloud-init automatically, do not edit. ; nameserver 172.19.0.12 +search example.com """.lstrip(), ), ( @@ -947,3701 +994,207 @@ ), ], }, -] - -EXAMPLE_ENI = """ -auto lo -iface lo inet loopback - dns-nameservers 10.0.0.1 - dns-search foo.com - -auto eth0 -iface eth0 inet static - address 1.2.3.12 - netmask 255.255.255.248 - broadcast 1.2.3.15 - gateway 1.2.3.9 - dns-nameservers 69.9.160.191 69.9.191.4 -auto eth1 -iface eth1 inet static - address 10.248.2.4 - netmask 255.255.255.248 - broadcast 10.248.2.7 -""" - -RENDERED_ENI = """ -auto lo -iface lo inet loopback - dns-nameservers 10.0.0.1 - dns-search foo.com - -auto eth0 -iface eth0 inet static - address 1.2.3.12/29 - broadcast 1.2.3.15 - dns-nameservers 69.9.160.191 69.9.191.4 - gateway 1.2.3.9 - -auto eth1 -iface eth1 inet static - address 10.248.2.4/29 - broadcast 10.248.2.7 -""".lstrip() - -NETWORK_CONFIGS = { - "small_v1_suse_dhcp6": { - "expected_sysconfig_opensuse": { - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=cf:d6:af:48:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth99": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DHCLIENT6_MODE=managed - LLADDR=c0:d6:9f:2c:e8:80 - IPADDR=192.168.21.3 - NETMASK=255.255.255.0 - STARTMODE=auto""" - ), - }, - "yaml": textwrap.dedent( - """ - version: 1 - config: - # Physical interfaces. 
- - type: physical - name: eth99 - mac_address: c0:d6:9f:2c:e8:80 - subnets: - - type: dhcp4 - - type: dhcp6 - - type: static - address: 192.168.21.3/24 - dns_nameservers: - - 8.8.8.8 - - 8.8.4.4 - dns_search: barley.maas sach.maas - routes: - - gateway: 65.61.151.37 - netmask: 0.0.0.0 - network: 0.0.0.0 - metric: 10000 - - type: physical - name: eth1 - mac_address: cf:d6:af:48:e8:80 - - type: nameserver - address: - - 1.2.3.4 - - 5.6.7.8 - search: - - wark.maas - """ - ), - }, - "small_v1": { - "expected_networkd_eth99": textwrap.dedent( - """\ - [Match] - Name=eth99 - MACAddress=c0:d6:9f:2c:e8:80 - [Address] - Address=192.168.21.3/24 - [Network] - DHCP=ipv4 - Domains=barley.maas sach.maas - Domains=wark.maas - DNS=1.2.3.4 5.6.7.8 - DNS=8.8.8.8 8.8.4.4 - [Route] - Gateway=65.61.151.37 - Destination=0.0.0.0/0 - Metric=10000 - """ - ).rstrip(" "), - "expected_networkd_eth1": textwrap.dedent( - """\ - [Match] - Name=eth1 - MACAddress=cf:d6:af:48:e8:80 - [Network] - DHCP=no - Domains=wark.maas - DNS=1.2.3.4 5.6.7.8 - """ - ).rstrip(" "), - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - dns-nameservers 1.2.3.4 5.6.7.8 - dns-search wark.maas - - iface eth1 inet manual - - auto eth99 - iface eth99 inet dhcp - - # control-alias eth99 - iface eth99 inet static - address 192.168.21.3/24 - dns-nameservers 8.8.8.8 8.8.4.4 - dns-search barley.maas sach.maas - post-up route add default gw 65.61.151.37 metric 10000 || true - pre-down route del default gw 65.61.151.37 metric 10000 || true - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - eth1: - match: - macaddress: cf:d6:af:48:e8:80 - set-name: eth1 - eth99: - addresses: - - 192.168.21.3/24 - dhcp4: true - match: - macaddress: c0:d6:9f:2c:e8:80 - nameservers: - addresses: - - 8.8.8.8 - - 8.8.4.4 - search: - - barley.maas - - sach.maas - routes: - - metric: 10000 - to: 0.0.0.0/0 - via: 65.61.151.37 - set-name: eth99 - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=cf:d6:af:48:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth99": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - LLADDR=c0:d6:9f:2c:e8:80 - IPADDR=192.168.21.3 - NETMASK=255.255.255.0 - STARTMODE=auto""" - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth1 - HWADDR=cf:d6:af:48:e8:80 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth99": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEFROUTE=yes - DEVICE=eth99 - DHCLIENT_SET_DEFAULT_ROUTE=yes - DNS1=8.8.8.8 - DNS2=8.8.4.4 - DOMAIN="barley.maas sach.maas" - GATEWAY=65.61.151.37 - HWADDR=c0:d6:9f:2c:e8:80 - IPADDR=192.168.21.3 - NETMASK=255.255.255.0 - METRIC=10000 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - }, - "expected_network_manager": { - "cloud-init-eth1.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth1 - uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=CF:D6:AF:48:E8:80 - - """ - ), - "cloud-init-eth99.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init eth99 - uuid=b1b88000-1f03-5360-8377-1a2205efffb4 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=C0:D6:9F:2C:E8:80 - - [ipv4] - method=auto - may-fail=false - address1=192.168.21.3/24 - route1=0.0.0.0/0,65.61.151.37 - dns=8.8.8.8;8.8.4.4; - dns-search=barley.maas;sach.maas; - - """ - ), - }, - "yaml": textwrap.dedent( - """ - version: 1 - config: - # Physical interfaces. - - type: physical - name: eth99 - mac_address: c0:d6:9f:2c:e8:80 - subnets: - - type: dhcp4 - - type: static - address: 192.168.21.3/24 - dns_nameservers: - - 8.8.8.8 - - 8.8.4.4 - dns_search: barley.maas sach.maas - routes: - - gateway: 65.61.151.37 - netmask: 0.0.0.0 - network: 0.0.0.0 - metric: 10000 - - type: physical - name: eth1 - mac_address: cf:d6:af:48:e8:80 - - type: nameserver - address: - - 1.2.3.4 - - 5.6.7.8 - search: - - wark.maas - """ - ), - }, - # We test a separate set of configs here because v2 doesn't support - # generic nameservers, so that aspect needs to be modified - "small_v2": { - "expected_networkd_eth99": textwrap.dedent( - """\ - [Match] - Name=eth99 - MACAddress=c0:d6:9f:2c:e8:80 - [Address] - Address=192.168.21.3/24 - [Network] - DHCP=ipv4 - Domains=barley.maas sach.maas - DNS=8.8.8.8 8.8.4.4 - [Route] - Gateway=65.61.151.37 - Destination=0.0.0.0/0 - Metric=10000 - """ - ).rstrip(" "), - "expected_networkd_eth1": textwrap.dedent( - """\ - [Match] - Name=eth1 - MACAddress=cf:d6:af:48:e8:80 - [Network] - DHCP=no - """ - ).rstrip(" "), - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - iface eth1 inet manual - - auto eth99 - iface eth99 inet dhcp - - # control-alias eth99 - iface eth99 inet static - address 192.168.21.3/24 - dns-nameservers 8.8.8.8 8.8.4.4 - dns-search barley.maas sach.maas - post-up route add default gw 65.61.151.37 metric 10000 || true - pre-down route del default gw 65.61.151.37 metric 10000 || true - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=cf:d6:af:48:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth99": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - LLADDR=c0:d6:9f:2c:e8:80 - IPADDR=192.168.21.3 - NETMASK=255.255.255.0 - STARTMODE=auto""" - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth1 - HWADDR=cf:d6:af:48:e8:80 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth99": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEFROUTE=yes - DEVICE=eth99 - DHCLIENT_SET_DEFAULT_ROUTE=yes - DNS1=8.8.8.8 - DNS2=8.8.4.4 - DOMAIN="barley.maas sach.maas" - GATEWAY=65.61.151.37 - HWADDR=c0:d6:9f:2c:e8:80 - IPADDR=192.168.21.3 - NETMASK=255.255.255.0 - METRIC=10000 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - }, - "expected_network_manager": { - "cloud-init-eth1.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth1 - uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=CF:D6:AF:48:E8:80 - - """ - ), - "cloud-init-eth99.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init eth99 - uuid=b1b88000-1f03-5360-8377-1a2205efffb4 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=C0:D6:9F:2C:E8:80 - - [ipv4] - method=auto - may-fail=false - route1=0.0.0.0/0,65.61.151.37 - address1=192.168.21.3/24 - dns=8.8.8.8;8.8.4.4; - dns-search=barley.maas;sach.maas; - - """ - ), - }, - "yaml": textwrap.dedent( - """ - version: 2 - ethernets: - eth1: - match: - macaddress: cf:d6:af:48:e8:80 - set-name: eth1 - eth99: - addresses: - - 192.168.21.3/24 - dhcp4: true - match: - macaddress: c0:d6:9f:2c:e8:80 - nameservers: - addresses: - - 8.8.8.8 - - 8.8.4.4 - search: - - barley.maas - - sach.maas - routes: - - metric: 10000 - to: 0.0.0.0/0 - via: 65.61.151.37 - set-name: eth99 - """ - ), - }, - "v4_and_v6": { - "expected_networkd": textwrap.dedent( - """\ - [Match] - Name=iface0 - [Network] - DHCP=yes - """ - ).rstrip(" "), - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet dhcp - - # control-alias iface0 - iface iface0 inet6 dhcp - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - dhcp4: true - dhcp6: true - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DHCLIENT6_MODE=managed - STARTMODE=auto""" - ) - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv4] - method=auto - may-fail=true - - [ipv6] - method=auto - may-fail=true - - """ - ), - }, - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'dhcp4'} - - {'type': 'dhcp6'} - """ - ).rstrip(" "), - }, - "v4_and_v6_static": { - "expected_networkd": textwrap.dedent( - """\ - [Match] - Name=iface0 - [Link] - MTUBytes=8999 - [Network] - DHCP=no - [Address] - Address=192.168.14.2/24 - [Address] - Address=2001:1::1/64 - """ - ).rstrip(" "), - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet static - address 192.168.14.2/24 - mtu 9000 - - # control-alias iface0 - iface iface0 inet6 static - address 2001:1::1/64 - mtu 1500 - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - addresses: - - 192.168.14.2/24 - - 2001:1::1/64 - ipv6-mtu: 1500 - mtu: 9000 - """ - ).rstrip(" "), - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - mtu: 8999 - subnets: - - type: static - address: 192.168.14.2/24 - mtu: 9000 - - type: static - address: 2001:1::1/64 - mtu: 1500 - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=static - IPADDR=192.168.14.2 - IPADDR6=2001:1::1/64 - NETMASK=255.255.255.0 - STARTMODE=auto - MTU=9000 - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - IPADDR=192.168.14.2 - IPV6ADDR=2001:1::1/64 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - 
MTU=9000 - IPV6_MTU=1500 - """ - ), - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mtu=9000 - - [ipv4] - method=manual - may-fail=false - address1=192.168.14.2/24 - - [ipv6] - method=manual - may-fail=false - address1=2001:1::1/64 - - """ - ), - }, - }, - "v6_and_v4": { - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DHCLIENT6_MODE=managed - STARTMODE=auto""" - ) - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv6] - method=auto - may-fail=true - - [ipv4] - method=auto - may-fail=true - - """ - ), - }, - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - type: dhcp6 - - type: dhcp4 - """ - ).rstrip(" "), - }, - "dhcpv6_only": { - "expected_networkd": textwrap.dedent( - """\ - [Match] - Name=iface0 - [Network] - DHCP=ipv6 - """ - ).rstrip(" "), - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 dhcp - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - dhcp6: true - """ - ).rstrip(" "), - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'dhcp6'} - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=managed - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - DHCPV6C=yes - IPV6INIT=yes - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv6] - method=auto - may-fail=false - - """ - ), - }, - }, - "dhcpv6_accept_ra": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 dhcp - accept_ra 1 - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - accept-ra: true - dhcp6: true - """ - ).rstrip(" "), - "yaml_v1": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'dhcp6'} - accept-ra: true - """ - ).rstrip(" "), - "yaml_v2": textwrap.dedent( - """\ - version: 2 - ethernets: - iface0: - dhcp6: true - accept-ra: true - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=managed - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - DHCPV6C=yes - IPV6INIT=yes - IPV6_FORCE_ACCEPT_RA=yes - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_networkd": textwrap.dedent( - """\ - [Match] - Name=iface0 - [Network] - DHCP=ipv6 - IPv6AcceptRA=True - """ - ).rstrip(" "), - }, - "dhcpv6_reject_ra": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 dhcp - accept_ra 0 - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - accept-ra: false - dhcp6: true - """ - ).rstrip(" "), - "yaml_v1": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'dhcp6'} - accept-ra: false - """ - ).rstrip(" "), - "yaml_v2": textwrap.dedent( - """\ - version: 2 - ethernets: - iface0: - dhcp6: true - accept-ra: false - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=managed - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - DHCPV6C=yes - IPV6INIT=yes - IPV6_FORCE_ACCEPT_RA=no - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_networkd": textwrap.dedent( - """\ - [Match] - Name=iface0 - [Network] - DHCP=ipv6 - IPv6AcceptRA=False - """ - ).rstrip(" "), - }, - "ipv6_slaac": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 auto - dhcp 0 - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - dhcp6: true - """ - ).rstrip(" "), - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'ipv6_slaac'} - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=info - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - IPV6_AUTOCONF=yes - IPV6INIT=yes - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - 
"cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv6] - method=auto - may-fail=false - - [ipv4] - method=disabled - - """ - ), - }, - }, - "static6": { - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - accept-ra: 'no' - subnets: - - type: 'static6' - address: 2001:1::1/64 - """ - ).rstrip(" "), - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - IPV6ADDR=2001:1::1/64 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - }, - "dhcpv6_stateless": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 auto - dhcp 1 - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - dhcp6: true - """ - ).rstrip(" "), - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'ipv6_dhcpv6-stateless'} - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=info - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - DHCPV6C=yes - DHCPV6C_OPTIONS=-S - IPV6_AUTOCONF=yes - IPV6INIT=yes - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv6] - method=auto - may-fail=false - - [ipv4] - method=disabled - - """ - ), - }, - }, - "dhcpv6_stateful": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 dhcp - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - accept-ra: true - dhcp6: true - """ - ).rstrip(" "), - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'ipv6_dhcpv6-stateful'} - accept-ra: true - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=managed - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - DHCPV6C=yes - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FAILURE_FATAL=yes - IPV6_FORCE_ACCEPT_RA=yes - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - }, - "wakeonlan_disabled": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet dhcp - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - ethernets: - iface0: - dhcp4: true - wakeonlan: false - version: 2 - """ - ), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv4] - method=auto - may-fail=false - - """ - ), - }, - "yaml_v2": textwrap.dedent( - """\ - version: 2 - ethernets: - iface0: - dhcp4: true - wakeonlan: false - """ - ).rstrip(" "), - }, - "wakeonlan_enabled": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet dhcp - ethernet-wol g - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - ethernets: - iface0: - dhcp4: true - wakeonlan: true - version: 2 - """ - ), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - ETHTOOL_OPTS="wol g" - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEVICE=iface0 - ETHTOOL_OPTS="wol g" - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
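The run of fixtures from dhcpv6_only through dhcpv6_stateful varies one thing at a time: how each IPv6 addressing mode surfaces in the rendered files. Condensing the expected sysconfig outputs above into a single lookup — a reading aid for the fixtures, not cloud-init renderer code:

    # v1 subnet type -> (SUSE DHCLIENT6_MODE, distinguishing RHEL flags),
    # as shown in the expected outputs above.
    V6_SUBNET_EXPECTATIONS = {
        "dhcp6":                 ("managed", {"DHCPV6C": "yes"}),
        "ipv6_slaac":            ("info",    {"IPV6_AUTOCONF": "yes"}),
        "ipv6_dhcpv6-stateless": ("info",    {"DHCPV6C": "yes",
                                              "DHCPV6C_OPTIONS": "-S",
                                              "IPV6_AUTOCONF": "yes"}),
        "ipv6_dhcpv6-stateful":  ("managed", {"DHCPV6C": "yes",
                                              "IPV6_AUTOCONF": "no",
                                              "IPV6_FORCE_ACCEPT_RA": "yes"}),
    }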
- - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - wake-on-lan=64 - - [ipv4] - method=auto - may-fail=false - - """ - ), - }, - "yaml_v2": textwrap.dedent( - """\ - version: 2 - ethernets: - iface0: - dhcp4: true - wakeonlan: true - """ - ).rstrip(" "), - }, - "all": { - "expected_eni": """\ -auto lo -iface lo inet loopback - dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4 - dns-search barley.maas wark.maas foobar.maas - -iface eth0 inet manual - -auto eth1 -iface eth1 inet manual - bond-master bond0 - bond-mode active-backup - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - -auto eth2 -iface eth2 inet manual - bond-master bond0 - bond-mode active-backup - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - -iface eth3 inet manual - -iface eth4 inet manual - -# control-manual eth5 -iface eth5 inet dhcp - -auto ib0 -iface ib0 inet static - address 192.168.200.7/24 - mtu 9000 - hwaddress a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 - -auto bond0 -iface bond0 inet6 dhcp - bond-mode active-backup - bond-slaves none - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - hwaddress aa:bb:cc:dd:ee:ff - -auto br0 -iface br0 inet static - address 192.168.14.2/24 - bridge_ageing 250 - bridge_bridgeprio 22 - bridge_fd 1 - bridge_gcint 2 - bridge_hello 1 - bridge_maxage 10 - bridge_pathcost eth3 50 - bridge_pathcost eth4 75 - bridge_portprio eth3 28 - bridge_portprio eth4 14 - bridge_ports eth3 eth4 - bridge_stp off - bridge_waitport 1 eth3 - bridge_waitport 2 eth4 - hwaddress bb:bb:bb:bb:bb:aa - -# control-alias br0 -iface br0 inet6 static - address 2001:1::1/64 - post-up route add -A inet6 default gw 2001:4800:78ff:1b::1 || true - pre-down route del -A inet6 default gw 2001:4800:78ff:1b::1 || true - -auto bond0.200 -iface bond0.200 inet dhcp - vlan-raw-device bond0 - vlan_id 200 - -auto eth0.101 -iface eth0.101 inet static - address 192.168.0.2/24 - dns-nameservers 192.168.0.10 10.23.23.134 - dns-search barley.maas sacchromyces.maas brettanomyces.maas - gateway 192.168.0.1 - mtu 1500 - hwaddress aa:bb:cc:dd:ee:11 - vlan-raw-device eth0 - vlan_id 101 - -# control-alias eth0.101 -iface eth0.101 inet static - address 192.168.2.10/24 - -post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true -pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true -""", - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - eth0: - match: - macaddress: c0:d6:9f:2c:e8:80 - set-name: eth0 - eth1: - match: - macaddress: aa:d6:9f:2c:e8:80 - set-name: eth1 - eth2: - match: - macaddress: c0:bb:9f:2c:e8:80 - set-name: eth2 - eth3: - match: - macaddress: 66:bb:9f:2c:e8:80 - set-name: eth3 - eth4: - match: - macaddress: 98:bb:9f:2c:e8:80 - set-name: eth4 - eth5: - dhcp4: true - match: - macaddress: 98:bb:9f:2c:e8:8a - set-name: eth5 - bonds: - bond0: - dhcp6: true - interfaces: - - eth1 - - eth2 - macaddress: aa:bb:cc:dd:ee:ff - parameters: - mii-monitor-interval: 100 - mode: active-backup - transmit-hash-policy: layer3+4 - bridges: - br0: - addresses: - - 192.168.14.2/24 - - 2001:1::1/64 - interfaces: - - eth3 - - eth4 - macaddress: bb:bb:bb:bb:bb:aa - nameservers: - addresses: - - 8.8.8.8 - - 4.4.4.4 - - 8.8.4.4 - search: - - barley.maas - - wark.maas - - foobar.maas - parameters: - ageing-time: 250 - forward-delay: 1 - hello-time: 1 - max-age: 10 - path-cost: - eth3: 50 - eth4: 75 - port-priority: - 
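The wakeonlan fixtures show a single v2 boolean fanning out into renderer-specific settings: `ethernet-wol g` for ENI, `ETHTOOL_OPTS="wol g"` for sysconfig, and `wake-on-lan=64` for NetworkManager, where 64 is the magic-packet bit (0x40, NetworkManager's NM_SETTING_WIRED_WAKE_ON_LAN_MAGIC flag). A small sketch of that fan-out; the function and constant names here are ours, not cloud-init's:

    NM_WAKE_ON_LAN_MAGIC = 0x40  # NetworkManager's magic-packet flag bit

    def wol_fanout(enabled: bool) -> dict:
        """Per-renderer settings implied by v2 'wakeonlan' (sketch)."""
        if not enabled:
            return {}
        return {
            "eni": "ethernet-wol g",
            "sysconfig": 'ETHTOOL_OPTS="wol g"',
            "network_manager": "wake-on-lan=%d" % NM_WAKE_ON_LAN_MAGIC,  # 64
        }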
eth3: 28 - eth4: 14 - priority: 22 - stp: false - routes: - - to: ::/0 - via: 2001:4800:78ff:1b::1 - vlans: - bond0.200: - dhcp4: true - id: 200 - link: bond0 - eth0.101: - addresses: - - 192.168.0.2/24 - - 192.168.2.10/24 - id: 101 - link: eth0 - macaddress: aa:bb:cc:dd:ee:11 - mtu: 1500 - nameservers: - addresses: - - 192.168.0.10 - - 10.23.23.134 - search: - - barley.maas - - sacchromyces.maas - - brettanomyces.maas - routes: - - to: default - via: 192.168.0.1 - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-bond0": textwrap.dedent( - """\ - BONDING_MASTER=yes - BONDING_MODULE_OPTS="mode=active-backup """ - """xmit_hash_policy=layer3+4 """ - """miimon=100" - BONDING_SLAVE_0=eth1 - BONDING_SLAVE_1=eth2 - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=managed - LLADDR=aa:bb:cc:dd:ee:ff - STARTMODE=auto""" - ), - "ifcfg-bond0.200": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - ETHERDEVICE=bond0 - STARTMODE=auto - VLAN_ID=200""" - ), - "ifcfg-br0": textwrap.dedent( - """\ - BRIDGE_AGEINGTIME=250 - BOOTPROTO=static - IPADDR=192.168.14.2 - IPADDR6=2001:1::1/64 - LLADDRESS=bb:bb:bb:bb:bb:aa - NETMASK=255.255.255.0 - BRIDGE_PRIORITY=22 - BRIDGE_PORTS='eth3 eth4' - STARTMODE=auto - BRIDGE_STP=off""" - ), - "ifcfg-eth0": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=c0:d6:9f:2c:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth0.101": textwrap.dedent( - """\ - BOOTPROTO=static - IPADDR=192.168.0.2 - IPADDR1=192.168.2.10 - MTU=1500 - NETMASK=255.255.255.0 - NETMASK1=255.255.255.0 - ETHERDEVICE=eth0 - STARTMODE=auto - VLAN_ID=101""" - ), - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=none - LLADDR=aa:d6:9f:2c:e8:80 - STARTMODE=hotplug""" - ), - "ifcfg-eth2": textwrap.dedent( - """\ - BOOTPROTO=none - LLADDR=c0:bb:9f:2c:e8:80 - STARTMODE=hotplug""" - ), - "ifcfg-eth3": textwrap.dedent( - """\ - BOOTPROTO=static - BRIDGE=yes - LLADDR=66:bb:9f:2c:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth4": textwrap.dedent( - """\ - BOOTPROTO=static - BRIDGE=yes - LLADDR=98:bb:9f:2c:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth5": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - LLADDR=98:bb:9f:2c:e8:8a - STARTMODE=manual""" - ), - "ifcfg-ib0": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 - IPADDR=192.168.200.7 - MTU=9000 - NETMASK=255.255.255.0 - STARTMODE=auto - TYPE=InfiniBand""" - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-bond0": textwrap.dedent( - """\ - BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup """ - """xmit_hash_policy=layer3+4 """ - """miimon=100" - BONDING_SLAVE0=eth1 - BONDING_SLAVE1=eth2 - BOOTPROTO=none - DEVICE=bond0 - DHCPV6C=yes - IPV6INIT=yes - MACADDR=aa:bb:cc:dd:ee:ff - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Bond - USERCTL=no""" - ), - "ifcfg-bond0.200": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEVICE=bond0.200 - DHCLIENT_SET_DEFAULT_ROUTE=no - NM_CONTROLLED=no - ONBOOT=yes - PHYSDEV=bond0 - USERCTL=no - VLAN=yes""" - ), - "ifcfg-br0": textwrap.dedent( - """\ - AGEING=250 - BOOTPROTO=none - DEFROUTE=yes - DEVICE=br0 - IPADDR=192.168.14.2 - IPV6ADDR=2001:1::1/64 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - IPV6_DEFAULTGW=2001:4800:78ff:1b::1 - MACADDR=bb:bb:bb:bb:bb:aa - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - PRIO=22 - STP=no - TYPE=Bridge - USERCTL=no""" - ), - "ifcfg-eth0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth0 - HWADDR=c0:d6:9f:2c:e8:80 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth0.101": textwrap.dedent( - """\ - BOOTPROTO=none 
- DEFROUTE=yes - DEVICE=eth0.101 - DNS1=192.168.0.10 - DNS2=10.23.23.134 - DOMAIN="barley.maas sacchromyces.maas brettanomyces.maas" - GATEWAY=192.168.0.1 - IPADDR=192.168.0.2 - IPADDR1=192.168.2.10 - MTU=1500 - NETMASK=255.255.255.0 - NETMASK1=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - PHYSDEV=eth0 - USERCTL=no - VLAN=yes""" - ), - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth1 - HWADDR=aa:d6:9f:2c:e8:80 - MASTER=bond0 - NM_CONTROLLED=no - ONBOOT=yes - SLAVE=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth2": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth2 - HWADDR=c0:bb:9f:2c:e8:80 - MASTER=bond0 - NM_CONTROLLED=no - ONBOOT=yes - SLAVE=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth3": textwrap.dedent( - """\ - BOOTPROTO=none - BRIDGE=br0 - DEVICE=eth3 - HWADDR=66:bb:9f:2c:e8:80 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth4": textwrap.dedent( - """\ - BOOTPROTO=none - BRIDGE=br0 - DEVICE=eth4 - HWADDR=98:bb:9f:2c:e8:80 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth5": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEVICE=eth5 - DHCLIENT_SET_DEFAULT_ROUTE=no - HWADDR=98:bb:9f:2c:e8:8a - NM_CONTROLLED=no - ONBOOT=no - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-ib0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=ib0 - HWADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 - IPADDR=192.168.200.7 - MTU=9000 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=InfiniBand - USERCTL=no""" - ), - }, - "expected_network_manager": { - "cloud-init-eth3.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth3 - uuid=b7e95dda-7746-5bf8-bf33-6e5f3c926790 - autoconnect-priority=120 - type=ethernet - slave-type=bridge - master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=66:BB:9F:2C:E8:80 - - """ - ), - "cloud-init-eth5.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth5 - uuid=5fda13c7-9942-5e90-a41b-1d043bd725dc - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=98:BB:9F:2C:E8:8A - - [ipv4] - method=auto - may-fail=false - dns=8.8.8.8;4.4.4.4;8.8.4.4; - dns-search=barley.maas;wark.maas;foobar.maas; - - """ - ), - "cloud-init-ib0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init ib0 - uuid=11a1dda7-78b4-5529-beba-d9b5f549ad7b - autoconnect-priority=120 - type=infiniband - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [infiniband] - transport-mode=datagram - mtu=9000 - mac-address=A0:00:02:20:FE:80:00:00:00:00:00:00:EC:0D:9A:03:00:15:E2:C1 - - [ipv4] - method=manual - may-fail=false - address1=192.168.200.7/24 - dns=8.8.8.8;4.4.4.4;8.8.4.4; - dns-search=barley.maas;wark.maas;foobar.maas; - - """ - ), - "cloud-init-bond0.200.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init bond0.200 - uuid=88984a9c-ff22-5233-9267-86315e0acaa7 - autoconnect-priority=120 - type=vlan - interface-name=bond0.200 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [vlan] - id=200 - parent=54317911-f840-516b-a10d-82cb4c1f075c - - [ipv4] - method=auto - may-fail=false - dns=8.8.8.8;4.4.4.4;8.8.4.4; - dns-search=barley.maas;wark.maas;foobar.maas; - - """ - ), - "cloud-init-eth0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth0 - uuid=1dd9a779-d327-56e1-8454-c65e2556c12c - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=C0:D6:9F:2C:E8:80 - - """ - ), - "cloud-init-eth4.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth4 - uuid=e27e4959-fb50-5580-b9a4-2073554627b9 - autoconnect-priority=120 - type=ethernet - slave-type=bridge - master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=98:BB:9F:2C:E8:80 - - """ - ), - "cloud-init-eth1.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth1 - uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 - autoconnect-priority=120 - type=ethernet - slave-type=bond - master=54317911-f840-516b-a10d-82cb4c1f075c - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=AA:D6:9F:2C:E8:80 - - """ - ), - "cloud-init-br0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init br0 - uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 - autoconnect-priority=120 - type=bridge - interface-name=br0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [bridge] - stp=false - priority=22 - mac-address=BB:BB:BB:BB:BB:AA - - [ipv4] - method=manual - may-fail=false - address1=192.168.14.2/24 - dns=8.8.8.8;4.4.4.4;8.8.4.4; - dns-search=barley.maas;wark.maas;foobar.maas; - - [ipv6] - method=manual - may-fail=false - address1=2001:1::1/64 - route1=::/0,2001:4800:78ff:1b::1 - dns-search=barley.maas;wark.maas;foobar.maas; - - """ - ), - "cloud-init-eth0.101.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth0.101 - uuid=b5acec5e-db80-5935-8b02-0d5619fc42bf - autoconnect-priority=120 - type=vlan - interface-name=eth0.101 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [vlan] - id=101 - parent=1dd9a779-d327-56e1-8454-c65e2556c12c - - [ipv4] - method=manual - may-fail=false - address1=192.168.0.2/24 - gateway=192.168.0.1 - address2=192.168.2.10/24 - dns=192.168.0.10;10.23.23.134; - dns-search=barley.maas;sacchromyces.maas;brettanomyces.maas; - - """ - ), - "cloud-init-bond0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init bond0 - uuid=54317911-f840-516b-a10d-82cb4c1f075c - autoconnect-priority=120 - type=bond - interface-name=bond0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [bond] - mode=active-backup - miimon=100 - xmit_hash_policy=layer3+4 - - [ipv6] - method=auto - may-fail=false - dns-search=barley.maas;wark.maas;foobar.maas; - - """ - ), - "cloud-init-eth2.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init eth2 - uuid=5559a242-3421-5fdd-896e-9cb8313d5804 - autoconnect-priority=120 - type=ethernet - slave-type=bond - master=54317911-f840-516b-a10d-82cb4c1f075c - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=C0:BB:9F:2C:E8:80 - - """ - ), - }, - "yaml": textwrap.dedent( - """ - version: 1 - config: - # Physical interfaces. - - type: physical - name: eth0 - mac_address: c0:d6:9f:2c:e8:80 - - type: physical - name: eth1 - mac_address: aa:d6:9f:2c:e8:80 - - type: physical - name: eth2 - mac_address: c0:bb:9f:2c:e8:80 - - type: physical - name: eth3 - mac_address: 66:bb:9f:2c:e8:80 - - type: physical - name: eth4 - mac_address: 98:bb:9f:2c:e8:80 - # specify how ifupdown should treat iface - # control is one of ['auto', 'hotplug', 'manual'] - # with manual meaning ifup/ifdown should not affect the iface - # useful for things like iscsi root + dhcp - - type: physical - name: eth5 - mac_address: 98:bb:9f:2c:e8:8a - subnets: - - type: dhcp - control: manual - # VLAN interface. - - type: vlan - name: eth0.101 - vlan_link: eth0 - vlan_id: 101 - mac_address: aa:bb:cc:dd:ee:11 - mtu: 1500 - subnets: - - type: static - # When 'mtu' matches device-level mtu, no warnings - mtu: 1500 - address: 192.168.0.2/24 - gateway: 192.168.0.1 - dns_nameservers: - - 192.168.0.10 - - 10.23.23.134 - dns_search: - - barley.maas - - sacchromyces.maas - - brettanomyces.maas - - type: static - address: 192.168.2.10/24 - # Bond. - - type: bond - name: bond0 - # if 'mac_address' is omitted, the MAC is taken from - # the first slave. - mac_address: aa:bb:cc:dd:ee:ff - bond_interfaces: - - eth1 - - eth2 - params: - bond-mode: active-backup - bond_miimon: 100 - bond-xmit-hash-policy: "layer3+4" - subnets: - - type: dhcp6 - # A Bond VLAN. - - type: vlan - name: bond0.200 - vlan_link: bond0 - vlan_id: 200 - subnets: - - type: dhcp4 - # An infiniband - - type: infiniband - name: ib0 - mac_address: >- - a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 - subnets: - - type: static - address: 192.168.200.7/24 - mtu: 9000 - # A bridge. - - type: bridge - name: br0 - bridge_interfaces: - - eth3 - - eth4 - ipv4_conf: - rp_filter: 1 - proxy_arp: 0 - forwarding: 1 - ipv6_conf: - autoconf: 1 - disable_ipv6: 1 - use_tempaddr: 1 - forwarding: 1 - # basically anything in /proc/sys/net/ipv6/conf/.../ - mac_address: bb:bb:bb:bb:bb:aa - params: - bridge_ageing: 250 - bridge_bridgeprio: 22 - bridge_fd: 1 - bridge_gcint: 2 - bridge_hello: 1 - bridge_maxage: 10 - bridge_maxwait: 0 - bridge_pathcost: - - eth3 50 - - eth4 75 - bridge_portprio: - - eth3 28 - - eth4 14 - bridge_stp: 'off' - bridge_waitport: - - 1 eth3 - - 2 eth4 - subnets: - - type: static - address: 192.168.14.2/24 - - type: static - address: 2001:1::1/64 # default to /64 - routes: - - gateway: 2001:4800:78ff:1b::1 - netmask: '::' - network: '::' - # A global nameserver. - - type: nameserver - address: 8.8.8.8 - search: barley.maas - # global nameservers and search in list form - - type: nameserver - address: - - 4.4.4.4 - - 8.8.4.4 - search: - - wark.maas - - foobar.maas - # A global route. 
- - type: route - destination: 10.0.0.0/8 - gateway: 11.0.0.1 - metric: 3 - """ - ).lstrip(), - }, - "bond": { - "yaml": textwrap.dedent( - """ - version: 1 - config: - - type: physical - name: bond0s0 - mac_address: aa:bb:cc:dd:e8:00 - - type: physical - name: bond0s1 - mac_address: aa:bb:cc:dd:e8:01 - - type: bond - name: bond0 - mac_address: aa:bb:cc:dd:e8:ff - mtu: 9000 - bond_interfaces: - - bond0s0 - - bond0s1 - params: - bond-mode: active-backup - bond_miimon: 100 - bond-xmit-hash-policy: "layer3+4" - bond-num-grat-arp: 5 - bond-downdelay: 10 - bond-updelay: 20 - bond-fail-over-mac: active - bond-primary: bond0s0 - bond-primary-reselect: always - subnets: - - type: static - address: 192.168.0.2/24 - gateway: 192.168.0.1 - routes: - - gateway: 192.168.0.3 - netmask: 255.255.255.0 - network: 10.1.3.0 - - type: static - address: 192.168.1.2/24 - - type: static - address: 2001:1::1/92 - routes: - - gateway: 2001:67c:1562::1 - network: "2001:67c::" - netmask: "ffff:ffff::" - - gateway: 3001:67c:15::1 - network: "3001:67c::" - netmask: "ffff:ffff::" - metric: 10000 - """ - ), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - bond0s0: - match: - macaddress: aa:bb:cc:dd:e8:00 - set-name: bond0s0 - bond0s1: - match: - macaddress: aa:bb:cc:dd:e8:01 - set-name: bond0s1 - bonds: - bond0: - addresses: - - 192.168.0.2/24 - - 192.168.1.2/24 - - 2001:1::1/92 - interfaces: - - bond0s0 - - bond0s1 - macaddress: aa:bb:cc:dd:e8:ff - mtu: 9000 - parameters: - down-delay: 10 - fail-over-mac-policy: active - gratuitous-arp: 5 - mii-monitor-interval: 100 - mode: active-backup - primary: bond0s0 - primary-reselect-policy: always - transmit-hash-policy: layer3+4 - up-delay: 20 - routes: - - to: default - via: 192.168.0.1 - - to: 10.1.3.0/24 - via: 192.168.0.3 - - to: 2001:67c::/32 - via: 2001:67c:1562::1 - - metric: 10000 - to: 3001:67c::/32 - via: 3001:67c:15::1 - """ - ), - "expected_eni": textwrap.dedent( - """\ -auto lo -iface lo inet loopback - -auto bond0s0 -iface bond0s0 inet manual - bond-downdelay 10 - bond-fail-over-mac active - bond-master bond0 - bond-mode active-backup - bond-num-grat-arp 5 - bond-primary bond0s0 - bond-primary-reselect always - bond-updelay 20 - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - -auto bond0s1 -iface bond0s1 inet manual - bond-downdelay 10 - bond-fail-over-mac active - bond-master bond0 - bond-mode active-backup - bond-num-grat-arp 5 - bond-primary bond0s0 - bond-primary-reselect always - bond-updelay 20 - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - -auto bond0 -iface bond0 inet static - address 192.168.0.2/24 - gateway 192.168.0.1 - bond-downdelay 10 - bond-fail-over-mac active - bond-mode active-backup - bond-num-grat-arp 5 - bond-primary bond0s0 - bond-primary-reselect always - bond-slaves none - bond-updelay 20 - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - hwaddress aa:bb:cc:dd:e8:ff - mtu 9000 - post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true - pre-down route del -net 10.1.3.0/24 gw 192.168.0.3 || true - -# control-alias bond0 -iface bond0 inet static - address 192.168.1.2/24 - -# control-alias bond0 -iface bond0 inet6 static - address 2001:1::1/92 - post-up route add -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true - pre-down route del -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true - post-up route add -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ -|| true - pre-down route del -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ -|| true - """ - ), - "yaml-v2": textwrap.dedent( - 
""" - version: 2 - ethernets: - eth0: - match: - driver: "virtio_net" - macaddress: aa:bb:cc:dd:e8:00 - vf0: - set-name: vf0 - match: - driver: "e1000" - macaddress: aa:bb:cc:dd:e8:01 - bonds: - bond0: - addresses: - - 192.168.0.2/24 - - 192.168.1.2/24 - - 2001:1::1/92 - gateway4: 192.168.0.1 - interfaces: - - eth0 - - vf0 - parameters: - down-delay: 10 - fail-over-mac-policy: active - gratuitous-arp: 5 - mii-monitor-interval: 100 - mode: active-backup - primary: bond0s0 - primary-reselect-policy: always - transmit-hash-policy: layer3+4 - up-delay: 20 - routes: - - to: 10.1.3.0/24 - via: 192.168.0.3 - - to: 2001:67c:1562:8007::1/64 - via: 2001:67c:1562:8007::aac:40b2 - - metric: 10000 - to: 3001:67c:15:8007::1/64 - via: 3001:67c:15:8007::aac:40b2 - """ - ), - "expected_netplan-v2": textwrap.dedent( - """ - network: - bonds: - bond0: - addresses: - - 192.168.0.2/24 - - 192.168.1.2/24 - - 2001:1::1/92 - gateway4: 192.168.0.1 - interfaces: - - eth0 - - vf0 - parameters: - down-delay: 10 - fail-over-mac-policy: active - gratuitous-arp: 5 - mii-monitor-interval: 100 - mode: active-backup - primary: bond0s0 - primary-reselect-policy: always - transmit-hash-policy: layer3+4 - up-delay: 20 - routes: - - to: 10.1.3.0/24 - via: 192.168.0.3 - - to: 2001:67c:1562:8007::1/64 - via: 2001:67c:1562:8007::aac:40b2 - - metric: 10000 - to: 3001:67c:15:8007::1/64 - via: 3001:67c:15:8007::aac:40b2 - ethernets: - eth0: - match: - driver: virtio_net - macaddress: aa:bb:cc:dd:e8:00 - vf0: - match: - driver: e1000 - macaddress: aa:bb:cc:dd:e8:01 - set-name: vf0 - version: 2 - """ - ), - "expected_sysconfig_opensuse": { - "ifcfg-bond0": textwrap.dedent( - """\ - BONDING_MASTER=yes - BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ - """miimon=100 num_grat_arp=5 """ - """downdelay=10 updelay=20 """ - """fail_over_mac=active """ - """primary=bond0s0 """ - """primary_reselect=always" - BONDING_SLAVE_0=bond0s0 - BONDING_SLAVE_1=bond0s1 - BOOTPROTO=static - LLADDR=aa:bb:cc:dd:e8:ff - IPADDR=192.168.0.2 - IPADDR1=192.168.1.2 - IPADDR6=2001:1::1/92 - MTU=9000 - NETMASK=255.255.255.0 - NETMASK1=255.255.255.0 - STARTMODE=auto - """ - ), - "ifcfg-bond0s0": textwrap.dedent( - """\ - BOOTPROTO=none - LLADDR=aa:bb:cc:dd:e8:00 - STARTMODE=hotplug - """ - ), - "ifcfg-bond0s1": textwrap.dedent( - """\ - BOOTPROTO=none - LLADDR=aa:bb:cc:dd:e8:01 - STARTMODE=hotplug - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-bond0": textwrap.dedent( - """\ - BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ - """miimon=100 num_grat_arp=5 """ - """downdelay=10 updelay=20 """ - """fail_over_mac=active """ - """primary=bond0s0 """ - """primary_reselect=always" - BONDING_SLAVE0=bond0s0 - BONDING_SLAVE1=bond0s1 - BOOTPROTO=none - DEFROUTE=yes - DEVICE=bond0 - GATEWAY=192.168.0.1 - MACADDR=aa:bb:cc:dd:e8:ff - IPADDR=192.168.0.2 - IPADDR1=192.168.1.2 - IPV6ADDR=2001:1::1/92 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - MTU=9000 - NETMASK=255.255.255.0 - NETMASK1=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Bond - USERCTL=no - """ - ), - "ifcfg-bond0s0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=bond0s0 - HWADDR=aa:bb:cc:dd:e8:00 - MASTER=bond0 - NM_CONTROLLED=no - ONBOOT=yes - SLAVE=yes - TYPE=Ethernet - USERCTL=no - """ - ), - "route6-bond0": textwrap.dedent( - """\ - # Created by cloud-init automatically, do not edit. 
- # - 2001:67c::/32 via 2001:67c:1562::1 dev bond0 - 3001:67c::/32 via 3001:67c:15::1 metric 10000 dev bond0 - """ - ), - "route-bond0": textwrap.dedent( - """\ - ADDRESS0=10.1.3.0 - GATEWAY0=192.168.0.3 - NETMASK0=255.255.255.0 - """ - ), - "ifcfg-bond0s1": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=bond0s1 - HWADDR=aa:bb:cc:dd:e8:01 - MASTER=bond0 - NM_CONTROLLED=no - ONBOOT=yes - SLAVE=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-bond0s0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init bond0s0 - uuid=09d0b5b9-67e7-5577-a1af-74d1cf17a71e - autoconnect-priority=120 - type=ethernet - slave-type=bond - master=54317911-f840-516b-a10d-82cb4c1f075c - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=AA:BB:CC:DD:E8:00 - - """ - ), - "cloud-init-bond0s1.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init bond0s1 - uuid=4d9aca96-b515-5630-ad83-d13daac7f9d0 - autoconnect-priority=120 - type=ethernet - slave-type=bond - master=54317911-f840-516b-a10d-82cb4c1f075c - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=AA:BB:CC:DD:E8:01 - - """ - ), - "cloud-init-bond0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init bond0 - uuid=54317911-f840-516b-a10d-82cb4c1f075c - autoconnect-priority=120 - type=bond - interface-name=bond0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [bond] - mode=active-backup - miimon=100 - xmit_hash_policy=layer3+4 - num_grat_arp=5 - downdelay=10 - updelay=20 - fail_over_mac=active - primary_reselect=always - primary=bond0s0 - - [ipv4] - method=manual - may-fail=false - address1=192.168.0.2/24 - gateway=192.168.0.1 - route1=10.1.3.0/24,192.168.0.3 - address2=192.168.1.2/24 - - [ipv6] - method=manual - may-fail=false - address1=2001:1::1/92 - route1=2001:67c::/32,2001:67c:1562::1 - route2=3001:67c::/32,3001:67c:15::1 - - """ - ), - }, - }, - "vlan": { - "yaml": textwrap.dedent( - """ - version: 1 - config: - - type: physical - name: en0 - mac_address: aa:bb:cc:dd:e8:00 - - type: vlan - mtu: 2222 - name: en0.99 - vlan_link: en0 - vlan_id: 99 - subnets: - - type: static - address: '192.168.2.2/24' - - type: static - address: '192.168.1.2/24' - gateway: 192.168.1.1 - - type: static - address: 2001:1::bbbb/96 - routes: - - gateway: 2001:1::1 - netmask: '::' - network: '::' - """ - ), - "expected_sysconfig_opensuse": { - # TODO RJS: unknown proper BOOTPROTO setting ask Marius - "ifcfg-en0": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=aa:bb:cc:dd:e8:00 - STARTMODE=auto""" - ), - "ifcfg-en0.99": textwrap.dedent( - """\ - BOOTPROTO=static - IPADDR=192.168.2.2 - IPADDR1=192.168.1.2 - IPADDR6=2001:1::bbbb/96 - MTU=2222 - NETMASK=255.255.255.0 - NETMASK1=255.255.255.0 - STARTMODE=auto - ETHERDEVICE=en0 - VLAN_ID=99 - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-en0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=en0 - HWADDR=aa:bb:cc:dd:e8:00 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-en0.99": textwrap.dedent( - """\ - BOOTPROTO=none - DEFROUTE=yes - DEVICE=en0.99 - GATEWAY=192.168.1.1 - IPADDR=192.168.2.2 - IPADDR1=192.168.1.2 - IPV6ADDR=2001:1::bbbb/96 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - IPV6_DEFAULTGW=2001:1::1 - MTU=2222 - 
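The bond fixture is largely an exercise in parameter renaming: each key under the v1 `params` block has a netplan counterpart in the expected_netplan output above. The implied rename table, collected in one place (illustrative; cloud-init's actual translation lives in its renderers):

    # v1 bond parameter -> netplan 'parameters' key, as implied by the
    # 'bond' fixture's yaml and expected_netplan above.
    V1_TO_NETPLAN_BOND_PARAMS = {
        "bond-mode": "mode",
        "bond_miimon": "mii-monitor-interval",
        "bond-xmit-hash-policy": "transmit-hash-policy",
        "bond-num-grat-arp": "gratuitous-arp",
        "bond-downdelay": "down-delay",
        "bond-updelay": "up-delay",
        "bond-fail-over-mac": "fail-over-mac-policy",
        "bond-primary": "primary",
        "bond-primary-reselect": "primary-reselect-policy",
    }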
NETMASK=255.255.255.0 - NETMASK1=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - PHYSDEV=en0 - USERCTL=no - VLAN=yes""" - ), - }, - "expected_network_manager": { - "cloud-init-en0.99.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init en0.99 - uuid=f594e2ed-f107-51df-b225-1dc530a5356b - autoconnect-priority=120 - type=vlan - interface-name=en0.99 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [vlan] - id=99 - parent=e0ca478b-8d84-52ab-8fae-628482c629b5 - - [ipv4] - method=manual - may-fail=false - address1=192.168.2.2/24 - address2=192.168.1.2/24 - gateway=192.168.1.1 - - [ipv6] - method=manual - may-fail=false - address1=2001:1::bbbb/96 - route1=::/0,2001:1::1 - - """ - ), - "cloud-init-en0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init en0 - uuid=e0ca478b-8d84-52ab-8fae-628482c629b5 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=AA:BB:CC:DD:E8:00 - - """ - ), - }, - }, - "bridge": { - "yaml": textwrap.dedent( - """ - version: 1 - config: - - type: physical - name: eth0 - mac_address: '52:54:00:12:34:00' - subnets: - - type: static - address: 2001:1::100/96 - - type: physical - name: eth1 - mac_address: '52:54:00:12:34:01' - subnets: - - type: static - address: 2001:1::101/96 - - type: bridge - name: br0 - bridge_interfaces: - - eth0 - - eth1 - params: - bridge_stp: 0 - bridge_bridgeprio: 22 - subnets: - - type: static - address: 192.168.2.2/24""" - ), - "expected_sysconfig_opensuse": { - "ifcfg-br0": textwrap.dedent( - """\ - BOOTPROTO=static - IPADDR=192.168.2.2 - NETMASK=255.255.255.0 - STARTMODE=auto - BRIDGE_STP=off - BRIDGE_PRIORITY=22 - BRIDGE_PORTS='eth0 eth1' - """ - ), - "ifcfg-eth0": textwrap.dedent( - """\ - BOOTPROTO=static - BRIDGE=yes - LLADDR=52:54:00:12:34:00 - IPADDR6=2001:1::100/96 - STARTMODE=auto - """ - ), - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=static - BRIDGE=yes - LLADDR=52:54:00:12:34:01 - IPADDR6=2001:1::101/96 - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-br0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=br0 - IPADDR=192.168.2.2 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - PRIO=22 - STP=no - TYPE=Bridge - USERCTL=no - """ - ), - "ifcfg-eth0": textwrap.dedent( - """\ - BOOTPROTO=none - BRIDGE=br0 - DEVICE=eth0 - HWADDR=52:54:00:12:34:00 - IPV6ADDR=2001:1::100/96 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=none - BRIDGE=br0 - DEVICE=eth1 - HWADDR=52:54:00:12:34:01 - IPV6ADDR=2001:1::101/96 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-br0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init br0 - uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 - autoconnect-priority=120 - type=bridge - interface-name=br0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [bridge] - stp=false - priority=22 - - [ipv4] - method=manual - may-fail=false - address1=192.168.2.2/24 - - """ - ), - "cloud-init-eth0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init eth0 - uuid=1dd9a779-d327-56e1-8454-c65e2556c12c - autoconnect-priority=120 - type=ethernet - slave-type=bridge - master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=52:54:00:12:34:00 - - [ipv6] - method=manual - may-fail=false - address1=2001:1::100/96 - - """ - ), - "cloud-init-eth1.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth1 - uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 - autoconnect-priority=120 - type=ethernet - slave-type=bridge - master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=52:54:00:12:34:01 - - [ipv6] - method=manual - may-fail=false - address1=2001:1::101/96 - - """ - ), - }, - }, - "manual": { - "yaml": textwrap.dedent( - """ - version: 1 - config: - - type: physical - name: eth0 - mac_address: '52:54:00:12:34:00' - subnets: - - type: static - address: 192.168.1.2/24 - control: manual - - type: physical - name: eth1 - mtu: 1480 - mac_address: 52:54:00:12:34:aa - subnets: - - type: manual - - type: physical - name: eth2 - mac_address: 52:54:00:12:34:ff - subnets: - - type: manual - control: manual - """ - ), - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - # control-manual eth0 - iface eth0 inet static - address 192.168.1.2/24 - - auto eth1 - iface eth1 inet manual - mtu 1480 - - # control-manual eth2 - iface eth2 inet manual - """ - ), - "expected_netplan": textwrap.dedent( - """\ - - network: - version: 2 - ethernets: - eth0: - addresses: - - 192.168.1.2/24 - match: - macaddress: '52:54:00:12:34:00' - set-name: eth0 - eth1: - match: - macaddress: 52:54:00:12:34:aa - mtu: 1480 - set-name: eth1 - eth2: - match: - macaddress: 52:54:00:12:34:ff - set-name: eth2 - """ - ), - "expected_sysconfig_opensuse": { - "ifcfg-eth0": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=52:54:00:12:34:00 - IPADDR=192.168.1.2 - NETMASK=255.255.255.0 - STARTMODE=manual - """ - ), - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=52:54:00:12:34:aa - MTU=1480 - STARTMODE=auto - """ - ), - "ifcfg-eth2": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=52:54:00:12:34:ff - STARTMODE=manual - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-eth0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth0 - HWADDR=52:54:00:12:34:00 - IPADDR=192.168.1.2 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=no - TYPE=Ethernet - USERCTL=no - """ - ), - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth1 - HWADDR=52:54:00:12:34:aa - MTU=1480 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - "ifcfg-eth2": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth2 - HWADDR=52:54:00:12:34:ff - NM_CONTROLLED=no - ONBOOT=no - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-eth0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth0 - uuid=1dd9a779-d327-56e1-8454-c65e2556c12c - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=52:54:00:12:34:00 - - [ipv4] - method=manual - may-fail=false - address1=192.168.1.2/24 - - """ - ), - "cloud-init-eth1.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init eth1 - uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mtu=1480 - mac-address=52:54:00:12:34:AA - - [ipv4] - method=auto - may-fail=true - - """ - ), - "cloud-init-eth2.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth2 - uuid=5559a242-3421-5fdd-896e-9cb8313d5804 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=52:54:00:12:34:FF - - [ipv4] - method=auto - may-fail=true - - """ - ), - }, - }, - "v1-dns": { - "expected_networkd": textwrap.dedent( - """\ - [Address] - Address=192.168.1.20/16 - - [Match] - MACAddress=11:22:33:44:55:66 - Name=interface0 - - [Network] - DHCP=no - DNS=1.1.1.1 3.3.3.3 - Domains=aaaa cccc - - [Route] - Gateway=192.168.1.1 - """ - ), - "expected_eni": textwrap.dedent( - """\ - # This file is generated from information provided by the datasource. Changes - # to it will not persist across an instance reboot. To disable cloud-init's - # network configuration capabilities, write a file - # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: - # network: {config: disabled} - auto lo - iface lo inet loopback - dns-nameservers 2.2.2.2 - dns-search bbbb - - iface lo inet6 loopback - dns-nameservers FEDC::1 - dns-search bbbb - - auto interface0 - iface interface0 inet static - address 192.168.1.20/16 - dns-nameservers 1.1.1.1 3.3.3.3 - dns-search aaaa cccc - gateway 192.168.1.1 - """ # noqa: E501 - ), - "expected_netplan": textwrap.dedent( - """\ - # This file is generated from information provided by the datasource. Changes - # to it will not persist across an instance reboot. To disable cloud-init's - # network configuration capabilities, write a file - # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: - # network: {config: disabled} - network: - version: 2 - ethernets: - interface0: - addresses: - - 192.168.1.20/16 - match: - macaddress: 11:22:33:44:55:66 - nameservers: - addresses: - - 1.1.1.1 - - 3.3.3.3 - search: - - aaaa - - cccc - routes: - - to: default - via: 192.168.1.1 - set-name: interface0 - """ # noqa: E501 - ), - "expected_sysconfig_opensuse": { - "ifcfg-interface0": textwrap.dedent( - """\ - # Created by cloud-init automatically, do not edit. - # - BOOTPROTO=static - IPADDR=192.168.1.20 - LLADDR=11:22:33:44:55:66 - NETMASK=255.255.0.0 - STARTMODE=auto - """ - ) - }, - "expected_sysconfig_rhel": { - "ifcfg-eth0": textwrap.dedent( - """\ - # Created by cloud-init automatically, do not edit. - # - BOOTPROTO=none - DEFROUTE=yes - DEVICE=interface0 - DNS1=1.1.1.1 - DNS2=3.3.3.3 - DOMAIN=aaaa cccc - GATEWAY=192.168.1.1 - HWADDR=11:22:33:44:55:66 - IPADDR=192.168.1.20 - NETMASK=255.255.0.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-interface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
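The "manual" fixture exercises v1 subnet control semantics end to end: `control: manual` suppresses automatic bring-up, which reads as a `# control-manual` comment (in place of an `auto` stanza) in ENI, `ONBOOT=no` on RHEL, and `STARTMODE=manual` on openSUSE. Summarized below for the two values this fixture exercises — a reading aid, not renderer code:

    # v1 'control' -> per-renderer bring-up behaviour, per the fixture above.
    CONTROL_EFFECTS = {
        "auto": {
            "eni": "auto <iface>",
            "rhel": "ONBOOT=yes",
            "opensuse": "STARTMODE=auto",
        },
        "manual": {
            "eni": "# control-manual <iface>",  # no 'auto' stanza emitted
            "rhel": "ONBOOT=no",
            "opensuse": "STARTMODE=manual",
        },
    }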
- - [connection] - id=cloud-init interface0 - uuid=8b6862ed-dbd6-5830-93f7-a91451c13828 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=11:22:33:44:55:66 - - [ipv4] - method=manual - may-fail=false - address1=192.168.1.20/16 - gateway=192.168.1.1 - dns=3.3.3.3;1.1.1.1; - dns-search=cccc;aaaa; - - """ - ) - }, - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: physical - name: interface0 - mac_address: "11:22:33:44:55:66" - subnets: - - type: static - address: 192.168.1.20/16 - gateway: 192.168.1.1 - dns_nameservers: - - 3.3.3.3 - dns_search: - - cccc - - type: nameserver - interface: interface0 - address: - - 1.1.1.1 - search: - - aaaa - - type: nameserver - address: - - 2.2.2.2 - - FEDC::1 - search: - - bbbb - """ - ), - }, - "v2-dev-name-via-mac-lookup": { - "expected_sysconfig_rhel": { - "ifcfg-eth0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth0 - HWADDR=cf:d6:af:48:e8:80 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - }, - "yaml": textwrap.dedent( - """\ - version: 2 - ethernets: - nic0: - match: - macaddress: 'cf:d6:af:48:e8:80' - """ - ), - }, - "v2-mixed-routes": { - "expected_network_manager": { - "cloud-init-eth0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth0 - uuid=1dd9a779-d327-56e1-8454-c65e2556c12c - autoconnect-priority=120 - type=ethernet - interface-name=eth0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mtu=500 - - [ipv4] - method=auto - may-fail=true - route1=169.254.42.42/32,62.210.0.1 - route1_options=mtu=400 - route2=169.254.42.43/32,62.210.0.2 - route2_options=mtu=200 - address1=192.168.1.20/16 - dns=8.8.8.8; - dns-search=lab;home; - - [ipv6] - route1=::/0,fe80::dc00:ff:fe20:186 - route1_options=mtu=300 - route2=fe80::dc00:ff:fe20:188/64,fe80::dc00:ff:fe20:187 - route2_options=mtu=100 - method=auto - may-fail=true - address1=2001:bc8:1210:232:dc00:ff:fe20:185/64 - dns=FEDC::1; - dns-search=lab;home; - - """ - ) - }, - "yaml": textwrap.dedent( - """\ - version: 2 - ethernets: - eth0: - dhcp4: true - dhcp6: true - mtu: 500 - nameservers: - search: [lab, home] - addresses: [8.8.8.8, "FEDC::1"] - routes: - - to: 169.254.42.42/32 - via: 62.210.0.1 - mtu: 400 - - via: fe80::dc00:ff:fe20:186 - to: ::/0 - mtu: 300 - - to: 169.254.42.43/32 - via: 62.210.0.2 - mtu: 200 - - via: fe80::dc00:ff:fe20:187 - to: fe80::dc00:ff:fe20:188 - mtu: 100 - addresses: - - 192.168.1.20/16 - - 2001:bc8:1210:232:dc00:ff:fe20:185/64 - """ - ), - }, - "v2-dns": { - "expected_networkd": textwrap.dedent( - """\ - [Address] - Address=192.168.1.20/16 - - [Address] - Address=2001:bc8:1210:232:dc00:ff:fe20:185/64 - - [Match] - Name=eth0 - - [Network] - DHCP=no - DNS=8.8.8.8 FEDC::1 - Domains=lab home - """ - ), - "expected_eni": textwrap.dedent( - """\ - # This file is generated from information provided by the datasource. Changes - # to it will not persist across an instance reboot. 
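The v1-dns fixture distinguishes two scopes of `type: nameserver` entries: one carrying `interface: interface0` (its servers and search domains merge into that interface's config) and global ones (which end up on the `lo` stanzas in the ENI output above). A sketch of that classification step, under the assumption that scope is decided solely by the presence of an `interface` key; this is not cloud-init's parser:

    def split_nameservers(config: list) -> tuple:
        """Separate interface-scoped from global v1 nameserver entries."""
        scoped, global_ns = [], []
        for item in config:
            if item.get("type") != "nameserver":
                continue
            (scoped if "interface" in item else global_ns).append(item)
        return scoped, global_ns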
To disable cloud-init's - # network configuration capabilities, write a file - # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: - # network: {config: disabled} - auto lo - iface lo inet loopback - - auto eth0 - iface eth0 inet static - address 192.168.1.20/16 - dns-nameservers 8.8.8.8 - dns-search lab home - - # control-alias eth0 - iface eth0 inet6 static - address 2001:bc8:1210:232:dc00:ff:fe20:185/64 - dns-nameservers FEDC::1 - dns-search lab home - """ # noqa: E501 - ), - "expected_sysconfig_opensuse": { - "ifcfg-eth0": textwrap.dedent( - """\ - # Created by cloud-init automatically, do not edit. - # - BOOTPROTO=static - IPADDR=192.168.1.20 - IPADDR6=2001:bc8:1210:232:dc00:ff:fe20:185/64 - NETMASK=255.255.0.0 - STARTMODE=auto - """ - ) - }, - "expected_sysconfig_rhel": { - "ifcfg-eth0": textwrap.dedent( - """\ - # Created by cloud-init automatically, do not edit. - # - BOOTPROTO=none - DEVICE=eth0 - DNS1=8.8.8.8 - DNS2=FEDC::1 - DOMAIN="lab home" - IPADDR=192.168.1.20 - IPV6ADDR=2001:bc8:1210:232:dc00:ff:fe20:185/64 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - NETMASK=255.255.0.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ) - }, - "expected_network_manager": { - "cloud-init-eth0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth0 - uuid=1dd9a779-d327-56e1-8454-c65e2556c12c - autoconnect-priority=120 - type=ethernet - interface-name=eth0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv4] - method=manual - may-fail=false - address1=192.168.1.20/16 - dns=8.8.8.8; - dns-search=lab;home; - - [ipv6] - method=manual - may-fail=false - address1=2001:bc8:1210:232:dc00:ff:fe20:185/64 - dns=FEDC::1; - dns-search=lab;home; - - """ - ) - }, - "yaml": textwrap.dedent( - """\ - version: 2 - ethernets: - eth0: - nameservers: - search: [lab, home] - addresses: [8.8.8.8, "FEDC::1"] - addresses: - - 192.168.1.20/16 - - 2001:bc8:1210:232:dc00:ff:fe20:185/64 - """ - ), - }, - "v2-dns-no-if-ips": { - "expected_network_manager": { - "cloud-init-eth0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth0 - uuid=1dd9a779-d327-56e1-8454-c65e2556c12c - autoconnect-priority=120 - type=ethernet - interface-name=eth0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv4] - method=auto - may-fail=true - dns=8.8.8.8; - dns-search=lab;home; - - [ipv6] - method=auto - may-fail=true - dns=FEDC::1; - dns-search=lab;home; - - """ - ) - }, - "yaml": textwrap.dedent( - """\ - version: 2 - ethernets: - eth0: - dhcp4: true - dhcp6: true - nameservers: - search: [lab, home] - addresses: [8.8.8.8, "FEDC::1"] - """ - ), - }, - "v2-dns-no-dhcp": { - "expected_network_manager": { - "cloud-init-eth0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init eth0 - uuid=1dd9a779-d327-56e1-8454-c65e2556c12c - autoconnect-priority=120 - type=ethernet - interface-name=eth0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - """ - ) + { + "in_data": { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [ + { + "network_id": "public-ipv4", + "type": "ipv4", + "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + } + ], + "ip_address": "172.19.1.34", + "id": "network0", + }, + { + "network_id": "public-ipv6-a", + "type": "ipv6", + "netmask": "", + "link": "tap1a81968a-79", + "routes": [ + { + "gateway": "2001:DB8::1", + "netmask": "::", + "network": "::", + } + ], + "ip_address": "2001:DB8::10", + "id": "network1", + }, + { + "network_id": "public-ipv6-b", + "type": "ipv6", + "netmask": "64", + "link": "tap1a81968a-79", + "routes": [], + "ip_address": "2001:DB9::10", + "id": "network2", + }, + { + "network_id": "public-ipv6-c", + "type": "ipv6", + "netmask": "64", + "link": "tap1a81968a-79", + "routes": [], + "ip_address": "2001:DB10::10", + "id": "network3", + }, + ], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, + "type": "bridge", + "id": "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", + }, + ], }, - "yaml": textwrap.dedent( - """\ - version: 2 - ethernets: - eth0: - nameservers: - search: [lab, home] - addresses: [8.8.8.8, "FEDC::1"] - """ - ), + "in_macs": { + "fa:16:3e:ed:9a:59": "eth0", + }, + "out_sysconfig_opensuse": [ + ( + "etc/sysconfig/network/ifcfg-eth0", + """ +# Created by cloud-init automatically, do not edit. +# +BOOTPROTO=static +IPADDR=172.19.1.34 +IPADDR6=2001:DB8::10/64 +IPADDR6_1=2001:DB9::10/64 +IPADDR6_2=2001:DB10::10/64 +LLADDR=fa:16:3e:ed:9a:59 +NETMASK=255.255.252.0 +STARTMODE=auto +""".lstrip(), + ), + ( + "etc/resolv.conf", + """ +; Created by cloud-init automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip(), + ), + ( + "etc/NetworkManager/conf.d/99-cloud-init.conf", + """ +# Created by cloud-init automatically, do not edit. +# +[main] +dns = none +""".lstrip(), + ), + ( + "etc/udev/rules.d/85-persistent-net-cloud-init.rules", + "".join( + [ + 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n', + ] + ), + ), + ], + "out_sysconfig_rhel": [ + ( + "etc/sysconfig/network-scripts/ifcfg-eth0", + """ +# Created by cloud-init automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=172.19.3.254 +HWADDR=fa:16:3e:ed:9a:59 +IPADDR=172.19.1.34 +IPV6ADDR=2001:DB8::10/64 +IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" +IPV6INIT=yes +IPV6_AUTOCONF=no +IPV6_DEFAULTGW=2001:DB8::1 +IPV6_FORCE_ACCEPT_RA=no +NETMASK=255.255.252.0 +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""".lstrip(), + ), + ( + "etc/resolv.conf", + """ +; Created by cloud-init automatically, do not edit. +; +nameserver 172.19.0.12 +""".lstrip(), + ), + ( + "etc/NetworkManager/conf.d/99-cloud-init.conf", + """ +# Created by cloud-init automatically, do not edit. +# +[main] +dns = none +""".lstrip(), + ), + ( + "etc/udev/rules.d/70-persistent-net.rules", + "".join( + [ + 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n', + ] + ), + ), + ], }, - "v2-route-no-gateway": { - "expected_network_manager": { - "cloud-init-eth0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. 
Changes will be lost. +] - [connection] - id=cloud-init eth0 - uuid=1dd9a779-d327-56e1-8454-c65e2556c12c - autoconnect-priority=120 - type=ethernet - interface-name=eth0 +EXAMPLE_ENI = """ +auto lo +iface lo inet loopback + dns-nameservers 10.0.0.1 + dns-search foo.com - [user] - org.freedesktop.NetworkManager.origin=cloud-init +auto eth0 +iface eth0 inet static + address 1.2.3.12 + netmask 255.255.255.248 + broadcast 1.2.3.15 + gateway 1.2.3.9 + dns-nameservers 69.9.160.191 69.9.191.4 +auto eth1 +iface eth1 inet static + address 10.248.2.4 + netmask 255.255.255.248 + broadcast 10.248.2.7 +""" - [ethernet] +RENDERED_ENI = """ +auto lo +iface lo inet loopback + dns-nameservers 10.0.0.1 + dns-search foo.com - [ipv4] - method=auto - may-fail=false - route1=0.0.0.0/0 +auto eth0 +iface eth0 inet static + address 1.2.3.12/29 + broadcast 1.2.3.15 + dns-nameservers 69.9.160.191 69.9.191.4 + gateway 1.2.3.9 - """ - ) - }, - "yaml": textwrap.dedent( - """\ - version: 2 - ethernets: - eth0: - dhcp4: true - routes: - - to: "0.0.0.0/0" - """ - ), - }, -} +auto eth1 +iface eth1 inet static + address 10.248.2.4/29 + broadcast 10.248.2.7 +""".lstrip() CONFIG_V1_EXPLICIT_LOOPBACK = { @@ -4773,14 +1326,14 @@ def sys_dev_path(devname, path=""): mock_sys_dev_path.side_effect = sys_dev_path -class TestGenerateFallbackConfig(CiTestCase): - def setUp(self): - super(TestGenerateFallbackConfig, self).setUp() - self.add_patch( +class TestGenerateFallbackConfig: + @pytest.fixture(autouse=True) + def setup(self, mocker, tmpdir_factory): + mocker.patch( "cloudinit.util.get_cmdline", - "m_get_cmdline", return_value="root=/dev/sda1", ) + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @@ -4836,7 +1389,7 @@ def test_device_driver_v2( }, "version": 2, } - self.assertEqual(expected, network_cfg) + assert expected == network_cfg @mock.patch("cloudinit.net.openvswitch_is_installed", return_value=False) @mock.patch("cloudinit.net.sys_dev_path") @@ -4901,7 +1454,7 @@ def test_device_driver( ) renderer.render_network_state(ns, target=render_dir) - self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces"))) + assert os.path.exists(os.path.join(render_dir, "interfaces")) with open(os.path.join(render_dir, "interfaces")) as fh: contents = fh.read() print(contents) @@ -4915,9 +1468,9 @@ def test_device_driver( # control-alias eth0 iface eth0 inet6 dhcp """ - self.assertEqual(expected.lstrip(), contents.lstrip()) + assert expected.lstrip() == contents.lstrip() - self.assertTrue(os.path.exists(os.path.join(render_dir, "netrules"))) + assert os.path.exists(os.path.join(render_dir, "netrules")) with open(os.path.join(render_dir, "netrules")) as fh: contents = fh.read() print(contents) @@ -4928,7 +1481,7 @@ def test_device_driver( 'ATTR{address}=="00:11:22:33:44:55"', 'NAME="eth0"', ] - self.assertEqual(", ".join(expected_rule) + "\n", contents.lstrip()) + assert ", ".join(expected_rule) + "\n" == contents.lstrip() @mock.patch("cloudinit.net.openvswitch_is_installed", return_value=False) @mock.patch("cloudinit.net.sys_dev_path") @@ -4993,7 +1546,7 @@ def test_hv_netvsc_vf_filter( ) renderer.render_network_state(ns, target=render_dir) - self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces"))) + assert os.path.exists(os.path.join(render_dir, "interfaces")) with open(os.path.join(render_dir, "interfaces")) as fh: contents = fh.read() print(contents) @@ -5007,9 +1560,9 @@ def test_hv_netvsc_vf_filter( # 
control-alias eth1 iface eth1 inet6 dhcp """ - self.assertEqual(expected.lstrip(), contents.lstrip()) + assert expected.lstrip() == contents.lstrip() - self.assertTrue(os.path.exists(os.path.join(render_dir, "netrules"))) + assert os.path.exists(os.path.join(render_dir, "netrules")) with open(os.path.join(render_dir, "netrules")) as fh: contents = fh.read() print(contents) @@ -5020,7 +1573,7 @@ def test_hv_netvsc_vf_filter( 'ATTR{address}=="00:11:22:33:44:55"', 'NAME="eth1"', ] - self.assertEqual(", ".join(expected_rule) + "\n", contents.lstrip()) + assert ", ".join(expected_rule) + "\n" == contents.lstrip() @mock.patch("cloudinit.util.get_cmdline") @mock.patch("cloudinit.util.udevadm_settle") @@ -5069,7 +1622,7 @@ def test_unstable_names( dev_attrs=devices, ) net.generate_fallback_config(config_driver=True) - self.assertEqual(1, mock_settle.call_count) + assert 1 == mock_settle.call_count @mock.patch("cloudinit.util.get_cmdline") @mock.patch("cloudinit.util.udevadm_settle") @@ -5118,21 +1671,23 @@ def test_unstable_names_disabled( dev_attrs=devices, ) net.generate_fallback_config(config_driver=True) - self.assertEqual(0, mock_settle.call_count) + assert 0 == mock_settle.call_count @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestRhelSysConfigRendering(CiTestCase): - with_logs = True - +class TestRhelSysConfigRendering: scripts_dir = "/etc/sysconfig/network-scripts" header = "# Created by cloud-init automatically, do not edit.\n#\n" expected_name = "expected_sysconfig_rhel" + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + def _get_renderer(self): distro_cls = distros.fetch("rhel") return sysconfig.Renderer( @@ -5164,7 +1719,6 @@ def _try_load(f): # route6- * files aren't shell content, but iproute2 params return f - orig_maxdiff = self.maxDiff expected_d = dict( (os.path.join(self.scripts_dir, k), _try_load(v)) for k, v in expected.items() @@ -5176,11 +1730,7 @@ def _try_load(f): for k, v in found.items() if k.startswith(self.scripts_dir) ) - try: - self.maxDiff = None - self.assertEqual(expected_d, scripts_found) - finally: - self.maxDiff = orig_maxdiff + assert expected_d == scripts_found def _assert_headers(self, found): missing = [ @@ -5232,12 +1782,11 @@ def test_default_generation( DHCPV6C=yes HWADDR=07-1c-c6-75-a4-be IPV6INIT=yes -NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no """.lstrip() - self.assertEqual(expected_content, content) + assert expected_content == content def test_multiple_ipv4_default_gateways(self): """ValueError is raised when duplicate ipv4 gateways exist.""" @@ -5282,9 +1831,9 @@ def test_multiple_ipv4_default_gateways(self): network_cfg, skip_broken=False ) renderer = self._get_renderer() - with self.assertRaises(ValueError): + with pytest.raises(ValueError): renderer.render_network_state(ns, target=render_dir) - self.assertEqual([], os.listdir(render_dir)) + assert [] == os.listdir(render_dir) def test_multiple_ipv6_default_gateways(self): """ValueError is raised when duplicate ipv6 gateways exist.""" @@ -5329,9 +1878,9 @@ def test_multiple_ipv6_default_gateways(self): network_cfg, skip_broken=False ) renderer = self._get_renderer() - with self.assertRaises(ValueError): + with pytest.raises(ValueError): renderer.render_network_state(ns, target=render_dir) - self.assertEqual([], os.listdir(render_dir)) + assert [] == os.listdir(render_dir) def test_invalid_network_mask_ipv6(self): net_json = { @@ -5365,7 
+1914,7 @@ def test_invalid_network_mask_ipv6(self): } macs = {"fa:16:3e:ed:9a:59": "eth0"} network_cfg = openstack.convert_net_json(net_json, known_macs=macs) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): network_state.parse_net_config_data(network_cfg, skip_broken=False) def test_invalid_network_mask_ipv4(self): @@ -5400,7 +1949,7 @@ def test_invalid_network_mask_ipv4(self): } macs = {"fa:16:3e:ed:9a:59": "eth0"} network_cfg = openstack.convert_net_json(net_json, known_macs=macs) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): network_state.parse_net_config_data(network_cfg, skip_broken=False) def test_openstack_rendering_samples(self): @@ -5423,17 +1972,17 @@ def test_openstack_rendering_samples(self): "out_sysconfig_rhel", [] ): with open(os.path.join(render_dir, fn)) as fh: - self.assertEqual(expected_content, fh.read()) + assert expected_content == fh.read() def test_network_config_v1_samples(self): ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET) - render_dir = self.tmp_path("render") + render_dir = os.path.join(self.tmp_dir(), "render") os.makedirs(render_dir) renderer = self._get_renderer() renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) nspath = "/etc/sysconfig/network-scripts/" - self.assertNotIn(nspath + "ifcfg-lo", found.keys()) + assert nspath + "ifcfg-lo" not in found.keys() expected = """\ # Created by cloud-init automatically, do not edit. # @@ -5444,26 +1993,25 @@ def test_network_config_v1_samples(self): HWADDR=52:54:00:12:34:00 IPADDR=10.0.2.15 NETMASK=255.255.255.0 -NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no """ - self.assertEqual(expected, found[nspath + "ifcfg-interface0"]) + assert expected == found[nspath + "ifcfg-interface0"] # The configuration has no nameserver information make sure we # do not write the resolv.conf file respath = "/etc/resolv.conf" - self.assertNotIn(respath, found.keys()) + assert respath not in found.keys() def test_network_config_v1_multi_iface_samples(self): ns = network_state.parse_net_config_data(CONFIG_V1_MULTI_IFACE) - render_dir = self.tmp_path("render") + render_dir = os.path.join(self.tmp_dir(), "render") os.makedirs(render_dir) renderer = self._get_renderer() renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) nspath = "/etc/sysconfig/network-scripts/" - self.assertNotIn(nspath + "ifcfg-lo", found.keys()) + assert nspath + "ifcfg-lo" not in found.keys() expected_i1 = """\ # Created by cloud-init automatically, do not edit. # @@ -5475,12 +2023,11 @@ def test_network_config_v1_multi_iface_samples(self): IPADDR=51.68.89.122 MTU=1500 NETMASK=255.255.240.0 -NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no """ - self.assertEqual(expected_i1, found[nspath + "ifcfg-eth0"]) + assert expected_i1 == found[nspath + "ifcfg-eth0"] expected_i2 = """\ # Created by cloud-init automatically, do not edit. 
# @@ -5489,16 +2036,15 @@ def test_network_config_v1_multi_iface_samples(self): DHCLIENT_SET_DEFAULT_ROUTE=no HWADDR=fa:16:3e:b1:ca:29 MTU=9000 -NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no """ - self.assertEqual(expected_i2, found[nspath + "ifcfg-eth1"]) + assert expected_i2 == found[nspath + "ifcfg-eth1"] def test_config_with_explicit_loopback(self): ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) - render_dir = self.tmp_path("render") + render_dir = os.path.join(self.tmp_dir(), "render") os.makedirs(render_dir) # write an etc/resolv.conf and expect it to not be modified resolvconf = os.path.join(render_dir, "etc/resolv.conf") @@ -5508,107 +2054,109 @@ def test_config_with_explicit_loopback(self): renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) nspath = "/etc/sysconfig/network-scripts/" - self.assertNotIn(nspath + "ifcfg-lo", found.keys()) + assert nspath + "ifcfg-lo" not in found.keys() expected = """\ # Created by cloud-init automatically, do not edit. # BOOTPROTO=dhcp DEVICE=eth0 -NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no """ - self.assertEqual(expected, found[nspath + "ifcfg-eth0"]) + assert expected == found[nspath + "ifcfg-eth0"] # a dhcp only config should not modify resolv.conf - self.assertEqual(resolvconf_content, found["/etc/resolv.conf"]) - - def test_bond_config(self): - entry = NETWORK_CONFIGS["bond"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_vlan_config(self): - entry = NETWORK_CONFIGS["vlan"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_bridge_config(self): - entry = NETWORK_CONFIGS["bridge"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_manual_config(self): - entry = NETWORK_CONFIGS["manual"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_all_config(self): - entry = NETWORK_CONFIGS["all"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - self.assertNotIn( - "WARNING: Network config: ignoring eth0.101 device-level mtu", - self.logs.getvalue(), - ) - - def test_small_config_v1(self): - entry = NETWORK_CONFIGS["small_v1"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_small_config_v2(self): - entry = NETWORK_CONFIGS["small_v2"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_v4_and_v6_static_config(self): - entry = NETWORK_CONFIGS["v4_and_v6_static"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - expected_msg = ( - "WARNING: Network config: ignoring iface0 device-level mtu:8999" - " because ipv4 subnet-level mtu:9000 provided." 
- ) - self.assertIn(expected_msg, self.logs.getvalue()) - - def test_dhcpv6_only_config(self): - entry = NETWORK_CONFIGS["dhcpv6_only"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) + assert resolvconf_content == found["/etc/resolv.conf"] - def test_dhcpv6_accept_ra_config_v1(self): - entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] + @pytest.mark.parametrize( + "expected_name,yaml_version", + [ + ("bond_v1", "yaml"), + pytest.param( + "bond_v2", + "yaml", + marks=pytest.mark.xfail( + reason="Bond MAC address not rendered" + ), + ), + ("vlan_v1", "yaml"), + ("vlan_v2", "yaml"), + ("bridge", "yaml_v1"), + ("bridge", "yaml_v2"), + ("manual", "yaml"), + ("small_v1", "yaml"), + ("small_v2", "yaml"), + ("dhcpv6_only", "yaml_v1"), + ("dhcpv6_only", "yaml_v2"), + ("dhcpv6_accept_ra", "yaml_v1"), + ("dhcpv6_accept_ra", "yaml_v2"), + ("dhcpv6_reject_ra", "yaml_v1"), + ("dhcpv6_reject_ra", "yaml_v2"), + ("static6", "yaml_v1"), + ("static6", "yaml_v2"), + ("dhcpv6_stateless", "yaml"), + ("dhcpv6_stateful", "yaml"), + ("wakeonlan_disabled", "yaml_v2"), + ("wakeonlan_enabled", "yaml_v2"), + pytest.param( + "v1-dns", + "yaml", + marks=pytest.mark.xfail( + reason="sysconfig should render interface-level DNS" + ), + ), + ("v2-dns", "yaml"), + pytest.param( + "large_v2", + "yaml", + marks=pytest.mark.xfail( + reason="Bond and Bridge MAC address not rendered" + ), + ), + ], + ) + def test_config(self, expected_name, yaml_version): + entry = NETWORK_CONFIGS[expected_name] found = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) + network_config=yaml.safe_load(entry[yaml_version]) ) self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) - def test_dhcpv6_accept_ra_config_v2(self): - entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] + def test_large_v1_config(self, caplog): + entry = NETWORK_CONFIGS["large_v1"] found = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) + network_config=yaml.safe_load(entry["yaml"]) ) self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + assert ( + "Network config: ignoring eth0.101 device-level mtu" + not in caplog.text + ) - def test_dhcpv6_reject_ra_config_v1(self): - entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] + @pytest.mark.parametrize( + "yaml_file,network_config", + [ + ("yaml_v1", "v1_ipv4_and_ipv6_static"), + ("yaml_v2", "v2_ipv4_and_ipv6_static"), + ], + ) + def test_ipv4_and_ipv6_static_config( + self, yaml_file, network_config, caplog + ): + entry = NETWORK_CONFIGS[network_config] found = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) + network_config=yaml.safe_load(entry[yaml_file]) ) self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + expected_msg = ( + "Network config: ignoring iface0 device-level mtu:8999" + " because ipv4 subnet-level mtu:9000 provided." 
+ ) + if yaml_file == "yaml_v1": + assert expected_msg in caplog.text def test_stattic6_from_json(self): net_json = { @@ -5649,70 +2197,28 @@ def test_stattic6_from_json(self): ], "links": [ { - "ethernet_mac_address": "fa:16:3e:ed:9a:59", - "mtu": None, - "type": "bridge", - "id": "tap1a81968a-79", - "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", - }, - ], - } - macs = {"fa:16:3e:ed:9a:59": "eth0"} - render_dir = self.tmp_dir() - network_cfg = openstack.convert_net_json(net_json, known_macs=macs) - ns = network_state.parse_net_config_data( - network_cfg, skip_broken=False - ) - renderer = self._get_renderer() - with self.assertRaises(ValueError): - renderer.render_network_state(ns, target=render_dir) - self.assertEqual([], os.listdir(render_dir)) - - def test_static6_from_yaml(self): - entry = NETWORK_CONFIGS["static6"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_dhcpv6_reject_ra_config_v2(self): - entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] - found = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) - ) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_dhcpv6_stateless_config(self): - entry = NETWORK_CONFIGS["dhcpv6_stateless"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_dhcpv6_stateful_config(self): - entry = NETWORK_CONFIGS["dhcpv6_stateful"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_wakeonlan_disabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_disabled"] - found = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) - ) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_wakeonlan_enabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_enabled"] - found = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, + "type": "bridge", + "id": "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", + }, + ], + } + macs = {"fa:16:3e:ed:9a:59": "eth0"} + render_dir = self.tmp_dir() + network_cfg = openstack.convert_net_json(net_json, known_macs=macs) + ns = network_state.parse_net_config_data( + network_cfg, skip_broken=False ) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) + renderer = self._get_renderer() + with pytest.raises(ValueError): + renderer.render_network_state(ns, target=render_dir) + assert [] == os.listdir(render_dir) def test_netplan_dhcp_false_disable_dhcp_in_state(self): """netplan config with dhcp[46]: False should not add dhcp in state""" - net_config = yaml.load(NETPLAN_DHCP_FALSE) + net_config = yaml.safe_load(NETPLAN_DHCP_FALSE) ns = network_state.parse_net_config_data(net_config, skip_broken=False) dhcp_found = [ @@ -5722,21 +2228,7 @@ def test_netplan_dhcp_false_disable_dhcp_in_state(self): if "dhcp" in snet["type"] ] - self.assertEqual([], dhcp_found) - - @pytest.mark.xfail(reason="sysconfig should render interface-level DNS") - def test_v1_dns(self): - entry = NETWORK_CONFIGS["v1-dns"] - found = 
self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - # TODO: verify resolv.conf - - def test_v2_dns(self): - entry = NETWORK_CONFIGS["v2-dns"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) + assert [] == dhcp_found def test_netplan_dhcp_false_no_dhcp_in_sysconfig(self): """netplan cfg with dhcp[46]: False should not have bootproto=dhcp""" @@ -5761,7 +2253,6 @@ def test_netplan_dhcp_false_no_dhcp_in_sysconfig(self): IPV6_FORCE_ACCEPT_RA=no IPV6_DEFAULTGW=2001:db8::1 NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -5770,7 +2261,9 @@ def test_netplan_dhcp_false_no_dhcp_in_sysconfig(self): }, } - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + found = self._render_and_read( + network_config=yaml.safe_load(entry["yaml"]) + ) self._compare_files_to_expected(entry["expected_sysconfig"], found) self._assert_headers(found) @@ -5793,7 +2286,6 @@ def test_from_v2_vlan_mtu(self): """\ BOOTPROTO=none DEVICE=eno1 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -5806,7 +2298,6 @@ def test_from_v2_vlan_mtu(self): IPADDR=192.6.1.9 MTU=1495 NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes PHYSDEV=eno1 USERCTL=no @@ -5842,7 +2333,6 @@ def test_from_v2_bond_mtu(self): IPADDR=10.101.8.65 MTU=1334 NETMASK=255.255.255.192 - NM_CONTROLLED=no ONBOOT=yes TYPE=Bond USERCTL=no @@ -5854,7 +2344,6 @@ def test_from_v2_bond_mtu(self): BOOTPROTO=none DEVICE=enp0s0 MASTER=bond0 - NM_CONTROLLED=no ONBOOT=yes SLAVE=yes TYPE=Bond @@ -5867,7 +2356,6 @@ def test_from_v2_bond_mtu(self): BOOTPROTO=none DEVICE=enp0s1 MASTER=bond0 - NM_CONTROLLED=no ONBOOT=yes SLAVE=yes TYPE=Bond @@ -5898,7 +2386,6 @@ def test_from_v2_route_metric(self): DEVICE=eno1 HWADDR=07-1c-c6-75-a4-be METRIC=100 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -5989,7 +2476,6 @@ def test_from_v2_routes(self): IPV6_FORCE_ACCEPT_RA=no MTU=1400 NETMASK=255.255.248.0 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -6051,9 +2537,10 @@ def test_iface_name_from_device_with_matching_mac_address( mock_sys_dev_path, dev_attrs=devices, ) - entry = NETWORK_CONFIGS["v2-dev-name-via-mac-lookup"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + found = self._render_and_read( + network_config=yaml.safe_load(entry["yaml"]) + ) self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) @@ -6062,14 +2549,17 @@ def test_iface_name_from_device_with_matching_mac_address( "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestOpenSuseSysConfigRendering(CiTestCase): - with_logs = True +class TestOpenSuseSysConfigRendering: scripts_dir = "/etc/sysconfig/network" header = "# Created by cloud-init automatically, do not edit.\n#\n" expected_name = "expected_sysconfig_opensuse" + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + def _get_renderer(self): distro_cls = distros.fetch("opensuse") return sysconfig.Renderer( @@ -6092,7 +2582,6 @@ def _render_and_read(self, network_config=None, state=None, dir=None): return dir2dict(dir) def _compare_files_to_expected(self, expected, found): - orig_maxdiff = self.maxDiff expected_d = dict( (os.path.join(self.scripts_dir, k), 
util.load_shell_content(v)) for k, v in expected.items() @@ -6104,11 +2593,7 @@ def _compare_files_to_expected(self, expected, found): for k, v in found.items() if k.startswith(self.scripts_dir) ) - try: - self.maxDiff = None - self.assertEqual(expected_d, scripts_found) - finally: - self.maxDiff = orig_maxdiff + assert expected_d == scripts_found def _assert_headers(self, found): missing = [ @@ -6160,85 +2645,10 @@ def test_default_generation( LLADDR=07-1c-c6-75-a4-be STARTMODE=auto """.lstrip() - self.assertEqual(expected_content, content) - - # TODO(rjschwei): re-enable test once route writing is implemented - # for SUSE distros - # def test_multiple_ipv4_default_gateways(self): - # """ValueError is raised when duplicate ipv4 gateways exist.""" - # net_json = { - # "services": [{"type": "dns", "address": "172.19.0.12"}], - # "networks": [{ - # "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", - # "type": "ipv4", "netmask": "255.255.252.0", - # "link": "tap1a81968a-79", - # "routes": [{ - # "netmask": "0.0.0.0", - # "network": "0.0.0.0", - # "gateway": "172.19.3.254", - # }, { - # "netmask": "0.0.0.0", # A second default gateway - # "network": "0.0.0.0", - # "gateway": "172.20.3.254", - # }], - # "ip_address": "172.19.1.34", "id": "network0" - # }], - # "links": [ - # { - # "ethernet_mac_address": "fa:16:3e:ed:9a:59", - # "mtu": None, "type": "bridge", "id": - # "tap1a81968a-79", - # "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" - # }, - # ], - # } - # macs = {'fa:16:3e:ed:9a:59': 'eth0'} - # render_dir = self.tmp_dir() - # network_cfg = openstack.convert_net_json(net_json, known_macs=macs) # noqa: E501 - # ns = network_state.parse_net_config_data(network_cfg, - # skip_broken=False) - # renderer = self._get_renderer() - # with self.assertRaises(ValueError): - # renderer.render_network_state(ns, target=render_dir) - # self.assertEqual([], os.listdir(render_dir)) - # - # def test_multiple_ipv6_default_gateways(self): - # """ValueError is raised when duplicate ipv6 gateways exist.""" - # net_json = { - # "services": [{"type": "dns", "address": "172.19.0.12"}], - # "networks": [{ - # "network_id": "public-ipv6", - # "type": "ipv6", "netmask": "", - # "link": "tap1a81968a-79", - # "routes": [{ - # "gateway": "2001:DB8::1", - # "netmask": "::", - # "network": "::" - # }, { - # "gateway": "2001:DB9::1", - # "netmask": "::", - # "network": "::" - # }], - # "ip_address": "2001:DB8::10", "id": "network1" - # }], - # "links": [ - # { - # "ethernet_mac_address": "fa:16:3e:ed:9a:59", - # "mtu": None, "type": "bridge", "id": - # "tap1a81968a-79", - # "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" - # }, - # ], - # } - # macs = {'fa:16:3e:ed:9a:59': 'eth0'} - # render_dir = self.tmp_dir() - # network_cfg = openstack.convert_net_json(net_json, known_macs=macs) # noqa: E501 - # ns = network_state.parse_net_config_data(network_cfg, - # skip_broken=False) - # renderer = self._get_renderer() - # with self.assertRaises(ValueError): - # renderer.render_network_state(ns, target=render_dir) - # self.assertEqual([], os.listdir(render_dir)) + assert expected_content == content + + # TODO(rjschwei): re-add tests once route writing is implemented. 
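+    # As a stopgap reference, a minimal sketch of the shape such a test
+    # could take once re-added (assuming the duplicate-gateway net_json,
+    # macs and render_dir setup from the removed tests, and the same
+    # helpers the other renderer tests in this file already use):
+    #
+    #     network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+    #     ns = network_state.parse_net_config_data(
+    #         network_cfg, skip_broken=False
+    #     )
+    #     renderer = self._get_renderer()
+    #     # duplicate default gateways should be rejected outright
+    #     with pytest.raises(ValueError):
+    #         renderer.render_network_state(ns, target=render_dir)
+    #     assert [] == os.listdir(render_dir)
+    #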
+ # See git history for removed commented tests def test_openstack_rendering_samples(self): for os_sample in OS_SAMPLES: @@ -6260,17 +2670,17 @@ def test_openstack_rendering_samples(self): "out_sysconfig_opensuse", [] ): with open(os.path.join(render_dir, fn)) as fh: - self.assertEqual(expected_content, fh.read()) + assert expected_content == fh.read() def test_network_config_v1_samples(self): ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET) - render_dir = self.tmp_path("render") + render_dir = os.path.join(self.tmp_dir(), "render") os.makedirs(render_dir) renderer = self._get_renderer() renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) nspath = "/etc/sysconfig/network/" - self.assertNotIn(nspath + "ifcfg-lo", found.keys()) + assert nspath + "ifcfg-lo" not in found.keys() expected = """\ # Created by cloud-init automatically, do not edit. # @@ -6280,15 +2690,15 @@ def test_network_config_v1_samples(self): NETMASK=255.255.255.0 STARTMODE=auto """ - self.assertEqual(expected, found[nspath + "ifcfg-interface0"]) + assert expected == found[nspath + "ifcfg-interface0"] # The configuration has no nameserver information make sure we # do not write the resolv.conf file respath = "/etc/resolv.conf" - self.assertNotIn(respath, found.keys()) + assert respath not in found.keys() def test_config_with_explicit_loopback(self): ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) - render_dir = self.tmp_path("render") + render_dir = os.path.join(self.tmp_dir(), "render") os.makedirs(render_dir) # write an etc/resolv.conf and expect it to not be modified resolvconf = os.path.join(render_dir, "etc/resolv.conf") @@ -6298,157 +2708,89 @@ def test_config_with_explicit_loopback(self): renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) nspath = "/etc/sysconfig/network/" - self.assertNotIn(nspath + "ifcfg-lo", found.keys()) + assert nspath + "ifcfg-lo" not in found.keys() expected = """\ # Created by cloud-init automatically, do not edit. 
# BOOTPROTO=dhcp4 STARTMODE=auto """ - self.assertEqual(expected, found[nspath + "ifcfg-eth0"]) + assert expected == found[nspath + "ifcfg-eth0"] # a dhcp only config should not modify resolv.conf - self.assertEqual(resolvconf_content, found["/etc/resolv.conf"]) - - def test_bond_config(self): - expected_name = "expected_sysconfig_opensuse" - entry = NETWORK_CONFIGS["bond"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - for fname, contents in entry[expected_name].items(): - print(fname) - print(contents) - print() - print("-- expected ^ | v rendered --") - for fname, contents in found.items(): - print(fname) - print(contents) - print() - self._compare_files_to_expected(entry[expected_name], found) - self._assert_headers(found) - - def test_vlan_config(self): - entry = NETWORK_CONFIGS["vlan"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_bridge_config(self): - entry = NETWORK_CONFIGS["bridge"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_manual_config(self): - entry = NETWORK_CONFIGS["manual"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_all_config(self): - entry = NETWORK_CONFIGS["all"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - self.assertNotIn( - "WARNING: Network config: ignoring eth0.101 device-level mtu", - self.logs.getvalue(), - ) - - def test_small_config_v1(self): - entry = NETWORK_CONFIGS["small_v1"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_small_config_v1_suse(self): - entry = NETWORK_CONFIGS["small_v1_suse_dhcp6"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_small_config_v2(self): - entry = NETWORK_CONFIGS["small_v1"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - - def test_v4_and_v6_static_config(self): - entry = NETWORK_CONFIGS["v4_and_v6_static"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - expected_msg = ( - "WARNING: Network config: ignoring iface0 device-level mtu:8999" - " because ipv4 subnet-level mtu:9000 provided." 
-        )
-        self.assertIn(expected_msg, self.logs.getvalue())
-
-    def test_dhcpv6_only_config(self):
-        entry = NETWORK_CONFIGS["dhcpv6_only"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_simple_render_ipv6_slaac(self):
-        entry = NETWORK_CONFIGS["ipv6_slaac"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_dhcpv6_stateless_config(self):
-        entry = NETWORK_CONFIGS["dhcpv6_stateless"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
+        assert resolvconf_content == found["/etc/resolv.conf"]

-    def test_wakeonlan_disabled_config_v2(self):
-        entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+    @pytest.mark.parametrize(
+        "expected_name,yaml_name",
+        [
+            ("bond_v1", "yaml"),
+            pytest.param(
+                "bond_v2",
+                "yaml",
+                marks=pytest.mark.xfail(
+                    reason="Bond MAC address not rendered"
+                ),
+            ),
+            ("vlan_v1", "yaml"),
+            ("vlan_v2", "yaml"),
+            ("bridge", "yaml_v1"),
+            ("bridge", "yaml_v2"),
+            ("manual", "yaml"),
+            ("small_v1", "yaml"),
+            ("small_suse_dhcp6", "yaml_v1"),
+            ("small_suse_dhcp6", "yaml_v2"),
+            ("small_v2", "yaml"),
+            ("v1_ipv4_and_ipv6_static", "yaml_v1"),
+            ("v2_ipv4_and_ipv6_static", "yaml_v2"),
+            ("dhcpv6_only", "yaml_v1"),
+            ("dhcpv6_only", "yaml_v2"),
+            ("ipv6_slaac", "yaml"),
+            ("dhcpv6_stateless", "yaml"),
+            ("wakeonlan_disabled", "yaml_v2"),
+            ("wakeonlan_enabled", "yaml_v2"),
+            ("v4_and_v6", "yaml_v1"),
+            ("v4_and_v6", "yaml_v2"),
+            ("v6_and_v4", "yaml"),
+            ("v1-dns", "yaml"),
+            ("v2-dns", "yaml"),
+            pytest.param(
+                "large_v2",
+                "yaml",
+                marks=pytest.mark.xfail(
+                    reason="Bond and Bridge LLADDR not rendered"
+                ),
+            ),
+        ],
+    )
+    def test_config(
+        self,
+        expected_name,
+        yaml_name,
+    ):
+        entry = NETWORK_CONFIGS[expected_name]
         found = self._render_and_read(
-            network_config=yaml.load(entry["yaml_v2"])
+            network_config=yaml.safe_load(entry[yaml_name])
         )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

-    def test_wakeonlan_enabled_config_v2(self):
-        entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+    def test_large_v1_config(self, caplog):
+        entry = NETWORK_CONFIGS["large_v1"]
         found = self._render_and_read(
-            network_config=yaml.load(entry["yaml_v2"])
+            network_config=yaml.safe_load(entry["yaml"])
         )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)
-
-    def test_render_v4_and_v6(self):
-        entry = NETWORK_CONFIGS["v4_and_v6"]
-        found = 
self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) + assert ( + "Network config: ignoring eth0.101 device-level mtu" + not in caplog.text + ) @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestNetworkManagerRendering(CiTestCase): - with_logs = True - +class TestNetworkManagerRendering: scripts_dir = "/etc/NetworkManager/system-connections" conf_dir = "/etc/NetworkManager/conf.d" @@ -6469,6 +2811,10 @@ class TestNetworkManagerRendering(CiTestCase): ), } + @pytest.fixture(autouse=True) + def setup(self, tmpdir): + self.tmp_dir = lambda: str(tmpdir) + def _get_renderer(self): return network_manager.Renderer() @@ -6490,7 +2836,6 @@ def _render_and_read(self, network_config=None, state=None, dir=None): def _compare_files_to_expected( self, expected_scripts, expected_conf, found ): - orig_maxdiff = self.maxDiff conf_d = dict( (os.path.join(self.conf_dir, k), v) for k, v in expected_conf.items() @@ -6501,11 +2846,7 @@ def _compare_files_to_expected( ) expected_d = {**conf_d, **scripts_d} - try: - self.maxDiff = None - self.assertEqual(expected_d, found) - finally: - self.maxDiff = orig_maxdiff + assert expected_d == found @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.sys_dev_path") @@ -6586,11 +2927,11 @@ def test_openstack_rendering_samples(self): renderer.render_network_state(ns, target=render_dir) for fn, expected_content in os_sample.get(self.expected_name, []): with open(os.path.join(render_dir, fn)) as fh: - self.assertEqual(expected_content, fh.read()) + assert expected_content == fh.read() def test_network_config_v1_samples(self): ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET) - render_dir = self.tmp_path("render") + render_dir = os.path.join(self.tmp_dir(), "render") os.makedirs(render_dir) renderer = self._get_renderer() renderer.render_network_state(ns, target=render_dir) @@ -6627,7 +2968,7 @@ def test_network_config_v1_samples(self): ) def test_config_with_explicit_loopback(self): - render_dir = self.tmp_path("render") + render_dir = os.path.join(self.tmp_dir(), "render") os.makedirs(render_dir) ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) renderer = self._get_renderer() @@ -6662,164 +3003,94 @@ def test_config_with_explicit_loopback(self): found, ) - def test_bond_config(self): - entry = NETWORK_CONFIGS["bond"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_vlan_config(self): - entry = NETWORK_CONFIGS["vlan"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_bridge_config(self): - entry = NETWORK_CONFIGS["bridge"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_manual_config(self): - entry = NETWORK_CONFIGS["manual"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_all_config(self): - entry = NETWORK_CONFIGS["all"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - 
self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - self.assertNotIn( - "WARNING: Network config: ignoring eth0.101 device-level mtu", - self.logs.getvalue(), - ) - - def test_small_config_v1(self): - entry = NETWORK_CONFIGS["small_v1"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_small_config_v2(self): - entry = NETWORK_CONFIGS["small_v2"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found + @pytest.mark.parametrize( + "yaml_file,config", + [ + ("yaml_v1", "v1_ipv4_and_ipv6_static"), + ("yaml_v2", "v2_ipv4_and_ipv6_static"), + ], + ) + def test_ipv4_and_ipv6_static_config(self, yaml_file, config, caplog): + entry = NETWORK_CONFIGS[config] + found = self._render_and_read( + network_config=yaml.safe_load(entry[yaml_file]) ) - - def test_v4_and_v6_static_config(self): - entry = NETWORK_CONFIGS["v4_and_v6_static"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) self._compare_files_to_expected( entry[self.expected_name], self.expected_conf_d, found ) expected_msg = ( - "WARNING: Network config: ignoring iface0 device-level mtu:8999" + "Network config: ignoring iface0 device-level mtu:8999" " because ipv4 subnet-level mtu:9000 provided." ) - self.assertIn(expected_msg, self.logs.getvalue()) - - def test_dhcpv6_only_config(self): - entry = NETWORK_CONFIGS["dhcpv6_only"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_simple_render_ipv6_slaac(self): - entry = NETWORK_CONFIGS["ipv6_slaac"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_dhcpv6_stateless_config(self): - entry = NETWORK_CONFIGS["dhcpv6_stateless"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) + if yaml_file == "yaml_v1": + assert expected_msg in caplog.text - def test_wakeonlan_disabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_disabled"] + @pytest.mark.parametrize( + "expected_name,yaml_name", + [ + ("bond_v1", "yaml"), + pytest.param( + "bond_v2", + "yaml", + marks=pytest.mark.xfail( + reason="mii-monitor-interval not rendered." + ), + ), + ("vlan_v1", "yaml"), + ("vlan_v2", "yaml"), + ("bridge", "yaml_v1"), + ("bridge", "yaml_v2"), + ("manual", "yaml"), + ("small_v1", "yaml"), + ("small_v2", "yaml"), + ("dhcpv6_only", "yaml_v1"), + ("dhcpv6_only", "yaml_v2"), + ("ipv6_slaac", "yaml"), + ("dhcpv6_stateless", "yaml"), + ("wakeonlan_disabled", "yaml_v2"), + ("wakeonlan_enabled", "yaml_v2"), + ("v4_and_v6", "yaml_v1"), + ("v4_and_v6", "yaml_v2"), + ("v6_and_v4", "yaml"), + ("v1-dns", "yaml"), + ("v2-mixed-routes", "yaml"), + ("v2-dns", "yaml"), + ("v2-dns-no-if-ips", "yaml"), + ("v2-dns-no-dhcp", "yaml"), + ("v2-route-no-gateway", "yaml"), + pytest.param( + "large_v2", + "yaml", + marks=pytest.mark.xfail( + reason=( + "Bridge MAC and bond miimon not rendered. " + "Bond DNS not rendered. " + "DNS not rendered when DHCP is enabled." 
+ ), + ), + ), + ], + ) + def test_config(self, expected_name, yaml_name): + entry = NETWORK_CONFIGS[expected_name] found = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) + network_config=yaml.safe_load(entry[yaml_name]) ) self._compare_files_to_expected( entry[self.expected_name], self.expected_conf_d, found ) - def test_wakeonlan_enabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_enabled"] + def test_large_v1_config(self, caplog): + entry = NETWORK_CONFIGS["large_v1"] found = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) + network_config=yaml.safe_load(entry["yaml"]) ) self._compare_files_to_expected( entry[self.expected_name], self.expected_conf_d, found ) - - def test_render_v4_and_v6(self): - entry = NETWORK_CONFIGS["v4_and_v6"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_render_v6_and_v4(self): - entry = NETWORK_CONFIGS["v6_and_v4"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v1_dns(self): - entry = NETWORK_CONFIGS["v1-dns"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v2_mixed_routes(self): - entry = NETWORK_CONFIGS["v2-mixed-routes"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v2_dns(self): - entry = NETWORK_CONFIGS["v2-dns"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v2_dns_no_ips(self): - entry = NETWORK_CONFIGS["v2-dns-no-if-ips"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v2_dns_no_dhcp(self): - entry = NETWORK_CONFIGS["v2-dns-no-dhcp"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v2_route_no_gateway(self): - entry = NETWORK_CONFIGS["v2-route-no-gateway"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found + assert ( + "Network config: ignoring eth0.101 device-level mtu" + not in caplog.text ) @@ -6827,7 +3098,11 @@ def test_v2_route_no_gateway(self): "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestEniNetRendering(CiTestCase): +class TestEniNetRendering: + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @@ -6857,7 +3132,7 @@ def test_default_generation( ) renderer.render_network_state(ns, target=render_dir) - self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces"))) + assert os.path.exists(os.path.join(render_dir, "interfaces")) with open(os.path.join(render_dir, 
"interfaces")) as fh: contents = fh.read() @@ -6871,7 +3146,7 @@ def test_default_generation( # control-alias eth1000 iface eth1000 inet6 dhcp """ - self.assertEqual(expected.lstrip(), contents.lstrip()) + assert expected.lstrip() == contents.lstrip() def test_config_with_explicit_loopback(self): tmp_dir = self.tmp_dir() @@ -6885,9 +3160,7 @@ def test_config_with_explicit_loopback(self): auto eth0 iface eth0 inet dhcp """ - self.assertEqual( - expected, dir2dict(tmp_dir)["/etc/network/interfaces"] - ) + assert expected == dir2dict(tmp_dir)["/etc/network/interfaces"] def test_v2_route_metric_to_eni(self): """Network v2 route-metric overrides are preserved in eni output""" @@ -6912,9 +3185,9 @@ def test_v2_route_metric_to_eni(self): v2_input = {"version": 2, "ethernets": {"eth0": dhcp_cfg}} ns = network_state.parse_net_config_data(v2_input) renderer.render_network_state(ns, target=tmp_dir) - self.assertEqual( - expected_tmpl.format(suffix=suffix), - dir2dict(tmp_dir)["/etc/network/interfaces"], + assert ( + expected_tmpl.format(suffix=suffix) + == dir2dict(tmp_dir)["/etc/network/interfaces"] ) @@ -7280,7 +3553,7 @@ def test_render( if network_cfg is None: network_cfg = net.generate_fallback_config() else: - network_cfg = yaml.load(network_cfg) + network_cfg = yaml.safe_load(network_cfg) assert isinstance(network_cfg, dict) ns = network_state.parse_net_config_data( @@ -7301,11 +3574,11 @@ def test_render( contents = fh.read() print(contents) - assert yaml.load(expected) == yaml.load(contents) + assert yaml.safe_load(expected) == yaml.safe_load(contents) assert 1, mock_clean_default.call_count -class TestNetplanCleanDefault(CiTestCase): +class TestNetplanCleanDefault: snapd_known_path = "etc/netplan/00-snapd-config.yaml" snapd_known_content = textwrap.dedent( """\ @@ -7330,6 +3603,10 @@ class TestNetplanCleanDefault(CiTestCase): "run/systemd/generator/netplan.stamp": "stamp", } + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + def test_clean_known_config_cleaned(self): content = { self.snapd_known_path: self.snapd_known_content, @@ -7339,7 +3616,7 @@ def test_clean_known_config_cleaned(self): files = sorted(populate_dir(tmpd, content)) netplan._clean_default(target=tmpd) found = [t for t in files if os.path.exists(t)] - self.assertEqual([], found) + assert [] == found def test_clean_unknown_config_not_cleaned(self): content = { @@ -7351,7 +3628,7 @@ def test_clean_unknown_config_not_cleaned(self): files = sorted(populate_dir(tmpd, content)) netplan._clean_default(target=tmpd) found = [t for t in files if os.path.exists(t)] - self.assertEqual(files, found) + assert files == found def test_clean_known_config_cleans_only_expected(self): astamp = "run/systemd/generator/another.stamp" @@ -7370,10 +3647,10 @@ def test_clean_known_config_cleans_only_expected(self): netplan._clean_default(target=tmpd) found = [t for t in files if os.path.exists(t)] expected = [subp.target_path(tmpd, f) for f in (astamp, anet, ayaml)] - self.assertEqual(sorted(expected), found) + assert sorted(expected) == found -class TestNetplanPostcommands(CiTestCase): +class TestNetplanPostcommands: mycfg = { "config": [ { @@ -7386,6 +3663,10 @@ class TestNetplanPostcommands(CiTestCase): "version": 1, } + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + @mock.patch.object(netplan.Renderer, "_netplan_generate") @mock.patch.object(netplan.Renderer, 
"_net_setup_link") @mock.patch("cloudinit.subp.subp") @@ -7405,13 +3686,16 @@ def test_netplan_render_calls_postcmds( mock_subp.side_effect = iter([subp.ProcessExecutionError]) renderer.render_network_state(ns, target=render_dir) - mock_netplan_generate.assert_called_with(run=True, same_content=False) + mock_netplan_generate.assert_called_with(run=True, config_changed=True) mock_net_setup_link.assert_called_with(run=True) + @mock.patch("cloudinit.util.get_cmdline") @mock.patch("cloudinit.util.SeLinuxGuard") @mock.patch.object(netplan, "get_devicelist") @mock.patch("cloudinit.subp.subp") - def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel): + def test_netplan_postcmds( + self, mock_subp, mock_devlist, mock_sel, m_get_cmdline + ): mock_sel.__enter__ = mock.Mock(return_value=False) mock_sel.__exit__ = mock.Mock() mock_devlist.side_effect = [["lo"]] @@ -7427,13 +3711,11 @@ def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel): ) mock_subp.side_effect = iter( [ - subp.ProcessExecutionError, ("", ""), ("", ""), ] ) expected = [ - mock.call(["netplan", "info"], capture=True), mock.call(["netplan", "generate"], capture=True), mock.call( [ @@ -7450,7 +3732,7 @@ def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel): mock_subp.assert_has_calls(expected) -class TestEniNetworkStateToEni(CiTestCase): +class TestEniNetworkStateToEni: mycfg = { "config": [ { @@ -7469,8 +3751,8 @@ def test_no_header(self): network_state=network_state.parse_net_config_data(self.mycfg), render_hwaddress=True, ) - self.assertIn(self.my_mac, rendered) - self.assertIn("hwaddress", rendered) + assert self.my_mac in rendered + assert "hwaddress" in rendered def test_with_header(self): header = "# hello world\n" @@ -7479,21 +3761,19 @@ def test_with_header(self): header=header, render_hwaddress=True, ) - self.assertIn(header, rendered) - self.assertIn(self.my_mac, rendered) + assert header in rendered + assert self.my_mac in rendered def test_no_hwaddress(self): rendered = eni.network_state_to_eni( network_state=network_state.parse_net_config_data(self.mycfg), render_hwaddress=False, ) - self.assertNotIn(self.my_mac, rendered) - self.assertNotIn("hwaddress", rendered) - + assert self.my_mac not in rendered + assert "hwaddress" not in rendered -class TestCmdlineConfigParsing(CiTestCase): - with_logs = True +class TestCmdlineConfigParsing: simple_cfg = { "config": [ { @@ -7507,15 +3787,19 @@ class TestCmdlineConfigParsing(CiTestCase): def test_cmdline_convert_dhcp(self): found = cmdline._klibc_to_config_entry(DHCP_CONTENT_1) - self.assertEqual(found, ("eth0", DHCP_EXPECTED_1)) + assert found == ("eth0", DHCP_EXPECTED_1) def test_cmdline_convert_dhcp6(self): found = cmdline._klibc_to_config_entry(DHCP6_CONTENT_1) - self.assertEqual(found, ("eno1", DHCP6_EXPECTED_1)) + assert found == ("eno1", DHCP6_EXPECTED_1) def test_cmdline_convert_static(self): - found = cmdline._klibc_to_config_entry(STATIC_CONTENT_1) - self.assertEqual(found, ("eth1", STATIC_EXPECTED_1)) + found1 = cmdline._klibc_to_config_entry(STATIC_CONTENT_1) + assert found1 == ("eth1", STATIC_EXPECTED_1) + found2 = cmdline._klibc_to_config_entry(STATIC_CONTENT_2) + assert found2 == ("eth1", STATIC_EXPECTED_1) + found3 = cmdline._klibc_to_config_entry(STATIC_CONTENT_3) + assert found3 == ("eth1", STATIC_EXPECTED_1) def test_config_from_cmdline_net_cfg(self): files = [] @@ -7542,46 +3826,50 @@ def test_config_from_cmdline_net_cfg(self): found = cmdline.config_from_klibc_net_cfg( files=files, mac_addrs=macs ) - self.assertEqual(found, 
expected) + assert found == expected def test_cmdline_with_b64(self): data = base64.b64encode(json.dumps(self.simple_cfg).encode()) encoded_text = data.decode() raw_cmdline = "ro network-config=" + encoded_text + " root=foo" found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) - self.assertEqual(found, self.simple_cfg) + assert found == self.simple_cfg def test_cmdline_with_net_config_disabled(self): raw_cmdline = "ro network-config=disabled root=foo" found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) - self.assertEqual(found, {"config": "disabled"}) + assert found == {"config": "disabled"} - def test_cmdline_with_net_config_unencoded_logs_error(self): + def test_cmdline_with_net_config_unencoded_logs_error(self, caplog): """network-config cannot be unencoded besides 'disabled'.""" raw_cmdline = "ro network-config={config:disabled} root=foo" found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) - self.assertIsNone(found) + assert found is None expected_log = ( - "ERROR: Expected base64 encoded kernel commandline parameter" + "Expected base64 encoded kernel command line parameter" " network-config. Ignoring network-config={config:disabled}." ) - self.assertIn(expected_log, self.logs.getvalue()) + assert expected_log in caplog.text def test_cmdline_with_b64_gz(self): data = _gzip_data(json.dumps(self.simple_cfg).encode()) encoded_text = base64.b64encode(data).decode() raw_cmdline = "ro network-config=" + encoded_text + " root=foo" found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) - self.assertEqual(found, self.simple_cfg) + assert found == self.simple_cfg -class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase): +class TestCmdlineKlibcNetworkConfigSource: macs = { "eth0": "14:02:ec:42:48:00", "eno1": "14:02:ec:42:48:01", } - def test_without_ip(self): + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + + def test_without_ip(self, fake_filesystem): content = { "/run/net-eth0.conf": DHCP_CONTENT_1, cmdline._OPEN_ISCSI_INTERFACE_FILE: "eth0\n", @@ -7589,70 +3877,61 @@ def test_without_ip(self): exp1 = copy.deepcopy(DHCP_EXPECTED_1) exp1["mac_address"] = self.macs["eth0"] - root = self.tmp_dir() - populate_dir(root, content) - self.reRoot(root) + populate_dir(fake_filesystem, content) src = cmdline.KlibcNetworkConfigSource( _cmdline="foo root=/root/bar", _mac_addrs=self.macs, ) - self.assertTrue(src.is_applicable()) + assert src.is_applicable() found = src.render_config() - self.assertEqual(found["version"], 1) - self.assertEqual(found["config"], [exp1]) + assert found["version"] == 1 + assert found["config"] == [exp1] - def test_with_ip(self): + def test_with_ip(self, fake_filesystem): content = {"/run/net-eth0.conf": DHCP_CONTENT_1} exp1 = copy.deepcopy(DHCP_EXPECTED_1) exp1["mac_address"] = self.macs["eth0"] - root = self.tmp_dir() - populate_dir(root, content) - self.reRoot(root) + populate_dir(fake_filesystem, content) src = cmdline.KlibcNetworkConfigSource( _cmdline="foo ip=dhcp", _mac_addrs=self.macs, ) - self.assertTrue(src.is_applicable()) + assert src.is_applicable() found = src.render_config() - self.assertEqual(found["version"], 1) - self.assertEqual(found["config"], [exp1]) + assert found["version"] == 1 + assert found["config"] == [exp1] - def test_with_ip6(self): + def test_with_ip6(self, fake_filesystem): content = {"/run/net6-eno1.conf": DHCP6_CONTENT_1} - root = self.tmp_dir() - populate_dir(root, content) - self.reRoot(root) + 
populate_dir(fake_filesystem, content) src = cmdline.KlibcNetworkConfigSource( _cmdline="foo ip6=dhcp root=/dev/sda", _mac_addrs=self.macs, ) - self.assertTrue(src.is_applicable()) + assert src.is_applicable() found = src.render_config() - self.assertEqual( - found, - { - "version": 1, - "config": [ - { - "type": "physical", - "name": "eno1", - "mac_address": self.macs["eno1"], - "subnets": [ - { - "dns_nameservers": ["2001:67c:1562:8010::2:1"], - "control": "manual", - "type": "dhcp6", - "netmask": "64", - } - ], - } - ], - }, - ) + assert found == { + "version": 1, + "config": [ + { + "type": "physical", + "name": "eno1", + "mac_address": self.macs["eno1"], + "subnets": [ + { + "dns_nameservers": ["2001:67c:1562:8010::2:1"], + "control": "manual", + "type": "dhcp6", + "netmask": "64", + } + ], + } + ], + } def test_with_no_ip_or_ip6(self): # if there is no ip= or ip6= on cmdline, return value should be None @@ -7663,7 +3942,7 @@ def test_with_no_ip_or_ip6(self): _cmdline="foo root=/dev/sda", _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() def test_with_faux_ip(self): content = {"net6-eno1.conf": DHCP6_CONTENT_1} @@ -7673,7 +3952,7 @@ def test_with_faux_ip(self): _cmdline="foo iscsi_target_ip=root=/dev/sda", _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() def test_empty_cmdline(self): content = {"net6-eno1.conf": DHCP6_CONTENT_1} @@ -7683,7 +3962,7 @@ def test_empty_cmdline(self): _cmdline="", _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() def test_whitespace_cmdline(self): content = {"net6-eno1.conf": DHCP6_CONTENT_1} @@ -7693,7 +3972,7 @@ def test_whitespace_cmdline(self): _cmdline=" ", _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() def test_cmdline_no_lhand(self): content = {"net6-eno1.conf": DHCP6_CONTENT_1} @@ -7703,7 +3982,7 @@ def test_cmdline_no_lhand(self): _cmdline="=wut", _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() def test_cmdline_embedded_ip(self): content = {"net6-eno1.conf": DHCP6_CONTENT_1} @@ -7713,9 +3992,9 @@ def test_cmdline_embedded_ip(self): _cmdline='opt="some things and ip=foo"', _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() - def test_with_both_ip_ip6(self): + def test_with_both_ip_ip6(self, fake_filesystem): content = { "/run/net-eth0.conf": DHCP_CONTENT_1, "/run/net6-eth0.conf": DHCP6_CONTENT_1.replace("eno1", "eth0"), @@ -7732,22 +4011,20 @@ def test_with_both_ip_ip6(self): ) expected = [eth0] - root = self.tmp_dir() - populate_dir(root, content) - self.reRoot(root) + populate_dir(fake_filesystem, content) src = cmdline.KlibcNetworkConfigSource( _cmdline="foo ip=dhcp ip6=dhcp", _mac_addrs=self.macs, ) - self.assertTrue(src.is_applicable()) + assert src.is_applicable() found = src.render_config() - self.assertEqual(found["version"], 1) - self.assertEqual(found["config"], expected) + assert found["version"] == 1 + assert found["config"] == expected -class TestReadInitramfsConfig(CiTestCase): +class TestReadInitramfsConfig: def _config_source_cls_mock(self, is_applicable, render_config=None): return lambda: mock.Mock( is_applicable=lambda: is_applicable, @@ -7756,7 +4033,7 @@ def _config_source_cls_mock(self, is_applicable, render_config=None): def test_no_sources(self): with mock.patch("cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", []): - 
self.assertIsNone(cmdline.read_initramfs_config()) + assert cmdline.read_initramfs_config() is None def test_no_applicable_sources(self): sources = [ @@ -7767,7 +4044,7 @@ def test_no_applicable_sources(self): with mock.patch( "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources ): - self.assertIsNone(cmdline.read_initramfs_config()) + assert cmdline.read_initramfs_config() is None def test_one_applicable_source(self): expected_config = object() @@ -7780,7 +4057,7 @@ def test_one_applicable_source(self): with mock.patch( "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources ): - self.assertEqual(expected_config, cmdline.read_initramfs_config()) + assert expected_config == cmdline.read_initramfs_config() def test_one_applicable_source_after_inapplicable_sources(self): expected_config = object() @@ -7795,7 +4072,7 @@ def test_one_applicable_source_after_inapplicable_sources(self): with mock.patch( "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources ): - self.assertEqual(expected_config, cmdline.read_initramfs_config()) + assert expected_config == cmdline.read_initramfs_config() def test_first_applicable_source_is_used(self): first_config, second_config = object(), object() @@ -7812,10 +4089,10 @@ def test_first_applicable_source_is_used(self): with mock.patch( "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources ): - self.assertEqual(first_config, cmdline.read_initramfs_config()) + assert first_config == cmdline.read_initramfs_config() -class TestNetplanRoundTrip(CiTestCase): +class TestNetplanRoundTrip: NETPLAN_INFO_OUT = textwrap.dedent( """ netplan.io: @@ -7826,10 +4103,13 @@ class TestNetplanRoundTrip(CiTestCase): """ ) - def setUp(self): - super(TestNetplanRoundTrip, self).setUp() - self.add_patch("cloudinit.net.netplan.subp.subp", "m_subp") - self.m_subp.return_value = (self.NETPLAN_INFO_OUT, "") + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory, mocker): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + mocker.patch( + "cloudinit.net.netplan.subp.subp", + return_value=(self.NETPLAN_INFO_OUT, ""), + ) def _render_and_read( self, network_config=None, state=None, netplan_path=None, target=None @@ -7837,169 +4117,58 @@ def _render_and_read( if target is None: target = self.tmp_dir() - if network_config: - ns = network_state.parse_net_config_data(network_config) - elif state: - ns = state - else: - raise ValueError("Expected data or state, got neither") - - if netplan_path is None: - netplan_path = "etc/netplan/50-cloud-init.yaml" - - renderer = netplan.Renderer(config={"netplan_path": netplan_path}) - - renderer.render_network_state(ns, target=target) - return dir2dict(target) - - def testsimple_render_bond_netplan(self): - entry = NETWORK_CONFIGS["bond"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - print(entry["expected_netplan"]) - print("-- expected ^ | v rendered --") - print(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_bond_v2_input_netplan(self): - entry = NETWORK_CONFIGS["bond"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml-v2"]) - ) - print(entry["expected_netplan-v2"]) - print("-- expected ^ | v rendered --") - print(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertEqual( - entry["expected_netplan-v2"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def 
testsimple_render_small_netplan(self): - entry = NETWORK_CONFIGS["small_v1"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_v4_and_v6(self): - entry = NETWORK_CONFIGS["v4_and_v6"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_v4_and_v6_static(self): - entry = NETWORK_CONFIGS["v4_and_v6_static"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_dhcpv6_only(self): - entry = NETWORK_CONFIGS["dhcpv6_only"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_dhcpv6_accept_ra(self): - entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) - ) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_dhcpv6_reject_ra(self): - entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) - ) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_ipv6_slaac(self): - entry = NETWORK_CONFIGS["ipv6_slaac"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) + if network_config: + ns = network_state.parse_net_config_data(network_config) + elif state: + ns = state + else: + raise ValueError("Expected data or state, got neither") - def testsimple_render_dhcpv6_stateless(self): - entry = NETWORK_CONFIGS["dhcpv6_stateless"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) + if netplan_path is None: + netplan_path = "etc/netplan/50-cloud-init.yaml" - def testsimple_render_dhcpv6_stateful(self): - entry = NETWORK_CONFIGS["dhcpv6_stateful"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) + renderer = netplan.Renderer(config={"netplan_path": netplan_path}) - def testsimple_wakeonlan_disabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_disabled"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) - ) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) + renderer.render_network_state(ns, target=target) + return dir2dict(target) - def testsimple_wakeonlan_enabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_enabled"] + @pytest.mark.parametrize( + "expected_name,yaml_version", + [ + ("bond_v1", "yaml"), + ("bond_v2", "yaml"), + ("small_v1", "yaml"), + ("v4_and_v6", "yaml_v1"), + 
("v4_and_v6", "yaml_v2"), + ("v1_ipv4_and_ipv6_static", "yaml_v1"), + ("v2_ipv4_and_ipv6_static", "yaml_v2"), + ("dhcpv6_only", "yaml_v1"), + ("dhcpv6_only", "yaml_v2"), + ("dhcpv6_accept_ra", "yaml_v1"), + ("dhcpv6_reject_ra", "yaml_v1"), + ("ipv6_slaac", "yaml"), + ("dhcpv6_stateless", "yaml"), + ("dhcpv6_stateful", "yaml"), + ("wakeonlan_disabled", "yaml_v2"), + ("wakeonlan_enabled", "yaml_v2"), + ("large_v1", "yaml"), + ("manual", "yaml"), + pytest.param( + "v1-dns", + "yaml", + marks=pytest.mark.xfail( + reason="netplan should render interface-level nameservers" + ), + ), + ], + ) + def test_config(self, expected_name, yaml_version): + entry = NETWORK_CONFIGS[expected_name] files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) + network_config=yaml.safe_load(entry[yaml_version]) ) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_all(self): - entry = NETWORK_CONFIGS["all"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - print(entry["expected_netplan"]) - print("-- expected ^ | v rendered --") - print(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_manual(self): - entry = NETWORK_CONFIGS["manual"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - @pytest.mark.xfail( - reason="netplan should render interface-level nameservers" - ) - def testsimple_render_v1_dns(self): - entry = NETWORK_CONFIGS["v1-dns"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + assert yaml.safe_load(entry["expected_netplan"]) == yaml.safe_load( + files["/etc/netplan/50-cloud-init.yaml"] ) def test_render_output_has_yaml_no_aliases(self): @@ -8007,7 +4176,7 @@ def test_render_output_has_yaml_no_aliases(self): "yaml": V1_NAMESERVER_ALIAS, "expected_netplan": NETPLAN_NO_ALIAS, } - network_config = yaml.load(entry["yaml"]) + network_config = yaml.safe_load(entry["yaml"]) ns = network_state.parse_net_config_data(network_config) files = self._render_and_read(state=ns) # check for alias @@ -8015,8 +4184,8 @@ def test_render_output_has_yaml_no_aliases(self): # test load the yaml to ensure we don't render something not loadable # this allows single aliases, but not duplicate ones - parsed = yaml.load(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertNotEqual(None, parsed) + parsed = yaml.safe_load(files["/etc/netplan/50-cloud-init.yaml"]) + assert parsed is not None # now look for any alias, avoid rendering them entirely # generate the first anchor string using the template @@ -8027,12 +4196,9 @@ def test_render_output_has_yaml_no_aliases(self): msg = "Error at: %s\nContent:\n%s" % (found_alias, content) raise ValueError("Found yaml alias in rendered netplan: " + msg) - print(entry["expected_netplan"]) - print("-- expected ^ | v rendered --") - print(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + assert ( + entry["expected_netplan"].splitlines() + == files["/etc/netplan/50-cloud-init.yaml"].splitlines() ) def 
test_render_output_supports_both_grat_arp_spelling(self): @@ -8042,18 +4208,19 @@ def test_render_output_supports_both_grat_arp_spelling(self): "gratuitious", "gratuitous" ), } - network_config = yaml.load(entry["yaml"]).get("network") + network_config = yaml.safe_load(entry["yaml"]).get("network") files = self._render_and_read(network_config=network_config) - print(entry["expected_netplan"]) - print("-- expected ^ | v rendered --") - print(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + assert ( + entry["expected_netplan"].splitlines() + == files["/etc/netplan/50-cloud-init.yaml"].splitlines() ) -class TestEniRoundTrip(CiTestCase): +class TestEniRoundTrip: + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + def _render_and_read( self, network_config=None, @@ -8085,137 +4252,84 @@ def _render_and_read( def testsimple_convert_and_render(self): network_config = eni.convert_eni_data(EXAMPLE_ENI) files = self._render_and_read(network_config=network_config) - self.assertEqual( - RENDERED_ENI.splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_all(self): - entry = NETWORK_CONFIGS["all"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_small_v1(self): - entry = NETWORK_CONFIGS["small_v1"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - @pytest.mark.xfail(reason="GH-4219") - def testsimple_render_small_v2(self): - entry = NETWORK_CONFIGS["small_v2"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_v4_and_v6(self): - entry = NETWORK_CONFIGS["v4_and_v6"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_dhcpv6_only(self): - entry = NETWORK_CONFIGS["dhcpv6_only"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_v4_and_v6_static(self): - entry = NETWORK_CONFIGS["v4_and_v6_static"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_dhcpv6_stateless(self): - entry = NETWORK_CONFIGS["dhcpv6_stateless"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_ipv6_slaac(self): - entry = NETWORK_CONFIGS["ipv6_slaac"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_dhcpv6_stateful(self): - entry = NETWORK_CONFIGS["dhcpv6_stateless"] - 
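The recurring structural change in this file is that dozens of copy-pasted `testsimple_render_*` methods collapse into a single table-driven `test_config`, with known-broken combinations kept in the table as `pytest.param(..., marks=pytest.mark.xfail(reason=...))` rather than deleted. A self-contained skeleton of that pattern; `CONFIGS`, `render`, and the bug id are stand-ins for `NETWORK_CONFIGS`, `_render_and_read`, and a real issue reference:

```python
import pytest

CONFIGS = {  # stand-in for NETWORK_CONFIGS
    "dhcp": {"yaml": "eth0: dhcp", "expected": "eth0: dhcp"},
    "static": {"yaml": "eth0: static", "expected": "eth0: manual"},
}


def render(source: str) -> str:  # stand-in for _render_and_read()
    return source


@pytest.mark.parametrize(
    "name,variant",
    [
        ("dhcp", "yaml"),
        # Failing cases stay visible in the table, and the bug reference
        # travels with the case instead of living in a deleted method.
        pytest.param(
            "static", "yaml", marks=pytest.mark.xfail(reason="GH-0000")
        ),
    ],
)
def test_config(name, variant):
    entry = CONFIGS[name]
    assert render(entry[variant]) == entry["expected"]
```

This also explains the parallel switch from `yaml.load` to `yaml.safe_load`: the loader only needs plain dicts and lists, and `safe_load` refuses to construct arbitrary Python objects from the fixture text.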
files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_dhcpv6_accept_ra(self): - entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) - ) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_dhcpv6_reject_ra(self): - entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) - ) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_wakeonlan_disabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_disabled"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) - ) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), + assert ( + RENDERED_ENI.splitlines() + == files["/etc/network/interfaces"].splitlines() ) - def testsimple_wakeonlan_enabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_enabled"] + @pytest.mark.parametrize( + "expected_name,yaml_version", + [ + ("large_v1", "yaml"), + pytest.param( + "large_v2", + "yaml", + marks=pytest.mark.xfail( + reason=( + "MAC for bond and bridge not being rendered. " + "bond-miimon is used rather than bond_miimon. " + "No rendering of bridge_gcint. " + "No rendering of bridge_waitport. " + "IPv6 routes added to IPv4 section. " + "DNS rendering inconsistencies." + ) + ), + ), + ("small_v1", "yaml"), + pytest.param( + "small_v2", "yaml", marks=pytest.mark.xfail(reason="GH-4219") + ), + ("v4_and_v6", "yaml_v1"), + ("v4_and_v6", "yaml_v2"), + ("dhcpv6_only", "yaml_v1"), + ("dhcpv6_only", "yaml_v2"), + ("v1_ipv4_and_ipv6_static", "yaml_v1"), + ("v2_ipv4_and_ipv6_static", "yaml_v2"), + ("dhcpv6_stateless", "yaml"), + ("ipv6_slaac", "yaml"), + pytest.param( + "dhcpv6_stateful", + "yaml", + marks=pytest.mark.xfail( + reason="Test never passed due to typo in name" + ), + ), + ("dhcpv6_accept_ra", "yaml_v1"), + ("dhcpv6_accept_ra", "yaml_v2"), + ("dhcpv6_reject_ra", "yaml_v1"), + ("dhcpv6_reject_ra", "yaml_v2"), + ("wakeonlan_disabled", "yaml_v2"), + ("wakeonlan_enabled", "yaml_v2"), + ("manual", "yaml"), + ("bond_v1", "yaml"), + pytest.param( + "bond_v2", + "yaml", + marks=pytest.mark.xfail( + reason=( + "Rendering bond_miimon rather than bond-miimon. " + "Using pre-down/post-up routes for gateway rather " + "gateway. " + "Adding ipv6 routes to ipv4 section" + ) + ), + ), + pytest.param( + "v1-dns", "yaml", marks=pytest.mark.xfail(reason="GH-4219") + ), + pytest.param( + "v2-dns", "yaml", marks=pytest.mark.xfail(reason="GH-4219") + ), + ], + ) + def test_config(self, expected_name, yaml_version): + entry = NETWORK_CONFIGS[expected_name] files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) + network_config=yaml.safe_load(entry[yaml_version]) ) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_manual(self): - """Test rendering of 'manual' for 'type' and 'control'. - - 'type: manual' in a subnet is odd, but it is the way that was used - to declare that a network device should get a mtu set on it even - if there were no addresses to configure. 
Also strange is the fact - that in order to apply that MTU the ifupdown device must be set - to 'auto', or the MTU would not be set.""" - entry = NETWORK_CONFIGS["manual"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), + assert ( + entry["expected_eni"].splitlines() + == files["/etc/network/interfaces"].splitlines() ) def test_routes_rendered(self): @@ -8292,7 +4406,7 @@ def test_routes_rendered(self): ] found = files["/etc/network/interfaces"].splitlines() - self.assertEqual(expected, [line for line in found if line]) + assert expected == [line for line in found if line] def test_ipv6_static_routes(self): # as reported in bug 1818669 @@ -8366,36 +4480,14 @@ def test_ipv6_static_routes(self): ] found = files["/etc/network/interfaces"].splitlines() - self.assertEqual(expected, [line for line in found if line]) - - def testsimple_render_bond(self): - entry = NETWORK_CONFIGS["bond"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - @pytest.mark.xfail(reason="GH-4219") - def test_v1_dns(self): - entry = NETWORK_CONFIGS["v1-dns"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) + assert expected == [line for line in found if line] - @pytest.mark.xfail(reason="GH-4219") - def test_v2_dns(self): - entry = NETWORK_CONFIGS["v2-dns"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) +class TestNetworkdNetRendering: + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) -class TestNetworkdNetRendering(CiTestCase): def create_conf_dict(self, contents): content_dict = {} for line in contents: @@ -8411,7 +4503,7 @@ def create_conf_dict(self, contents): def compare_dicts(self, actual, expected): for k, v in actual.items(): - self.assertEqual(sorted(expected[k]), sorted(v)) + assert sorted(expected[k]) == sorted(v) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @@ -8443,9 +4535,7 @@ def test_networkd_default_generation( renderer = networkd.Renderer({}) renderer.render_network_state(ns, target=render_dir) - self.assertTrue( - os.path.exists(os.path.join(render_dir, render_target)) - ) + assert os.path.exists(os.path.join(render_dir, render_target)) with open(os.path.join(render_dir, render_target)) as fh: contents = fh.readlines() @@ -8466,7 +4556,11 @@ def test_networkd_default_generation( self.compare_dicts(actual, expected) -class TestNetworkdRoundTrip(CiTestCase): +class TestNetworkdRoundTrip: + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + def create_conf_dict(self, contents): content_dict = {} for line in contents: @@ -8482,7 +4576,7 @@ def create_conf_dict(self, contents): def compare_dicts(self, actual, expected): for k, v in actual.items(): - self.assertEqual(sorted(expected[k]), sorted(v)) + assert sorted(expected[k]) == sorted(v) def _render_and_read( self, network_config=None, state=None, 
nwkd_path=None, dir=None @@ -8505,35 +4599,45 @@ def _render_and_read( renderer.render_network_state(ns, target=dir) return dir2dict(dir) + @pytest.mark.parametrize( + "expected_name,yaml_version", + [ + ("v4_and_v6", "yaml_v1"), + ("v4_and_v6", "yaml_v2"), + ("v1_ipv4_and_ipv6_static", "yaml_v1"), + ("v2_ipv4_and_ipv6_static", "yaml_v2"), + ("dhcpv6_only", "yaml_v1"), + ("dhcpv6_only", "yaml_v2"), + ("dhcpv6_accept_ra", "yaml_v1"), + ("dhcpv6_accept_ra", "yaml_v2"), + ("dhcpv6_reject_ra", "yaml_v1"), + ("dhcpv6_reject_ra", "yaml_v2"), + ], + ) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def testsimple_render_small_networkd_v1(self, m_chown): - nwk_fn1 = "/etc/systemd/network/10-cloud-init-eth99.network" - nwk_fn2 = "/etc/systemd/network/10-cloud-init-eth1.network" - entry = NETWORK_CONFIGS["small_v1"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - - actual = files[nwk_fn1].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd_eth99"].splitlines() - expected = self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) + def test_config(self, _m_chown, expected_name, yaml_version): + nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" + entry = NETWORK_CONFIGS[expected_name] + files = self._render_and_read( + network_config=yaml.safe_load(entry[yaml_version]) + ) - actual = files[nwk_fn2].splitlines() + actual = files[nwk_fn].splitlines() actual = self.create_conf_dict(actual) - expected = entry["expected_networkd_eth1"].splitlines() + expected = entry["expected_networkd"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def testsimple_render_small_networkd_v2(self, m_chown): + def testsimple_render_small_networkd_v1(self, m_chown): nwk_fn1 = "/etc/systemd/network/10-cloud-init-eth99.network" nwk_fn2 = "/etc/systemd/network/10-cloud-init-eth1.network" - entry = NETWORK_CONFIGS["small_v2"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) + entry = NETWORK_CONFIGS["small_v1"] + files = self._render_and_read( + network_config=yaml.safe_load(entry["yaml"]) + ) actual = files[nwk_fn1].splitlines() actual = self.create_conf_dict(actual) @@ -8552,107 +4656,26 @@ def testsimple_render_small_networkd_v2(self, m_chown): self.compare_dicts(actual, expected) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def testsimple_render_v4_and_v6(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["v4_and_v6"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - - actual = files[nwk_fn].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd"].splitlines() - expected = self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) - - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def testsimple_render_v4_and_v6_static(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["v4_and_v6_static"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - - actual = files[nwk_fn].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd"].splitlines() - expected = self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) - - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def 
testsimple_render_dhcpv6_only(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["dhcpv6_only"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - - actual = files[nwk_fn].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd"].splitlines() - expected = self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) - - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def test_dhcpv6_accept_ra_config_v1(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) - ) - - actual = files[nwk_fn].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd"].splitlines() - expected = self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) - - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def test_dhcpv6_accept_ra_config_v2(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) - ) - - actual = files[nwk_fn].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd"].splitlines() - expected = self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) - - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def test_dhcpv6_reject_ra_config_v1(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] + def testsimple_render_small_networkd_v2(self, m_chown): + nwk_fn1 = "/etc/systemd/network/10-cloud-init-eth99.network" + nwk_fn2 = "/etc/systemd/network/10-cloud-init-eth1.network" + entry = NETWORK_CONFIGS["small_v2"] files = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) + network_config=yaml.safe_load(entry["yaml"]) ) - actual = files[nwk_fn].splitlines() + actual = files[nwk_fn1].splitlines() actual = self.create_conf_dict(actual) - expected = entry["expected_networkd"].splitlines() + expected = entry["expected_networkd_eth99"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def test_dhcpv6_reject_ra_config_v2(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) - ) - - actual = files[nwk_fn].splitlines() + actual = files[nwk_fn2].splitlines() actual = self.create_conf_dict(actual) - expected = entry["expected_networkd"].splitlines() + expected = entry["expected_networkd_eth1"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @@ -8664,7 +4687,9 @@ def test_dhcpv6_reject_ra_config_v2(self, m_chown): def test_v1_dns(self, m_chown): nwk_fn = "/etc/systemd/network/10-cloud-init-eth0.network" entry = NETWORK_CONFIGS["v1-dns"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) + files = self._render_and_read( + network_config=yaml.safe_load(entry["yaml"]) + ) actual = self.create_conf_dict(files[nwk_fn].splitlines()) expected = self.create_conf_dict( @@ -8677,7 +4702,9 @@ def test_v1_dns(self, m_chown): def test_v2_dns(self, 
m_chown): nwk_fn = "/etc/systemd/network/10-cloud-init-eth0.network" entry = NETWORK_CONFIGS["v2-dns"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) + files = self._render_and_read( + network_config=yaml.safe_load(entry["yaml"]) + ) actual = self.create_conf_dict(files[nwk_fn].splitlines()) expected = self.create_conf_dict( @@ -8755,7 +4782,7 @@ def test_valid_renderer_from_defaults_depending_on_availability( renderers.select(priority=renderers.DEFAULT_PRIORITY) -class TestNetRenderers(CiTestCase): +class TestNetRenderers: @mock.patch("cloudinit.net.renderers.sysconfig.available") @mock.patch("cloudinit.net.renderers.eni.available") def test_eni_and_sysconfig_available(self, m_eni_avail, m_sysc_avail): @@ -8763,13 +4790,13 @@ def test_eni_and_sysconfig_available(self, m_eni_avail, m_sysc_avail): m_sysc_avail.return_value = True found = renderers.search(priority=["sysconfig", "eni"], first=False) names = [f[0] for f in found] - self.assertEqual(["sysconfig", "eni"], names) + assert ["sysconfig", "eni"] == names @mock.patch("cloudinit.net.renderers.eni.available") def test_search_returns_empty_on_none(self, m_eni_avail): m_eni_avail.return_value = False found = renderers.search(priority=["eni"], first=False) - self.assertEqual([], found) + assert [] == found @mock.patch("cloudinit.net.renderers.sysconfig.available") @mock.patch("cloudinit.net.renderers.eni.available") @@ -8778,7 +4805,7 @@ def test_first_in_priority(self, m_eni_avail, m_sysc_avail): m_eni_avail.return_value = True m_sysc_avail.side_effect = Exception("Should not call me") found = renderers.search(priority=["eni", "sysconfig"], first=True)[0] - self.assertEqual(["eni"], [found[0]]) + assert ["eni"] == [found[0]] @mock.patch("cloudinit.net.renderers.sysconfig.available") @mock.patch("cloudinit.net.renderers.eni.available") @@ -8786,7 +4813,7 @@ def test_select_positive(self, m_eni_avail, m_sysc_avail): m_eni_avail.return_value = True m_sysc_avail.return_value = False found = renderers.select(priority=["sysconfig", "eni"]) - self.assertEqual("eni", found[0]) + assert "eni" == found[0] @mock.patch("cloudinit.net.renderers.sysconfig.available") @mock.patch("cloudinit.net.renderers.eni.available") @@ -8795,7 +4822,7 @@ def test_select_none_found_raises(self, m_eni_avail, m_sysc_avail): m_eni_avail.return_value = False m_sysc_avail.return_value = False - self.assertRaises( + pytest.raises( net.RendererNotFoundError, renderers.select, priority=["sysconfig", "eni"], @@ -8817,20 +4844,20 @@ def test_sysconfig_available_uses_variant_mapping(self, m_info, m_avail): if hasattr(util.system_info, "cache_clear"): util.system_info.cache_clear() result = sysconfig.available() - self.assertTrue(result) + assert result @mock.patch("cloudinit.net.renderers.networkd.available") def test_networkd_available(self, m_nwkd_avail): m_nwkd_avail.return_value = True found = renderers.search(priority=["networkd"], first=False) - self.assertEqual("networkd", found[0][0]) + assert "networkd" == found[0][0] @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestGetInterfaces(CiTestCase): +class TestGetInterfaces: _data = { "bonds": ["bond1"], "bridges": ["bridge1"], @@ -8902,10 +4929,11 @@ def _se_is_bond(self, name): def _se_is_netfailover(self, name): return False - def _mock_setup(self): + @pytest.fixture + def mocks(self, mocker): self.data = copy.deepcopy(self._data) self.data["devices"] = set(list(self.data["macs"].keys())) - mocks = ( + mock_list = ( "get_devicelist", 
"get_interface_mac", "get_master", @@ -8917,35 +4945,32 @@ def _mock_setup(self): "is_bond", "is_netfailover", ) - self.mocks = {} - for n in mocks: - m = mock.patch( - "cloudinit.net." + n, side_effect=getattr(self, "_se_" + n) + all_mocks = {} + for name in mock_list: + all_mocks[name] = mocker.patch( + "cloudinit.net." + name, + side_effect=getattr(self, "_se_" + name), ) - self.addCleanup(m.stop) - self.mocks[n] = m.start() + yield all_mocks - def test_gi_includes_duplicate_macs(self): - self._mock_setup() + def test_gi_includes_duplicate_macs(self, mocks): ret = net.get_interfaces() - self.assertIn("enp0s1", self._se_get_devicelist()) - self.assertIn("eth1", self._se_get_devicelist()) + assert "enp0s1" in self._se_get_devicelist() + assert "eth1" in self._se_get_devicelist() found = [ent for ent in ret if "aa:aa:aa:aa:aa:01" in ent] - self.assertEqual(len(found), 2) + assert len(found) == 2 - def test_gi_excludes_any_without_mac_address(self): - self._mock_setup() + def test_gi_excludes_any_without_mac_address(self, mocks): ret = net.get_interfaces() - self.assertIn("tun0", self._se_get_devicelist()) + assert "tun0" in self._se_get_devicelist() found = [ent for ent in ret if "tun0" in ent] - self.assertEqual(len(found), 0) + assert len(found) == 0 - def test_gi_excludes_stolen_macs(self): - self._mock_setup() + def test_gi_excludes_stolen_macs(self, mocks): ret = net.get_interfaces() - self.mocks["interface_has_own_mac"].assert_has_calls( + mocks["interface_has_own_mac"].assert_has_calls( [mock.call("enp0s1"), mock.call("bond1")], any_order=True ) expected = [ @@ -8955,10 +4980,9 @@ def test_gi_excludes_stolen_macs(self): ("lo", "00:00:00:00:00:00", None, "0x8"), ("bridge1-nic", "aa:aa:aa:aa:aa:03", None, "0x3"), ] - self.assertEqual(sorted(expected), sorted(ret)) + assert sorted(expected) == sorted(ret) - def test_gi_excludes_bridges(self): - self._mock_setup() + def test_gi_excludes_bridges(self, mocks): # add a device 'b1', make all return they have their "own mac", # set everything other than 'b1' to be a bridge. # then expect b1 is the only thing left. @@ -8969,8 +4993,8 @@ def test_gi_excludes_bridges(self): self.data["own_macs"] = self.data["devices"] self.data["bridges"] = [f for f in self.data["devices"] if f != "b1"] ret = net.get_interfaces() - self.assertEqual([("b1", "aa:aa:aa:aa:aa:b1", None, "0x0")], ret) - self.mocks["is_bridge"].assert_has_calls( + assert [("b1", "aa:aa:aa:aa:aa:b1", None, "0x0")] == ret + mocks["is_bridge"].assert_has_calls( [ mock.call("bridge1"), mock.call("enp0s1"), @@ -8981,7 +5005,7 @@ def test_gi_excludes_bridges(self): ) -class TestInterfaceHasOwnMac(CiTestCase): +class TestInterfaceHasOwnMac: """Test interface_has_own_mac. 
This is admittedly a bit whitebox.""" @mock.patch("cloudinit.net.read_sys_net_int", return_value=None) @@ -8999,11 +5023,11 @@ def test_non_strict_with_no_addr_assign_type(self, m_read_sys_net_int): tx_queue_len:1 type:1 """ - self.assertTrue(interface_has_own_mac("eth0")) + assert interface_has_own_mac("eth0") @mock.patch("cloudinit.net.read_sys_net_int", return_value=None) def test_strict_with_no_addr_assign_type_raises(self, m_read_sys_net_int): - with self.assertRaises(ValueError): + with pytest.raises(ValueError): interface_has_own_mac("eth0", True) @mock.patch("cloudinit.net.read_sys_net_int") @@ -9011,20 +5035,17 @@ def test_expected_values(self, m_read_sys_net_int): msg = "address_assign_type=%d said to not have own mac" for address_assign_type in (0, 1, 3): m_read_sys_net_int.return_value = address_assign_type - self.assertTrue( - interface_has_own_mac("eth0", msg % address_assign_type) - ) + assert interface_has_own_mac("eth0", msg % address_assign_type) m_read_sys_net_int.return_value = 2 - self.assertFalse(interface_has_own_mac("eth0")) + assert not interface_has_own_mac("eth0") @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestGetInterfacesByMac(CiTestCase): - with_logs = True +class TestGetInterfacesByMac: _data = { "bonds": ["bond1"], "bridges": ["bridge1"], @@ -9065,6 +5086,27 @@ class TestGetInterfacesByMac(CiTestCase): } data: dict = {} + @pytest.fixture + def mocks(self, mocker): + self.data = copy.deepcopy(self._data) + self.data["devices"] = set(list(self.data["macs"].keys())) + mock_list = ( + "get_devicelist", + "device_driver", + "get_interface_mac", + "is_bridge", + "interface_has_own_mac", + "is_vlan", + "get_ib_interface_hwaddr", + ) + all_mocks = {} + for name in mock_list: + all_mocks[name] = mocker.patch( + "cloudinit.net." + name, + side_effect=getattr(self, "_se_" + name), + ) + yield all_mocks + def _se_get_devicelist(self): return list(self.data["devices"]) @@ -9087,62 +5129,34 @@ def _se_get_ib_interface_hwaddr(self, name, ethernet_format): ib_hwaddr = self.data.get("ib_hwaddr", {}) return ib_hwaddr.get(name, {}).get(ethernet_format) - def _mock_setup(self): - self.data = copy.deepcopy(self._data) - self.data["devices"] = set(list(self.data["macs"].keys())) - mocks = ( - "get_devicelist", - "device_driver", - "get_interface_mac", - "is_bridge", - "interface_has_own_mac", - "is_vlan", - "get_ib_interface_hwaddr", - ) - self.mocks = {} - for n in mocks: - m = mock.patch( - "cloudinit.net." 
+ n, side_effect=getattr(self, "_se_" + n) - ) - self.addCleanup(m.stop) - self.mocks[n] = m.start() - - def test_raise_exception_on_duplicate_macs(self): - self._mock_setup() + def test_raise_exception_on_duplicate_macs(self, mocks): self.data["macs"]["bridge1-nic"] = self.data["macs"]["enp0s1"] - self.assertRaises(RuntimeError, net.get_interfaces_by_mac) + pytest.raises(RuntimeError, net.get_interfaces_by_mac) - def test_raise_exception_on_duplicate_netvsc_macs(self): - self._mock_setup() + def test_raise_exception_on_duplicate_netvsc_macs(self, mocks): self.data["macs"]["netvsc0"] = self.data["macs"]["netvsc1"] - self.assertRaises(RuntimeError, net.get_interfaces_by_mac) + pytest.raises(RuntimeError, net.get_interfaces_by_mac) - def test_excludes_any_without_mac_address(self): - self._mock_setup() + def test_excludes_any_without_mac_address(self, mocks): ret = net.get_interfaces_by_mac() - self.assertIn("tun0", self._se_get_devicelist()) - self.assertNotIn("tun0", ret.values()) + assert "tun0" in self._se_get_devicelist() + assert "tun0" not in ret.values() - def test_excludes_stolen_macs(self): - self._mock_setup() + def test_excludes_stolen_macs(self, mocks): ret = net.get_interfaces_by_mac() - self.mocks["interface_has_own_mac"].assert_has_calls( + mocks["interface_has_own_mac"].assert_has_calls( [mock.call("enp0s1"), mock.call("bond1")], any_order=True ) - self.assertEqual( - { - "aa:aa:aa:aa:aa:01": "enp0s1", - "aa:aa:aa:aa:aa:02": "enp0s2", - "aa:aa:aa:aa:aa:03": "bridge1-nic", - "00:00:00:00:00:00": "lo", - "aa:aa:aa:aa:aa:04": "netvsc0", - "aa:aa:aa:aa:aa:05": "netvsc1", - }, - ret, - ) - - def test_excludes_bridges(self): - self._mock_setup() + assert { + "aa:aa:aa:aa:aa:01": "enp0s1", + "aa:aa:aa:aa:aa:02": "enp0s2", + "aa:aa:aa:aa:aa:03": "bridge1-nic", + "00:00:00:00:00:00": "lo", + "aa:aa:aa:aa:aa:04": "netvsc0", + "aa:aa:aa:aa:aa:05": "netvsc1", + } == ret + + def test_excludes_bridges(self, mocks): # add a device 'b1', make all return they have their "own mac", # set everything other than 'b1' to be a bridge. # then expect b1 is the only thing left. @@ -9152,8 +5166,8 @@ def test_excludes_bridges(self): self.data["own_macs"] = self.data["devices"] self.data["bridges"] = [f for f in self.data["devices"] if f != "b1"] ret = net.get_interfaces_by_mac() - self.assertEqual({"aa:aa:aa:aa:aa:b1": "b1"}, ret) - self.mocks["is_bridge"].assert_has_calls( + assert {"aa:aa:aa:aa:aa:b1": "b1"} == ret + mocks["is_bridge"].assert_has_calls( [ mock.call("bridge1"), mock.call("enp0s1"), @@ -9163,8 +5177,7 @@ def test_excludes_bridges(self): any_order=True, ) - def test_excludes_vlans(self): - self._mock_setup() + def test_excludes_vlans(self, mocks): # add a device 'b1', make all return they have their "own mac", # set everything other than 'b1' to be a vlan. # then expect b1 is the only thing left. 
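The fixture-with-`mocker` arrangement used by `TestGetInterfaces` and `TestGetInterfacesByMac` replaces the old `_mock_setup`/`addCleanup` bookkeeping: pytest-mock reverts every patch at teardown, and yielding the dict lets each test assert on individual mocks. Reduced to one patched function (assumes pytest-mock is installed to provide `mocker`; `_Data` is a stand-in for the test classes above):

```python
import pytest

from cloudinit import net


class _Data:
    # Canned side effect, mirroring the _se_* methods above.
    def _se_get_devicelist(self):
        return ["eth0", "lo"]


@pytest.fixture
def mocks(mocker):
    data = _Data()
    patched = {}
    for name in ("get_devicelist",):
        patched[name] = mocker.patch(
            "cloudinit.net." + name,
            side_effect=getattr(data, "_se_" + name),
        )
    # No explicit cleanup needed: mocker undoes the patches at teardown.
    yield patched


def test_uses_patched_devicelist(mocks):
    assert net.get_devicelist() == ["eth0", "lo"]
    mocks["get_devicelist"].assert_called_once()
```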
@@ -9175,8 +5188,8 @@ def test_excludes_vlans(self): self.data["own_macs"] = self.data["devices"] self.data["vlans"] = [f for f in self.data["devices"] if f != "b1"] ret = net.get_interfaces_by_mac() - self.assertEqual({"aa:aa:aa:aa:aa:b1": "b1"}, ret) - self.mocks["is_vlan"].assert_has_calls( + assert {"aa:aa:aa:aa:aa:b1": "b1"} == ret + mocks["is_vlan"].assert_has_calls( [ mock.call("bridge1"), mock.call("enp0s1"), @@ -9186,20 +5199,18 @@ def test_excludes_vlans(self): any_order=True, ) - def test_duplicates_of_empty_mac_are_ok(self): + def test_duplicates_of_empty_mac_are_ok(self, mocks): """Duplicate macs of 00:00:00:00:00:00 should be skipped.""" - self._mock_setup() empty_mac = "00:00:00:00:00:00" addnics = ("greptap1", "lo", "greptap2") self.data["macs"].update(dict((k, empty_mac) for k in addnics)) self.data["devices"].update(set(addnics)) self.data["own_macs"].extend(list(addnics)) ret = net.get_interfaces_by_mac() - self.assertEqual("lo", ret[empty_mac]) + assert "lo" == ret[empty_mac] - def test_skip_all_zeros(self): + def test_skip_all_zeros(self, mocks): """Any mac of 00:... should be skipped.""" - self._mock_setup() emac1, emac2, emac4, emac6 = ( "00", "00:00", @@ -9217,12 +5228,11 @@ def test_skip_all_zeros(self): self.data["devices"].update(set(addnics)) self.data["own_macs"].extend(addnics.keys()) ret = net.get_interfaces_by_mac() - self.assertEqual("lo", ret["00:00:00:00:00:00"]) + assert "lo" == ret["00:00:00:00:00:00"] - def test_ib(self): + def test_ib(self, mocks): ib_addr = "80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56" ib_addr_eth_format = "00:11:22:33:44:56" - self._mock_setup() self.data["devices"] = ["enp0s1", "ib0"] self.data["own_macs"].append("ib0") self.data["macs"]["ib0"] = ib_addr @@ -9235,7 +5245,7 @@ def test_ib(self): ib_addr_eth_format: "ib0", ib_addr: "ib0", } - self.assertEqual(expected, result) + assert expected == result @pytest.mark.parametrize("driver", ("mscc_felix", "fsl_enetc", "qmi_wwan")) @@ -9272,25 +5282,33 @@ def test_duplicate_ignored_macs( assert re.search(pattern, caplog.text) -class TestInterfacesSorting(CiTestCase): +class TestInterfacesSorting: def test_natural_order(self): data = ["ens5", "ens6", "ens3", "ens20", "ens13", "ens2"] - self.assertEqual( - sorted(data, key=natural_sort_key), - ["ens2", "ens3", "ens5", "ens6", "ens13", "ens20"], - ) + assert sorted(data, key=natural_sort_key) == [ + "ens2", + "ens3", + "ens5", + "ens6", + "ens13", + "ens20", + ] data2 = ["enp2s0", "enp2s3", "enp0s3", "enp0s13", "enp0s8", "enp1s2"] - self.assertEqual( - sorted(data2, key=natural_sort_key), - ["enp0s3", "enp0s8", "enp0s13", "enp1s2", "enp2s0", "enp2s3"], - ) + assert sorted(data2, key=natural_sort_key) == [ + "enp0s3", + "enp0s8", + "enp0s13", + "enp1s2", + "enp2s0", + "enp2s3", + ] @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestGetIBHwaddrsByInterface(CiTestCase): +class TestGetIBHwaddrsByInterface: _ib_addr = "80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56" _ib_addr_eth_format = "00:11:22:33:44:56" _data = { @@ -9319,22 +5337,23 @@ class TestGetIBHwaddrsByInterface(CiTestCase): } data: dict = {} - def _mock_setup(self): + @pytest.fixture + def mocks(self, mocker): self.data = copy.deepcopy(self._data) - mocks = ( + mock_list = ( "get_devicelist", "get_interface_mac", "is_bridge", "interface_has_own_mac", "get_ib_interface_hwaddr", ) - self.mocks = {} - for n in mocks: - m = mock.patch( - "cloudinit.net." 
+ n, side_effect=getattr(self, "_se_" + n) + all_mocks = {} + for name in mock_list: + all_mocks[name] = mocker.patch( + "cloudinit.net." + name, + side_effect=getattr(self, "_se_" + name), ) - self.addCleanup(m.stop) - self.mocks[n] = m.start() + yield all_mocks def _se_get_devicelist(self): return self.data["devices"] @@ -9352,18 +5371,16 @@ def _se_get_ib_interface_hwaddr(self, name, ethernet_format): ib_hwaddr = self.data.get("ib_hwaddr", {}) return ib_hwaddr.get(name, {}).get(ethernet_format) - def test_ethernet(self): - self._mock_setup() + def test_ethernet(self, mocks): self.data["devices"].remove("ib0") result = net.get_ib_hwaddrs_by_interface() expected = {} - self.assertEqual(expected, result) + assert expected == result - def test_ib(self): - self._mock_setup() + def test_ib(self, mocks): result = net.get_ib_hwaddrs_by_interface() expected = {"ib0": self._ib_addr} - self.assertEqual(expected, result) + assert expected == result def _gzip_data(data): @@ -9374,7 +5391,7 @@ def _gzip_data(data): return iobuf.getvalue() -class TestRenameInterfaces(CiTestCase): +class TestRenameInterfaces: @mock.patch("cloudinit.subp.subp") def test_rename_all(self, mock_subp): renames = [ @@ -9400,16 +5417,13 @@ def test_rename_all(self, mock_subp): }, } net._rename_interfaces(renames, current_info=current_info) - print(mock_subp.call_args_list) mock_subp.assert_has_calls( [ mock.call( ["ip", "link", "set", "ens3", "name", "interface0"], - capture=True, ), mock.call( ["ip", "link", "set", "ens5", "name", "interface2"], - capture=True, ), ] ) @@ -9439,16 +5453,13 @@ def test_rename_no_driver_no_device_id(self, mock_subp): }, } net._rename_interfaces(renames, current_info=current_info) - print(mock_subp.call_args_list) mock_subp.assert_has_calls( [ mock.call( ["ip", "link", "set", "eth0", "name", "interface0"], - capture=True, ), mock.call( ["ip", "link", "set", "eth1", "name", "interface1"], - capture=True, ), ] ) @@ -9478,25 +5489,18 @@ def test_rename_all_bounce(self, mock_subp): }, } net._rename_interfaces(renames, current_info=current_info) - print(mock_subp.call_args_list) mock_subp.assert_has_calls( [ - mock.call(["ip", "link", "set", "ens3", "down"], capture=True), + mock.call(["ip", "link", "set", "dev", "ens3", "down"]), mock.call( ["ip", "link", "set", "ens3", "name", "interface0"], - capture=True, ), - mock.call(["ip", "link", "set", "ens5", "down"], capture=True), + mock.call(["ip", "link", "set", "dev", "ens5", "down"]), mock.call( ["ip", "link", "set", "ens5", "name", "interface2"], - capture=True, - ), - mock.call( - ["ip", "link", "set", "interface0", "up"], capture=True - ), - mock.call( - ["ip", "link", "set", "interface2", "up"], capture=True ), + mock.call(["ip", "link", "set", "dev", "interface0", "up"]), + mock.call(["ip", "link", "set", "dev", "interface2", "up"]), ] ) @@ -9525,12 +5529,9 @@ def test_rename_duplicate_macs(self, mock_subp): }, } net._rename_interfaces(renames, current_info=current_info) - print(mock_subp.call_args_list) mock_subp.assert_has_calls( [ - mock.call( - ["ip", "link", "set", "eth1", "name", "vf1"], capture=True - ), + mock.call(["ip", "link", "set", "eth1", "name", "vf1"]), ] ) @@ -9559,12 +5560,9 @@ def test_rename_duplicate_macs_driver_no_devid(self, mock_subp): }, } net._rename_interfaces(renames, current_info=current_info) - print(mock_subp.call_args_list) mock_subp.assert_has_calls( [ - mock.call( - ["ip", "link", "set", "eth1", "name", "vf1"], capture=True - ), + mock.call(["ip", "link", "set", "eth1", "name", "vf1"]), ] ) @@ -9602,15 +5600,10 
@@ def test_rename_multi_mac_dups(self, mock_subp):
             },
         }
         net._rename_interfaces(renames, current_info=current_info)
-        print(mock_subp.call_args_list)
         mock_subp.assert_has_calls(
             [
-                mock.call(
-                    ["ip", "link", "set", "eth1", "name", "vf1"], capture=True
-                ),
-                mock.call(
-                    ["ip", "link", "set", "eth2", "name", "vf2"], capture=True
-                ),
+                mock.call(["ip", "link", "set", "eth1", "name", "vf1"]),
+                mock.call(["ip", "link", "set", "eth2", "name", "vf2"]),
             ]
         )
@@ -9654,23 +5647,16 @@ def test_rename_macs_case_insensitive(self, mock_subp):
         expected = [
             mock.call(
                 ["ip", "link", "set", "eth%d" % i, "name", "en%d" % i],
-                capture=True,
             )
             for i in range(len(renames))
         ]
         mock_subp.assert_has_calls(expected)
-class TestNetworkState(CiTestCase):
+class TestNetworkState:
     def test_bcast_addr(self):
         """Test mask_and_ipv4_to_bcast_addr proper execution."""
         bcast_addr = mask_and_ipv4_to_bcast_addr
-        self.assertEqual(
-            "192.168.1.255", bcast_addr("255.255.255.0", "192.168.1.1")
-        )
-        self.assertEqual(
-            "128.42.7.255", bcast_addr("255.255.248.0", "128.42.5.4")
-        )
-        self.assertEqual(
-            "10.1.21.255", bcast_addr("255.255.255.0", "10.1.21.4")
-        )
+        assert "192.168.1.255" == bcast_addr("255.255.255.0", "192.168.1.1")
+        assert "128.42.7.255" == bcast_addr("255.255.248.0", "128.42.5.4")
+        assert "10.1.21.255" == bcast_addr("255.255.255.0", "10.1.21.4")
diff --git a/.pc/retain-apt-pre-deb822.patch/cloudinit/features.py b/.pc/retain-apt-pre-deb822.patch/cloudinit/features.py
index 3fc22cff5..5a5d65756 100644
--- a/.pc/retain-apt-pre-deb822.patch/cloudinit/features.py
+++ b/.pc/retain-apt-pre-deb822.patch/cloudinit/features.py
@@ -87,6 +87,35 @@ to write /etc/apt/sources.list directly.
 """
+DEPRECATION_INFO_BOUNDARY = "devel"
+"""
+DEPRECATION_INFO_BOUNDARY is used by distros to configure at which upstream
+version to start logging deprecations at a level higher than INFO.
+
+The default value "devel" tells cloud-init to log all deprecations higher
+than INFO. This value may be overridden by downstreams in order to maintain
+stable behavior across releases.
+
+Jsonschema key deprecations and inline logger deprecations include a
+deprecated_version key. When the variable below is set to a version,
+cloud-init will use that version as a demarcation point. Deprecations which
+are added after this version will be logged at an INFO level. Deprecations
+which predate this version will be logged at the higher DEPRECATED level.
+Downstreams that want stable log behavior may set the variable below to the
+first version released in their stable distro. By doing this, they can expect
+that newly added deprecations will be logged at INFO level. The implication of
+the different log levels is that logs at DEPRECATED level result in a return
+code of 2 from `cloud-init status`.
+
+format:
+
+<value> ::= <default> | <version>
+<default> ::= "devel"
+<version> ::= <major> "." <minor> ["." <patch>]
+
+where <major>, <minor>, and <patch> are positive integers
+"""
+
 def get_features() -> Dict[str, bool]:
     """Return a dict of applicable features/overrides and their values."""
diff --git a/.pc/retain-ec2-default-net-update-events.patch/cloudinit/sources/DataSourceEc2.py b/.pc/retain-ec2-default-net-update-events.patch/cloudinit/sources/DataSourceEc2.py
index 1b81b21f0..e1ab1c5fe 100644
--- a/.pc/retain-ec2-default-net-update-events.patch/cloudinit/sources/DataSourceEc2.py
+++ b/.pc/retain-ec2-default-net-update-events.patch/cloudinit/sources/DataSourceEc2.py
@@ -12,6 +12,8 @@
 import logging
 import os
 import time
+import uuid
+from contextlib import suppress
 from typing import Dict, List
 from cloudinit import dmi, net, sources
@@ -19,15 +21,14 @@ from cloudinit import util, warnings
 from cloudinit.distros import Distro
 from cloudinit.event import EventScope, EventType
-from cloudinit.net import activators
+from cloudinit.net import netplan
 from cloudinit.net.dhcp import NoDHCPLeaseError
 from cloudinit.net.ephemeral import EphemeralIPNetwork
+from cloudinit.sources import NicOrder
 from cloudinit.sources.helpers import ec2
 LOG = logging.getLogger(__name__)
-SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
-
 STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
 STRICT_ID_DEFAULT = "warn"
@@ -117,10 +118,13 @@ class DataSourceEc2(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
         self.metadata_address = None
+        self.identity = None
+        self._fallback_nic_order = NicOrder.MAC
     def _unpickle(self, ci_pkl_version: int) -> None:
         super()._unpickle(ci_pkl_version)
         self.extra_hotplug_udev_rules = _EXTRA_HOTPLUG_UDEV_RULES
+        self._fallback_nic_order = NicOrder.MAC
     def _get_cloud_name(self):
         """Return the cloud name as identified during _get_data."""
@@ -203,9 +207,6 @@ def launch_index(self):
     @property
     def platform(self):
-        # Handle upgrade path of pickled ds
-        if not hasattr(self, "_platform_type"):
-            self._platform_type = DataSourceEc2.dsname.lower()
         if not self._platform_type:
             self._platform_type = DataSourceEc2.dsname.lower()
         return self._platform_type
@@ -333,6 +334,8 @@ def _maybe_fetch_api_token(self, mdurls):
         return None
     def wait_for_metadata_service(self):
+        urls = []
+        start_time = 0
         mcfg = self.ds_cfg
         url_params = self.get_url_params()
@@ -366,7 +369,6 @@ def wait_for_metadata_service(self):
             and self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS
         ):
             # if we can't get a token, use instance-id path
-            urls = []
             url2base = {}
             url_path = "{ver}/meta-data/instance-id".format(
                 ver=self.min_metadata_version
             )
@@ -377,7 +379,7 @@ def wait_for_metadata_service(self):
                 urls.append(cur)
                 url2base[cur] = url
-            start_time = time.time()
+            start_time = time.monotonic()
             url, _ = uhelp.wait_for_url(
                 urls=urls,
                 max_wait=url_params.max_wait_seconds,
@@ -400,7 +402,7 @@ def wait_for_metadata_service(self):
             LOG.critical(
                 "Giving up on md from %s after %s seconds",
                 urls,
-                int(time.time() - start_time),
+                int(time.monotonic() - start_time),
             )
         return bool(metadata_address)
@@ -534,6 +536,7 @@ def network_config(self):
             full_network_config=util.get_cfg_option_bool(
                 self.ds_cfg, "apply_full_imds_network_config", True
             ),
+            fallback_nic_order=self._fallback_nic_order,
         )
         # Non-VPC (aka Classic) Ec2 instances need to rewrite the
@@ -792,11 +795,17 @@ def identify_aliyun(data):
 def identify_aws(data):
     # data is a dictionary returned by _collect_platform_data.
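The `identify_aws` rewrite below adds a byte-swapped fallback: some firmware reports the SMBIOS UUID with its first three fields little-endian, so an EC2 UUID need not literally start with `ec2` until re-ordered via `uuid.UUID(...).bytes_le`. A worked check; the helper name is a stand-in, but the two example UUIDs come from the patch comments:

```python
import uuid
from contextlib import suppress


def looks_like_ec2(uuid_str: str) -> bool:
    if uuid_str.lower().startswith("ec2"):
        return True
    with suppress(ValueError):  # tolerate non-UUID strings
        # bytes_le re-orders time_low/time_mid/time_hi as little-endian,
        # exposing the "ec2" prefix on other-endian firmware.
        return uuid.UUID(uuid_str).bytes_le.hex().startswith("ec2")
    return False


assert looks_like_ec2("EC2E1916-9099-7CAF-FD21-012345ABCDEF")  # same-endian
assert looks_like_ec2("45E12AEC-DCD1-B213-94ED-012345ABCDEF")  # other-endian
assert not looks_like_ec2("not-a-uuid")
```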
- if data["uuid"].startswith("ec2") and ( - data["uuid_source"] == "hypervisor" or data["uuid"] == data["serial"] - ): + uuid_str = data["uuid"] + if uuid_str.startswith("ec2"): + # example same-endian uuid: + # EC2E1916-9099-7CAF-FD21-012345ABCDEF return CloudNames.AWS - + with suppress(ValueError): + if uuid.UUID(uuid_str).bytes_le.hex().startswith("ec2"): + # check for other endianness + # example other-endian uuid: + # 45E12AEC-DCD1-B213-94ED-012345ABCDEF + return CloudNames.AWS return None @@ -851,7 +860,6 @@ def _collect_platform_data(): Keys in the dictionary are as follows: uuid: system-uuid from dmi or /sys/hypervisor - uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' serial: dmi 'system-serial-number' (/sys/.../product_serial) asset_tag: 'dmidecode -s chassis-asset-tag' vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor) @@ -860,44 +868,32 @@ def _collect_platform_data(): On Ec2 instances experimentation is that product_serial is upper case, and product_uuid is lower case. This returns lower case values for both. """ - data = {} - try: + uuid = None + with suppress(OSError, UnicodeDecodeError): uuid = util.load_text_file("/sys/hypervisor/uuid").strip() - data["uuid_source"] = "hypervisor" - except Exception: - uuid = dmi.read_dmi_data("system-uuid") - data["uuid_source"] = "dmi" - - if uuid is None: - uuid = "" - data["uuid"] = uuid.lower() - - serial = dmi.read_dmi_data("system-serial-number") - if serial is None: - serial = "" - - data["serial"] = serial.lower() - - asset_tag = dmi.read_dmi_data("chassis-asset-tag") - if asset_tag is None: - asset_tag = "" - - data["asset_tag"] = asset_tag.lower() - vendor = dmi.read_dmi_data("system-manufacturer") - data["vendor"] = (vendor if vendor else "").lower() + uuid = uuid or dmi.read_dmi_data("system-uuid") or "" + serial = dmi.read_dmi_data("system-serial-number") or "" + asset_tag = dmi.read_dmi_data("chassis-asset-tag") or "" + vendor = dmi.read_dmi_data("system-manufacturer") or "" + product_name = dmi.read_dmi_data("system-product-name") or "" - product_name = dmi.read_dmi_data("system-product-name") - data["product_name"] = (product_name if product_name else "").lower() - - return data + return { + "uuid": uuid.lower(), + "serial": serial.lower(), + "asset_tag": asset_tag.lower(), + "vendor": vendor.lower(), + "product_name": product_name.lower(), + } def _build_nic_order( - macs_metadata: Dict[str, Dict], macs: List[str] + macs_metadata: Dict[str, Dict], + macs_to_nics: Dict[str, str], + fallback_nic_order: NicOrder = NicOrder.MAC, ) -> Dict[str, int]: """ - Builds a dictionary containing macs as keys nad nic orders as values, + Builds a dictionary containing macs as keys and nic orders as values, taking into account `network-card` and `device-number` if present. Note that the first NIC will be the primary NIC as it will be the one with @@ -905,19 +901,22 @@ def _build_nic_order( @param macs_metadata: dictionary with mac address as key and contents like: {"device-number": "0", "interface-id": "...", "local-ipv4s": ...} - @macs: list of macs to consider + @macs_to_nics: dictionary with mac address as key and nic name as value @return: Dictionary with macs as keys and nic orders as values. 
""" nic_order: Dict[str, int] = {} - if len(macs) == 0 or len(macs_metadata) == 0: + if len(macs_to_nics) == 0 or len(macs_metadata) == 0: return nic_order valid_macs_metadata = filter( # filter out nics without metadata (not a physical nic) lambda mmd: mmd[1] is not None, # filter by macs - map(lambda mac: (mac, macs_metadata.get(mac)), macs), + map( + lambda mac: (mac, macs_metadata.get(mac), macs_to_nics[mac]), + macs_to_nics.keys(), + ), ) def _get_key_as_int_or(dikt, key, alt_value): @@ -934,7 +933,7 @@ def _get_key_as_int_or(dikt, key, alt_value): # function. return { mac: i - for i, (mac, _mac_metadata) in enumerate( + for i, (mac, _mac_metadata, _nic_name) in enumerate( sorted( valid_macs_metadata, key=lambda mmd: ( @@ -944,6 +943,9 @@ def _get_key_as_int_or(dikt, key, alt_value): _get_key_as_int_or( mmd[1], "device-number", float("infinity") ), + mmd[2] + if fallback_nic_order == NicOrder.NIC_NAME + else mmd[0], ), ) ) @@ -970,11 +972,23 @@ def _configure_policy_routing( @param: is_ipv4: Boolean indicating if we are acting over ipv4 or not. @param: table: Routing table id. """ + if is_ipv4: + subnet_prefix_routes = nic_metadata.get("subnet-ipv4-cidr-block") + ips = nic_metadata.get("local-ipv4s") + else: + subnet_prefix_routes = nic_metadata.get("subnet-ipv6-cidr-blocks") + ips = nic_metadata.get("ipv6s") + if not (subnet_prefix_routes and ips): + LOG.debug( + "Not enough IMDS information to configure policy routing " + "for IPv%s", + "4" if is_ipv4 else "6", + ) + return + if not dev_config.get("routes"): dev_config["routes"] = [] if is_ipv4: - subnet_prefix_routes = nic_metadata["subnet-ipv4-cidr-block"] - ips = nic_metadata["local-ipv4s"] try: lease = distro.dhcp_client.dhcp_discovery(nic_name, distro=distro) gateway = lease["routers"] @@ -995,9 +1009,6 @@ def _configure_policy_routing( "table": table, }, ) - else: - subnet_prefix_routes = nic_metadata["subnet-ipv6-cidr-blocks"] - ips = nic_metadata["ipv6s"] subnet_prefix_routes = ( [subnet_prefix_routes] @@ -1032,6 +1043,7 @@ def convert_ec2_metadata_network_config( macs_to_nics=None, fallback_nic=None, full_network_config=True, + fallback_nic_order=NicOrder.MAC, ): """Convert ec2 metadata to network config version 2 data dict. @@ -1073,9 +1085,11 @@ def convert_ec2_metadata_network_config( netcfg["ethernets"][nic_name] = dev_config return netcfg # Apply network config for all nics and any secondary IPv4/v6 addresses - is_netplan = distro.network_activator == activators.NetplanActivator + is_netplan = isinstance(distro.network_renderer, netplan.Renderer) + nic_order = _build_nic_order( + macs_metadata, macs_to_nics, fallback_nic_order + ) macs = sorted(macs_to_nics.keys()) - nic_order = _build_nic_order(macs_metadata, macs) for mac in macs: nic_name = macs_to_nics[mac] nic_metadata = macs_metadata.get(mac) diff --git a/.pc/retain-netplan-world-readable.patch/cloudinit/features.py b/.pc/retain-netplan-world-readable.patch/cloudinit/features.py index 259b9f772..7b88b5990 100644 --- a/.pc/retain-netplan-world-readable.patch/cloudinit/features.py +++ b/.pc/retain-netplan-world-readable.patch/cloudinit/features.py @@ -87,6 +87,35 @@ to write /etc/apt/sources.list directly. """ +DEPRECATION_INFO_BOUNDARY = "devel" +""" +DEPRECATION_INFO_BOUNDARY is used by distros to configure at which upstream +version to start logging deprecations at a level higher than INFO. + +The default value "devel" tells cloud-init to log all deprecations higher +than INFO. 
This value may be overridden by downstreams in order to maintain
+stable behavior across releases.
+
+Jsonschema key deprecations and inline logger deprecations include a
+deprecated_version key. When the variable below is set to a version,
+cloud-init will use that version as a demarcation point. Deprecations which
+are added after this version will be logged at INFO level. Deprecations
+which predate this version will be logged at the higher DEPRECATED level.
+Downstreams that want stable log behavior may set the variable below to the
+first version released in their stable distro. By doing this, they can expect
+that newly added deprecations will be logged at INFO level. The implication of
+the different log levels is that logs at DEPRECATED level result in a return
+code of 2 from `cloud-init status`.
+
+format:
+
+    <value> ::= <default> | <version>
+    <default> ::= "devel"
+    <version> ::= <major> "." <minor> ["." <patch>]
+
+where <major>, <minor>, and <patch> are positive integers
+"""
+

 def get_features() -> Dict[str, bool]:
     """Return a dict of applicable features/overrides and their values."""
diff --git a/.pc/retain-netplan-world-readable.patch/tests/unittests/distros/test_netconfig.py b/.pc/retain-netplan-world-readable.patch/tests/unittests/distros/test_netconfig.py
index 48690d712..d0c64a24a 100644
--- a/.pc/retain-netplan-world-readable.patch/tests/unittests/distros/test_netconfig.py
+++ b/.pc/retain-netplan-world-readable.patch/tests/unittests/distros/test_netconfig.py
@@ -7,15 +7,9 @@
 from textwrap import dedent
 from unittest import mock

-from cloudinit import (
-    distros,
-    features,
-    helpers,
-    safeyaml,
-    settings,
-    subp,
-    util,
-)
+import yaml
+
+from cloudinit import distros, features, helpers, settings, subp, util
 from cloudinit.distros.parsers.sys_conf import SysConf
 from cloudinit.net.activators import IfUpDownActivator
 from tests.unittests.helpers import (
@@ -303,7 +297,7 @@ def setUp(self):

     def _get_distro(self, dname, renderers=None, activators=None):
         cls = distros.fetch(dname)
-        cfg = settings.CFG_BUILTIN
+        cfg = copy.deepcopy(settings.CFG_BUILTIN)
         cfg["system_info"]["distro"] = dname
         system_info_network_cfg = {}
         if renderers:
@@ -723,7 +717,6 @@ def test_apply_network_config_rh(self):
                 GATEWAY=192.168.1.254
                 IPADDR=192.168.1.5
                 NETMASK=255.255.255.0
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 TYPE=Ethernet
                 USERCTL=no
@@ -733,7 +726,6 @@
             """\
             BOOTPROTO=dhcp
             DEVICE=eth1
-            NM_CONTROLLED=no
             ONBOOT=yes
             TYPE=Ethernet
             USERCTL=no
@@ -764,7 +756,6 @@ def test_apply_network_config_ipv6_rh(self):
                 IPV6_AUTOCONF=no
                 IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
                 IPV6_FORCE_ACCEPT_RA=no
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 TYPE=Ethernet
                 USERCTL=no
@@ -774,7 +765,6 @@
             """\
             BOOTPROTO=dhcp
             DEVICE=eth1
-            NM_CONTROLLED=no
             ONBOOT=yes
             TYPE=Ethernet
             USERCTL=no
@@ -821,7 +811,6 @@ def test_vlan_render_unsupported(self):
                 HWADDR=00:16:3e:60:7c:df
                 IPADDR=192.10.1.2
                 NETMASK=255.255.255.0
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 TYPE=Ethernet
                 USERCTL=no
@@ -833,7 +822,6 @@
                 DEVICE=infra0
                 IPADDR=10.0.1.2
                 NETMASK=255.255.0.0
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 PHYSDEV=eth0
                 USERCTL=no
@@ -869,7 +857,6 @@ def test_vlan_render(self):
                 DEVICE=eth0
                 IPADDR=192.10.1.2
                 NETMASK=255.255.255.0
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 TYPE=Ethernet
                 USERCTL=no
@@ -881,7 +868,6 @@
                 DEVICE=eth0.1001
                 IPADDR=10.0.1.2
                 NETMASK=255.255.0.0
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 PHYSDEV=eth0
                 USERCTL=no
@@ -1156,7 +1142,7 @@ def test_photon_network_config_v1_with_duplicates(self):
             [Address]
             Address=192.168.0.102/24"""
- net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS) + net_cfg = yaml.safe_load(V1_NET_CFG_WITH_DUPS) expected = self.create_conf_dict(expected.splitlines()) expected_cfgs = { @@ -1281,7 +1267,132 @@ def test_mariner_network_config_v1_with_duplicates(self): [Address] Address=192.168.0.102/24""" - net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS) + net_cfg = yaml.safe_load(V1_NET_CFG_WITH_DUPS) + + expected = self.create_conf_dict(expected.splitlines()) + expected_cfgs = { + self.nwk_file_path("eth0"): expected, + } + + self._apply_and_verify( + self.distro.apply_network_config, net_cfg, expected_cfgs.copy() + ) + + +class TestNetCfgDistroAzureLinux(TestNetCfgDistroBase): + def setUp(self): + super().setUp() + self.distro = self._get_distro("azurelinux", renderers=["networkd"]) + + def create_conf_dict(self, contents): + content_dict = {} + for line in contents: + if line: + line = line.strip() + if line and re.search(r"^\[(.+)\]$", line): + content_dict[line] = [] + key = line + elif line: + assert key + content_dict[key].append(line) + + return content_dict + + def compare_dicts(self, actual, expected): + for k, v in actual.items(): + self.assertEqual(sorted(expected[k]), sorted(v)) + + def _apply_and_verify( + self, apply_fn, config, expected_cfgs=None, bringup=False + ): + if not expected_cfgs: + raise ValueError("expected_cfg must not be None") + + tmpd = None + with mock.patch("cloudinit.net.networkd.available") as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + actual = self.create_conf_dict(results[cfgpath].splitlines()) + self.compare_dicts(actual, expected) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def nwk_file_path(self, ifname): + return "/etc/systemd/network/10-cloud-init-%s.network" % ifname + + def net_cfg_1(self, ifname): + ret = ( + """\ + [Match] + Name=%s + [Network] + DHCP=no + [Address] + Address=192.168.1.5/24 + [Route] + Gateway=192.168.1.254""" + % ifname + ) + return ret + + def net_cfg_2(self, ifname): + ret = ( + """\ + [Match] + Name=%s + [Network] + DHCP=ipv4""" + % ifname + ) + return ret + + def test_azurelinux_network_config_v1(self): + tmp = self.net_cfg_1("eth0").splitlines() + expected_eth0 = self.create_conf_dict(tmp) + + tmp = self.net_cfg_2("eth1").splitlines() + expected_eth1 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path("eth0"): expected_eth0, + self.nwk_file_path("eth1"): expected_eth1, + } + + self._apply_and_verify( + self.distro.apply_network_config, V1_NET_CFG, expected_cfgs.copy() + ) + + def test_azurelinux_network_config_v2(self): + tmp = self.net_cfg_1("eth7").splitlines() + expected_eth7 = self.create_conf_dict(tmp) + + tmp = self.net_cfg_2("eth9").splitlines() + expected_eth9 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path("eth7"): expected_eth7, + self.nwk_file_path("eth9"): expected_eth9, + } + + self._apply_and_verify( + self.distro.apply_network_config, V2_NET_CFG, expected_cfgs.copy() + ) + + def test_azurelinux_network_config_v1_with_duplicates(self): + expected = """\ + [Match] + Name=eth0 + [Network] + DHCP=no + DNS=1.2.3.4 + Domains=test.com + [Address] + Address=192.168.0.102/24""" + + net_cfg = yaml.safe_load(V1_NET_CFG_WITH_DUPS) expected = self.create_conf_dict(expected.splitlines()) expected_cfgs = { diff --git a/.pc/retain-old-groups.patch/config/cloud.cfg.tmpl b/.pc/retain-old-groups.patch/config/cloud.cfg.tmpl index e21770326..68175cd0a 
100644 --- a/.pc/retain-old-groups.patch/config/cloud.cfg.tmpl +++ b/.pc/retain-old-groups.patch/config/cloud.cfg.tmpl @@ -4,14 +4,15 @@ {% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %} {% set is_rhel = variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "miraclelinux", "rhel", "rocky", "virtuozzo"] %} -{% set gecos = ({"amazon": "EC2 Default User", "centos": "Cloud User", - "debian": "Debian", "dragonfly": "DragonFly", - "freebsd": "FreeBSD", "mariner": "MarinerOS", - "rhel": "Cloud User", "netbsd": "NetBSD", - "openbsd": "openBSD", "openmandriva": "OpenMandriva admin", - "photon": "PhotonOS", "ubuntu": "Ubuntu", - "unknown": "Ubuntu"}) %} +{% set gecos = ({"amazon": "EC2 Default User", "azurelinux": "Azure Linux", + "centos": "Cloud User", "debian": "Debian", + "dragonfly": "DragonFly", "freebsd": "FreeBSD", + "mariner": "MarinerOS", "rhel": "Cloud User", + "netbsd": "NetBSD", "openbsd": "openBSD", + "openmandriva": "OpenMandriva admin", "photon": "PhotonOS", + "ubuntu": "Ubuntu", "unknown": "Ubuntu"}) %} {% set groups = ({"alpine": "adm, wheel", "arch": "wheel, users", + "azurelinux": "wheel", "debian": "adm, audio, cdrom, dialout, dip, floppy, netdev, plugdev, sudo, video", "gentoo": "users, wheel", "mariner": "wheel", "photon": "wheel", @@ -61,7 +62,7 @@ disable_root: true "openmandriva", "photon", "TencentOS"] or is_rhel %} {% if is_rhel %} -mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2'] +mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.after=cloud-init.service,_netdev', '0', '2'] {% else %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% endif %} @@ -136,6 +137,7 @@ cloud_init_modules: - rsyslog - users_groups - ssh + - set_passwords # The modules that run in the 'config' stage cloud_config_modules: @@ -155,7 +157,6 @@ cloud_config_modules: {% endif %} - locale {% endif %} - - set_passwords {% if variant == "alpine" %} - apk_configure {% elif variant in ["debian", "ubuntu", "unknown"] %} @@ -165,8 +166,8 @@ cloud_config_modules: {% if variant == "ubuntu" %} - ubuntu_pro {% endif %} -{% elif variant in ["fedora", "mariner", "openeuler", "openmandriva", - "photon"] or is_rhel %} +{% elif variant in ["azurelinux", "fedora", "mariner", "openeuler", + "openmandriva", "photon"] or is_rhel %} {% if is_rhel %} - rh_subscription {% endif %} @@ -219,10 +220,10 @@ cloud_final_modules: # (not accessible to handlers/transforms) system_info: # This will affect which distro class gets used -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "freebsd", - "gentoo", "mariner", "netbsd", "openbsd", "OpenCloudOS", - "openeuler", "openmandriva", "photon", "suse", "TencentOS", - "ubuntu"] or is_rhel %} +{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", + "freebsd", "gentoo", "mariner", "netbsd", "openbsd", + "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", + "TencentOS", "ubuntu"] or is_rhel %} distro: {{ variant }} {% elif variant == "dragonfly" %} distro: dragonflybsd @@ -237,9 +238,10 @@ system_info: {% else %} name: {{ variant }} {% endif %} -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS", "ubuntu", "unknown"] +{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS", 
"ubuntu", + "unknown"] or is_bsd or is_rhel %} lock_passwd: True {% endif %} @@ -292,7 +294,7 @@ system_info: {% elif variant in ["freebsd", "netbsd", "openbsd"] %} network: renderers: ['{{ variant }}'] -{% elif variant in ["mariner", "photon"] %} +{% elif variant in ["azurelinux", "mariner", "photon"] %} network: renderers: ['networkd'] {% elif variant == "openmandriva" %} @@ -306,7 +308,7 @@ system_info: activators: ['netplan', 'eni', 'network-manager', 'networkd'] {% elif is_rhel %} network: - renderers: ['sysconfig', 'eni', 'netplan', 'network-manager', 'networkd'] + renderers: ['eni', 'netplan', 'network-manager', 'sysconfig', 'networkd'] {% endif %} {% if variant == "photon" %} # If set to true, cloud-init will not use fallback network config. @@ -318,9 +320,10 @@ system_info: # Automatically discover the best ntp_client ntp_client: auto {% endif %} -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS", "ubuntu", "unknown"] +{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS", "ubuntu", + "unknown"] or is_rhel %} # Other config here will be given to the distro class and/or path classes paths: @@ -328,7 +331,7 @@ system_info: templates_dir: /etc/cloud/templates/ {% elif is_bsd %} paths: - run_dir: /var/run/ + run_dir: /var/run/cloud-init/ {% endif %} {% if variant == "debian" %} package_mirrors: @@ -365,8 +368,9 @@ system_info: {% endif %} {% if variant in ["debian", "ubuntu", "unknown"] %} ssh_svcname: ssh -{% elif variant in ["alpine", "amazon", "arch", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS"] or is_rhel %} +{% elif variant in ["alpine", "amazon", "arch", "azurelinux", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS"] + or is_rhel %} ssh_svcname: sshd {% endif %} diff --git a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_lxd.py b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_lxd.py index 2fdb8e55f..aa77e9ef9 100644 --- a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_lxd.py +++ b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_lxd.py @@ -8,160 +8,29 @@ import logging import os -from textwrap import dedent from typing import List, Tuple -from cloudinit import safeyaml, subp, util +import yaml + +from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) _DEFAULT_NETWORK_NAME = "lxdbr0" - -MODULE_DESCRIPTION = """\ -This module configures lxd with user specified options using ``lxd init``. -If lxd is not present on the system but lxd configuration is provided, then -lxd will be installed. If the selected storage backend userspace utility is -not installed, it will be installed. If network bridge configuration is -provided, then lxd-bridge will be configured accordingly. 
-""" - -distros = ["ubuntu"] - meta: MetaSchema = { "id": "cc_lxd", - "name": "LXD", - "title": "Configure LXD with ``lxd init`` and optionally lxd-bridge", - "description": MODULE_DESCRIPTION, - "distros": distros, - "examples": [ - dedent( - """\ - # Simplest working directory backed LXD configuration - lxd: - init: - storage_backend: dir - """ - ), - dedent( - """\ - # LXD init showcasing cloud-init's LXD config options - lxd: - init: - network_address: 0.0.0.0 - network_port: 8443 - storage_backend: zfs - storage_pool: datapool - storage_create_loop: 10 - bridge: - mode: new - mtu: 1500 - name: lxdbr0 - ipv4_address: 10.0.8.1 - ipv4_netmask: 24 - ipv4_dhcp_first: 10.0.8.2 - ipv4_dhcp_last: 10.0.8.3 - ipv4_dhcp_leases: 250 - ipv4_nat: true - ipv6_address: fd98:9e0:3744::1 - ipv6_netmask: 64 - ipv6_nat: true - domain: lxd - """ - ), - dedent( - """\ - # For more complex non-iteractive LXD configuration of networks, - # storage_pools, profiles, projects, clusters and core config, - # `lxd:preseed` config will be passed as stdin to the command: - # lxd init --preseed - # See https://documentation.ubuntu.com/lxd/en/latest/howto/initialize/#non-interactive-configuration or - # run: lxd init --dump to see viable preseed YAML allowed. - # - # Preseed settings configuring the LXD daemon for HTTPS connections - # on 192.168.1.1 port 9999, a nested profile which allows for - # LXD nesting on containers and a limited project allowing for - # RBAC approach when defining behavior for sub projects. - lxd: - preseed: | - config: - core.https_address: 192.168.1.1:9999 - networks: - - config: - ipv4.address: 10.42.42.1/24 - ipv4.nat: true - ipv6.address: fd42:4242:4242:4242::1/64 - ipv6.nat: true - description: "" - name: lxdbr0 - type: bridge - project: default - storage_pools: - - config: - size: 5GiB - source: /var/snap/lxd/common/lxd/disks/default.img - description: "" - name: default - driver: zfs - profiles: - - config: {} - description: Default LXD profile - devices: - eth0: - name: eth0 - network: lxdbr0 - type: nic - root: - path: / - pool: default - type: disk - name: default - - config: {} - security.nesting: true - devices: - eth0: - name: eth0 - network: lxdbr0 - type: nic - root: - path: / - pool: default - type: disk - name: nested - projects: - - config: - features.images: true - features.networks: true - features.profiles: true - features.storage.volumes: true - description: Default LXD project - name: default - - config: - features.images: false - features.networks: true - features.profiles: false - features.storage.volumes: false - description: Limited Access LXD project - name: limited - - - """ # noqa: E501 - ), - ], + "distros": ["ubuntu"], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["lxd"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore -def supplemental_schema_validation( - init_cfg: dict, bridge_cfg: dict, preseed_str: str -): +def supplemental_schema_validation(init_cfg, bridge_cfg, preseed_str): """Validate user-provided lxd network and bridge config option values. @raises: ValueError describing invalid values provided. @@ -512,8 +381,8 @@ def get_required_packages(init_cfg: dict, preseed_str: str) -> List[str]: if preseed_str and "storage_pools" in preseed_str: # Assume correct YAML preseed format try: - preseed_cfg = safeyaml.load(preseed_str) - except (safeyaml.YAMLError, TypeError, ValueError): + preseed_cfg = yaml.safe_load(preseed_str) + except (yaml.YAMLError, TypeError, ValueError): LOG.warning( "lxd.preseed string value is not YAML. 
" " Unable to determine required storage driver packages to" diff --git a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_snap.py b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_snap.py index b798b09d7..b2844035c 100644 --- a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_snap.py +++ b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_snap.py @@ -6,117 +6,28 @@ import logging import os -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE from cloudinit.subp import prepend_base_command -distros = ["ubuntu"] -frequency = PER_INSTANCE - LOG = logging.getLogger(__name__) meta: MetaSchema = { "id": "cc_snap", - "name": "Snap", - "title": "Install, configure and manage snapd and snap packages", - "description": dedent( - """\ - This module provides a simple configuration namespace in cloud-init to - both setup snapd and install snaps. - - .. note:: - Both ``assertions`` and ``commands`` values can be either a - dictionary or a list. If these configs are provided as a - dictionary, the keys are only used to order the execution of the - assertions or commands and the dictionary is merged with any - vendor-data snap configuration provided. If a list is provided by - the user instead of a dict, any vendor-data snap configuration is - ignored. - - The ``assertions`` configuration option is a dictionary or list of - properly-signed snap assertions which will run before any snap - ``commands``. They will be added to snapd's assertion database by - invoking ``snap ack ``. - - Snap ``commands`` is a dictionary or list of individual snap - commands to run on the target system. These commands can be used to - create snap users, install snaps and provide snap configuration. - - .. note:: - If 'side-loading' private/unpublished snaps on an instance, it is - best to create a snap seed directory and seed.yaml manifest in - **/var/lib/snapd/seed/** which snapd automatically installs on - startup. - """ - ), - "distros": distros, - "examples": [ - dedent( - """\ - snap: - assertions: - 00: | - signed_assertion_blob_here - 02: | - signed_assertion_blob_here - commands: - 00: snap create-user --sudoer --known @mydomain.com - 01: snap install canonical-livepatch - 02: canonical-livepatch enable - """ - ), - dedent( - """\ - # Convenience: the snap command can be omitted when specifying commands - # as a list and 'snap' will automatically be prepended. - # The following commands are equivalent: - snap: - commands: - 00: ['install', 'vlc'] - 01: ['snap', 'install', 'vlc'] - 02: snap install vlc - 03: 'snap install vlc' - """ - ), - dedent( - """\ - # You can use a list of commands - snap: - commands: - - ['install', 'vlc'] - - ['snap', 'install', 'vlc'] - - snap install vlc - - 'snap install vlc' - """ - ), - dedent( - """\ - # You can use a list of assertions - snap: - assertions: - - signed_assertion_blob_here - - | - signed_assertion_blob_here - """ - ), - ], + "distros": ["ubuntu"], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["snap"], -} - - -__doc__ = get_meta_doc(meta) +} # type: ignore SNAP_CMD = "snap" def add_assertions(assertions, assertions_file): - """Import list of assertions. + r"""Import list of assertions. 
Import assertions by concatenating each assertion into a string separated by a '\n'. Write this string to a instance file and diff --git a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_ubuntu_autoinstall.py b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_ubuntu_autoinstall.py index c75f7a979..ff5286370 100644 --- a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_ubuntu_autoinstall.py +++ b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/config/cc_ubuntu_autoinstall.py @@ -6,7 +6,7 @@ import re from textwrap import dedent -from cloudinit import util +from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import ( @@ -16,7 +16,6 @@ get_meta_doc, ) from cloudinit.settings import PER_ONCE -from cloudinit.subp import subp LOG = logging.getLogger(__name__) @@ -85,7 +84,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: return util.wait_for_snap_seeded(cloud) - snap_list, _ = subp(["snap", "list"]) + snap_list, _ = subp.subp(["snap", "list"]) installer_present = None for snap_name in LIVE_INSTALLER_SNAPS: if re.search(snap_name, snap_list): diff --git a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/util.py b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/util.py index c9b7287ae..98dd66d59 100644 --- a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/util.py +++ b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/cloudinit/util.py @@ -49,6 +49,7 @@ Generator, List, Mapping, + NamedTuple, Optional, Sequence, TypeVar, @@ -56,12 +57,14 @@ ) from urllib import parse +import yaml + from cloudinit import ( features, importer, + log, mergers, net, - safeyaml, settings, subp, temp_utils, @@ -88,6 +91,11 @@ FALSE_STRINGS = ("off", "0", "no", "false") +class DeprecationLog(NamedTuple): + log_level: int + message: str + + def kernel_version(): return tuple(map(int, os.uname().release.split(".")[:2])) @@ -349,8 +357,6 @@ def read_conf(fname, *, instance_data_file=None) -> Dict: config_file, repr(e), ) - if config_file is None: - return {} return load_yaml(config_file, default={}) # pyright: ignore @@ -395,13 +401,13 @@ def clean_filename(fn): def decomp_gzip(data, quiet=True, decode=True): try: - buf = io.BytesIO(encode_text(data)) - with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh: - # E1101 is https://github.com/PyCQA/pylint/issues/1444 + with io.BytesIO(encode_text(data)) as buf, gzip.GzipFile( + None, "rb", 1, buf + ) as gh: if decode: - return decode_binary(gh.read()) # pylint: disable=E1101 + return decode_binary(gh.read()) else: - return gh.read() # pylint: disable=E1101 + return gh.read() except Exception as e: if quiet: return data @@ -651,6 +657,7 @@ def _get_variant(info): "almalinux", "alpine", "arch", + "azurelinux", "centos", "cloudlinux", "debian", @@ -751,7 +758,7 @@ def get_cfg_by_path(yobj, keyp, default=None): or an iterable. @param default: The default to return if the path does not exist. @return: The value of the item at keyp." 
-        is not found."""
+        is not found."""

     if isinstance(keyp, str):
         keyp = keyp.split("/")
@@ -948,13 +955,14 @@ def del_dir(path):
     shutil.rmtree(path)


-# read_optional_seed
-# returns boolean indicating success or failure (presence of files)
-# if files are present, populates 'fill' dictionary with 'user-data' and
-# 'meta-data' entries
 def read_optional_seed(fill, base="", ext="", timeout=5):
+    """
+    returns boolean indicating success or failure (presence of files)
+    if files are present, populates 'fill' dictionary with 'user-data' and
+    'meta-data' entries
+    """
     try:
-        (md, ud, vd) = read_seeded(base, ext, timeout)
+        md, ud, vd = read_seeded(base=base, ext=ext, timeout=timeout)
         fill["user-data"] = ud
         fill["vendor-data"] = vd
         fill["meta-data"] = md
@@ -1008,7 +1016,7 @@ def load_yaml(blob, default=None, allowed=(dict,)):
             len(blob),
             allowed,
         )
-        converted = safeyaml.load(blob)
+        converted = yaml.safe_load(blob)
         if converted is None:
             LOG.debug("loaded blob returned None, returning default.")
             converted = default
@@ -1019,7 +1027,7 @@
                 % (allowed, type_utils.obj_name(converted))
             )
         loaded = converted
-    except (safeyaml.YAMLError, TypeError, ValueError) as e:
+    except (yaml.YAMLError, TypeError, ValueError) as e:
         msg = "Failed loading yaml blob"
         mark = None
         if hasattr(e, "context_mark") and getattr(e, "context_mark"):
@@ -1038,7 +1046,7 @@
     return loaded


-def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
+def read_seeded(base="", ext="", timeout=5, retries=10):
     if base.find("%s") >= 0:
         ud_url = base.replace("%s", "user-data" + ext)
         vd_url = base.replace("%s", "vendor-data" + ext)
@@ -1389,20 +1397,6 @@ def search_for_mirror(candidates):
     return None


-def close_stdin():
-    """
-    reopen stdin as /dev/null so even subprocesses or other os level things get
-    /dev/null as input.
-
-    if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true
-    value then input will not be closed (useful for debugging).
-    """
-    if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDIN")):
-        return
-    with open(os.devnull) as fp:
-        os.dup2(fp.fileno(), sys.stdin.fileno())
-
-
 def find_devs_with_freebsd(
     criteria=None, oformat="device", tag=None, no_cache=False, path=None
 ):
@@ -1597,14 +1591,14 @@ def load_binary_file(
     quiet: bool = False,
 ) -> bytes:
     LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
-    ofh = io.BytesIO()
-    try:
-        with open(fname, "rb") as ifh:
-            pipe_in_out(ifh, ofh, chunk_cb=read_cb)
-    except FileNotFoundError:
-        if not quiet:
-            raise
-    contents = ofh.getvalue()
+    with io.BytesIO() as ofh:
+        try:
+            with open(fname, "rb") as ifh:
+                pipe_in_out(ifh, ofh, chunk_cb=read_cb)
+        except FileNotFoundError:
+            if not quiet:
+                raise
+        contents = ofh.getvalue()
     LOG.debug("Read %s bytes from %s", len(contents), fname)
     return contents
@@ -1795,21 +1789,10 @@ def get_config_logfiles(cfg):
     return list(set(logs + rotated_logs))


-def logexc(log, msg, *args):
-    # Setting this here allows this to change
-    # levels easily (not always error level)
-    # or even desirable to have that much junk
-    # coming out to a non-debug stream
-    if msg:
-        log.warning(msg, *args)
-    # Debug gets the full trace. However, nose has a bug whereby its
-    # logcapture plugin doesn't properly handle the case where there is no
-    # actual exception. To avoid tracebacks during the test suite then, we'll
-    # do the actual exc_info extraction here, and if there is no exception in
-    # flight, we'll just pass in None.
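As a side note on the `read_seeded()` hunk above: the `%s` placeholder in `base` marks where each seed file name is substituted. A minimal sketch of that expansion follows; `expand_seed_urls` is a hypothetical helper, not cloud-init's function, and the append fallback is an assumption matching the function's long-standing else-branch.

```python
# Hypothetical helper illustrating read_seeded()'s "%s" expansion: if `base`
# contains "%s", each seed name is substituted at that point; otherwise the
# names are appended to the base (assumed behavior).
def expand_seed_urls(base: str, ext: str = "") -> dict:
    names = ("meta-data", "user-data", "vendor-data")
    if "%s" in base:
        return {n: base.replace("%s", n + ext) for n in names}
    return {n: base + n + ext for n in names}

print(expand_seed_urls("http://10.0.0.1/seed/%s"))
# {'meta-data': 'http://10.0.0.1/seed/meta-data',
#  'user-data': 'http://10.0.0.1/seed/user-data',
#  'vendor-data': 'http://10.0.0.1/seed/vendor-data'}
```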
- exc_info = sys.exc_info() - if exc_info == (None, None, None): - exc_info = None +def logexc( + log, msg, *args, log_level: int = logging.WARNING, exc_info=True +) -> None: + log.log(log_level, msg, *args) log.debug(msg, exc_info=exc_info, *args) @@ -2630,7 +2613,7 @@ def find_freebsd_part(fs): return splitted[0] elif len(splitted) == 3: return splitted[2] - elif splitted[2] in ["label", "gpt", "ufs"]: + elif splitted[2] in ["label", "gpt", "gptid", "ufs", "ufsid"]: target_label = fs[5:] (part, _err) = subp.subp(["glabel", "status", "-s"]) for labels in part.split("\n"): @@ -2814,7 +2797,7 @@ def log_time( if kwargs is None: kwargs = {} - start = time.time() + start = time.monotonic() ustart = None if get_uptime: @@ -2826,7 +2809,7 @@ def log_time( try: ret = func(*args, **kwargs) finally: - delta = time.time() - start + delta = time.monotonic() - start udelta = None if ustart is not None: try: @@ -2957,8 +2940,6 @@ def is_x86(uname_arch=None): def message_from_string(string): - if sys.version_info[:2] < (2, 7): - return email.message_from_file(io.StringIO(string)) return email.message_from_string(string) @@ -3033,7 +3014,7 @@ def rootdev_from_cmdline(cmdline): def load_shell_content(content, add_empty=False, empty_val=None): - """Given shell like syntax (key=value\nkey2=value2\n) in content + r"""Given shell like syntax (key=value\nkey2=value2\n) in content return the data in dictionary form. If 'add_empty' is True then add entries in to the returned dictionary for 'VAR=' variables. Set their value to empty_val.""" @@ -3121,7 +3102,7 @@ def udevadm_settle(exists=None, timeout=None): def error(msg, rc=1, fmt="Error:\n{}", sys_exit=False): - """ + r""" Print error to stderr and return or exit @param msg: message to print @@ -3137,13 +3118,49 @@ def error(msg, rc=1, fmt="Error:\n{}", sys_exit=False): @total_ordering class Version(namedtuple("Version", ["major", "minor", "patch", "rev"])): - def __new__(cls, major=-1, minor=-1, patch=-1, rev=-1): + """A class for comparing versions. + + Implemented as a named tuple with all ordering methods. Comparisons + between X.Y.N and X.Y always treats the more specific number as larger. + + :param major: the most significant number in a version + :param minor: next greatest significant number after major + :param patch: next greatest significant number after minor + :param rev: the least significant number in a version + + :raises TypeError: If invalid arguments are given. + :raises ValueError: If invalid arguments are given. + + Examples: + >>> Version(2, 9) == Version.from_str("2.9") + True + >>> Version(2, 9, 1) > Version.from_str("2.9.1") + False + >>> Version(3, 10) > Version.from_str("3.9.9.9") + True + >>> Version(3, 7) >= Version.from_str("3.7") + True + + """ + + def __new__( + cls, major: int = -1, minor: int = -1, patch: int = -1, rev: int = -1 + ) -> "Version": """Default of -1 allows us to tiebreak in favor of the most specific number""" return super(Version, cls).__new__(cls, major, minor, patch, rev) @classmethod - def from_str(cls, version: str): + def from_str(cls, version: str) -> "Version": + """Create a Version object from a string. + + :param version: A period-delimited version string, max 4 segments. + + :raises TypeError: Raised if invalid arguments are given. + :raises ValueError: Raised if invalid arguments are given. + + :return: A Version object. 
+ """ return cls(*(list(map(int, version.split("."))))) def __gt__(self, other): @@ -3168,15 +3185,15 @@ def __iter__(self): def __str__(self): return ".".join(self) - def _compare_version(self, other) -> int: - """ - return values: - 1: self > v2 - -1: self < v2 - 0: self == v2 + def __hash__(self): + return hash(str(self)) + + def _compare_version(self, other: "Version") -> int: + """Compare this Version to another. - to break a tie between 3.1.N and 3.1, always treat the more - specific number as larger + :param other: A Version object. + + :return: -1 if self > other, 1 if self < other, else 0 """ if self == other: return 0 @@ -3191,14 +3208,27 @@ def _compare_version(self, other) -> int: return -1 +def should_log_deprecation(version: str, boundary_version: str) -> bool: + """Determine if a deprecation message should be logged. + + :param version: The version in which the thing was deprecated. + :param boundary_version: The version at which deprecation level is logged. + + :return: True if the message should be logged, else False. + """ + return boundary_version == "devel" or Version.from_str( + version + ) <= Version.from_str(boundary_version) + + def deprecate( *, deprecated: str, deprecated_version: str, extra_message: Optional[str] = None, schedule: int = 5, - return_log: bool = False, -): + skip_log: bool = False, +) -> DeprecationLog: """Mark a "thing" as deprecated. Deduplicated deprecations are logged. @@ -3214,13 +3244,15 @@ def deprecate( @param schedule: Manually set the deprecation schedule. Defaults to 5 years. Leave a comment explaining your reason for deviation if setting this value. - @param return_log: Return log text rather than logging it. Useful for + @param skip_log: Return log text rather than logging it. Useful for running prior to logging setup. + @return: NamedTuple containing log level and log message + DeprecationLog(level: int, message: str) Note: uses keyword-only arguments to improve legibility """ - if not hasattr(deprecate, "_log"): - deprecate._log = set() # type: ignore + if not hasattr(deprecate, "log"): + setattr(deprecate, "log", set()) message = extra_message or "" dedup = hash(deprecated + message + deprecated_version + str(schedule)) version = Version.from_str(deprecated_version) @@ -3230,14 +3262,19 @@ def deprecate( f"{deprecated_version} and scheduled to be removed in " f"{version_removed}. 
{message}" ).rstrip() - if return_log: - return deprecate_msg - if dedup not in deprecate._log: # type: ignore - deprecate._log.add(dedup) # type: ignore - if hasattr(LOG, "deprecated"): - LOG.deprecated(deprecate_msg) # type: ignore - else: - LOG.warning(deprecate_msg) + if not should_log_deprecation( + deprecated_version, features.DEPRECATION_INFO_BOUNDARY + ): + level = logging.INFO + elif hasattr(LOG, "deprecated"): + level = log.DEPRECATED + else: + level = logging.WARN + log_cache = getattr(deprecate, "log") + if not skip_log and dedup not in log_cache: + log_cache.add(dedup) + LOG.log(level, deprecate_msg) + return DeprecationLog(level, deprecate_msg) def deprecate_call( diff --git a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/systemd/cloud-config.service.tmpl b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/systemd/cloud-config.service.tmpl index 18295fc47..79c75c71a 100644 --- a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/systemd/cloud-config.service.tmpl +++ b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/systemd/cloud-config.service.tmpl @@ -1,8 +1,8 @@ ## template:jinja [Unit] -Description=Apply the settings specified in cloud-config +# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html +Description=Cloud-init: Config Stage After=network-online.target cloud-config.target -Before=systemd-user-sessions.service Wants=network-online.target cloud-config.target ConditionPathExists=!/etc/cloud/cloud-init.disabled ConditionKernelCommandLine=!cloud-init=disabled diff --git a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/tests/unittests/config/test_cc_ubuntu_autoinstall.py b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/tests/unittests/config/test_cc_ubuntu_autoinstall.py index 4b5666909..1a492ad0f 100644 --- a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/tests/unittests/config/test_cc_ubuntu_autoinstall.py +++ b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/tests/unittests/config/test_cc_ubuntu_autoinstall.py @@ -66,7 +66,7 @@ def test_runtime_validation_errors(self, src_cfg, error_msg): @mock.patch(MODPATH + "util.wait_for_snap_seeded") -@mock.patch(MODPATH + "subp") +@mock.patch(MODPATH + "subp.subp") class TestHandleAutoinstall: """Test cc_ubuntu_autoinstall handling of config.""" diff --git a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/tests/unittests/util.py b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/tests/unittests/util.py index ca59477ef..c779ae560 100644 --- a/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/tests/unittests/util.py +++ b/.pc/revert-551f560d-cloud-config-after-snap-seeding.patch/tests/unittests/util.py @@ -163,7 +163,7 @@ def shutdown_command(self, *, mode, delay, message): def package_command(self, command, args=None, pkgs=None): pass - def update_package_sources(self): + def update_package_sources(self, *, force=False): return (True, "yay") def do_as(self, command, args=None, **kwargs): diff --git a/.pc/status-do-not-remove-duplicated-data.patch/cloudinit/cmd/status.py b/.pc/status-do-not-remove-duplicated-data.patch/cloudinit/cmd/status.py index 33f697566..390898029 100644 --- a/.pc/status-do-not-remove-duplicated-data.patch/cloudinit/cmd/status.py +++ b/.pc/status-do-not-remove-duplicated-data.patch/cloudinit/cmd/status.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-"""Define 'status' utility and handler as part of cloud-init commandline.""" +"""Define 'status' utility and handler as part of cloud-init command line.""" import argparse import enum @@ -48,11 +48,11 @@ class EnabledStatus(enum.Enum): """Enum representing user-visible cloud-init boot status codes.""" DISABLED_BY_GENERATOR = "disabled-by-generator" - DISABLED_BY_KERNEL_CMDLINE = "disabled-by-kernel-cmdline" + DISABLED_BY_KERNEL_CMDLINE = "disabled-by-kernel-command-line" DISABLED_BY_MARKER_FILE = "disabled-by-marker-file" DISABLED_BY_ENV_VARIABLE = "disabled-by-environment-variable" ENABLED_BY_GENERATOR = "enabled-by-generator" - ENABLED_BY_KERNEL_CMDLINE = "enabled-by-kernel-cmdline" + ENABLED_BY_KERNEL_CMDLINE = "enabled-by-kernel-command-line" ENABLED_BY_SYSVINIT = "enabled-by-sysvinit" UNKNOWN = "unknown" diff --git a/.pc/status-do-not-remove-duplicated-data.patch/tests/unittests/cmd/test_status.py b/.pc/status-do-not-remove-duplicated-data.patch/tests/unittests/cmd/test_status.py index 971104336..22241139f 100644 --- a/.pc/status-do-not-remove-duplicated-data.patch/tests/unittests/cmd/test_status.py +++ b/.pc/status-do-not-remove-duplicated-data.patch/tests/unittests/cmd/test_status.py @@ -186,7 +186,7 @@ def test_get_status_systemd_failure( lambda config: f"Cloud-init disabled by {config.disable_file}", id="true_on_disable_file", ), - # Not disabled when using systemd and enabled via commandline. + # Not disabled when using systemd and enabled via command line. pytest.param( lambda config: config.disable_file, True, @@ -309,7 +309,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( """\ status: disabled extended_status: disabled - boot_status_code: disabled-by-kernel-cmdline + boot_status_code: disabled-by-kernel-command-line detail: disabled for some reason errors: [] recoverable_errors: {} @@ -452,7 +452,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( """\ status: error extended_status: error - running - boot_status_code: enabled-by-kernel-cmdline + boot_status_code: enabled-by-kernel-command-line last_update: Thu, 01 Jan 1970 00:02:05 +0000 detail: DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net] errors: @@ -482,7 +482,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( """\ status: running extended_status: running - boot_status_code: enabled-by-kernel-cmdline + boot_status_code: enabled-by-kernel-command-line last_update: Thu, 01 Jan 1970 00:02:04 +0000 detail: Running in stage: init errors: [] @@ -507,7 +507,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( dedent( """\ --- - boot_status_code: enabled-by-kernel-cmdline + boot_status_code: enabled-by-kernel-command-line datasource: '' detail: 'Running in stage: init' errors: [] @@ -542,7 +542,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( MyArgs(long=False, wait=False, format="json"), 0, { - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", "datasource": "", "detail": "Running in stage: init", "errors": [], @@ -583,7 +583,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( MyArgs(long=False, wait=False, format="json"), 1, { - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", "datasource": "nocloud", "detail": ( "DataSourceNoCloud [seed=/var/.../seed/" @@ -666,7 +666,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( MyArgs(long=False, wait=False, 
format="json"), 2, { - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", "datasource": "nocloud", "detail": ( "DataSourceNoCloud [seed=/var/.../" @@ -746,7 +746,7 @@ def test_status_output( assert_file, cmdargs: MyArgs, expected_retcode: int, - expected_status: str, + expected_status: Union[str, dict], config: Config, capsys, ): diff --git a/.pc/status-retain-recoverable-error-exit-code.patch/cloudinit/cmd/status.py b/.pc/status-retain-recoverable-error-exit-code.patch/cloudinit/cmd/status.py index 4c862ca04..9c2698749 100644 --- a/.pc/status-retain-recoverable-error-exit-code.patch/cloudinit/cmd/status.py +++ b/.pc/status-retain-recoverable-error-exit-code.patch/cloudinit/cmd/status.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -"""Define 'status' utility and handler as part of cloud-init commandline.""" +"""Define 'status' utility and handler as part of cloud-init command line.""" import argparse import enum @@ -48,11 +48,11 @@ class EnabledStatus(enum.Enum): """Enum representing user-visible cloud-init boot status codes.""" DISABLED_BY_GENERATOR = "disabled-by-generator" - DISABLED_BY_KERNEL_CMDLINE = "disabled-by-kernel-cmdline" + DISABLED_BY_KERNEL_CMDLINE = "disabled-by-kernel-command-line" DISABLED_BY_MARKER_FILE = "disabled-by-marker-file" DISABLED_BY_ENV_VARIABLE = "disabled-by-environment-variable" ENABLED_BY_GENERATOR = "enabled-by-generator" - ENABLED_BY_KERNEL_CMDLINE = "enabled-by-kernel-cmdline" + ENABLED_BY_KERNEL_CMDLINE = "enabled-by-kernel-command-line" ENABLED_BY_SYSVINIT = "enabled-by-sysvinit" UNKNOWN = "unknown" diff --git a/.pc/status-retain-recoverable-error-exit-code.patch/tests/unittests/cmd/test_status.py b/.pc/status-retain-recoverable-error-exit-code.patch/tests/unittests/cmd/test_status.py index b85c2ef1e..b9ea7a5e5 100644 --- a/.pc/status-retain-recoverable-error-exit-code.patch/tests/unittests/cmd/test_status.py +++ b/.pc/status-retain-recoverable-error-exit-code.patch/tests/unittests/cmd/test_status.py @@ -186,7 +186,7 @@ def test_get_status_systemd_failure( lambda config: f"Cloud-init disabled by {config.disable_file}", id="true_on_disable_file", ), - # Not disabled when using systemd and enabled via commandline. + # Not disabled when using systemd and enabled via command line. 
pytest.param( lambda config: config.disable_file, True, @@ -309,7 +309,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( """\ status: disabled extended_status: disabled - boot_status_code: disabled-by-kernel-cmdline + boot_status_code: disabled-by-kernel-command-line detail: disabled for some reason errors: [] recoverable_errors: {} @@ -452,7 +452,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( """\ status: error extended_status: error - running - boot_status_code: enabled-by-kernel-cmdline + boot_status_code: enabled-by-kernel-command-line last_update: Thu, 01 Jan 1970 00:02:05 +0000 detail: DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net] errors: @@ -482,7 +482,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( """\ status: running extended_status: running - boot_status_code: enabled-by-kernel-cmdline + boot_status_code: enabled-by-kernel-command-line last_update: Thu, 01 Jan 1970 00:02:04 +0000 detail: Running in stage: init errors: [] @@ -508,7 +508,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( """\ --- _schema_version: '1' - boot_status_code: enabled-by-kernel-cmdline + boot_status_code: enabled-by-kernel-command-line datasource: '' detail: 'Running in stage: init' errors: [] @@ -523,7 +523,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( recoverable_errors: {} schemas: '1': - boot_status_code: enabled-by-kernel-cmdline + boot_status_code: enabled-by-kernel-command-line datasource: '' detail: 'Running in stage: init' errors: [] @@ -560,7 +560,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( MyArgs(long=False, wait=False, format="json"), 0, { - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", "datasource": "", "detail": "Running in stage: init", "errors": [], @@ -573,7 +573,9 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( "_schema_version": "1", "schemas": { "1": { - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": ( + "enabled-by-kernel-command-line" + ), "datasource": "", "detail": "Running in stage: init", "errors": [], @@ -621,7 +623,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( 1, { "_schema_version": "1", - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", "datasource": "nocloud", "detail": ( "DataSourceNoCloud [seed=/var/.../seed/" @@ -644,7 +646,9 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( "recoverable_errors": {}, "schemas": { "1": { - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": ( + "enabled-by-kernel-command-line" + ), "datasource": "nocloud", "detail": "DataSourceNoCloud " "[seed=/var/.../seed/nocloud-net][dsmode=net]", @@ -729,7 +733,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( 2, { "_schema_version": "1", - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", "datasource": "nocloud", "detail": ( "DataSourceNoCloud [seed=/var/.../" @@ -790,7 +794,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( }, "schemas": { "1": { - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", "datasource": "nocloud", "detail": "DataSourceNoCloud " "[seed=/var/.../seed/nocloud-net][dsmode=net]", @@ -892,7 +896,7 @@ def test_status_output( assert_file, cmdargs: 
MyArgs, expected_retcode: int, - expected_status: str, + expected_status: Union[str, dict], config: Config, capsys, ): diff --git a/.pylintrc b/.pylintrc index c8e2577a0..38a47cb65 100644 --- a/.pylintrc +++ b/.pylintrc @@ -7,7 +7,6 @@ jobs=4 [MESSAGES CONTROL] # Errors and warnings with some filtered: -# W0201(attribute-defined-outside-init) # W0212(protected-access) # W0221(arguments-differ) # W0222(signature-differs) @@ -27,7 +26,7 @@ jobs=4 # W1514(unspecified-encoding) # E0012(bad-option-value) -disable=C, F, I, R, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401, W1514, E0012 +disable=C, F, I, R, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401, W1514, E0012 [REPORTS] @@ -66,4 +65,3 @@ ignored-classes=argparse.Namespace,optparse.Values,thread._local,ImageManager,Co # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. generated-members=types,http.client,command_handlers,m_.*,enter_context - diff --git a/ChangeLog b/ChangeLog index 70dfe24fd..260c460df 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,390 @@ +24.2 + - test: Fix no default user in test_status.py (#5478) + - fix: correct deprecated_version=22.2 for users.sudo + - test: Add jsonschema guard in test_cc_ubuntu_pro.py (#5479) + - fix(test): Fix pycloudlib types in integration tests (#5350) + - fix(test): Fix ip printing for non-lxd instances (#5350) + - chore(mypy): Drop unused missing import exclusions (#5350) + - type: Add stub types for network v1/v2 config (#5350) + - chore: Auto-format network jsonschema in ci (#5350) + - fix(tox): Update tox.ini (#5350) + - chore(typing): Remove type ignores and casts (#5350) + - refactor(typing): Remove unused code paths (#5350) + - fix(typing): Add / update type annotations (#5350) + - fix(typing): Remove type annotation for unused variable (#5350) + - fix(typing): Remove invalid type annotations (#5350) + - ci(mypy): Set default follow_imports value (#5350) + - test: Update integration tests to pass on focal (#5476) + - tests: update ubuntu_pro test to account for info-level deprecations + (#5475) + - tests: update nocloud deprecation test for boundary version (#5474) + - fix(rh_subscription): add string type to org (#5453) + - tests: integration tests aware of features.DEPRECATION_INFO_BOUNDARY + - tests: update keyserver PPA key fur curtin-dev (#5472) + - test: Fix deprecation test failures (#5466) + - chore: fix schema.py formatting (#5465) + - fix: dont double-log deprecated INFOs (#5465) + - fix(test): Mock version boundary (#5464) + - fix(schema): Don't report changed keys as deprecated (#5464) + - test: fix unit test openstack vlan mac_address (#5367) + - fix: Ensure properties for bonded interfaces are properly translated + (#5367) [Curt Moore] + - fix(schema): permit deprecated hyphenated keys under users key (#5456) + - fix: Do not add the vlan_mac_address field into the VLAN object (#5365) + [Curt Moore] + - doc(refactor): Convert module docs to new system (#5427) [Sally] + - test: Add unit tests for features.DEPRECATION_INFO_BOUNDARY (#5411) + - feat: Add deprecation boundary support to schema validator (#5411) + - feat: Add deprecation boundary to logger (#5411) + - fix: Gracefully handle missing files (#5397) [Curt Moore] + - test(openstack): Test bond mac address (#5369) + - fix(openstack): Fix bond mac_address (#5369) [Curt Moore] + - test: Add ds-identify integration test coverage (#5394) + - 
chore(cmdline): Update comments (#5458) + - fix: Add get_connection_with_tls_context() for requests 2.32.2+ (#5435) + [eaglegai] + - fix(net): klibc ipconfig PROTO compatibility (#5437) + [Alexsander de Souza] (LP: #2065787) + - Support metalink in yum repository config (#5444) [Ani Sinha] + - tests: hard-code curtin-dev ppa instead of canonical-kernel-team (#5450) + - ci: PR update checklist GH- anchors to align w/ later template (#5449) + - test: update validate error message in test_networking (#5436) + - ci: Add PR checklist (#5446) + - chore: fix W0105 in t/u/s/h/test_netlink.py (#5409) + - chore(pyproject.toml): migrate to booleans (#5409) + - typing: add check_untyped_defs (#5409) + - fix(openstack): Append interface / scope_id for IPv6 link-local metadata + address (#5419) [Christian Rohmann] + - test: Update validation error in test_cli.py test (#5430) + - test: Update schema validation error in integration test (#5429) + - test: bump pycloudlib to get azure oracular images (#5428) + - fix(azure): fix discrepancy for monotonic() vs time() (#5420) + [Chris Patterson] + - fix(pytest): Fix broken pytest gdb flag (#5415) + - fix: Use monotonic time (#5423) + - docs: Remove mention of resolv.conf (#5424) + - perf(netplan): Improve network v1 -> network v2 performance (#5391) + - perf(set_passwords): Run module in Network stage (#5395) + - fix(test): Remove temporary directory side effect (#5416) + - Improve schema validator warning messages (#5404) [Ani Sinha] + - feat(sysconfig): Add DNS from interface config to resolv.conf (#5401) + [Ani Sinha] + - typing: add no_implicit_optional lint (#5408) + - doc: update examples to reflect alternative ways to provide `sudo` + option (#5418) [Ani Sinha] + - fix(jsonschema): Add missing sudo definition (#5418) + - chore(doc): migrate cc modules i through r to templates (#5313) + - chore(doc): migrate grub_dpkg to tmpl add changed/deprecation (#5313) + - chore(json): migrate cc_apt_configure and json schema indents (#5313) + - chore(doc): migrate ca_certs/chef to template, flatten schema (#5313) + - chore(doc): migrate cc_byobu to templates (#5313) + - chore(doc): migrate cc_bootcmd to templates (#5313) + - fix(apt): Enable calling apt update multiple times (#5230) + - chore(VMware): Modify section of instance-id in the customization config + (#5356) [PengpengSun] + - fix(treewide): Remove dead code (#5332) [Shreenidhi Shedi] + - doc: network-config v2 ethernets are of type object (#5381) [Malte Poll] + - Release 24.1.7 (#5375) + - fix(azure): url_helper: specify User-Agent when using headers_cb with + readurl() (#5298) [Ksenija Stanojevic] + - fix: Stop attempting to resize ZFS in cc_growpart on Linux (#5370) + - doc: update docs adding YAML 1.1 spec and jinja template references + - fix(final_message): do not warn on datasourcenone when single ds + - fix(growpart): correct growpart log message to include value of mode + - feat(hotplug): disable hotplugd.socket (#5058) + - feat(hotlug): trigger hotplug after cloud-init.service (#5058) + - test: add function to push and enable systemd units (#5058) + - test(util): fix wait_until_cloud_init exit code 2 (#5058) + - test(hotplug): fix race getting ipv6 (#5271) + - docs: Adjust CSS to increase font weight across the docs (#5363) [Sally] + - fix(ec2): Correctly identify netplan renderer (#5361) + - tests: fix expect logging from growpart on devent with partition (#5360) + - test: Add v2 test coverage to test_net.py (#5247) + - refactor: Simplify collect_logs() in logs.py (#5268) + - fix: Ensure no subp 
from logs.py import (#5268) + - tests: fix integration tests for ubuntu pro 32.3 release (#5351) + - tests: add oracular's hello package for pkg upgrade test (#5354) + - growpart: Fix behaviour for ZFS datasets (#5169) [Mina Galić] + - device_part_info: do not recurse if we did not match anything (#5169) + [Mina Galić] + - feat(alpine): add support for Busybox adduser/addgroup (#5176) + [dermotbradley] + - ci: Move lint tip and py3-dev jobs to daily (#5347) + - fix(netplan): treat netplan warnings on stderr as debug for cloud-init + (#5348) + - feat(disk_setup): Add support for nvme devices (#5263) + - fix(log): Do not warn when doing requested operation (#5263) + - Support sudoers in the "/usr/usr merge" location (#5161) + [Robert Schweikert] + - doc(nocloud): Document network-config file (#5204) + - fix(netplan): Fix predictable interface rename issue (#5339) + - cleanup: Don't execute code on import (#5295) + - fix(net): Make duplicate route add succeed. (#5343) + - fix(freebsd): correct configuration of IPv6 routes (#5291) [Théo Bertin] + - fix(azure): disable use-dns for secondary nics (#5314) + - chore: fix lint failure (#5320) + - Update pylint version to support python 3.12 (#5338) [Ani Sinha] + - fix(tests): use regex to avoid focal whitespace in jinja debug test + (#5335) + - chore: Add docstrings and types to Version class (#5262) + - ci(mypy): add type-jinja2 stubs (#5337) + - tests(alpine): github trust lxc mounted source dir cloud-init-ro (#5329) + - test: Add oracular release to integration tests (#5328) + - Release 24.1.6 (#5326) + - test: Fix failing test_ec2.py test (#5324) + - fix: Check renderer for netplan-specific code (#5321) + - docs: Removal of top-level --file breaking change (#5308) + - fix: typo correction of delaycompress (#5317) + - docs: Renderers/Activators have downstream overrides (#5322) + - fix(ec2): Ensure metadata exists before configuring PBR (#5287) + - fix(lxd): Properly handle unicode from LXD socket (#5309) + - docs: Prefer "artifact" over "artefact" (#5311) [Arthur Le Maitre] + - chore(doc): migrate cc_byobu to templates + - chore(doc): migrate cc_bootcmd to templates + - chore(doc): migrate apt_pipelining and apk_configure to templates + - tests: in_place mount module-docs into lxd vm/container + - feat(docs): generate rtd module schema from rtd/module-docs + - feat: Set RH ssh key permissions when no 'ssh_keys' group (#5296) + [Ani Sinha] + - test: Avoid circular import in Azure tests (#5280) + - test: Fix test_failing_userdata_modules_exit_codes (#5279) + - chore: Remove CPY check from ruff (#5281) + - chore: Clean up docstrings + - chore(ruff): Bump to version 0.4.3 + - feat(systemd): Improve AlmaLinux OS and CloudLinux OS support (#5265) + [Elkhan Mammadli] + - feat(ca_certs): Add AlmaLinux OS and CloudLinux OS support (#5264) + [Elkhan Mammadli] + - docs: cc_apt_pipelining docstring typo fix (#5273) [Alex Ratner] + - feat(azure): add request identifier to IMDS requests (#5218) + [Ksenija Stanojevic] + - test: Fix TestFTP integration test (#5237) [d1r3ct0r] + - feat(ifconfig): prepare for CIDR output (#5272) [Mina Galić] + - fix: stop manually dropping dhcp6 key in integration test (#5267) + [Alec Warren] + - test: Remove some CiTestCase tests (#5256) + - fix: Warn when signal is handled (#5186) + - fix(snapd): ubuntu do not snap refresh when snap absent (LP: #2064300) + - feat(landscape-client): handle already registered client (#4784) + [Fabian Lichtenegger-Lukas] + - doc: Show how to debug external services blocking cloud-init (#5255) + - 
fix(pdb): Enable running cloud-init under pdb (#5217) + - chore: Update systemd description (#5250) + - fix(time): Harden cloud-init to system clock changes + - fix: Update analyze timestamp uptime + - fix(schema): no network validation on netplan systems without API + - fix(mount): Don't run cloud-init.service if cloud-init disabled (#5226) + - fix(ntp): Fix AlmaLinux OS and CloudLinux OS support (#5235) + [Elkhan Mammadli] + - tests: force version of cloud-init from PPA regardless of version (#5251) + - ci: Print isort diff (#5242) + - test: Fix integration test dependencies (#5248) + - fix(ec2): Fix broken uuid match with other-endianness (#5236) + - fix(schema): allow networkv2 schema without top-level key (#5239) + [Cat Red] + - fix(cmd): Do not hardcode reboot command (#5208) + - test: Run Alpine tests without network (#5220) + - docs: Add base config reference from explanation (#5241) + - docs: Remove preview from WSL tutorial (#5225) + - chore: Remove broken maas code (#5219) + - feat(WSL): Add support for Ubuntu Pro configs (#5116) [Ash] + - chore: sync ChangeLog and version.py from 24.1.x (#5228) + - bug(package_update): avoid snap refresh in images without snap command + (LP: #2064132) + - ci: Skip package build on tox runs (#5210) + - chore: Fix test skip message + - test(ec2): adopt pycloudlib public ip creation while launching instances + - test(ec2): add ipv6 testing for multi-nic instances + - test(ec2): adopt pycloudlib enable_ipv6 while launching instances + - feat: tool to print diff between netplan and networkv2 schema (#5200) + [Cat Red] + - test: mock internet access in test_upgrade (#5212) + - ci: Add timezone for alpine unit tests (#5216) + - fix: Ensure dump timestamps parsed as UTC (#5214) + - docs: Add WSL tutorial (#5206) + - feature(schema): add networkv2 schema (#4892) [Cat Red] + - Add alpine unittests to ci (#5121) + - test: Fix invalid openstack datasource name (#4905) + - test: Fix MAAS test and mark xfail (#4905) + - chore(ds-identify): Update shellcheck ignores (#4905) + - fix(ds-identify): Prevent various false positives and false negatives + (#4905) + - Use grep for faster parsing of cloud config in ds-identify (#4905) + [Scott Moser] (LP: #2030729) + - tests: validate netplan API YAML instead of strict content (#5195) + - chore(templates): update ubuntu universe wording (#5199) + - Deprecate the users ssh-authorized-keys property (#5162) + [Anders Björklund] + - doc(nocloud): Describe ftp and ftp over tls implementation (#5193) + - feat(net): provide network config to netplan.State for render (#4981) + - docs: Add breaking datasource identification changes (#5171) + - fix(openbsd): Update build-on-openbsd python dependencies (#5172) + [Hyacinthe Cartiaux] + - fix: Add subnet ipv4/ipv6 to network schema (#5191) + - docs: Add deprecated system_info to schema (#5168) + - docs: Add DataSourceNone documentation (#5165) + - test: Skip test if console log is None (#5188) + - fix(dhcp): Enable interactively running cloud-init init --local (#5166) + - test: Update message for netplan apply dbus issue + - test: install software-properties-common if absent during PPA setup + - test: bump pycloudlib to use latest version + - test: Update version of hello package installed on noble + - test: universally ignore netplan apply dbus issue (#5178) + - chore: Remove obsolete nose workaround + - feat: Add support for FTP and FTP over TLS (#4834) + - feat(opennebula): Add support for posix shell + - test: Make analyze tests not depend on GNU date + - test: Eliminate bash 
dependency from subp tests + - docs: Add breaking changes section to reference docs (#5147) [Cat Red] + - util: add log_level kwarg for logexc() (#5125) [Chris Patterson] + - refactor: Make device info part of distro definition (#5067) + - refactor: Distro-specific growpart code (#5067) + - test(ec2): fix mocking with responses==0.9.0 (focal) (#5163) + - chore(safeyaml): Remove unicode helper for Python2 (#5142) + - Revert "test: fix upgrade dhcp6 on ec2 (#5131)" (#5148) + - refactor(net): Reuse netops code + - refactor(iproute2): Make expressions multi-line for legibility + - feat(freebsd): support freebsd find part by gptid and ufsid (#5122) + [jinkangkang] + - feat: Determining route metric based on NIC name (#5070) [qidong.ld] + - test: Enable profiling in integration tests (#5130) + - dhcp: support configuring static routes for dhclient's unknown-121 + option (#5146) [Chris Patterson] + - feat(azure): parse ProvisionGuestProxyAgent as bool (#5126) + [Ksenija Stanojevic] + - fix(url_helper): fix TCP connection leak on readurl() retries (#5144) + [Chris Patterson] + - test: pytest-ify t/u/sources/test_ec2.py + - Revert "ec2: Do not enable dhcp6 on EC2 (#5104)" (#5145) [Major Hayden] + - fix: Logging sensitive data + - test: Mock ds-identify systemd path (#5119) + - fix(dhcpcd): Make lease parsing more robust (#5129) + - test: fix upgrade dhcp6 on ec2 (#5131) + - net/dhcp: raise InvalidDHCPLeaseFileError on error parsing dhcpcd lease + (#5128) [Chris Patterson] + - fix: Fix runtime file locations for cloud-init (#4820) + - ci: fix linkcheck.yml invalid yaml (#5123) + - net/dhcp: bump dhcpcd timeout to 300s (#5127) [Chris Patterson] + - ec2: Do not enable dhcp6 on EC2 (#5104) [Major Hayden] + - fix: Fall back to cached local ds if no valid ds found (#4997) + [PengpengSun] + - ci: Make linkcheck a scheduled job (#5118) + - net: Warn when interface rename fails + - ephemeral(dhcpcd): Set dhcpcd interface down + - Release 24.1.3 + - chore: Handle all level 1 TiCS security violations (#5103) + - fix: Always use single datasource if specified (#5098) + - fix(tests): Leaked mocks (#5097) + - fix(rhel)!: Fix network boot order in upstream cloud-init + - fix(rhel): Fix network ordering in sysconfig + - feat: Use NetworkManager renderer by default in RHEL family + - fix: Allow caret at the end of apt package (#5099) + - test: Add missing mocks to prevent bleed through (#5082) + [Robert Schweikert] + - fix: Ensure network config in DataSourceOracle can be unpickled (#5073) + - docs: set the home directory using homedir, not home (#5101) + [Olivier Gayot] (LP: #2047796) + - fix(cacerts): Correct configuration customizations for Photon (#5077) + [Christopher McCann] + - fix(test): Mock systemd fs path for non-systemd distros + - fix(tests): Leaked subp.which mock + - fix(networkd): add GatewayOnLink flag when necessary (#4996) [王煎饼] + - Release 24.1.2 + - test: fix `disable_sysfs_net` mock (#5065) + - refactor: don't import subp function directly (#5065) + - test: Remove side effects from tests (#5074) + - refactor: Import log module rather than functions (#5074) + - fix: Fix breaking changes in package install (#5069) + - fix: Undeprecate 'network' in schema route definition (#5072) + - refactor(ec2): simplify convert_ec2_metadata_network_config + - fix(ec2): fix ipv6 policy routing + - fix: document and add 'accept-ra' to network schema (#5060) + - bug(maas): register the correct DatasourceMAASLocal in init-local + (#5068) (LP: #2057763) + - ds-identify: Improve ds-identify testing flexibility 
(#5047) + - fix(ansible): Add verify_commit and inventory to ansible.pull schema + (#5032) [Fionn Fitzmaurice] + - doc: Explain breaking change in status code (#5049) + - gpg: Handle temp directory containing files (#5063) + - distro(freebsd): add_user: respect homedir (#5061) [Mina Galić] + - doc: Install required dependencies (#5054) + - networkd: Always respect accept-ra if set (#4928) [Phil Sphicas] + - chore: ignore all cloud-init_*.tar.gz in .gitignore (#5059) + - test: Don't assume ordering of ThreadPoolExecutor submissions (#5052) + - feat: Add new distro 'azurelinux' for Microsoft Azure Linux. (#4931) + [Dan Streetman] + - fix(gpg): Make gpg resilient to host configuration changes (#5026) + - Sync 24.1.1 changelog and version + - DS VMware: Fix ipv6 addr converter from netinfo to netifaces (#5029) + [PengpengSun] + - packages/debian: remove dependency on isc-dhcp-client (#5041) + [Chris Patterson] + - test: Allow fake_filesystem to work with TemporaryDirectory (#5035) + - tests: Don't wait for GCE instance teardown (#5037) + - fix: Include DataSourceCloudStack attribute in unpickle test (#5039) + - bug(vmware): initialize new DataSourceVMware attributes at unpickle + (#5021) (LP: #2056439) + - fix(apt): Don't warn on apt 822 source format (#5028) + - fix(atomic_helper.py): ensure presence of parent directories (#4938) + [Shreenidhi Shedi] + - fix: Add "broadcast" to network v1 schema (#5034) (LP: #2056460) + - pro: honor but warn on custom ubuntu_advantage in /etc/cloud/cloud.cfg + (#5030) + - net/dhcp: handle timeouts for dhcpcd (#5022) [Chris Patterson] + - fix: Make wait_for_url respect explicit arguments + - test: Fix scaleway retry assumptions + - fix: Make DataSourceOracle more resilient to early network issues + (#5025) (LP: #2056194) + - chore(cmd-modules): fix exit code when --mode init (#5017) + - feat: pylint: enable W0201 - attribute-defined-outside-init + - refactor: Ensure no attributes defined outside __init__ + - chore: disable attribute-defined-outside-init check in tests + - refactor: Use _unpickle rather than hasattr() in sources + - chore: remove unused vendordata "_pure" variables + - chore(cmd-modules): deprecate --mode init (#5005) + - tests: drop CiTestCase and convert to pytest + - bug(tests): mock reads of host's /sys/class/net via get_sys_class_path + - fix: log correct disabled path in ds-identify (#5016) + - tests: ec2 dont spend > 1 second retrying 19 times when 3 times will do + - tests: openstack mock expected ipv6 IMDS + - bug(wait_for_url): when exceptions occur url is unset, use url_exc + (LP: #2055077) + - feat(run-container): Run from arbitrary commitish (#5015) + - tests: Fix wsl test (#5008) + - feat(ds-identify): Don't run unnecessary systemd-detect-virt (#4633) + - chore(ephemeral): add debug log when bringing up ephemeral network + (#5010) [Alec Warren] + - release: sync changelog and version (#5011) + - Cleanup test_net.py (#4840) + - refactor: remove dependency on netifaces (#4634) [Cat Red] + - feat: make lxc binary configurable (#5000) + - docs: update 404 page for new doc site and bug link + - test(aws): local network connectivity on multi-nics (#4982) + - test: Make integration test output more useful (#4984) + +24.1.7 + - fix(ec2): Correctly identify netplan renderer (#5361) + +24.1.6 + - fix(ec2): Ensure metadata exists before configuring PBR (#5287) + - fix: Check renderer for netplan-specific code (#5321) + - test: Fix failing test_ec2.py test (#5324) + +24.1.5 + - fix(package_update): avoid snap refresh in images without snap 
command
+   (LP: #2064132)
+
+24.1.4
+ - fix(dhcpcd): Make lease parsing more robust (#5129)
+ - net/dhcp: raise InvalidDHCPLeaseFileError on error parsing dhcpcd lease
+   (#5128) [Chris Patterson]
+ - fix: Fix runtime file locations for cloud-init (#4820)
+ - net/dhcp: bump dhcpcd timeout to 300s (#5127) [Chris Patterson]
+ - net: Warn when interface rename fails
+ - ephemeral(dhcpcd): Set dhcpcd interface down
+ - test: Remove side effects from tests (#5074)
+ - refactor: Import log module rather than functions (#5074)
+
 24.1.3
  - fix: Always use single datasource if specified (#5098)
  - fix: Allow caret at the end of apt package (#5099)
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index 60a2fd9b2..55d149c44 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -2,7 +2,7 @@
 
 import calendar
 import sys
-from datetime import datetime
+from datetime import datetime, timezone
 
 from cloudinit import atomic_helper, subp, util
 
@@ -36,13 +36,16 @@ def parse_timestamp(timestampstr):
     if "." in timestampstr:
         FMT = CLOUD_INIT_JOURNALCTL_FMT
         dt = datetime.strptime(
-            timestampstr + " " + str(datetime.now().year), FMT
-        )
-        timestamp = dt.strftime("%s.%f")
+            timestampstr + " " + str(datetime.now().year),
+            FMT,
+        ).replace(tzinfo=timezone.utc)
+        timestamp = dt.timestamp()
     elif "," in timestampstr:
         # 2016-09-12 14:39:20,839
-        dt = datetime.strptime(timestampstr, CLOUD_INIT_ASCTIME_FMT)
-        timestamp = dt.strftime("%s.%f")
+        dt = datetime.strptime(timestampstr, CLOUD_INIT_ASCTIME_FMT).replace(
+            tzinfo=timezone.utc
+        )
+        timestamp = dt.timestamp()
     else:
         # allow GNU date(1) to handle other formats we don't expect
         # This may throw a ValueError if no GNU date can be found
@@ -51,18 +54,27 @@ def parse_timestamp(timestampstr):
     return float(timestamp)
 
 
+def has_gnu_date():
+    """GNU date includes a string containing the word GNU in its
+    help output. POSIX date does not. Use this to indicate, on Linux
+    systems without GNU date, that the extended parsing is not
+    available.
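+    (BusyBox date, as commonly shipped on Alpine, is one such
+    non-GNU implementation.)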
+ """ + return "GNU" in subp.subp(["date", "--help"]).stdout + + def parse_timestamp_from_date(timestampstr): - cmd = "date" - if not util.is_Linux(): - if subp.which("gdate"): - cmd = "gdate" - else: - raise ValueError( - f"Unable to parse timestamp without GNU date: [{timestampstr}]" - ) - out, _ = subp.subp([cmd, "+%s.%3N", "-d", timestampstr]) - timestamp = out.strip() - return float(timestamp) + if not util.is_Linux() and subp.which("gdate"): + date = "gdate" + elif has_gnu_date(): + date = "date" + else: + raise ValueError( + f"Unable to parse timestamp without GNU date: [{timestampstr}]" + ) + return float( + subp.subp([date, "-u", "+%s.%3N", "-d", timestampstr]).stdout.strip() + ) def parse_ci_logline(line): diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py index 41219709f..b3814c646 100644 --- a/cloudinit/analyze/show.py +++ b/cloudinit/analyze/show.py @@ -6,7 +6,6 @@ import datetime import json -import os import sys import time @@ -46,9 +45,6 @@ "%T": "total_time", } -formatting_help = " ".join( - ["{0}: {1}".format(k.replace("%", "%%"), v) for k, v in format_key.items()] -) SUCCESS_CODE = "successful" FAIL_CODE = "failure" CONTAINER_CODE = "container" @@ -243,19 +239,6 @@ def gather_timestamps_using_systemd(): # lxc based containers do not set their monotonic zero point to be when # the container starts, instead keep using host boot as zero point if util.is_container(): - # clock.monotonic also uses host boot as zero point - base_time = float(time.time()) - float(time.monotonic()) - # TODO: lxcfs automatically truncates /proc/uptime to seconds - # in containers when https://github.com/lxc/lxcfs/issues/292 - # is fixed, util.uptime() should be used instead of stat on - try: - file_stat = os.stat("/proc/1/cmdline") - kernel_start = file_stat.st_atime - except OSError as err: - raise RuntimeError( - "Could not determine container boot " - "time from /proc/1/cmdline. ({})".format(err) - ) from err status = CONTAINER_CODE kernel_end = base_time + delta_k_end cloudinit_sysd = base_time + delta_ci_s @@ -271,21 +254,15 @@ def gather_timestamps_using_systemd(): def generate_records( events, - blame_sort=False, print_format="(%n) %d seconds in %I%D", - dump_files=False, - log_datafiles=False, ): """ Take in raw events and create parent-child dependencies between events in order to order events in chronological order. :param events: JSONs from dump that represents events taken from logs - :param blame_sort: whether to sort by timestamp or by time taken. 
:param print_format: formatting to represent event, time stamp, and time taken by the event in one line - :param dump_files: whether to dump files into JSONs - :param log_datafiles: whether or not to log events generated :return: boot records ordered chronologically """ diff --git a/cloudinit/apport.py b/cloudinit/apport.py index f7d83e2b3..d52b79ab5 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -68,10 +68,6 @@ "Other", ] -# Potentially clear text collected logs -CLOUDINIT_LOG = "/var/log/cloud-init.log" -CLOUDINIT_OUTPUT_LOG = "/var/log/cloud-init-output.log" - def _get_user_data_file() -> str: paths = read_cfg_paths() diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py index f309d26e2..8d355b3c2 100644 --- a/cloudinit/atomic_helper.py +++ b/cloudinit/atomic_helper.py @@ -7,6 +7,8 @@ import tempfile from base64 import b64decode, b64encode +from cloudinit import util + _DEF_PERMS = 0o644 LOG = logging.getLogger(__name__) @@ -43,9 +45,9 @@ def write_file( tf = None try: - tf = tempfile.NamedTemporaryFile( - dir=os.path.dirname(filename), delete=False, mode=omode - ) + dirname = os.path.dirname(filename) + util.ensure_dir(dirname) + tf = tempfile.NamedTemporaryFile(dir=dirname, delete=False, mode=omode) LOG.debug( "Atomically writing to file %s (via temporary file %s) - %s: [%o]" " %d bytes/chars", diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index 5666a4eab..d852ab711 100755 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -"""Define 'clean' utility and handler as part of cloud-init commandline.""" +"""Define 'clean' utility and handler as part of cloud-init command line.""" import argparse import glob @@ -106,9 +106,10 @@ def get_parser(parser=None): return parser -def remove_artifacts(remove_logs, remove_seed=False, remove_config=None): +def remove_artifacts(init, remove_logs, remove_seed=False, remove_config=None): """Helper which removes artifacts dir and optionally log files. + @param: init: Init object to use @param: remove_logs: Boolean. Set True to delete the cloud_dir path. False preserves them. @param: remove_seed: Boolean. Set True to also delete seed subdir in @@ -117,7 +118,6 @@ def remove_artifacts(remove_logs, remove_seed=False, remove_config=None): Can be any of: all, network, ssh_config. @returns: 0 on success, 1 otherwise. """ - init = Init(ds_deps=[]) init.read_cfg() if remove_logs: for log_file in get_config_logfiles(init.cfg): @@ -158,8 +158,9 @@ def remove_artifacts(remove_logs, remove_seed=False, remove_config=None): def handle_clean_args(name, args): """Handle calls to 'cloud-init clean' as a subcommand.""" + init = Init(ds_deps=[]) exit_code = remove_artifacts( - args.remove_logs, args.remove_seed, args.remove_config + init, args.remove_logs, args.remove_seed, args.remove_config ) if args.machine_id: if uses_systemd(): @@ -169,7 +170,9 @@ def handle_clean_args(name, args): # Non-systemd like FreeBSD regen machine-id when file is absent del_file(ETC_MACHINE_ID) if exit_code == 0 and args.reboot: - cmd = ["shutdown", "-r", "now"] + cmd = init.distro.shutdown_command( + mode="reboot", delay="now", message=None + ) try: subp(cmd, capture=False) except ProcessExecutionError as e: diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py index cd218bce1..9c50ddfa1 100644 --- a/cloudinit/cmd/devel/__init__.py +++ b/cloudinit/cmd/devel/__init__.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. 
See LICENSE file for license information.
 
-"""Common cloud-init devel commandline utility functions."""
+"""Common cloud-init devel command line utility functions."""
 
 from cloudinit.helpers import Paths
 from cloudinit.stages import Init
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 83f574c10..a1e4eb8df 100755
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -11,12 +11,11 @@
 import shutil
 import subprocess
 import sys
-from datetime import datetime
+from datetime import datetime, timezone
 from pathlib import Path
-from typing import NamedTuple
+from typing import NamedTuple, Optional
 
 from cloudinit.cmd.devel import read_cfg_paths
-from cloudinit.helpers import Paths
 from cloudinit.stages import Init
 from cloudinit.subp import ProcessExecutionError, subp
 from cloudinit.temp_utils import tempdir
@@ -28,7 +27,23 @@
     write_file,
 )
 
-CLOUDINIT_RUN_DIR = "/run/cloud-init"
+
+class LogPaths(NamedTuple):
+    userdata_raw: str
+    cloud_data: str
+    run_dir: str
+    instance_data_sensitive: str
+
+
+def get_log_paths(init: Optional[Init] = None) -> LogPaths:
+    """Return a LogPaths tuple based on the system configuration on disk."""
+    paths = init.paths if init else read_cfg_paths()
+    return LogPaths(
+        userdata_raw=paths.get_ipath_cur("userdata_raw"),
+        cloud_data=paths.get_cpath("data"),
+        run_dir=paths.run_dir,
+        instance_data_sensitive=paths.lookups["instance_data_sensitive"],
+    )
 
 
 class ApportFile(NamedTuple):
@@ -81,16 +96,6 @@ class ApportFile(NamedTuple):
 ]
 
 
-def _get_user_data_file() -> str:
-    paths = read_cfg_paths()
-    return paths.get_ipath_cur("userdata_raw")
-
-
-def _get_cloud_data_path() -> str:
-    paths = read_cfg_paths()
-    return paths.get_cpath("data")
-
-
 def get_parser(parser=None):
     """Build or extend and arg parser for collect-logs utility.
@@ -122,7 +127,6 @@ def get_parser(parser=None): " Default: cloud-init.tar.gz" ), ) - user_data_file = _get_user_data_file() parser.add_argument( "--include-userdata", "-u", @@ -131,20 +135,20 @@ def get_parser(parser=None): dest="userdata", help=( "Optionally include user-data from {0} which could contain" - " sensitive information.".format(user_data_file) + " sensitive information.".format(get_log_paths().userdata_raw) ), ) return parser -def _copytree_rundir_ignore_files(curdir, files): +def _get_copytree_ignore_files(paths: LogPaths): """Return a list of files to ignore for /run/cloud-init directory""" ignored_files = [ "hook-hotplug-cmd", # named pipe for hotplug ] if os.getuid() != 0: # Ignore root-permissioned files - ignored_files.append(Paths({}).lookups["instance_data_sensitive"]) + ignored_files.append(paths.instance_data_sensitive) return ignored_files @@ -167,7 +171,7 @@ def _stream_command_output_to_file(cmd, filename, msg, verbosity): ensure_dir(os.path.dirname(filename)) try: with open(filename, "w") as f: - subprocess.call(cmd, stdout=f, stderr=f) + subprocess.call(cmd, stdout=f, stderr=f) # nosec B603 except OSError as e: write_file(filename, str(e)) _debug("collecting %s failed.\n" % msg, 1, verbosity) @@ -188,7 +192,9 @@ def _collect_file(path, out_dir, verbosity): _debug("file %s did not exist\n" % path, 2, verbosity) -def collect_installer_logs(log_dir, include_userdata, verbosity): +def _collect_installer_logs( + log_dir: str, include_userdata: bool, verbosity: int +): """Obtain subiquity logs and config files.""" for src_file in INSTALLER_APPORT_FILES: destination_dir = Path(log_dir + src_file.path).parent @@ -203,11 +209,91 @@ def collect_installer_logs(log_dir, include_userdata, verbosity): _collect_file(src_file.path, str(destination_dir), verbosity) -def collect_logs(tarfile, include_userdata: bool, verbosity=0): +def _collect_version_info(log_dir: str, verbosity: int): + version = _write_command_output_to_file( + cmd=["cloud-init", "--version"], + filename=os.path.join(log_dir, "version"), + msg="cloud-init --version", + verbosity=verbosity, + ) + dpkg_ver = _write_command_output_to_file( + cmd=["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"], + filename=os.path.join(log_dir, "dpkg-version"), + msg="dpkg version", + verbosity=verbosity, + ) + if not version: + version = dpkg_ver if dpkg_ver else "not-available" + _debug("collected cloud-init version: %s\n" % version, 1, verbosity) + + +def _collect_system_logs(log_dir: str, verbosity: int): + _stream_command_output_to_file( + cmd=["dmesg"], + filename=os.path.join(log_dir, "dmesg.txt"), + msg="dmesg output", + verbosity=verbosity, + ) + _stream_command_output_to_file( + cmd=["journalctl", "--boot=0", "-o", "short-precise"], + filename=os.path.join(log_dir, "journal.txt"), + msg="systemd journal of current boot", + verbosity=verbosity, + ) + + +def _collect_cloudinit_logs( + log_dir: str, + verbosity: int, + init: Init, + paths: LogPaths, + include_userdata: bool, +): + for log in get_config_logfiles(init.cfg): + _collect_file(log, log_dir, verbosity) + if include_userdata: + user_data_file = paths.userdata_raw + _collect_file(user_data_file, log_dir, verbosity) + + +def _collect_run_dir(log_dir: str, verbosity: int, paths: LogPaths): + run_dir = os.path.join(log_dir, "run") + ensure_dir(run_dir) + if os.path.exists(paths.run_dir): + try: + shutil.copytree( + paths.run_dir, + os.path.join(run_dir, "cloud-init"), + ignore=lambda _, __: _get_copytree_ignore_files(paths), + ) + except shutil.Error as e: + 
sys.stderr.write("Failed collecting file(s) due to error:\n") + sys.stderr.write(str(e) + "\n") + _debug("collected dir %s\n" % paths.run_dir, 1, verbosity) + else: + _debug( + "directory '%s' did not exist\n" % paths.run_dir, + 1, + verbosity, + ) + if os.path.exists(os.path.join(paths.run_dir, "disabled")): + # Fallback to grab previous cloud/data + cloud_data_dir = Path(paths.cloud_data) + if cloud_data_dir.exists(): + shutil.copytree( + str(cloud_data_dir), + Path(log_dir + str(cloud_data_dir)), + ) + + +def collect_logs( + tarfile: str, include_userdata: bool, verbosity: int = 0 +) -> int: """Collect all cloud-init logs and tar them up into the provided tarfile. @param tarfile: The path of the tar-gzipped file to create. @param include_userdata: Boolean, true means include user-data. + @return: 0 on success, 1 on failure. """ if include_userdata and os.getuid() != 0: sys.stderr.write( @@ -216,84 +302,36 @@ def collect_logs(tarfile, include_userdata: bool, verbosity=0): ) return 1 - init = Init(ds_deps=[]) tarfile = os.path.abspath(tarfile) - log_dir = datetime.utcnow().date().strftime("cloud-init-logs-%Y-%m-%d") + log_dir = ( + datetime.now(timezone.utc).date().strftime("cloud-init-logs-%Y-%m-%d") + ) with tempdir(dir="/tmp") as tmp_dir: log_dir = os.path.join(tmp_dir, log_dir) - version = _write_command_output_to_file( - cmd=["cloud-init", "--version"], - filename=os.path.join(log_dir, "version"), - msg="cloud-init --version", - verbosity=verbosity, - ) - dpkg_ver = _write_command_output_to_file( - cmd=["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"], - filename=os.path.join(log_dir, "dpkg-version"), - msg="dpkg version", - verbosity=verbosity, - ) - if not version: - version = dpkg_ver if dpkg_ver else "not-available" - print("version: ", version) - _debug("collected cloud-init version: %s\n" % version, 1, verbosity) - _stream_command_output_to_file( - cmd=["dmesg"], - filename=os.path.join(log_dir, "dmesg.txt"), - msg="dmesg output", - verbosity=verbosity, - ) - _stream_command_output_to_file( - cmd=["journalctl", "--boot=0", "-o", "short-precise"], - filename=os.path.join(log_dir, "journal.txt"), - msg="systemd journal of current boot", - verbosity=verbosity, - ) - + init = Init(ds_deps=[]) init.read_cfg() - for log in get_config_logfiles(init.cfg): - _collect_file(log, log_dir, verbosity) - if include_userdata: - user_data_file = _get_user_data_file() - _collect_file(user_data_file, log_dir, verbosity) - collect_installer_logs(log_dir, include_userdata, verbosity) - - run_dir = os.path.join(log_dir, "run") - ensure_dir(run_dir) - if os.path.exists(CLOUDINIT_RUN_DIR): - try: - shutil.copytree( - CLOUDINIT_RUN_DIR, - os.path.join(run_dir, "cloud-init"), - ignore=_copytree_rundir_ignore_files, - ) - except shutil.Error as e: - sys.stderr.write("Failed collecting file(s) due to error:\n") - sys.stderr.write(str(e) + "\n") - _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity) - else: - _debug( - "directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, - 1, - verbosity, - ) - if os.path.exists(os.path.join(CLOUDINIT_RUN_DIR, "disabled")): - # Fallback to grab previous cloud/data - cloud_data_dir = Path(_get_cloud_data_path()) - if cloud_data_dir.exists(): - shutil.copytree( - str(cloud_data_dir), - Path(log_dir + str(cloud_data_dir)), - ) + paths = get_log_paths(init) + + _collect_version_info(log_dir, verbosity) + _collect_system_logs(log_dir, verbosity) + _collect_cloudinit_logs( + log_dir, verbosity, init, paths, include_userdata + ) + 
_collect_installer_logs(log_dir, include_userdata, verbosity) + _collect_run_dir(log_dir, verbosity, paths) with chdir(tmp_dir): - subp(["tar", "czvf", tarfile, log_dir.replace(tmp_dir + "/", "")]) + subp(["tar", "czvf", tarfile, log_dir.replace(f"{tmp_dir}/", "")]) sys.stderr.write("Wrote %s\n" % tarfile) return 0 def handle_collect_logs_args(name, args): """Handle calls to 'cloud-init collect-logs' as a subcommand.""" - return collect_logs(args.tarfile, args.userdata, args.verbosity) + return collect_logs( + tarfile=args.tarfile, + include_userdata=args.userdata, + verbosity=args.verbosity, + ) def main(): diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 8fd7c6460..4b7c27ae4 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -115,7 +115,7 @@ def handle_args(name, args): if args.kind == "eni": pre_ns = eni.convert_eni_data(net_data) elif args.kind == "yaml": - pre_ns = safeyaml.load(net_data) + pre_ns = yaml.safe_load(net_data) if "network" in pre_ns: pre_ns = pre_ns.get("network") if args.debug: diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py index 99c24e1de..152b77681 100755 --- a/cloudinit/cmd/devel/render.py +++ b/cloudinit/cmd/devel/render.py @@ -18,6 +18,7 @@ ) NAME = "render" +CLOUDINIT_RUN_DIR = read_cfg_paths().run_dir LOG = logging.getLogger(__name__) @@ -40,8 +41,8 @@ def get_parser(parser=None): "--instance-data", type=str, help=( - "Optional path to instance-data.json file. Defaults to" - " /run/cloud-init/instance-data.json" + "Optional path to instance-data.json file. " + f"Defaults to {CLOUDINIT_RUN_DIR}" ), ) parser.add_argument( diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index f72ff8a8b..eff87b4f2 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -16,10 +16,10 @@ import json import os import sys -import time import traceback import logging -from typing import Tuple +import yaml +from typing import Tuple, Callable from cloudinit import netinfo from cloudinit import signal_handler @@ -35,16 +35,8 @@ from cloudinit.config import cc_set_hostname from cloudinit.config.modules import Modules from cloudinit.config.schema import validate_cloudconfig_schema -from cloudinit.log import ( - LogExporter, - setup_basic_logging, - setup_logging, - reset_logging, - configure_root_logger, - DEPRECATED, -) +from cloudinit import log from cloudinit.reporting import events -from cloudinit.safeyaml import load from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG # Welcome message template @@ -64,6 +56,14 @@ "once": PER_ONCE, } +# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html +STAGE_NAME = { + "init-local": "Local Stage", + "init": "Network Stage", + "modules-config": "Config Stage", + "modules-final": "Final Stage", +} + LOG = logging.getLogger(__name__) @@ -102,6 +102,20 @@ def welcome_format(action): ) +def close_stdin(logger: Callable[[str], None] = LOG.debug): + """ + reopen stdin as /dev/null to ensure no side effects + + logger: a function for logging messages + """ + if not os.isatty(sys.stdin.fileno()): + logger("Closing stdin") + with open(os.devnull) as fp: + os.dup2(fp.fileno(), sys.stdin.fileno()) + else: + logger("Not closing stdin, stdin is a tty.") + + def extract_fns(args): # Files are already opened so lets just pass that along # since it would of broke if it couldn't have @@ -227,17 +241,12 @@ def attempt_cmdline_url(path, network=True, cmdline=None) -> Tuple[int, str]: is_cloud_cfg = 
False if is_cloud_cfg: if cmdline_name == "url": - return ( - DEPRECATED, - str( - util.deprecate( - deprecated="The kernel command line key `url`", - deprecated_version="22.3", - extra_message=" Please use `cloud-config-url` " - "kernel command line parameter instead", - return_log=True, - ), - ), + return util.deprecate( + deprecated="The kernel command line key `url`", + deprecated_version="22.3", + extra_message=" Please use `cloud-config-url` " + "kernel command line parameter instead", + skip_log=True, ) else: if cmdline_name == "cloud-config-url": @@ -340,9 +349,8 @@ def main_init(name, args): outfmt = None errfmt = None try: - early_logs.append((logging.DEBUG, "Closing stdin.")) - util.close_stdin() - (outfmt, errfmt) = util.fixup_output(init.cfg, name) + close_stdin(lambda msg: early_logs.append((logging.DEBUG, msg))) + outfmt, errfmt = util.fixup_output(init.cfg, name) except Exception: msg = "Failed to setup output redirection!" util.logexc(LOG, msg) @@ -353,8 +361,8 @@ def main_init(name, args): LOG.debug( "Logging being reset, this logger may no longer be active shortly" ) - reset_logging() - setup_logging(init.cfg) + log.reset_logging() + log.setup_logging(init.cfg) apply_reporting_cfg(init.cfg) # Any log usage prior to setup_logging above did not have local user log @@ -493,7 +501,7 @@ def main_init(name, args): cloud_cfg_path = init.paths.get_ipath_cur("cloud_config") if os.path.exists(cloud_cfg_path) and os.stat(cloud_cfg_path).st_size != 0: validate_cloudconfig_schema( - config=load(util.load_text_file(cloud_cfg_path)), + config=yaml.safe_load(util.load_text_file(cloud_cfg_path)), strict=False, log_details=False, log_deprecations=True, @@ -515,7 +523,7 @@ def main_init(name, args): (outfmt, errfmt) = util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to re-adjust output redirection!") - setup_logging(mods.cfg) + log.setup_logging(mods.cfg) # give the activated datasource a chance to adjust init.activate_datasource() @@ -610,8 +618,7 @@ def main_modules(action_name, args): mods = Modules(init, extract_fns(args), reporter=args.reporter) # Stage 4 try: - LOG.debug("Closing stdin") - util.close_stdin() + close_stdin() util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to setup output redirection!") @@ -620,13 +627,20 @@ def main_modules(action_name, args): LOG.debug( "Logging being reset, this logger may no longer be active shortly" ) - reset_logging() - setup_logging(mods.cfg) + log.reset_logging() + log.setup_logging(mods.cfg) apply_reporting_cfg(init.cfg) # now that logging is setup and stdout redirected, send welcome welcome(name, msg=w_msg) + if name == "init": + util.deprecate( + deprecated="`--mode init`", + deprecated_version="24.1", + extra_message="Use `cloud-init init` instead.", + ) + # Stage 5 return run_module_section(mods, name, name) @@ -672,8 +686,7 @@ def main_single(name, args): mod_freq = FREQ_SHORT_NAMES.get(mod_freq) # Stage 4 try: - LOG.debug("Closing stdin") - util.close_stdin() + close_stdin() util.fixup_output(mods.cfg, None) except Exception: util.logexc(LOG, "Failed to setup output redirection!") @@ -682,8 +695,8 @@ def main_single(name, args): LOG.debug( "Logging being reset, this logger may no longer be active shortly" ) - reset_logging() - setup_logging(mods.cfg) + log.reset_logging() + log.setup_logging(mods.cfg) apply_reporting_cfg(init.cfg) # now that logging is setup and stdout redirected, send welcome @@ -702,12 +715,10 @@ def main_single(name, args): return 0 -def status_wrapper(name, 
args, data_d=None, link_d=None): - if data_d is None: - paths = read_cfg_paths() - data_d = paths.get_cpath("data") - if link_d is None: - link_d = os.path.normpath("/run/cloud-init") +def status_wrapper(name, args): + paths = read_cfg_paths() + data_d = paths.get_cpath("data") + link_d = os.path.normpath(paths.run_dir) status_path = os.path.join(data_d, "status.json") status_link = os.path.join(link_d, "status.json") @@ -734,18 +745,26 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: raise ValueError("unknown name: %s" % name) - modes = ( - "init", - "init-local", - "modules-config", - "modules-final", - ) - if mode not in modes: + if mode not in STAGE_NAME: raise ValueError( "Invalid cloud init mode specified '{0}'".format(mode) ) - status = None + nullstatus = { + "errors": [], + "recoverable_errors": {}, + "start": None, + "finished": None, + } + status = { + "v1": { + "datasource": None, + "init": nullstatus.copy(), + "init-local": nullstatus.copy(), + "modules-config": nullstatus.copy(), + "modules-final": nullstatus.copy(), + } + } if mode == "init-local": for f in (status_link, result_link, status_path, result_path): util.del_file(f) @@ -755,25 +774,22 @@ def status_wrapper(name, args, data_d=None, link_d=None): except Exception: pass - nullstatus = { - "errors": [], - "start": None, - "finished": None, - } - - if status is None: - status = {"v1": {}} - status["v1"]["datasource"] = None - - for m in modes: - if m not in status["v1"]: - status["v1"][m] = nullstatus.copy() + if mode not in status["v1"]: + # this should never happen, but leave it just to be safe + status["v1"][mode] = nullstatus.copy() v1 = status["v1"] v1["stage"] = mode - v1[mode]["start"] = time.time() - v1[mode]["recoverable_errors"] = next( - filter(lambda h: isinstance(h, LogExporter), root_logger.handlers) + if v1[mode]["start"] and not v1[mode]["finished"]: + # This stage was restarted, which isn't expected. + LOG.warning( + "Unexpected start time found for %s. Was this stage restarted?", + STAGE_NAME[mode], + ) + + v1[mode]["start"] = float(util.uptime()) + preexisting_recoverable_errors = next( + filter(lambda h: isinstance(h, log.LogExporter), root_logger.handlers) ).export_logs() # Write status.json prior to running init / module code @@ -791,27 +807,56 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: errors = ret - v1[mode]["errors"] = [str(e) for e in errors] - + v1[mode]["errors"].extend([str(e) for e in errors]) except Exception as e: - util.logexc(LOG, "failed stage %s", mode) + LOG.exception("failed stage %s", mode) print_exc("failed run of stage %s" % mode) - v1[mode]["errors"] = [str(e)] - - v1[mode]["finished"] = time.time() - v1["stage"] = None + v1[mode]["errors"].append(str(e)) + except SystemExit as e: + # All calls to sys.exit() resume running here. + # silence a pylint false positive + # https://github.com/pylint-dev/pylint/issues/9556 + if e.code: # pylint: disable=using-constant-test + # Only log errors when sys.exit() is called with a non-zero + # exit code + LOG.exception("failed stage %s", mode) + print_exc("failed run of stage %s" % mode) + v1[mode]["errors"].append(f"sys.exit({str(e.code)}) called") + finally: + # Before it exits, cloud-init will: + # 1) Write status.json (and result.json if in Final stage). + # 2) Write the final log message containing module run time. + # 3) Flush any queued reporting event handlers. 
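+            # Timestamps come from util.uptime() (seconds since boot)
+            # rather than wall-clock time, so stage timing is unaffected
+            # if the system clock is stepped (e.g. by NTP) during boot.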
+            v1[mode]["finished"] = float(util.uptime())
+            v1["stage"] = None
+
+            # merge new recoverable errors into existing recoverable error list
+            new_recoverable_errors = next(
+                filter(
+                    lambda h: isinstance(h, log.LogExporter), root_logger.handlers
+                )
+            ).export_logs()
+            for key in new_recoverable_errors.keys():
+                if key in preexisting_recoverable_errors:
+                    v1[mode]["recoverable_errors"][key] = list(
+                        set(
+                            preexisting_recoverable_errors[key]
+                            + new_recoverable_errors[key]
+                        )
+                    )
+                else:
+                    v1[mode]["recoverable_errors"][key] = new_recoverable_errors[
+                        key
+                    ]
 
-    # Write status.json after running init / module code
-    v1[mode]["recoverable_errors"] = next(
-        filter(lambda h: isinstance(h, LogExporter), root_logger.handlers)
-    ).export_logs()
-    atomic_helper.write_json(status_path, status)
+            # Write status.json after running init / module code
+            atomic_helper.write_json(status_path, status)
 
     if mode == "modules-final":
         # write the 'finished' file
         errors = []
-        for m in modes:
-            if v1[m]["errors"]:
+        for m in v1.keys():
+            if isinstance(v1[m], dict) and v1[m].get("errors"):
                 errors.extend(v1[m].get("errors", []))
 
         atomic_helper.write_json(
@@ -861,7 +906,7 @@ def main_features(name, args):
 
 
 def main(sysv_args=None):
-    configure_root_logger()
+    log.configure_root_logger()
     if not sysv_args:
         sysv_args = sys.argv
     parser = argparse.ArgumentParser(prog=sysv_args.pop(0))
@@ -934,11 +979,20 @@ def main(sysv_args=None):
     parser_mod = subparsers.add_parser(
         "modules", help="Activate modules using a given configuration key."
     )
+    extra_help = util.deprecate(
+        deprecated="`init`",
+        deprecated_version="24.1",
+        extra_message="Use `cloud-init init` instead.",
+        skip_log=True,
+    ).message
     parser_mod.add_argument(
         "--mode",
         "-m",
         action="store",
-        help="Module configuration name to use (default: %(default)s).",
+        help=(
+            f"Module configuration name to use (default: %(default)s)."
+            f" {extra_help}"
+        ),
         default="config",
        choices=("init", "config", "final"),
    )
@@ -1048,7 +1102,7 @@ def main(sysv_args=None):
         handle_collect_logs_args,
     )
 
-    logs_parser(parser_collect_logs)
+    logs_parser(parser=parser_collect_logs)
     parser_collect_logs.set_defaults(
         action=("collect-logs", handle_collect_logs_args)
     )
@@ -1096,9 +1150,11 @@ def main(sysv_args=None):
     # - if --debug is passed, logging.DEBUG
     # - if --debug is not passed, logging.WARNING
     if name not in ("init", "modules"):
-        setup_basic_logging(logging.DEBUG if args.debug else logging.WARNING)
+        log.setup_basic_logging(
+            logging.DEBUG if args.debug else logging.WARNING
+        )
     elif args.debug:
-        setup_basic_logging()
+        log.setup_basic_logging()
 
     # Setup signal handlers before running
     signal_handler.attach_handlers()
@@ -1148,6 +1204,12 @@ def main(sysv_args=None):
         args=(name, args),
     )
     reporting.flush_events()
+
+    # handle return code for main_modules, as it is not wrapped by
+    # status_wrapper when mode == init
+    if "modules" == name and "init" == args.mode:
+        retval = len(retval)
+
     return retval
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index cf85003bb..283c6069a 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -131,7 +131,7 @@ def load_userdata(ud_file_path):
 
     @returns: String of uncompressed userdata if possible, otherwise bytes.
""" - bdata = util.load_binary_file(ud_file_path) + bdata = util.load_binary_file(ud_file_path, quiet=True) try: return bdata.decode("utf-8") except UnicodeDecodeError: @@ -281,7 +281,7 @@ def handle_args(name, args): try: rendered_payload = render_jinja_payload( payload=payload, - payload_fn="query commandline", + payload_fn="query command line", instance_data=instance_data, debug=True if args.debug else False, ) diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py index f6dfb60eb..b8c1b7e82 100644 --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -"""Define 'status' utility and handler as part of cloud-init commandline.""" +"""Define 'status' utility and handler as part of cloud-init command line.""" import argparse import enum @@ -48,11 +48,11 @@ class EnabledStatus(enum.Enum): """Enum representing user-visible cloud-init boot status codes.""" DISABLED_BY_GENERATOR = "disabled-by-generator" - DISABLED_BY_KERNEL_CMDLINE = "disabled-by-kernel-cmdline" + DISABLED_BY_KERNEL_CMDLINE = "disabled-by-kernel-command-line" DISABLED_BY_MARKER_FILE = "disabled-by-marker-file" DISABLED_BY_ENV_VARIABLE = "disabled-by-environment-variable" ENABLED_BY_GENERATOR = "enabled-by-generator" - ENABLED_BY_KERNEL_CMDLINE = "enabled-by-kernel-cmdline" + ENABLED_BY_KERNEL_CMDLINE = "enabled-by-kernel-command-line" ENABLED_BY_SYSVINIT = "enabled-by-sysvinit" UNKNOWN = "unknown" diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py index e56702572..01da83fcf 100644 --- a/cloudinit/config/__init__.py +++ b/cloudinit/config/__init__.py @@ -1 +1,3 @@ Config = dict +Netv1 = dict +Netv2 = dict diff --git a/cloudinit/config/cc_ansible.py b/cloudinit/config/cc_ansible.py index 659420aff..fce8ae3b4 100644 --- a/cloudinit/config/cc_ansible.py +++ b/cloudinit/config/cc_ansible.py @@ -6,61 +6,23 @@ import sys import sysconfig from copy import deepcopy -from textwrap import dedent from typing import Optional +from cloudinit import subp from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS, Distro from cloudinit.settings import PER_INSTANCE -from cloudinit.subp import subp, which from cloudinit.util import Version, get_cfg_by_path meta: MetaSchema = { "id": "cc_ansible", - "name": "Ansible", - "title": "Configure ansible for instance", "frequency": PER_INSTANCE, "distros": [ALL_DISTROS], "activate_by_schema_keys": ["ansible"], - "description": dedent( - """\ - This module provides ``ansible`` integration for - augmenting cloud-init's configuration of the local - node. +} # type: ignore - - This module installs ansible during boot and - then uses ``ansible-pull`` to run the playbook - repository at the remote URL. 
- """ - ), - "examples": [ - dedent( - """\ - ansible: - package_name: ansible-core - install_method: distro - pull: - url: "https://github.com/holmanb/vmboot.git" - playbook_name: ubuntu.yml - """ - ), - dedent( - """\ - ansible: - package_name: ansible-core - install_method: pip - pull: - url: "https://github.com/holmanb/vmboot.git" - playbook_name: ubuntu.yml - """ - ), - ], -} - -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) CFG_OVERRIDE = "ansible_config" @@ -100,7 +62,7 @@ def do_as(self, command: list, **kwargs): return self.distro.do_as(command, self.run_user, **kwargs) def subp(self, command, **kwargs): - return subp(command, update_env=self.env, **kwargs) + return subp.subp(command, update_env=self.env, **kwargs) @abc.abstractmethod def is_installed(self): @@ -165,7 +127,7 @@ def install(self, pkg_name: str): self.distro.install_packages([pkg_name]) def is_installed(self) -> bool: - return bool(which("ansible")) + return bool(subp.which("ansible")) def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py index d3cfd091c..fcfbe0569 100644 --- a/cloudinit/config/cc_apk_configure.py +++ b/cloudinit/config/cc_apk_configure.py @@ -7,12 +7,11 @@ """Apk Configure: Configures apk repositories file.""" import logging -from textwrap import dedent from cloudinit import temp_utils, templater, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -52,60 +51,12 @@ """ - -frequency = PER_INSTANCE -distros = ["alpine"] meta: MetaSchema = { "id": "cc_apk_configure", - "name": "APK Configure", - "title": "Configure apk repositories file", - "description": dedent( - """\ - This module handles configuration of the /etc/apk/repositories file. - - .. note:: - To ensure that apk configuration is valid yaml, any strings - containing special characters, especially ``:`` should be quoted. - """ - ), - "distros": distros, - "examples": [ - dedent( - """\ - # Keep the existing /etc/apk/repositories file unaltered. - apk_repos: - preserve_repositories: true - """ - ), - dedent( - """\ - # Create repositories file for Alpine v3.12 main and community - # using default mirror site. - apk_repos: - alpine_repo: - community_enabled: true - version: 'v3.12' - """ - ), - dedent( - """\ - # Create repositories file for Alpine Edge main, community, and - # testing using a specified mirror site and also a local repo. 
- apk_repos: - alpine_repo: - base_url: 'https://some-alpine-mirror/alpine' - community_enabled: true - testing_enabled: true - version: 'edge' - local_repo_base_url: 'https://my-local-server/local-alpine' - """ - ), - ], - "frequency": frequency, + "distros": ["alpine"], + "frequency": PER_INSTANCE, "activate_by_schema_keys": ["apk_repos"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index bec83ffa8..b79b6483b 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -14,14 +14,14 @@ import pathlib import re import shutil -import signal -from textwrap import dedent, indent +from textwrap import indent from typing import Dict, Iterable, List, Mapping -from cloudinit import features, gpg, subp, templater, util +from cloudinit import features, subp, templater, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema +from cloudinit.gpg import GPG from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -34,9 +34,6 @@ CLOUD_INIT_GPG_DIR = "/etc/apt/cloud-init.gpg.d/" DISABLE_SUITES_REDACT_PREFIX = "# cloud-init disable_suites redacted: " -frequency = PER_INSTANCE -distros = ["ubuntu", "debian"] - PACKAGE_DEPENDENCY_BY_COMMAND: Mapping[str, str] = { "add-apt-repository": "software-properties-common", "gpg": "gnupg", @@ -44,122 +41,10 @@ meta: MetaSchema = { "id": "cc_apt_configure", - "name": "Apt Configure", - "title": "Configure apt for the user", - "description": dedent( - """\ - This module handles both configuration of apt options and adding - source lists. There are configuration options such as - ``apt_get_wrapper`` and ``apt_get_command`` that control how - cloud-init invokes apt-get. These configuration options are - handled on a per-distro basis, so consult documentation for - cloud-init's distro support for instructions on using - these config options. - - By default, cloud-init will generate default - apt sources information in deb822 format at - :file:`/etc/apt/sources.list.d/.sources`. When the value - of `sources_list` does not appear to be deb822 format, or stable - distribution releases disable deb822 format, - :file:`/etc/apt/sources.list` will be written instead. - - .. note:: - To ensure that apt configuration is valid yaml, any strings - containing special characters, especially ``:`` should be quoted. - - .. 
note:: - For more information about apt configuration, see the - ``Additional apt configuration`` example.""" - ), - "distros": distros, - "examples": [ - dedent( - """\ - apt: - preserve_sources_list: false - disable_suites: - - $RELEASE-updates - - backports - - $RELEASE - - mysuite - primary: - - arches: - - amd64 - - i386 - - default - uri: 'http://us.archive.ubuntu.com/ubuntu' - search: - - 'http://cool.but-sometimes-unreachable.com/ubuntu' - - 'http://us.archive.ubuntu.com/ubuntu' - search_dns: false - - arches: - - s390x - - arm64 - uri: 'http://archive-to-use-for-arm64.example.com/ubuntu' - - security: - - arches: - - default - search_dns: true - sources_list: | - deb $MIRROR $RELEASE main restricted - deb-src $MIRROR $RELEASE main restricted - deb $PRIMARY $RELEASE universe restricted - deb $SECURITY $RELEASE-security multiverse - debconf_selections: - set1: the-package the-package/some-flag boolean true - conf: | - APT { - Get { - Assume-Yes 'true'; - Fix-Broken 'true'; - } - } - proxy: 'http://[[user][:pass]@]host[:port]/' - http_proxy: 'http://[[user][:pass]@]host[:port]/' - ftp_proxy: 'ftp://[[user][:pass]@]host[:port]/' - https_proxy: 'https://[[user][:pass]@]host[:port]/' - sources: - source1: - keyid: 'keyid' - keyserver: 'keyserverurl' - source: 'deb [signed-by=$KEY_FILE] http:/// bionic main' - source2: - source: 'ppa:' - source3: - source: 'deb $MIRROR $RELEASE multiverse' - key: | - ------BEGIN PGP PUBLIC KEY BLOCK------- - - ------END PGP PUBLIC KEY BLOCK------- - source4: - source: 'deb $MIRROR $RELEASE multiverse' - append: false - key: | - ------BEGIN PGP PUBLIC KEY BLOCK------- - - ------END PGP PUBLIC KEY BLOCK-------""" - ), - dedent( - """\ - # cloud-init version 23.4 will generate a deb822 formatted sources - # file at /etc/apt/sources.list.d/.sources instead of - # /etc/apt/sources.list when `sources_list` content is deb822 - # format. - apt: - sources_list: | - Types: deb - URIs: http://archive.ubuntu.com/ubuntu/ - Suites: $RELEASE - Components: main - """ - ), - ], - "frequency": frequency, + "distros": ["ubuntu", "debian"], + "frequency": PER_INSTANCE, "activate_by_schema_keys": [], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore # place where apt stores cached repository data @@ -222,13 +107,13 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: if not isinstance(apt_cfg, dict): raise ValueError( - "Expected dictionary for 'apt' config, found {config_type}".format( - config_type=type(apt_cfg) - ) + "Expected dictionary for 'apt' config, " + "found {config_type}".format(config_type=type(apt_cfg)) ) apply_debconf_selections(apt_cfg) - apply_apt(apt_cfg, cloud) + with GPG() as gpg_context: + apply_apt(apt_cfg, cloud, gpg_context) def _should_configure_on_empty_apt(): @@ -240,7 +125,7 @@ def _should_configure_on_empty_apt(): return True, "Apt is available." -def apply_apt(cfg, cloud): +def apply_apt(cfg, cloud, gpg): # cfg is the 'apt' top level dictionary already in 'v3' format. 
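+    # gpg is a GPG context entered in handle(); cleanup of any gpg-agent
+    # or dirmngr processes spawned during key import happens when that
+    # context exits (GH: 4344), rather than by hand here.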
if not cfg: should_config, msg = _should_configure_on_empty_apt() @@ -262,7 +147,7 @@ def apply_apt(cfg, cloud): _ensure_dependencies(cfg, matcher, cloud) if util.is_false(cfg.get("preserve_sources_list", False)): - add_mirror_keys(cfg, cloud) + add_mirror_keys(cfg, cloud, gpg) generate_sources_list(cfg, release, mirrors, cloud) rename_apt_lists(mirrors, arch) @@ -280,32 +165,10 @@ def apply_apt(cfg, cloud): add_apt_sources( cfg["sources"], cloud, + gpg, template_params=params, aa_repo_match=matcher, ) - # GH: 4344 - stop gpg-agent/dirmgr daemons spawned by gpg key imports. - # Daemons spawned by cloud-config.service on systemd v253 report (running) - gpg_process_out, _err = subp.subp( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ) - gpg_pids = re.findall(r"(?P\d+)\s+(?P\d+)", gpg_process_out) - root_gpg_pids = [int(pid[1]) for pid in gpg_pids if pid[0] == "1"] - if root_gpg_pids: - LOG.debug("Killing gpg-agent and dirmngr pids: %s", root_gpg_pids) - for gpg_pid in root_gpg_pids: - os.kill(gpg_pid, signal.SIGKILL) def debconf_set_selections(selections): @@ -558,11 +421,11 @@ def disable_suites(disabled, src, release) -> str: return retsrc -def add_mirror_keys(cfg, cloud): +def add_mirror_keys(cfg, cloud, gpg): """Adds any keys included in the primary/security mirror clauses""" for key in ("primary", "security"): for mirror in cfg.get(key, []): - add_apt_key(mirror, cloud, file_name=key) + add_apt_key(mirror, cloud, gpg, file_name=key) def is_deb822_sources_format(apt_src_content: str) -> bool: @@ -722,7 +585,7 @@ def generate_sources_list(cfg, release, mirrors, cloud): util.del_file(apt_sources_list) -def add_apt_key_raw(key, file_name, hardened=False): +def add_apt_key_raw(key, file_name, gpg, hardened=False): """ actual adding of a key as defined in key argument to the system @@ -730,7 +593,9 @@ def add_apt_key_raw(key, file_name, hardened=False): LOG.debug("Adding key:\n'%s'", key) try: name = pathlib.Path(file_name).stem - return apt_key("add", output_file=name, data=key, hardened=hardened) + return apt_key( + "add", gpg, output_file=name, data=key, hardened=hardened + ) except subp.ProcessExecutionError: LOG.exception("failed to add apt GPG Key to apt keyring") raise @@ -770,7 +635,7 @@ def _ensure_dependencies(cfg, aa_repo_match, cloud): cloud.distro.install_packages(sorted(missing_packages)) -def add_apt_key(ent, cloud, hardened=False, file_name=None): +def add_apt_key(ent, cloud, gpg, hardened=False, file_name=None): """ Add key to the system as defined in ent (if any). 
Supports raw keys or keyid's @@ -785,15 +650,13 @@ def add_apt_key(ent, cloud, hardened=False, file_name=None): if "key" in ent: return add_apt_key_raw( - ent["key"], file_name or ent["filename"], hardened=hardened + ent["key"], file_name or ent["filename"], gpg, hardened=hardened ) -def update_packages(cloud): - cloud.distro.update_package_sources() - - -def add_apt_sources(srcdict, cloud, template_params=None, aa_repo_match=None): +def add_apt_sources( + srcdict, cloud, gpg, template_params=None, aa_repo_match=None +): """ install keys and repo source .list files defined in 'sources' @@ -834,10 +697,10 @@ def add_apt_sources(srcdict, cloud, template_params=None, aa_repo_match=None): ent["filename"] = filename if "source" in ent and "$KEY_FILE" in ent["source"]: - key_file = add_apt_key(ent, cloud, hardened=True) + key_file = add_apt_key(ent, cloud, gpg, hardened=True) template_params["KEY_FILE"] = key_file else: - add_apt_key(ent, cloud) + add_apt_key(ent, cloud, gpg) if "source" not in ent: continue @@ -874,7 +737,7 @@ def add_apt_sources(srcdict, cloud, template_params=None, aa_repo_match=None): LOG.exception("failed write to file %s: %s", sourcefn, detail) raise - update_packages(cloud) + cloud.distro.update_package_sources(force=True) return @@ -1187,7 +1050,12 @@ def apply_apt_config(cfg, proxy_fname, config_fname): def apt_key( - command, output_file=None, data=None, hardened=False, human_output=True + command, + gpg, + output_file=None, + data=None, + hardened=False, + human_output=True, ): """apt-key replacement @@ -1215,7 +1083,7 @@ def _get_key_files(): key_files.append(APT_TRUSTED_GPG_DIR + file) return key_files if key_files else "" - def apt_key_add(): + def apt_key_add(gpg_context): """apt-key add returns filepath to new keyring, or '/dev/null' when an error occurs @@ -1230,7 +1098,7 @@ def apt_key_add(): key_dir = ( CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR ) - stdout = gpg.dearmor(data) + stdout = gpg_context.dearmor(data) file_name = "{}{}.gpg".format(key_dir, output_file) util.write_file(file_name, stdout) except subp.ProcessExecutionError: @@ -1243,7 +1111,7 @@ def apt_key_add(): ) return file_name - def apt_key_list(): + def apt_key_list(gpg_context): """apt-key list returns string of all trusted keys (in /etc/apt/trusted.gpg and @@ -1252,15 +1120,17 @@ def apt_key_list(): key_list = [] for key_file in _get_key_files(): try: - key_list.append(gpg.list(key_file, human_output=human_output)) + key_list.append( + gpg_context.list_keys(key_file, human_output=human_output) + ) except subp.ProcessExecutionError as error: LOG.warning('Failed to list key "%s": %s', key_file, error) return "\n".join(key_list) if command == "add": - return apt_key_add() + return apt_key_add(gpg) elif command == "finger" or command == "list": - return apt_key_list() + return apt_key_list(gpg) else: raise ValueError( "apt_key() commands add, list, and finger are currently supported" diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index 1a99c3bf4..95498832d 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -7,18 +7,15 @@ """Apt Pipelining: configure apt pipelining.""" import logging -from textwrap import dedent from cloudinit import util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) 
-frequency = PER_INSTANCE -distros = ["ubuntu", "debian"] DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining" APT_PIPE_TPL = ( "//Written by cloud-init per 'apt_pipelining'\n" @@ -31,32 +28,10 @@ meta: MetaSchema = { "id": "cc_apt_pipelining", - "name": "Apt Pipelining", - "title": "Configure apt pipelining", - "description": dedent( - """\ - This module configures apt's ``Acquite::http::Pipeline-Depth`` option, - which controls how apt handles HTTP pipelining. It may be useful for - pipelining to be disabled, because some web servers, such as S3 do not - pipeline properly (LP: #948461). - - Value configuration options for this module are: - - * ``os``: (Default) use distro default - * ``false`` disable pipelining altogether - * ``<number>``: Manually specify pipeline depth. This is not recommended.""" # noqa: E501 - ), - "distros": distros, - "frequency": frequency, - "examples": [ - "apt_pipelining: false", - "apt_pipelining: os", - "apt_pipelining: 3", - ], + "distros": ["ubuntu", "debian"], + "frequency": PER_INSTANCE, "activate_by_schema_keys": ["apt_pipelining"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 8e0bbfc9e..054e5c4c2 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -10,58 +10,24 @@ """Bootcmd: run arbitrary commands early in the boot process.""" import logging -from textwrap import dedent from cloudinit import subp, temp_utils, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_ALWAYS LOG = logging.getLogger(__name__) frequency = PER_ALWAYS -distros = ["all"] meta: MetaSchema = { "id": "cc_bootcmd", - "name": "Bootcmd", - "title": "Run arbitrary commands early in the boot process", - "description": dedent( - """\ - This module runs arbitrary commands very early in the boot process, - only slightly after a boothook would run. This is very similar to a - boothook, but more user friendly. The environment variable - ``INSTANCE_ID`` will be set to the current instance id for all run - commands. Commands can be specified either as lists or strings. For - invocation details, see ``runcmd``. - - .. note:: - bootcmd should only be used for things that could not be done later - in the boot process. - - .. note:: - - when writing files, do not use /tmp dir as it races with - systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
- """ - ), - "distros": distros, - "examples": [ - dedent( - """\ - bootcmd: - - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts - - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] - """ - ) - ], + "distros": ["all"], "frequency": PER_ALWAYS, "activate_by_schema_keys": ["bootcmd"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index b29944865..43d16b052 100644 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -13,46 +13,18 @@ from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ug_util from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -This module controls whether byobu is enabled or disabled system wide and for -the default system user. If byobu is to be enabled, this module will ensure it -is installed. Likewise, if it is to be disabled, it will be removed if -installed. - -Valid configuration options for this module are: - - - ``enable-system``: enable byobu system wide - - ``enable-user``: enable byobu for the default user - - ``disable-system``: disable byobu system wide - - ``disable-user``: disable byobu for the default user - - ``enable``: enable byobu both system wide and for default user - - ``disable``: disable byobu for all users - - ``user``: alias for ``enable-user`` - - ``system``: alias for ``enable-system`` -""" -distros = ["ubuntu", "debian"] - LOG = logging.getLogger(__name__) meta: MetaSchema = { "id": "cc_byobu", - "name": "Byobu", - "title": "Enable/disable byobu system wide and for default user", - "description": MODULE_DESCRIPTION, - "distros": distros, + "distros": ["ubuntu", "debian"], "frequency": PER_INSTANCE, - "examples": [ - "byobu_by_default: enable-user", - "byobu_by_default: disable-system", - ], "activate_by_schema_keys": [], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 3f6b8fb17..61345fcb5 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -6,12 +6,11 @@ import logging import os -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -46,9 +45,9 @@ "ca_cert_update_cmd": ["update-ca-certificates"], }, "photon": { - "ca_cert_path": "/etc/ssl/certs/", - "ca_cert_local_path": "/etc/pki/tls/certs/", - "ca_cert_filename": "cloud-init-ca-cert-{cert_index}.crt", + "ca_cert_path": "/etc/pki/tls/certs/", + "ca_cert_local_path": "/etc/ssl/certs/", + "ca_cert_filename": "cloud-init-ca-cert-{cert_index}.pem", "ca_cert_config": None, "ca_cert_update_cmd": ["rehash_ca_certificates.sh"], }, @@ -64,21 +63,15 @@ ): DISTRO_OVERRIDES[distro] = DISTRO_OVERRIDES["opensuse"] -MODULE_DESCRIPTION = """\ -This module adds CA certificates to the system's CA store and updates any -related files using the appropriate OS-specific utility. 
The default CA -certificates can be disabled/deleted from use by the system with the -configuration option ``remove_defaults``. - -.. note:: - certificates must be specified using valid yaml. in order to specify a - multiline certificate, the yaml multiline list syntax must be used +for distro in ( + "almalinux", + "cloudlinux", +): + DISTRO_OVERRIDES[distro] = DISTRO_OVERRIDES["rhel"] -.. note:: - Alpine Linux requires the ca-certificates package to be installed in - order to provide the ``update-ca-certificates`` command. -""" distros = [ + "almalinux", + "cloudlinux", "alpine", "debian", "fedora", @@ -96,29 +89,10 @@ meta: MetaSchema = { "id": "cc_ca_certs", - "name": "CA Certificates", - "title": "Add ca certificates", - "description": MODULE_DESCRIPTION, "distros": distros, "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - ca_certs: - remove_defaults: true - trusted: - - single_line_cert - - | - -----BEGIN CERTIFICATE----- - YOUR-ORGS-TRUSTED-CA-CERT-HERE - -----END CERTIFICATE----- - """ - ) - ], "activate_by_schema_keys": ["ca_certs", "ca-certs"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def _distro_ca_certs_configs(distro_name): diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 6aa2836f7..ba6c752cb 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -12,13 +12,12 @@ import json import logging import os -from textwrap import dedent from typing import List from cloudinit import subp, temp_utils, templater, url_helper, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import Distro from cloudinit.settings import PER_ALWAYS @@ -45,8 +44,6 @@ OMNIBUS_URL_RETRIES = 5 CHEF_VALIDATION_PEM_PATH = "/etc/chef/validation.pem" -CHEF_ENCRYPTED_DATA_BAG_PATH = "/etc/chef/encrypted_data_bag_secret" -CHEF_ENVIRONMENT = "_default" CHEF_FB_PATH = "/etc/chef/firstboot.json" CHEF_RB_TPL_DEFAULTS = { # These are ruby symbols... @@ -95,58 +92,14 @@ CHEF_EXEC_DEF_ARGS = tuple(["-d", "-i", "1800", "-s", "20"]) -frequency = PER_ALWAYS -distros = ["all"] - LOG = logging.getLogger(__name__) meta: MetaSchema = { "id": "cc_chef", - "name": "Chef", - "title": "module that configures, starts and installs chef", - "description": dedent( - """\ - This module enables chef to be installed (from packages, - gems, or from omnibus). Before this occurs, chef configuration is - written to disk (validation.pem, client.pem, firstboot.json, - client.rb), and required directories are created (/etc/chef and - /var/log/chef and so-on). If configured, chef will be - installed and started in either daemon or non-daemon mode. 
- If run in non-daemon mode, post run actions are executed to do - finishing activities such as removing validation.pem.""" - ), - "distros": distros, - "examples": [ - dedent( - """ - chef: - directories: - - /etc/chef - - /var/log/chef - validation_cert: system - install_type: omnibus - initial_attributes: - apache: - prefork: - maxclients: 100 - keepalive: off - run_list: - - recipe[apache2] - - role[db] - encrypted_data_bag_secret: /etc/chef/encrypted_data_bag_secret - environment: _default - log_level: :auto - omnibus_url_retries: 2 - server_url: https://chef.yourorg.com:4000 - ssl_verify_mode: :verify_peer - validation_name: yourorg-validator""" - ) - ], - "frequency": frequency, + "distros": ["all"], + "frequency": PER_ALWAYS, "activate_by_schema_keys": ["chef"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def post_run_chef(chef_cfg): diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py index 4ad789c0b..7869655f3 100644 --- a/cloudinit/config/cc_disable_ec2_metadata.py +++ b/cloudinit/config/cc_disable_ec2_metadata.py @@ -9,12 +9,11 @@ """Disable EC2 Metadata: Disable AWS EC2 metadata.""" import logging -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_ALWAYS @@ -25,21 +24,10 @@ meta: MetaSchema = { "id": "cc_disable_ec2_metadata", - "name": "Disable EC2 Metadata", - "title": "Disable AWS EC2 Metadata", - "description": dedent( - """\ - This module can disable the ec2 datasource by rejecting the route to - ``169.254.169.254``, the usual route to the datasource. This module - is disabled by default.""" - ), "distros": [ALL_DISTROS], "frequency": PER_ALWAYS, - "examples": ["disable_ec2_metadata: true"], "activate_by_schema_keys": ["disable_ec2_metadata"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 0ccf6a35c..ba3106a41 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -10,105 +10,24 @@ import logging import os import shlex -from textwrap import dedent +from pathlib import Path from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE -# Define the commands to use -SFDISK_CMD = subp.which("sfdisk") -SGDISK_CMD = subp.which("sgdisk") -LSBLK_CMD = subp.which("lsblk") -BLKID_CMD = subp.which("blkid") -BLKDEV_CMD = subp.which("blockdev") -PARTPROBE_CMD = subp.which("partprobe") -WIPEFS_CMD = subp.which("wipefs") - LANG_C_ENV = {"LANG": "C"} LOG = logging.getLogger(__name__) -MODULE_DESCRIPTION = """\ -This module is able to configure simple partition tables and filesystems. - -.. note:: - for more detail about configuration options for disk setup, see the disk - setup example - -.. note:: - if a swap partition is being created via ``disk_setup`` then a ``fs_entry`` - entry is also needed in order for mkswap to be run, otherwise when swap - activation is later attempted it will fail. 
- -For convenience, aliases can be specified for disks using the -``device_aliases`` config key, which takes a dictionary of alias: path -mappings. There are automatic aliases for ``swap`` and ``ephemeral``, where -``swap`` will always refer to the active swap partition and ``ephemeral`` -will refer to the block device of the ephemeral image. - -Disk partitioning is done using the ``disk_setup`` directive. This config -directive accepts a dictionary where each key is either a path to a block -device or an alias specified in ``device_aliases``, and each value is the -configuration options for the device. File system configuration is done using -the ``fs_setup`` directive. This config directive accepts a list of -filesystem configs. -""" - meta: MetaSchema = { "id": "cc_disk_setup", - "name": "Disk Setup", - "title": "Configure partitions and filesystems", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - device_aliases: - my_alias: /dev/sdb - swap_disk: /dev/sdc - disk_setup: - my_alias: - table_type: gpt - layout: [50, 50] - overwrite: true - swap_disk: - table_type: gpt - layout: [[100, 82]] - overwrite: true - /dev/sdd: - table_type: mbr - layout: true - overwrite: true - fs_setup: - - label: fs1 - filesystem: ext4 - device: my_alias.1 - cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s - - label: fs2 - device: my_alias.2 - filesystem: ext4 - - label: swap - device: swap_disk.1 - filesystem: swap - - label: fs3 - device: /dev/sdd1 - filesystem: ext4 - mounts: - - ["my_alias.1", "/mnt1"] - - ["my_alias.2", "/mnt2"] - - ["swap_disk.1", "none", "swap", "sw", "0", "0"] - - ["/dev/sdd1", "/mnt3"] - """ - ) - ], "activate_by_schema_keys": ["disk_setup", "fs_setup"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: @@ -257,7 +176,7 @@ def enumerate_disk(device, nodeps=False): """ lsblk_cmd = [ - LSBLK_CMD, + "lsblk", "--pairs", "--output", "NAME,TYPE,FSTYPE,LABEL", @@ -331,7 +250,7 @@ def check_fs(device): """ out, label, fs_type, uuid = None, None, None, None - blkid_cmd = [BLKID_CMD, "-c", "/dev/null", device] + blkid_cmd = ["blkid", "-c", "/dev/null", device] try: out, _err = subp.subp(blkid_cmd, rcs=[0, 2]) except Exception as e: @@ -438,8 +357,8 @@ def is_disk_used(device): def get_hdd_size(device): try: - size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device]) - sector_size, _ = subp.subp([BLKDEV_CMD, "--getss", device]) + size_in_bytes, _ = subp.subp(["blockdev", "--getsize64", device]) + sector_size, _ = subp.subp(["blockdev", "--getss", device]) except Exception as e: raise RuntimeError("Failed to get %s size\n%s" % (device, e)) from e @@ -455,7 +374,8 @@ def check_partition_mbr_layout(device, layout): """ read_parttbl(device) - prt_cmd = [SFDISK_CMD, "-l", device] + + prt_cmd = ["sfdisk", "-l", device] try: out, _err = subp.subp(prt_cmd, data="%s\n" % layout) except Exception as e: @@ -486,7 +406,7 @@ def check_partition_mbr_layout(device, layout): def check_partition_gpt_layout(device, layout): - prt_cmd = [SGDISK_CMD, "-p", device] + prt_cmd = ["sgdisk", "-p", device] try: out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV) except Exception as e: @@ -676,7 +596,7 @@ def purge_disk(device): # wipe any file systems first for d in enumerate_disk(device): if d["type"] not in ["disk", "crypt"]: - wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d["name"]] + wipefs_cmd = ["wipefs", "--all", "/dev/%s" % d["name"]] try: 
LOG.info("Purging filesystem on /dev/%s", d["name"]) subp.subp(wipefs_cmd) @@ -709,10 +629,11 @@ def read_parttbl(device): `Partprobe` is preferred over `blkdev` since it is more reliably able to probe the partition table. """ - if PARTPROBE_CMD is not None: - probe_cmd = [PARTPROBE_CMD, device] + partprobe = "partprobe" + if subp.which(partprobe): + probe_cmd = [partprobe, device] else: - probe_cmd = [BLKDEV_CMD, "--rereadpt", device] + probe_cmd = ["blockdev", "--rereadpt", device] util.udevadm_settle() try: subp.subp(probe_cmd) @@ -728,7 +649,7 @@ def exec_mkpart_mbr(device, layout): types, i.e. gpt """ # Create the partitions - prt_cmd = [SFDISK_CMD, "--force", device] + prt_cmd = ["sfdisk", "--force", device] try: subp.subp(prt_cmd, data="%s\n" % layout) except Exception as e: @@ -741,12 +662,12 @@ def exec_mkpart_mbr(device, layout): def exec_mkpart_gpt(device, layout): try: - subp.subp([SGDISK_CMD, "-Z", device]) + subp.subp(["sgdisk", "-Z", device]) for index, (partition_type, (start, end)) in enumerate(layout): index += 1 subp.subp( [ - SGDISK_CMD, + "sgdisk", "-n", "{}:{}:{}".format(index, start, end), device, @@ -757,7 +678,7 @@ def exec_mkpart_gpt(device, layout): # 82 -> 8200. 'Linux' -> 'Linux' pinput = str(partition_type).ljust(4, "0") subp.subp( - [SGDISK_CMD, "-t", "{}:{}".format(index, pinput), device] + ["sgdisk", "-t", "{}:{}".format(index, pinput), device] ) except Exception: LOG.warning("Failed to partition device %s", device) @@ -915,7 +836,15 @@ def mkfs(fs_cfg): if not partition or partition.isdigit(): # Handle manual definition of partition if partition.isdigit(): + # nvme support + # https://github.com/torvalds/linux/blob/45db3ab/block/partitions + # /core.c#L330 + if device[-1].isdigit(): + device = f"{device}p" device = "%s%s" % (device, partition) + if not Path(device).is_block_device(): + LOG.warning("Path %s does not exist or is not a block device") + return LOG.debug( "Manual request of partition %s for %s", partition, device ) @@ -937,7 +866,7 @@ def mkfs(fs_cfg): LOG.debug("Device %s has required file system", device) return else: - LOG.warning("Destroying filesystem on %s", device) + LOG.debug("Destroying filesystem on %s", device) else: LOG.debug("Device %s is cleared for formatting", device) diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py index 5efac3427..aa630a30b 100644 --- a/cloudinit/config/cc_fan.py +++ b/cloudinit/config/cc_fan.py @@ -6,55 +6,19 @@ """Fan: Configure ubuntu fan networking""" import logging -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -This module installs, configures and starts the ubuntu fan network system. For -more information about Ubuntu Fan, see: -``https://wiki.ubuntu.com/FanNetworking``. - -If cloud-init sees a ``fan`` entry in cloud-config it will: - - - write ``config_path`` with the contents of the ``config`` key - - install the package ``ubuntu-fan`` if it is not installed - - ensure the service is started (or restarted if was previously running) - -Additionally, the ``ubuntu-fan`` package will be automatically installed -if not present. 
-""" - -distros = ["ubuntu"] meta: MetaSchema = { "id": "cc_fan", - "name": "Fan", - "title": "Configure ubuntu fan networking", - "description": MODULE_DESCRIPTION, - "distros": distros, + "distros": ["ubuntu"], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - fan: - config: | - # fan 240 - 10.0.0.0/8 eth0/16 dhcp - 10.0.0.0/8 eth1/16 dhcp off - # fan 241 - 241.0.0.0/8 eth0/16 dhcp - config_path: /etc/network/fan - """ - ) - ], "activate_by_schema_keys": ["fan"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index deb49227c..dc263ee2d 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -8,56 +8,23 @@ """Final Message: Output final message when cloud-init has finished""" import logging -from textwrap import dedent from cloudinit import templater, util, version from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_ALWAYS -MODULE_DESCRIPTION = """\ -This module configures the final message that cloud-init writes. The message is -specified as a jinja template with the following variables set: - - - ``version``: cloud-init version - - ``timestamp``: time at cloud-init finish - - ``datasource``: cloud-init data source - - ``uptime``: system uptime - -This message is written to the cloud-init log (usually /var/log/cloud-init.log) -as well as stderr (which usually redirects to /var/log/cloud-init-output.log). - -Upon exit, this module writes the system uptime, timestamp, and cloud-init -version to ``/var/lib/cloud/instance/boot-finished`` independent of any -user data specified for this module. 
-""" frequency = PER_ALWAYS meta: MetaSchema = { "id": "cc_final_message", - "name": "Final Message", - "title": "Output final message when cloud-init has finished", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": frequency, - "examples": [ - dedent( - """\ - final_message: | - cloud-init has finished - version: $version - timestamp: $timestamp - datasource: $datasource - uptime: $uptime - """ - ) - ], "activate_by_schema_keys": [], -} +} # type: ignore LOG = logging.getLogger(__name__) -__doc__ = get_meta_doc(meta) # Jinja formatted default message FINAL_MESSAGE_DEF = ( @@ -110,5 +77,6 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: except Exception: util.logexc(LOG, "Failed to write boot finished file %s", boot_fin_fn) - if cloud.datasource.is_disconnected: - LOG.warning("Used fallback datasource") + if cloud.datasource.dsname == "None": + if cloud.datasource.sys_cfg.get("datasource_list") != ["None"]: + LOG.warning("Used fallback datasource") diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 00245b8f5..e1a56f91f 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -18,86 +18,24 @@ from abc import ABC, abstractmethod from contextlib import suppress from pathlib import Path -from textwrap import dedent from typing import Optional, Tuple from cloudinit import subp, temp_utils, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS, Distro from cloudinit.settings import PER_ALWAYS MODULE_DESCRIPTION = """\ -Growpart resizes partitions to fill the available disk space. -This is useful for cloud instances with a larger amount of disk space available -than the pristine image uses, as it allows the instance to automatically make -use of the extra space. Note that this only works if the partition to be -resized is the last one on a disk with classic partitioning scheme (MBR, BSD, -GPT). LVM, Btrfs and ZFS have no such restrictions. - -The devices on which to run growpart are specified as a list under the -``devices`` key. - -There is some functionality overlap between this module and the ``growroot`` -functionality of ``cloud-initramfs-tools``. However, there are some situations -where one tool is able to function and the other is not. The default -configuration for both should work for most cloud instances. To explicitly -prevent ``cloud-initramfs-tools`` from running ``growroot``, the file -``/etc/growroot-disabled`` can be created. By default, both ``growroot`` and -``cc_growpart`` will check for the existence of this file and will not run if -it is present. However, this file can be ignored for ``cc_growpart`` by setting -``ignore_growroot_disabled`` to ``true``. For more information on -``cloud-initramfs-tools`` see: https://launchpad.net/cloud-initramfs-tools - -On FreeBSD, there is also the ``growfs`` service, which has a lot of overlap -with ``cc_growpart`` and ``cc_resizefs``, but only works on the root partition. -In that configuration, we use it, otherwise, we fall back to ``gpart``. - -Note however, that ``growfs`` may insert a swap partition, if none is present, -unless instructed not to via ``growfs_swap_size=0`` in either ``kenv(1)``, or -``rc.conf(5)``. - -Growpart is enabled by default on the root partition. 
The default config for -growpart is:: - - growpart: - mode: auto - devices: ["/"] - ignore_growroot_disabled: false """ frequency = PER_ALWAYS meta: MetaSchema = { "id": "cc_growpart", - "name": "Growpart", - "title": "Grow partitions", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": frequency, - "examples": [ - dedent( - """\ - growpart: - mode: auto - devices: ["/"] - ignore_growroot_disabled: false - """ - ), - dedent( - """\ - growpart: - mode: growpart - devices: - - "/" - - "/dev/vdb1" - ignore_growroot_disabled: true - """ - ), - ], "activate_by_schema_keys": [], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore DEFAULT_CONFIG = { "mode": "auto", @@ -118,36 +56,6 @@ class RESIZE: LOG = logging.getLogger(__name__) -def resizer_factory(mode: str, distro: Distro, devices: list): - resize_class = None - if mode == "auto": - for _name, resizer in RESIZERS: - cur = resizer(distro) - if cur.available(devices=devices): - resize_class = cur - break - - if not resize_class: - raise ValueError("No resizers available") - - else: - mmap = {} - for k, v in RESIZERS: - mmap[k] = v - - if mode not in mmap: - raise TypeError("unknown resize mode %s" % mode) - - mclass = mmap[mode](distro) - if mclass.available(devices=devices): - resize_class = mclass - - if not resize_class: - raise ValueError("mode %s not available" % mode) - - return resize_class - - class ResizeFailedException(Exception): pass @@ -161,7 +69,7 @@ def available(self, devices: list) -> bool: ... @abstractmethod - def resize(self, diskdev, partnum, partdev): + def resize(self, diskdev, partnum, partdev, fs): ... @@ -178,8 +86,8 @@ def available(self, devices: list): pass return False - def resize(self, diskdev, partnum, partdev): - before = get_size(partdev) + def resize(self, diskdev, partnum, partdev, fs): + before = get_size(partdev, fs) # growpart uses tmp dir to store intermediate states # and may conflict with systemd-tmpfiles-clean @@ -211,7 +119,7 @@ def resize(self, diskdev, partnum, partdev): util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum) raise ResizeFailedException(e) from e - return (before, get_size(partdev)) + return (before, get_size(partdev, fs)) class ResizeGrowFS(Resizer): @@ -231,15 +139,15 @@ def available(self, devices: list): """growfs only works on the root partition""" return os.path.isfile("/etc/rc.d/growfs") and devices == ["/"] - def resize(self, diskdev, partnum, partdev): - before = get_size(partdev) + def resize(self, diskdev, partnum, partdev, fs): + before = get_size(partdev, fs) try: self._distro.manage_service(action="onestart", service="growfs") except subp.ProcessExecutionError as e: util.logexc(LOG, "Failed: service growfs onestart") raise ResizeFailedException(e) from e - return (before, get_size(partdev)) + return (before, get_size(partdev, fs)) class ResizeGpart(Resizer): @@ -255,7 +163,7 @@ def available(self, devices: list): pass return False - def resize(self, diskdev, partnum, partdev): + def resize(self, diskdev, partnum, partdev, fs): """ GPT disks store metadata at the beginning (primary) and at the end (secondary) of the disk. 
When launching an image with a @@ -270,79 +178,79 @@ def resize(self, diskdev, partnum, partdev): util.logexc(LOG, "Failed: gpart recover %s", diskdev) raise ResizeFailedException(e) from e - before = get_size(partdev) + before = get_size(partdev, fs) try: subp.subp(["gpart", "resize", "-i", partnum, diskdev]) except subp.ProcessExecutionError as e: util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev) raise ResizeFailedException(e) from e - return (before, get_size(partdev)) + return (before, get_size(partdev, fs)) -def get_size(filename) -> Optional[int]: +def resizer_factory(mode: str, distro: Distro, devices: list) -> Resizer: + resize_class = None + if mode == "auto": + for _name, resizer in RESIZERS: + cur = resizer(distro) + if cur.available(devices=devices): + resize_class = cur + break + + if not resize_class: + raise ValueError("No resizers available") + + else: + mmap = {} + for k, v in RESIZERS: + mmap[k] = v + + if mode not in mmap: + raise TypeError("unknown resize mode %s" % mode) + + mclass = mmap[mode](distro) + if mclass.available(devices=devices): + resize_class = mclass + + if not resize_class: + raise ValueError("mode %s not available" % mode) + + return resize_class + + +def get_size(filename, fs) -> Optional[int]: fd = None try: fd = os.open(filename, os.O_RDONLY) return os.lseek(fd, 0, os.SEEK_END) except FileNotFoundError: + if fs == "zfs": + return get_zfs_size(filename) return None finally: if fd: os.close(fd) -def device_part_info(devpath): - # convert an entry in /dev/ to parent disk and partition number - - # input of /dev/vdb or /dev/disk/by-label/foo - # rpath is hopefully a real-ish path in /dev (vda, sdb..) - rpath = os.path.realpath(devpath) - - bname = os.path.basename(rpath) - syspath = "/sys/class/block/%s" % bname - - if util.is_BSD(): - # FreeBSD doesn't know of sysfs so just get everything we need from - # the device, like /dev/vtbd0p2. 
- fpart = "/dev/" + util.find_freebsd_part(devpath) - # Handle both GPT partitions and MBR slices with partitions - m = re.search( - r"^(?P<dev>/dev/.+)[sp](?P<part_slice>\d+[a-z]*)$", fpart - ) - if m: - return m["dev"], m["part_slice"] - - if not os.path.exists(syspath): - raise ValueError("%s had no syspath (%s)" % (devpath, syspath)) - - ptpath = os.path.join(syspath, "partition") - if not os.path.exists(ptpath): - raise TypeError("%s not a partition" % devpath) - - ptnum = util.load_text_file(ptpath).rstrip() - - # for a partition, real syspath is something like: - # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1 - rsyspath = os.path.realpath(syspath) - disksyspath = os.path.dirname(rsyspath) - - diskmajmin = util.load_text_file(os.path.join(disksyspath, "dev")).rstrip() - diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin) - - # diskdevpath has something like 253:0 - # and udev has put links in /dev/block/253:0 to the device name in /dev/ - return (diskdevpath, ptnum) +def get_zfs_size(dataset) -> Optional[int]: + zpool = dataset.split("/")[0] + try: + size, _ = subp.subp(["zpool", "get", "-Hpovalue", "size", zpool]) + except subp.ProcessExecutionError as e: + LOG.debug("Failed: zpool get size %s: %s", zpool, e) + return None + return int(size.strip()) def devent2dev(devent): if devent.startswith("/dev/"): - return devent - else: - result = util.get_mount_info(devent) - if not result: - raise ValueError("Could not determine device of '%s' % dev_ent") - dev = result[0] + return devent, None + + result = util.get_mount_info(devent) + if not result: + raise ValueError("Could not determine device of '%s'" % devent) + dev = result[0] + fs = result[1] container = util.is_container() @@ -353,51 +261,9 @@ def devent2dev(devent): if os.path.exists(dev): # if /dev/root exists, but we failed to convert # that to a "real" /dev/ path device, then return it. - return dev + return dev, None raise ValueError("Unable to find device '/dev/root'") - return dev - - -def get_mapped_device(blockdev, distro_name): - """Returns underlying block device for a mapped device. - - If it is mapped, blockdev will usually take the form of - /dev/mapper/some_name - - If blockdev is a symlink pointing to a /dev/dm-* device, return - the device pointed to. Otherwise, return None. - """ - realpath = os.path.realpath(blockdev) - - if distro_name == "alpine": - if blockdev.startswith("/dev/mapper"): - # For Alpine systems a /dev/mapper/ entry is *not* a - # symlink to the related /dev/dm-X block device, - # rather it is a block device itself.
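`get_zfs_size()` above takes the pool name from the first path component of the dataset and asks `zpool` for the pool's size in bytes. A rough stdlib equivalent for illustration; the diff passes the flags as the single token `-Hpovalue`, which getopt-style parsing accepts, while they are spelled out here (`-H` no header, `-p` exact bytes, `-o value` print only the value column) for readability:

```python
import subprocess
from typing import Optional


def zpool_size_bytes(dataset: str) -> Optional[int]:
    zpool = dataset.split("/")[0]  # "rpool/ROOT/default" -> "rpool"
    try:
        out = subprocess.run(
            ["zpool", "get", "-Hpo", "value", "size", zpool],
            capture_output=True,
            text=True,
            check=True,
        ).stdout
    except (OSError, subprocess.CalledProcessError):
        return None
    return int(out.strip())
```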
- - # Get the major/minor of the /dev/mapper block device - major = os.major(os.stat(blockdev).st_rdev) - minor = os.minor(os.stat(blockdev).st_rdev) - - # Find the /dev/dm-X device with the same major/minor - with os.scandir("/dev/") as it: - for deventry in it: - if deventry.name.startswith("dm-"): - res = os.lstat(deventry.path) - if stat.S_ISBLK(res.st_mode): - if ( - os.major(os.stat(deventry.path).st_rdev) - == major - and os.minor(os.stat(deventry.path).st_rdev) - == minor - ): - realpath = os.path.realpath(deventry.path) - break - - if realpath.startswith("/dev/dm-"): - LOG.debug("%s is a mapped device pointing to %s", blockdev, realpath) - return realpath - return None + return dev, fs def is_encrypted(blockdev, partition) -> bool: @@ -499,15 +365,64 @@ def resize_encrypted(blockdev, partition) -> Tuple[str, str]: ) -def resize_devices(resizer, devices, distro_name): +def _call_resizer(resizer, devent, disk, ptnum, blockdev, fs): + info = [] + try: + old, new = resizer.resize(disk, ptnum, blockdev, fs) + if old == new: + info.append( + ( + devent, + RESIZE.NOCHANGE, + "no change necessary (%s, %s)" % (disk, ptnum), + ) + ) + elif new is None or old is None: + msg = "" + if disk is not None and ptnum is not None: + msg = "changed (%s, %s) size, new size is unknown" % ( + disk, + ptnum, + ) + else: + msg = "changed (%s) size, new size is unknown" % blockdev + info.append((devent, RESIZE.CHANGED, msg)) + else: + msg = "" + if disk is not None and ptnum is not None: + msg = "changed (%s, %s) from %s to %s" % ( + disk, + ptnum, + old, + new, + ) + else: + msg = "changed (%s) from %s to %s" % (blockdev, old, new) + info.append((devent, RESIZE.CHANGED, msg)) + + except ResizeFailedException as e: + info.append( + ( + devent, + RESIZE.FAILED, + "failed to resize: disk=%s, ptnum=%s: %s" % (disk, ptnum, e), + ) + ) + return info + + +def resize_devices(resizer: Resizer, devices, distro: Distro): # returns a tuple of tuples containing (entry-in-devices, action, message) devices = copy.copy(devices) info = [] while devices: devent = devices.pop(0) + disk = None + ptnum = None + try: - blockdev = devent2dev(devent) + blockdev, fs = devent2dev(devent) except ValueError as e: info.append( ( @@ -518,6 +433,16 @@ def resize_devices(resizer, devices, distro_name): ) continue + LOG.debug("growpart found fs=%s", fs) + # TODO: This seems to be the wrong place for this. On Linux, the + # `os.stat(blockdev)` call below will fail on a ZFS filesystem. + # We then delay resizing the FS until calling cc_resizefs. Yet + # the code here is to accommodate the FreeBSD `growfs` service. + # Ideally we would grow the FS for both OSes in the same module.
+ if fs == "zfs" and isinstance(resizer, ResizeGrowFS): + info += _call_resizer(resizer, devent, disk, ptnum, blockdev, fs) + continue + try: statret = os.stat(blockdev) except OSError as e: @@ -542,7 +467,7 @@ def resize_devices(resizer, devices, distro_name): ) continue - underlying_blockdev = get_mapped_device(blockdev, distro_name) + underlying_blockdev = distro.get_mapped_device(blockdev) if underlying_blockdev: try: # We need to resize the underlying partition first @@ -586,7 +511,7 @@ def resize_devices(resizer, devices, distro_name): # though we should probably grow the ability to continue try: - (disk, ptnum) = device_part_info(blockdev) + disk, ptnum = distro.device_part_info(blockdev) except (TypeError, ValueError) as e: info.append( ( @@ -597,44 +522,7 @@ def resize_devices(resizer, devices, distro_name): ) continue - try: - old, new = resizer.resize(disk, ptnum, blockdev) - if old == new: - info.append( - ( - devent, - RESIZE.NOCHANGE, - "no change necessary (%s, %s)" % (disk, ptnum), - ) - ) - elif new is None or old is None: - info.append( - ( - devent, - RESIZE.CHANGED, - "changed (%s, %s) size, new size is unknown" - % (disk, ptnum), - ) - ) - else: - info.append( - ( - devent, - RESIZE.CHANGED, - "changed (%s, %s) from %s to %s" - % (disk, ptnum, old, new), - ) - ) - - except ResizeFailedException as e: - info.append( - ( - devent, - RESIZE.FAILED, - "failed to resize: disk=%s, ptnum=%s: %s" - % (disk, ptnum, e), - ) - ) + info += _call_resizer(resizer, devent, disk, ptnum, blockdev, fs) return info @@ -655,7 +543,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: if util.is_false(mode): if mode != "off": util.deprecate( - deprecated="Growpart's 'mode' key with value '{mode}'", + deprecated=f"Growpart's 'mode' key with value '{mode}'", deprecated_version="22.2", extra_message="Use 'off' instead.", ) @@ -685,7 +573,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: logfunc=LOG.debug, msg="resize_devices", func=resize_devices, - args=(resizer, devices, cloud.distro.name), + args=(resizer, devices, cloud.distro), ) for entry, action, msg in resized: if action == RESIZE.CHANGED: diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py index a7e2fbfa5..fef0f9972 100644 --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -10,53 +10,23 @@ import logging import os -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE from cloudinit.subp import ProcessExecutionError MODULE_DESCRIPTION = """\ -Configure which device is used as the target for grub installation. This module -can be enabled/disabled using the ``enabled`` config key in the ``grub_dpkg`` -config dict. This module automatically selects a disk using ``grub-probe`` if -no installation device is specified. - -The value which is placed into the debconf database is in the format which the -grub postinstall script expects. Normally, this is a /dev/disk/by-id/ value, -but we do fallback to the plain disk name if a by-id name is not present. - -If this module is executed inside a container, then the debconf database is -seeded with empty values, and install_devices_empty is set to true. 
""" -distros = ["ubuntu", "debian"] meta: MetaSchema = { "id": "cc_grub_dpkg", - "name": "Grub Dpkg", - "title": "Configure grub debconf installation device", - "description": MODULE_DESCRIPTION, - "distros": distros, + "distros": ["ubuntu", "debian"], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - grub_dpkg: - enabled: true - # BIOS mode (install_devices needs disk) - grub-pc/install_devices: /dev/sda - grub-pc/install_devices_empty: false - # EFI mode (install_devices needs partition) - grub-efi/install_devices: /dev/sda - """ - ) - ], "activate_by_schema_keys": [], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py index e9567da12..11b89ef73 100644 --- a/cloudinit/config/cc_install_hotplug.py +++ b/cloudinit/config/cc_install_hotplug.py @@ -2,12 +2,11 @@ """Install hotplug udev rules if supported and enabled""" import logging import os -from textwrap import dedent from cloudinit import stages, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.event import EventScope, EventType from cloudinit.settings import PER_INSTANCE @@ -15,48 +14,11 @@ meta: MetaSchema = { "id": "cc_install_hotplug", - "name": "Install Hotplug", - "title": "Install hotplug udev rules if supported and enabled", - "description": dedent( - """\ - This module will install the udev rules to enable hotplug if - supported by the datasource and enabled in the userdata. The udev - rules will be installed as - ``/etc/udev/rules.d/90-cloud-init-hook-hotplug.rules``. - - When hotplug is enabled, newly added network devices will be added - to the system by cloud-init. After udev detects the event, - cloud-init will refresh the instance metadata from the datasource, - detect the device in the updated metadata, then apply the updated - network configuration. - - Currently supported datasources: Openstack, EC2 - """ - ), "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - # Enable hotplug of network devices - updates: - network: - when: ["hotplug"] - """ - ), - dedent( - """\ - # Enable network hotplug alongside boot event - updates: - network: - when: ["boot", "hotplug"] - """ - ), - ], "activate_by_schema_keys": [], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_keyboard.py b/cloudinit/config/cc_keyboard.py index 66af85962..e6e7749ce 100644 --- a/cloudinit/config/cc_keyboard.py +++ b/cloudinit/config/cc_keyboard.py @@ -7,12 +7,11 @@ """keyboard: set keyboard layout""" import logging -from textwrap import dedent from cloudinit import distros from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE # FIXME: setting keyboard layout should be supported by all OSes. 
@@ -27,45 +26,12 @@ meta: MetaSchema = { "id": "cc_keyboard", - "name": "Keyboard", - "title": "Set keyboard layout", - "description": "Handle keyboard configuration.", "distros": supported_distros, - "examples": [ - dedent( - """\ - # Set keyboard layout to "us" - keyboard: - layout: us - """ - ), - dedent( - """\ - # Set specific keyboard layout, model, variant, options - keyboard: - layout: de - model: pc105 - variant: nodeadkeys - options: compose:rwin - """ - ), - dedent( - """\ - # For Alpine Linux set specific keyboard layout and variant, - # as used by setup-keymap. Model and options are ignored. - keyboard: - layout: gb - variant: gb-extd - """ - ), - ], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["keyboard"], -} +} # type: ignore -__doc__ = get_meta_doc(meta) - LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index 88a09c43e..3ca2c0882 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -10,63 +10,22 @@ import logging import os -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE # This is a tool that cloud init provides HELPER_TOOL_TPL = "%s/cloud-init/write-ssh-key-fingerprints" -distros = ["all"] - meta: MetaSchema = { "id": "cc_keys_to_console", - "name": "Keys to Console", - "title": "Control which SSH host keys may be written to console", - "description": ( - "For security reasons it may be desirable not to write SSH host keys" - " and their fingerprints to the console. To avoid either being written" - " to the console the ``emit_keys_to_console`` config key under the" - " main ``ssh`` config key can be used. To avoid the fingerprint of" - " types of SSH host keys being written to console the" - " ``ssh_fp_console_blacklist`` config key can be used. By default," - " all types of keys will have their fingerprints written to console." - " To avoid host keys of a key type being written to console the" - "``ssh_key_console_blacklist`` config key can be used. By default" - " all supported host keys are written to console." 
- ), - "distros": distros, - "examples": [ - dedent( - """\ - # Do not print any SSH keys to system console - ssh: - emit_keys_to_console: false - """ - ), - dedent( - """\ - # Do not print certain ssh key types to console - ssh_key_console_blacklist: [rsa] - """ - ), - dedent( - """\ - # Do not print specific ssh key fingerprints to console - ssh_fp_console_blacklist: - - E25451E0221B5773DEBFF178ECDACB160995AA89 - - FE76292D55E8B28EE6DB2B34B2D8A784F8C0AAB0 - """ - ), - ], + "distros": ["all"], "frequency": PER_INSTANCE, "activate_by_schema_keys": [], -} -__doc__ = get_meta_doc(meta) +} # type: ignore LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index b8b8179ae..6bd8ed5ee 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -10,14 +10,13 @@ import logging from itertools import chain -from textwrap import dedent from configobj import ConfigObj from cloudinit import subp, type_utils, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf" @@ -33,84 +32,13 @@ } } -MODULE_DESCRIPTION = """\ -This module installs and configures ``landscape-client``. The landscape client -will only be installed if the key ``landscape`` is present in config. Landscape -client configuration is given under the ``client`` key under the main -``landscape`` config key. The config parameters are not interpreted by -cloud-init, but rather are converted into a ConfigObj formatted file and -written out to the `[client]` section in ``/etc/landscape/client.conf``. - -The following default client config is provided, but can be overridden:: - - landscape: - client: - log_level: "info" - url: "https://landscape.canonical.com/message-system" - ping_url: "http://landscape.canoncial.com/ping" - data_path: "/var/lib/landscape/client" - -.. note:: - see landscape documentation for client config keys - -.. note:: - if ``tags`` is defined, its contents should be a string delimited with - ``,`` rather than a list -""" -distros = ["ubuntu"] - meta: MetaSchema = { "id": "cc_landscape", - "name": "Landscape", - "title": "Install and configure landscape client", - "description": MODULE_DESCRIPTION, - "distros": distros, - "examples": [ - dedent( - """\ - # To discover additional supported client keys, run - # man landscape-config. 
- landscape: - client: - url: "https://landscape.canonical.com/message-system" - ping_url: "http://landscape.canonical.com/ping" - data_path: "/var/lib/landscape/client" - http_proxy: "http://my.proxy.com/foobar" - https_proxy: "https://my.proxy.com/foobar" - tags: "server,cloud" - computer_title: "footitle" - registration_key: "fookey" - account_name: "fooaccount" - """ - ), - dedent( - """\ - # Minimum viable config requires account_name and computer_title - landscape: - client: - computer_title: kiosk 1 - account_name: Joe's Biz - """ - ), - dedent( - """\ - # To install landscape-client from a PPA, specify apt.sources - apt: - sources: - trunk-testing-ppa: - source: ppa:landscape/self-hosted-beta - landscape: - client: - account_name: myaccount - computer_title: himom - """ - ), - ], + "distros": ["ubuntu"], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["landscape"], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) @@ -151,8 +79,16 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: ] ) ) - subp.subp(["landscape-config", "--silent"] + cmd_params) - util.write_file(LS_DEFAULT_FILE, "RUN=1\n") + try: + subp.subp(["landscape-config", "--silent", "--is-registered"], rcs=[5]) + subp.subp(["landscape-config", "--silent"] + cmd_params) + except subp.ProcessExecutionError as e: + if e.exit_code == 0: + LOG.warning("Client already registered to Landscape") + else: + msg = f"Failure registering client:\n{e}" + util.logexc(LOG, msg) + raise RuntimeError(msg) from e def merge_together(objs): diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py index 04765624d..8cb2491f6 100644 --- a/cloudinit/config/cc_locale.py +++ b/cloudinit/config/cc_locale.py @@ -9,46 +9,20 @@ """Locale: set system locale""" import logging -from textwrap import dedent from cloudinit import util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE -distros = ["all"] - meta: MetaSchema = { "id": "cc_locale", - "name": "Locale", - "title": "Set system locale", - "description": dedent( - """\ - Configure the system locale and apply it system wide. 
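The registration flow added to `cc_landscape` above leans on `subp.subp(..., rcs=[5])`: exit code 5 ("not yet registered") is the only status treated as success, so exit code 0 ("already registered") surfaces as a `ProcessExecutionError` that gets downgraded to a warning. A rough stdlib sketch of the same control flow; the exit-code meanings are taken from the hunk itself, not from landscape-config documentation:

```python
import subprocess


def register(cmd_params):
    # Probe first; 0 means "already registered", 5 means "not registered".
    probe = subprocess.run(
        ["landscape-config", "--silent", "--is-registered"]
    )
    if probe.returncode == 0:
        print("Client already registered to Landscape")  # warn, skip re-register
        return
    if probe.returncode != 5:
        raise RuntimeError(f"registration probe failed: {probe.returncode}")
    subprocess.run(
        ["landscape-config", "--silent", *cmd_params], check=True
    )
```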
By default use - the locale specified by the datasource.""" - ), - "distros": distros, - "examples": [ - dedent( - """\ - # Set the locale to ar_AE - locale: ar_AE - """ - ), - dedent( - """\ - # Set the locale to fr_CA in /etc/alternate_path/locale - locale: fr_CA - locale_configfile: /etc/alternate_path/locale - """ - ), - ], + "distros": ["all"], "frequency": PER_INSTANCE, "activate_by_schema_keys": [], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 9f267b4c9..767c889d5 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -8,160 +8,29 @@ import logging import os -from textwrap import dedent from typing import List, Tuple -from cloudinit import safeyaml, subp, util +import yaml + +from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) _DEFAULT_NETWORK_NAME = "lxdbr0" - -MODULE_DESCRIPTION = """\ -This module configures lxd with user specified options using ``lxd init``. -If lxd is not present on the system but lxd configuration is provided, then -lxd will be installed. If the selected storage backend userspace utility is -not installed, it will be installed. If network bridge configuration is -provided, then lxd-bridge will be configured accordingly. -""" - -distros = ["ubuntu"] - meta: MetaSchema = { "id": "cc_lxd", - "name": "LXD", - "title": "Configure LXD with ``lxd init`` and optionally lxd-bridge", - "description": MODULE_DESCRIPTION, - "distros": distros, - "examples": [ - dedent( - """\ - # Simplest working directory backed LXD configuration - lxd: - init: - storage_backend: dir - """ - ), - dedent( - """\ - # LXD init showcasing cloud-init's LXD config options - lxd: - init: - network_address: 0.0.0.0 - network_port: 8443 - storage_backend: zfs - storage_pool: datapool - storage_create_loop: 10 - bridge: - mode: new - mtu: 1500 - name: lxdbr0 - ipv4_address: 10.0.8.1 - ipv4_netmask: 24 - ipv4_dhcp_first: 10.0.8.2 - ipv4_dhcp_last: 10.0.8.3 - ipv4_dhcp_leases: 250 - ipv4_nat: true - ipv6_address: fd98:9e0:3744::1 - ipv6_netmask: 64 - ipv6_nat: true - domain: lxd - """ - ), - dedent( - """\ - # For more complex non-iteractive LXD configuration of networks, - # storage_pools, profiles, projects, clusters and core config, - # `lxd:preseed` config will be passed as stdin to the command: - # lxd init --preseed - # See https://documentation.ubuntu.com/lxd/en/latest/howto/initialize/#non-interactive-configuration or - # run: lxd init --dump to see viable preseed YAML allowed. - # - # Preseed settings configuring the LXD daemon for HTTPS connections - # on 192.168.1.1 port 9999, a nested profile which allows for - # LXD nesting on containers and a limited project allowing for - # RBAC approach when defining behavior for sub projects. 
- lxd: - preseed: | - config: - core.https_address: 192.168.1.1:9999 - networks: - - config: - ipv4.address: 10.42.42.1/24 - ipv4.nat: true - ipv6.address: fd42:4242:4242:4242::1/64 - ipv6.nat: true - description: "" - name: lxdbr0 - type: bridge - project: default - storage_pools: - - config: - size: 5GiB - source: /var/snap/lxd/common/lxd/disks/default.img - description: "" - name: default - driver: zfs - profiles: - - config: {} - description: Default LXD profile - devices: - eth0: - name: eth0 - network: lxdbr0 - type: nic - root: - path: / - pool: default - type: disk - name: default - - config: {} - security.nesting: true - devices: - eth0: - name: eth0 - network: lxdbr0 - type: nic - root: - path: / - pool: default - type: disk - name: nested - projects: - - config: - features.images: true - features.networks: true - features.profiles: true - features.storage.volumes: true - description: Default LXD project - name: default - - config: - features.images: false - features.networks: true - features.profiles: false - features.storage.volumes: false - description: Limited Access LXD project - name: limited - - - """ # noqa: E501 - ), - ], + "distros": ["ubuntu"], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["lxd"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore -def supplemental_schema_validation( - init_cfg: dict, bridge_cfg: dict, preseed_str: str -): +def supplemental_schema_validation(init_cfg, bridge_cfg, preseed_str): """Validate user-provided lxd network and bridge config option values. @raises: ValueError describing invalid values provided. @@ -511,8 +380,8 @@ def get_required_packages(init_cfg: dict, preseed_str: str) -> List[str]: if preseed_str and "storage_pools" in preseed_str: # Assume correct YAML preseed format try: - preseed_cfg = safeyaml.load(preseed_str) - except (safeyaml.YAMLError, TypeError, ValueError): + preseed_cfg = yaml.safe_load(preseed_str) + except (yaml.YAMLError, TypeError, ValueError): LOG.warning( "lxd.preseed string value is not YAML. " " Unable to determine required storage driver packages to" diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index e5d0145b8..a70bd59d6 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -7,12 +7,11 @@ # # This file is part of cloud-init. See LICENSE file for license information. -""" Mcollective: Install, configure and start mcollective""" +"""Mcollective: Install, configure and start mcollective""" import errno import io import logging -from textwrap import dedent # Used since this can maintain comments # and doesn't need a top level section @@ -21,71 +20,20 @@ from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" SERVER_CFG = "/etc/mcollective/server.cfg" -MODULE_DESCRIPTION = """\ -This module installs, configures and starts mcollective. If the ``mcollective`` -key is present in config, then mcollective will be installed and started. - -Configuration for ``mcollective`` can be specified in the ``conf`` key under -``mcollective``. Each config value consists of a key value pair and will be -written to ``/etc/mcollective/server.cfg``. 
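The `cc_lxd` hunk above swaps `cloudinit.safeyaml` for PyYAML's `yaml.safe_load` when sniffing the preseed for storage drivers. The guarded-load shape, isolated into a hypothetical helper for clarity:

```python
import yaml


def parse_preseed(preseed_str):
    # safe_load returns None for an empty document; normalize to a dict,
    # and mirror the module's tolerance for malformed input.
    try:
        return yaml.safe_load(preseed_str) or {}
    except (yaml.YAMLError, TypeError, ValueError):
        return {}
```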
The ``public-cert`` and -``private-cert`` keys, if present in conf may be used to specify the public and -private certificates for mcollective. Their values will be written to -``/etc/mcollective/ssl/server-public.pem`` and -``/etc/mcollective/ssl/server-private.pem``. - -.. note:: - The ec2 metadata service is readable by non-root users. - If security is a concern, use include-once and ssl urls. -""" - -distros = ["all"] - meta: MetaSchema = { "id": "cc_mcollective", - "name": "Mcollective", - "title": "Install, configure and start mcollective", - "description": MODULE_DESCRIPTION, - "distros": distros, - "examples": [ - dedent( - """\ - # Provide server private and public key and provide the following - # config settings in /etc/mcollective/server.cfg: - # loglevel: debug - # plugin.stomp.host: dbhost - - # WARNING WARNING WARNING - # The ec2 metadata service is a network service, and thus is - # readable by non-root users on the system - # (ie: 'ec2metadata --user-data') - # If you want security for this, please use include-once + SSL urls - mcollective: - conf: - loglevel: debug - plugin.stomp.host: dbhost - public-cert: | - -------BEGIN CERTIFICATE-------- - - -------END CERTIFICATE-------- - private-cert: | - -------BEGIN CERTIFICATE-------- - - -------END CERTIFICATE-------- - """ - ), - ], + "distros": ["all"], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["mcollective"], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 4cc32be55..0fdcf3c19 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -13,110 +13,19 @@ import os import re from string import whitespace -from textwrap import dedent from cloudinit import subp, type_utils, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -This module can add or remove mountpoints from ``/etc/fstab`` as well as -configure swap. The ``mounts`` config key takes a list of fstab entries to add. -Each entry is specified as a list of ``[ fs_spec, fs_file, fs_vfstype, -fs_mntops, fs-freq, fs_passno ]``. For more information on these options, -consult the manual for ``/etc/fstab``. When specifying the ``fs_spec``, if the -device name starts with one of ``xvd``, ``sd``, ``hd``, or ``vd``, the leading -``/dev`` may be omitted. - -Any mounts that do not appear to either an attached block device or network -resource will be skipped with a log like "Ignoring nonexistent mount ...". - -Cloud-init will attempt to add the following mount directives if available and -unconfigured in `/etc/fstab`:: - - mounts: - - ["ephemeral0", "/mnt", "auto",\ -"defaults,nofail,x-systemd.requires=cloud-init.service", "0", "2"] - - ["swap", "none", "swap", "sw", "0", "0"] - -In order to remove a previously listed mount, an entry can be added to -the `mounts` list containing ``fs_spec`` for the device to be removed but no -mountpoint (i.e. ``[ swap ]`` or ``[ swap, null ]``). - -The ``mount_default_fields`` config key allows default values to be specified -for the fields in a ``mounts`` entry that are not specified, aside from the -``fs_spec`` and the ``fs_file`` fields. If specified, this must be a list -containing 6 values. 
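Further below, `create_swap()` in `cc_mounts` gains an explicit failure branch for when no allocation tool is available. A sketch of the selection logic that branch guards; the fallocate-before-dd preference order is an assumption here, since only the `dd` command and the raised error text are visible in the hunk:

```python
import shutil


def swap_create_cmd(fname, size_mb):
    if shutil.which("fallocate"):
        return ["fallocate", "-l", f"{size_mb}M", fname]
    if shutil.which("dd"):
        return [
            "dd", "if=/dev/zero", f"of={fname}", "bs=1M", f"count={size_mb}"
        ]
    raise RuntimeError(
        "Missing dependency: 'dd' and 'fallocate' are not available"
    )
```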
It defaults to:: - - mount_default_fields: [none, none, "auto",\ -"defaults,nofail,x-systemd.requires=cloud-init.service", "0", "2"] - -Non-systemd init systems will vary in ``mount_default_fields``. - -Swap files can be configured by setting the path to the swap file to create -with ``filename``, the size of the swap file with ``size`` maximum size of -the swap file if using an ``size: auto`` with ``maxsize``. By default no -swap file is created. - -.. note:: - If multiple mounts are specified where a subsequent mount's mountpoint is - inside of a previously declared mount's mountpoint (i.e. the 1st mount has - a mountpoint of ``/abc`` and the 2nd mount has a mountpoint of - ``/abc/def``) then this will not work as expected - ``cc_mounts`` first - creates the directories for all the mountpoints **before** it starts to - perform any mounts and so the sub-mountpoint directory will not be created - correctly inside the parent mountpoint. - - For systems using util-linux's ``mount`` program this issue can be - worked around by specifying ``X-mount.mkdir`` as part of a ``fs_mntops`` - value for the subsequent mount entry. -""" - -example = dedent( - """\ - # Mount ephemeral0 with "noexec" flag, /dev/sdc with mount_default_fields, - # and /dev/xvdh with custom fs_passno "0" to avoid fsck on the mount. - # Also provide an automatically sized swap with a max size of 10485760 - # bytes. - mounts: - - [ /dev/ephemeral0, /mnt, auto, "defaults,noexec" ] - - [ sdc, /opt/data ] - - [ xvdh, /opt/data, auto, "defaults,nofail", "0", "0" ] - mount_default_fields: [None, None, auto, "defaults,nofail", "0", "2"] - swap: - filename: /my/swapfile - size: auto - maxsize: 10485760 - """ -) - -distros = ["all"] - meta: MetaSchema = { "id": "cc_mounts", - "name": "Mounts", - "title": "Configure mount points and swap files", - "description": MODULE_DESCRIPTION, - "distros": distros, - "examples": [ - example, - dedent( - """\ - # Create a 2 GB swap file at /swapfile using human-readable values - swap: - filename: /swapfile - size: 2G - maxsize: 2G - """ - ), - ], + "distros": ["all"], "frequency": PER_INSTANCE, "activate_by_schema_keys": [], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0 DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" @@ -304,6 +213,10 @@ def create_swap(fname, size, method): "bs=1M", "count=%s" % size, ] + else: + raise subp.ProcessExecutionError( + "Missing dependency: 'dd' and 'fallocate' are not available" + ) try: subp.subp(cmd, capture=True) @@ -427,7 +340,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: uses_systemd = cloud.distro.uses_systemd() if uses_systemd: def_mnt_opts = ( - "defaults,nofail,x-systemd.requires=cloud-init.service,_netdev" + "defaults,nofail,x-systemd.after=cloud-init.service,_netdev" ) defvals = [None, None, "auto", def_mnt_opts, "0", "2"] diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 1f636d09a..3d659525e 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -9,12 +9,11 @@ import copy import logging import os -from textwrap import dedent from cloudinit import subp, temp_utils, templater, type_utils, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -25,6 +24,7 @@ distros = [ 
"almalinux", "alpine", + "azurelinux", "centos", "cloudlinux", "cos", @@ -109,6 +109,15 @@ "service_name": "ntpd", }, }, + "azurelinux": { + "chrony": { + "service_name": "chronyd", + }, + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf", + }, + }, "centos": { "ntp": { "service_name": "ntpd", @@ -224,6 +233,9 @@ for distro in ("opensuse-microos", "opensuse-tumbleweed", "opensuse-leap"): DISTRO_CLIENT_CONFIG[distro] = DISTRO_CLIENT_CONFIG["opensuse"] +for distro in ("almalinux", "cloudlinux"): + DISTRO_CLIENT_CONFIG[distro] = DISTRO_CLIENT_CONFIG["rhel"] + for distro in ("sle_hpc", "sle-micro"): DISTRO_CLIENT_CONFIG[distro] = DISTRO_CLIENT_CONFIG["sles"] @@ -235,77 +247,10 @@ meta: MetaSchema = { "id": "cc_ntp", - "name": "NTP", - "title": "enable and configure ntp", - "description": dedent( - """\ - Handle ntp configuration. If ntp is not installed on the system and - ntp configuration is specified, ntp will be installed. If there is a - default ntp config file in the image or one is present in the - distro's ntp package, it will be copied to a file with ``.dist`` - appended to the filename before any changes are made. A list of ntp - pools and ntp servers can be provided under the ``ntp`` config key. - If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used - in the format ``{0-3}.{distro}.pool.ntp.org``.""" - ), "distros": distros, - "examples": [ - dedent( - """\ - # Override ntp with chrony configuration on Ubuntu - ntp: - enabled: true - ntp_client: chrony # Uses cloud-init default chrony configuration - """ - ), - dedent( - """\ - # Provide a custom ntp client configuration - ntp: - enabled: true - ntp_client: myntpclient - config: - confpath: /etc/myntpclient/myntpclient.conf - check_exe: myntpclientd - packages: - - myntpclient - service_name: myntpclient - template: | - ## template:jinja - # My NTP Client config - {% if pools -%}# pools{% endif %} - {% for pool in pools -%} - pool {{pool}} iburst - {% endfor %} - {%- if servers %}# servers - {% endif %} - {% for server in servers -%} - server {{server}} iburst - {% endfor %} - {% if peers -%}# peers{% endif %} - {% for peer in peers -%} - peer {{peer}} - {% endfor %} - {% if allow -%}# allow{% endif %} - {% for cidr in allow -%} - allow {{cidr}} - {% endfor %} - pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org] - servers: - - ntp.server.local - - ntp.ubuntu.com - - 192.168.23.2 - allow: - - 192.168.23.0/32 - peers: - - km001 - - km002""" - ), - ], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["ntp"], -} -__doc__ = get_meta_doc(meta) +} # type: ignore REQUIRED_NTP_CONFIG_KEYS = frozenset( diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index 42d8c0043..4edba27e7 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -9,12 +9,11 @@ import logging import os import time -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.log import flush_loggers from cloudinit.settings import PER_INSTANCE @@ -22,40 +21,10 @@ REBOOT_FILES = ("/var/run/reboot-required", "/run/reboot-needed") REBOOT_CMD = ["/sbin/reboot"] -MODULE_DESCRIPTION = """\ -This 
module allows packages to be updated, upgraded or installed during boot. -If any packages are to be installed or an upgrade is to be performed then the -package cache will be updated first. If a package installation or upgrade -requires a reboot, then a reboot can be performed if -``package_reboot_if_required`` is specified. -""" - meta: MetaSchema = { "id": "cc_package_update_upgrade_install", - "name": "Package Update Upgrade Install", - "title": "Update, upgrade, and install packages", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - packages: - - pwgen - - pastebinit - - [libpython3.8, 3.8.10-0ubuntu1~20.04.2] - - snap: - - certbot - - [juju, --edge] - - [lxd, --channel=5.15/stable] - - apt: - - mg - package_update: true - package_upgrade: true - package_reboot_if_required: true - """ - ) - ], "activate_by_schema_keys": [ "apt_update", "package_update", @@ -63,9 +32,8 @@ "package_upgrade", "packages", ], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) @@ -76,17 +44,20 @@ def _multi_cfg_bool_get(cfg, *keys): return False -def _fire_reboot(wait_attempts=6, initial_sleep=1, backoff=2): +def _fire_reboot( + wait_attempts: int = 6, initial_sleep: int = 1, backoff: int = 2 +): + """Run a reboot command and panic if it doesn't happen fast enough.""" subp.subp(REBOOT_CMD) - start = time.time() + start = time.monotonic() wait_time = initial_sleep for _i in range(wait_attempts): time.sleep(wait_time) wait_time *= backoff - elapsed = time.time() - start + elapsed = time.monotonic() - start LOG.debug("Rebooted, but still running after %s seconds", int(elapsed)) # If we got here, not good - elapsed = time.time() - start + elapsed = time.monotonic() - start raise RuntimeError( "Reboot did not happen after %s seconds!" % (int(elapsed)) ) diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 600bab081..50cecc03b 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -9,17 +9,14 @@ """Phone Home: Post data to url""" import logging -from textwrap import dedent from cloudinit import templater, url_helper, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE -frequency = PER_INSTANCE - POST_LIST_ALL = [ "pub_key_rsa", "pub_key_ecdsa", @@ -29,69 +26,13 @@ "fqdn", ] -MODULE_DESCRIPTION = """\ -This module can be used to post data to a remote host after boot is complete. -If the post url contains the string ``$INSTANCE_ID`` it will be replaced with -the id of the current instance. Either all data can be posted or a list of -keys to post. Available keys are: - - - ``pub_key_rsa`` - - ``pub_key_ecdsa`` - - ``pub_key_ed25519`` - - ``instance_id`` - - ``hostname`` - - ``fdqn`` - -Data is sent as ``x-www-form-urlencoded`` arguments. - -**Example HTTP POST**: - -.. 
code-block:: http - - POST / HTTP/1.1 - Content-Length: 1337 - User-Agent: Cloud-Init/21.4 - Accept-Encoding: gzip, deflate - Accept: */* - Content-Type: application/x-www-form-urlencoded - - pub_key_rsa=rsa_contents&pub_key_ecdsa=ecdsa_contents&pub_key_ed25519=ed25519_contents&instance_id=i-87018aed&hostname=myhost&fqdn=myhost.internal -""" - meta: MetaSchema = { "id": "cc_phone_home", - "name": "Phone Home", - "title": "Post data to url", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - phone_home: - url: http://example.com/$INSTANCE_ID/ - post: all - """ - ), - dedent( - """\ - phone_home: - url: http://example.com/$INSTANCE_ID/ - post: - - pub_key_rsa - - pub_key_ecdsa - - pub_key_ed25519 - - instance_id - - hostname - - fqdn - tries: 5 - """ - ), - ], "activate_by_schema_keys": ["phone_home"], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) # phone_home: # url: http://my.foo.bar/$INSTANCE/ diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index dd8bf8aec..5bd6b96d8 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -12,12 +12,11 @@ import re import subprocess import time -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE @@ -25,60 +24,13 @@ EXIT_FAIL = 254 -MODULE_DESCRIPTION = """\ -This module handles shutdown/reboot after all config modules have been run. By -default it will take no action, and the system will keep running unless a -package installation/upgrade requires a system reboot (e.g. installing a new -kernel) and ``package_reboot_if_required`` is true. - -Using this module ensures that cloud-init is entirely finished with -modules that would be executed. - -An example to distinguish delay from timeout: - -If you delay 5 (5 minutes) and have a timeout of -120 (2 minutes), then the max time until shutdown will be 7 minutes, though -it could be as soon as 5 minutes. Cloud-init will invoke 'shutdown +5' after -the process finishes, or when 'timeout' seconds have elapsed. - -.. note:: - With Alpine Linux any message value specified is ignored as Alpine's halt, - poweroff, and reboot commands do not support broadcasting a message. - -""" - meta: MetaSchema = { "id": "cc_power_state_change", - "name": "Power State Change", - "title": "Change power state", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - power_state: - delay: now - mode: poweroff - message: Powering off - timeout: 2 - condition: true - """ - ), - dedent( - """\ - power_state: - delay: 30 - mode: reboot - message: Rebooting machine - condition: test -f /var/tmp/reboot_me - """ - ), - ], "activate_by_schema_keys": ["power_state"], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) @@ -126,8 +78,8 @@ def check_condition(cond): def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: try: - (args, timeout, condition) = load_power_state(cfg, cloud.distro) - if args is None: + (arg_list, timeout, condition) = load_power_state(cfg, cloud.distro) + if arg_list is None: LOG.debug("no power_state provided. 
doing nothing") return except Exception as e: @@ -147,7 +99,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: devnull_fp = open(os.devnull, "w") - LOG.debug("After pid %s ends, will execute: %s", mypid, " ".join(args)) + LOG.debug("After pid %s ends, will execute: %s", mypid, " ".join(arg_list)) util.fork_cb( run_after_pid_gone, @@ -156,7 +108,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: timeout, condition, execmd, - [args, devnull_fp], + [arg_list, devnull_fp], ) @@ -205,7 +157,7 @@ def doexit(sysexit): def execmd(exe_args, output=None, data_in=None): ret = 1 try: - proc = subprocess.Popen( + proc = subprocess.Popen( # nosec B603 exe_args, stdin=subprocess.PIPE, stdout=output, @@ -223,7 +175,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, condition, func, args): # is no longer alive. After it is gone, or timeout has passed # execute func(args) msg = None - end_time = time.time() + timeout + end_time = time.monotonic() + timeout def fatal(msg): LOG.warning(msg) @@ -232,7 +184,7 @@ def fatal(msg): known_errnos = (errno.ENOENT, errno.ESRCH) while True: - if time.time() > end_time: + if time.monotonic() > end_time: msg = "timeout reached before %s ended" % pid break diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index ec09e1b8c..13cb49efc 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -13,7 +13,6 @@ import socket from contextlib import suppress from io import StringIO -from textwrap import dedent from typing import List, Union import yaml @@ -21,7 +20,7 @@ from cloudinit import helpers, subp, temp_utils, url_helper, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS, Distro, PackageInstallerError from cloudinit.settings import PER_INSTANCE @@ -29,85 +28,12 @@ PUPPET_AGENT_DEFAULT_ARGS = ["--test"] PUPPET_PACKAGE_NAMES = ("puppet-agent", "puppet") -MODULE_DESCRIPTION = """\ -This module handles puppet installation and configuration. If the ``puppet`` -key does not exist in global configuration, no action will be taken. If a -config entry for ``puppet`` is present, then by default the latest version of -puppet will be installed. If the ``puppet`` config key exists in the config -archive, this module will attempt to start puppet even if no installation was -performed. - -The module also provides keys for configuring the new puppet 4 paths and -installing the puppet package from the puppetlabs repositories: -https://docs.puppet.com/puppet/4.2/reference/whered_it_go.html -The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and -``csr_attributes_path``. If unset, their values will default to -ones that work with puppet 3.x and with distributions that ship modified -puppet 4.x that uses the old paths. 
-""" - meta: MetaSchema = { "id": "cc_puppet", - "name": "Puppet", - "title": "Install, configure and start puppet", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - puppet: - install: true - version: "7.7.0" - install_type: "aio" - collection: "puppet7" - aio_install_url: 'https://git.io/JBhoQ' - cleanup: true - conf_file: "/etc/puppet/puppet.conf" - ssl_dir: "/var/lib/puppet/ssl" - csr_attributes_path: "/etc/puppet/csr_attributes.yaml" - exec: true - exec_args: ['--test'] - conf: - agent: - server: "puppetserver.example.org" - certname: "%i.%f" - ca_cert: | - -----BEGIN CERTIFICATE----- - MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe - Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf - MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc - b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu - 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA - qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv - T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd - BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG - SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf - +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb - hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d - -----END CERTIFICATE----- - csr_attributes: - custom_attributes: - 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290 - extension_requests: - pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E - pp_image_name: my_ami_image - pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290 - """ # noqa: E501 - ), - dedent( - """\ - puppet: - install_type: "packages" - package_name: "puppet" - exec: false - """ - ), - ], "activate_by_schema_keys": ["puppet"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 394579467..77a2a26a7 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -13,13 +13,12 @@ import os import re import stat -from textwrap import dedent from typing import Optional from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_ALWAYS @@ -27,30 +26,10 @@ meta: MetaSchema = { "id": "cc_resizefs", - "name": "Resizefs", - "title": "Resize filesystem", - "description": dedent( - """\ - Resize a filesystem to use all available space on partition. This - module is useful along with ``cc_growpart`` and will ensure that if the - root partition has been resized the root filesystem will be resized - along with it. By default, ``cc_resizefs`` will resize the root - partition and will block the boot process while the resize command is - running. Optionally, the resize operation can be performed in the - background while cloud-init continues running modules. This can be - enabled by setting ``resize_rootfs`` to ``noblock``. 
This module can be - disabled altogether by setting ``resize_rootfs`` to ``false``.""" - ), "distros": [ALL_DISTROS], - "examples": [ - "resize_rootfs: false # disable root filesystem resize operation", - "resize_rootfs: noblock # runs resize operation in the background", - ], "frequency": PER_ALWAYS, "activate_by_schema_keys": [], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore LOG = logging.getLogger(__name__) @@ -193,7 +172,7 @@ def maybe_get_writable_device_path(devpath, info): @param info: String representing information about the requested device. @param log: Logger to which logs will be added upon error. - @returns devpath or updated devpath per kernel commandline if the device + @returns devpath or updated devpath per kernel command line if the device path is a writable block device, returns None otherwise. """ container = util.is_container() diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index aa88919cc..408bbbca8 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -9,12 +9,11 @@ """Resolv Conf: configure resolv.conf""" import logging -from textwrap import dedent from cloudinit import templater, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -24,40 +23,11 @@ "/etc/systemd/resolved.conf": "systemd.resolved.conf", } -MODULE_DESCRIPTION = """\ -Unless manually editing :file:`/etc/resolv.conf` is the correct way to manage -nameserver information on your operating system, you do not want to use -this module. Many distros have moved away from manually editing ``resolv.conf`` -so please verify that this is the preferred nameserver management method for -your distro before using this module. - -Note that using :ref:`network_config` is preferred, rather than using this -module, when possible. - -This module is intended to manage resolv.conf in environments where early -configuration of resolv.conf is necessary for further bootstrapping and/or -where configuration management such as puppet or chef own DNS configuration. - -When using a :ref:`datasource_config_drive` and a RHEL-like system, -resolv.conf will also be managed automatically due to the available -information provided for DNS servers in the :ref:`network_config_v2` format. -For those that wish to have different settings, use this module. - -In order for the ``resolv_conf`` section to be applied, ``manage_resolv_conf`` -must be set ``true``. - -.. note:: - For Red Hat with sysconfig, be sure to set PEERDNS=no for all DHCP - enabled NICs. 
-""" - meta: MetaSchema = { "id": "cc_resolv_conf", - "name": "Resolv Conf", - "title": "Configure resolv.conf", - "description": MODULE_DESCRIPTION, "distros": [ "alpine", + "azurelinux", "fedora", "mariner", "opensuse", @@ -72,31 +42,8 @@ "openeuler", ], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - manage_resolv_conf: true - resolv_conf: - nameservers: - - 8.8.8.8 - - 8.8.4.4 - searchdomains: - - foo.example.com - - bar.example.com - domain: example.com - sortlist: - - 10.0.0.1/255 - - 10.0.0.2 - options: - rotate: true - timeout: 1 - """ - ) - ], "activate_by_schema_keys": ["manage_resolv_conf"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def generate_resolv_conf(template_fn, params, target_fname): diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 95bc2b2e4..b21aa97a5 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -6,77 +6,21 @@ """Red Hat Subscription: Register Red Hat Enterprise Linux based system""" import logging -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) -MODULE_DESCRIPTION = """\ -Register a Red Hat system either by username and password *or* activation and -org. Following a successful registration, you can: - - - auto-attach subscriptions - - set the service level - - add subscriptions based on pool id - - enable/disable yum repositories based on repo id - - alter the rhsm_baseurl and server-hostname in ``/etc/rhsm/rhs.conf``. -""" - meta: MetaSchema = { "id": "cc_rh_subscription", - "name": "Red Hat Subscription", - "title": "Register Red Hat Enterprise Linux based system", - "description": MODULE_DESCRIPTION, "distros": ["fedora", "rhel", "openeuler"], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - rh_subscription: - username: joe@foo.bar - ## Quote your password if it has symbols to be safe - password: '1234abcd' - """ - ), - dedent( - """\ - rh_subscription: - activation-key: foobar - org: 12345 - """ - ), - dedent( - """\ - rh_subscription: - activation-key: foobar - org: 12345 - auto-attach: true - service-level: self-support - add-pool: - - 1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a - - 2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b - enable-repo: - - repo-id-to-enable - - other-repo-id-to-enable - disable-repo: - - repo-id-to-disable - - other-repo-id-to-disable - # Alter the baseurl in /etc/rhsm/rhsm.conf - rhsm-baseurl: http://url - # Alter the server hostname in /etc/rhsm/rhsm.conf - server-hostname: foo.bar.com - """ - ), - ], "activate_by_schema_keys": ["rh_subscription"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index a04595bb9..3edf9972b 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -20,31 +20,12 @@ from cloudinit import log, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS, Distro from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -This module configures remote system logging using 
rsyslog. - -Configuration for remote servers can be specified in ``configs``, but for -convenience it can be specified as key value pairs in ``remotes``. - -This module can install rsyslog if not already present on the system using the -``install_rsyslog``, ``packages``, and ``check_exe`` options. Installation -may not work on systems where this module runs before networking is up. - -.. note:: - On BSD cloud-init will attempt to disable and stop the base system syslogd. - This may fail on a first run. - We recommend creating images with ``service syslogd disable``. -""" - meta: MetaSchema = { "id": "cc_rsyslog", - "name": "Rsyslog", - "title": "Configure system logging via rsyslog", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, "examples": [ @@ -86,9 +67,7 @@ ), ], "activate_by_schema_keys": ["rsyslog"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore RSYSLOG_CONFIG = { "config_dir": "/etc/rsyslog.d", diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py index 3ea1e71b4..2212fd36a 100644 --- a/cloudinit/config/cc_runcmd.py +++ b/cloudinit/config/cc_runcmd.py @@ -10,12 +10,11 @@ import logging import os -from textwrap import dedent from cloudinit import util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE @@ -25,55 +24,12 @@ # configuration options before actually attempting to deploy with said # configuration. - -MODULE_DESCRIPTION = """\ -Run arbitrary commands at a rc.local like time-frame with output to the -console. Each item can be either a list or a string. The item type affects -how it is executed: - - -* If the item is a string, it will be interpreted by ``sh``. -* If the item is a list, the items will be executed as if passed to execve(3) - (with the first arg as the command). - -Note that the ``runcmd`` module only writes the script to be run -later. The module that actually runs the script is ``scripts_user`` -in the :ref:`Final` boot stage. - -.. note:: - - all commands must be proper yaml, so you have to quote any characters - yaml would eat (':' can be problematic) - -.. note:: - - when writing files, do not use /tmp dir as it races with - systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead. 
-""" - meta: MetaSchema = { "id": "cc_runcmd", - "name": "Runcmd", - "title": "Run arbitrary commands", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - runcmd: - - [ ls, -l, / ] - - [ sh, -xc, "echo $(date) ': hello world!'" ] - - [ sh, -c, echo "=========hello world'=========" ] - - ls -l /root - - [ wget, "http://example.org", -O, /tmp/index.html ] - """ - ) - ], "activate_by_schema_keys": ["runcmd"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py index 5515a68cc..2bfb94f73 100644 --- a/cloudinit/config/cc_salt_minion.py +++ b/cloudinit/config/cc_salt_minion.py @@ -6,70 +6,21 @@ import logging import os -from textwrap import dedent from cloudinit import safeyaml, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -This module installs, configures and starts salt minion. If the ``salt_minion`` -key is present in the config parts, then salt minion will be installed and -started. Configuration for salt minion can be specified in the ``conf`` key -under ``salt_minion``. Any conf values present there will be assigned in -``/etc/salt/minion``. The public and private keys to use for salt minion can be -specified with ``public_key`` and ``private_key`` respectively. Optionally if -you have a custom package name, service name or config directory you can -specify them with ``pkg_name``, ``service_name`` and ``config_dir``. - -Salt keys can be manually generated by: ``salt-key --gen-keys=GEN_KEYS``, -where ``GEN_KEYS`` is the name of the keypair, e.g. 'minion'. The keypair -will be copied to ``/etc/salt/pki`` on the minion instance. 
-""" - meta: MetaSchema = { "id": "cc_salt_minion", - "name": "Salt Minion", - "title": "Setup and run salt minion", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - salt_minion: - pkg_name: salt-minion - service_name: salt-minion - config_dir: /etc/salt - conf: - file_client: local - fileserver_backend: - - gitfs - gitfs_remotes: - - https://github.com/_user_/_repo_.git - master: salt.example.com - grains: - role: - - web - public_key: | - ------BEGIN PUBLIC KEY------- - - ------END PUBLIC KEY------- - private_key: | - ------BEGIN PRIVATE KEY------ - - ------END PRIVATE KEY------- - pki_dir: /etc/salt/pki/minion - """ - ) - ], "activate_by_schema_keys": ["salt_minion"], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) # Note: see https://docs.saltstack.com/en/latest/topics/installation/ diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py index 73ddc231e..fa78b8a48 100644 --- a/cloudinit/config/cc_scripts_per_boot.py +++ b/cloudinit/config/cc_scripts_per_boot.py @@ -13,30 +13,19 @@ from cloudinit import subp from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -MODULE_DESCRIPTION = """\ -Any scripts in the ``scripts/per-boot`` directory on the datasource will be run -every time the system boots. Scripts will be run in alphabetical order. This -module does not accept any config keys. -""" - meta: MetaSchema = { "id": "cc_scripts_per_boot", - "name": "Scripts Per Boot", - "title": "Run per boot scripts", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": frequency, - "examples": [], "activate_by_schema_keys": [], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) SCRIPT_SUBDIR = "per-boot" diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py index c4e10dc66..c5c52b97c 100644 --- a/cloudinit/config/cc_scripts_per_instance.py +++ b/cloudinit/config/cc_scripts_per_instance.py @@ -13,31 +13,17 @@ from cloudinit import subp from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -Any scripts in the ``scripts/per-instance`` directory on the datasource will -be run when a new instance is first booted. Scripts will be run in alphabetical -order. This module does not accept any config keys. - -Some cloud platforms change instance-id if a significant change was made to -the system. As a result per-instance scripts will run again. 
-""" - meta: MetaSchema = { "id": "cc_scripts_per_instance", - "name": "Scripts Per Instance", - "title": "Run per instance scripts", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [], "activate_by_schema_keys": [], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) SCRIPT_SUBDIR = "per-instance" diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py index 82c6f9c1d..4d9ac583c 100644 --- a/cloudinit/config/cc_scripts_per_once.py +++ b/cloudinit/config/cc_scripts_per_once.py @@ -13,30 +13,17 @@ from cloudinit import subp from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_ONCE -frequency = PER_ONCE -MODULE_DESCRIPTION = """\ -Any scripts in the ``scripts/per-once`` directory on the datasource will be run -only once. Changes to the instance will not force a re-run. The only way to -re-run these scripts is to run the clean subcommand and reboot. Scripts will -be run in alphabetical order. This module does not accept any config keys. -""" - meta: MetaSchema = { "id": "cc_scripts_per_once", - "name": "Scripts Per Once", - "title": "Run one time scripts", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], - "frequency": frequency, - "examples": [], + "frequency": PER_ONCE, "activate_by_schema_keys": [], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) SCRIPT_SUBDIR = "per-once" diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py index 9c39b75b2..eb63f7566 100644 --- a/cloudinit/config/cc_scripts_user.py +++ b/cloudinit/config/cc_scripts_user.py @@ -13,34 +13,19 @@ from cloudinit import subp from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -This module runs all user scripts. User scripts are not specified in the -``scripts`` directory in the datasource, but rather are present in the -``scripts`` dir in the instance configuration. Any cloud-config parts with a -``#!`` will be treated as a script and run. Scripts specified as cloud-config -parts will be run in the order they are specified in the configuration. -This module does not accept any config keys. 
-""" - meta: MetaSchema = { "id": "cc_scripts_user", - "name": "Scripts User", - "title": "Run user scripts", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [], "activate_by_schema_keys": [], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) - SCRIPT_SUBDIR = "scripts" diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py index fb8dddad4..a102c794a 100644 --- a/cloudinit/config/cc_scripts_vendor.py +++ b/cloudinit/config/cc_scripts_vendor.py @@ -7,61 +7,23 @@ import logging import os -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -On select Datasources, vendors have a channel for the consumption -of all supported user data types via a special channel called -vendor data. Any scripts in the ``scripts/vendor`` directory in the datasource -will be run when a new instance is first booted. Scripts will be run in -alphabetical order. This module allows control over the execution of -vendor data. -""" - meta: MetaSchema = { "id": "cc_scripts_vendor", - "name": "Scripts Vendor", - "title": "Run vendor scripts", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - vendor_data: - enabled: true - prefix: /usr/bin/ltrace - """ - ), - dedent( - """\ - vendor_data: - enabled: true - prefix: [timeout, 30] - """ - ), - dedent( - """\ - # Vendor data will not be processed - vendor_data: - enabled: false - """ - ), - ], "activate_by_schema_keys": [], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) - SCRIPT_SUBDIR = "vendor" diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 3e8b8267b..edac6dbb9 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -11,69 +11,22 @@ import base64 import logging from io import BytesIO -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) -MODULE_DESCRIPTION = """\ -All cloud instances started from the same image will produce very similar -data when they are first booted as they are all starting with the same seed -for the kernel's entropy keyring. To avoid this, random seed data can be -provided to the instance either as a string or by specifying a command to run -to generate the data. - -Configuration for this module is under the ``random_seed`` config key. If -the cloud provides its own random seed data, it will be appended to ``data`` -before it is written to ``file``. - -If the ``command`` key is specified, the given command will be executed. This -will happen after ``file`` has been populated. That command's environment will -contain the value of the ``file`` key as ``RANDOM_SEED_FILE``. If a command is -specified that cannot be run, no error will be reported unless -``command_required`` is set to true. 
-""" - meta: MetaSchema = { "id": "cc_seed_random", - "name": "Seed Random", - "title": "Provide random seed data", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - random_seed: - file: /dev/urandom - data: my random string - encoding: raw - command: ['sh', '-c', 'dd if=/dev/urandom of=$RANDOM_SEED_FILE'] - command_required: true - """ - ), - dedent( - """\ - # To use 'pollinate' to gather data from a remote entropy - # server and write it to '/dev/urandom', the following - # could be used: - random_seed: - file: /dev/urandom - command: ["pollinate", "--server=http://local.polinate.server"] - command_required: true - """ - ), - ], "activate_by_schema_keys": [], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def _decode(data, encoding=None): diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index ea898a907..d2c73322f 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -9,74 +9,22 @@ import logging import os -from textwrap import dedent from cloudinit import util from cloudinit.atomic_helper import write_json from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE -frequency = PER_INSTANCE -MODULE_DESCRIPTION = """\ -This module handles setting the system hostname and fully qualified domain -name (FQDN). If ``preserve_hostname`` is set, then the hostname will not be -altered. - -A hostname and FQDN can be provided by specifying a full domain name under the -``FQDN`` key. Alternatively, a hostname can be specified using the ``hostname`` -key, and the FQDN of the cloud will be used. If a FQDN specified with the -``hostname`` key, it will be handled properly, although it is better to use -the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, -the ``prefer_fqdn_over_hostname`` will force the use of FQDN in all distros -when true, and when false it will force the short hostname. Otherwise, the -hostname to use is distro-dependent. - -.. note:: - We strip a trailing . from the FQDN, if it is present. This causes problems - with a lot of tools, if it is left in place. - -This module will run in the init-local stage before networking is configured -if the hostname is set by metadata or user data on the local system. - -This will occur on datasources like nocloud and ovf where metadata and user -data are available locally. This ensures that the desired hostname is applied -before any DHCP requests are performed on these platforms where dynamic DNS is -based on initial hostname. 
-""" - meta: MetaSchema = { "id": "cc_set_hostname", - "name": "Set Hostname", - "title": "Set hostname and FQDN", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], - "frequency": frequency, - "examples": [ - "preserve_hostname: true", - dedent( - """\ - hostname: myhost - create_hostname_file: true - fqdn: myhost.example.com - prefer_fqdn_over_hostname: true - """ - ), - dedent( - """\ - # On a machine without an ``/etc/hostname`` file, don't create it - # In most clouds, this will result in a DHCP-configured hostname - # provided by the cloud - create_hostname_file: false - """ - ), - ], + "frequency": PER_INSTANCE, "activate_by_schema_keys": [], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 24d8267ad..21408105c 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -10,82 +10,22 @@ import logging import re from string import ascii_letters, digits -from textwrap import dedent from typing import List from cloudinit import features, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS, Distro, ug_util from cloudinit.settings import PER_INSTANCE from cloudinit.ssh_util import update_ssh_config -MODULE_DESCRIPTION = """\ -This module consumes three top-level config keys: ``ssh_pwauth``, ``chpasswd`` -and ``password``. - -The ``ssh_pwauth`` config key determines whether or not sshd will be configured -to accept password authentication. - -The ``chpasswd`` config key accepts a dictionary containing either or both of -``users`` and ``expire``. The ``users`` key is used to assign a password to a -corresponding pre-existing user. The ``expire`` key is used to set -whether to expire all user passwords specified by this module, -such that a password will need to be reset on the user's next login. - -.. note:: - Prior to cloud-init 22.3, the ``expire`` key only applies to plain text - (including ``RANDOM``) passwords. Post 22.3, the ``expire`` key applies to - both plain text and hashed passwords. - -``password`` config key is used to set the default user's password. It is -ignored if the ``chpasswd`` ``users`` is used. Note: the ``list`` keyword is -deprecated in favor of ``users``. 
-""" - meta: MetaSchema = { "id": "cc_set_passwords", - "name": "Set Passwords", - "title": "Set user passwords and enable/disable SSH password auth", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - # Set a default password that would need to be changed - # at first login - ssh_pwauth: true - password: password1 - """ - ), - dedent( - """\ - # Disable ssh password authentication - # Don't require users to change their passwords on next login - # Set the password for user1 to be 'password1' (OS does hashing) - # Set the password for user2 to a pre-hashed password - # Set the password for user3 to be a randomly generated password, - # which will be written to the system console - ssh_pwauth: false - chpasswd: - expire: false - users: - - name: user1 - password: password1 - type: text - - name: user2 - password: $6$rounds=4096$5DJ8a9WMTEzIo5J4$Yms6imfeBvf3Yfu84mQBerh18l7OR1Wm1BJXZqFSpJ6BVas0AYJqIjP7czkOaAZHZi1kxQ5Y1IhgWN8K9NgxR1 - - name: user3 - type: RANDOM - """ # noqa - ), - ], "activate_by_schema_keys": [], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py index bfeeecf02..13f566fca 100644 --- a/cloudinit/config/cc_snap.py +++ b/cloudinit/config/cc_snap.py @@ -6,117 +6,28 @@ import logging import os -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE from cloudinit.subp import prepend_base_command -distros = ["ubuntu"] -frequency = PER_INSTANCE - LOG = logging.getLogger(__name__) meta: MetaSchema = { "id": "cc_snap", - "name": "Snap", - "title": "Install, configure and manage snapd and snap packages", - "description": dedent( - """\ - This module provides a simple configuration namespace in cloud-init to - both setup snapd and install snaps. - - .. note:: - Both ``assertions`` and ``commands`` values can be either a - dictionary or a list. If these configs are provided as a - dictionary, the keys are only used to order the execution of the - assertions or commands and the dictionary is merged with any - vendor-data snap configuration provided. If a list is provided by - the user instead of a dict, any vendor-data snap configuration is - ignored. - - The ``assertions`` configuration option is a dictionary or list of - properly-signed snap assertions which will run before any snap - ``commands``. They will be added to snapd's assertion database by - invoking ``snap ack ``. - - Snap ``commands`` is a dictionary or list of individual snap - commands to run on the target system. These commands can be used to - create snap users, install snaps and provide snap configuration. - - .. note:: - If 'side-loading' private/unpublished snaps on an instance, it is - best to create a snap seed directory and seed.yaml manifest in - **/var/lib/snapd/seed/** which snapd automatically installs on - startup. 
- """ - ), - "distros": distros, - "examples": [ - dedent( - """\ - snap: - assertions: - 00: | - signed_assertion_blob_here - 02: | - signed_assertion_blob_here - commands: - 00: snap create-user --sudoer --known @mydomain.com - 01: snap install canonical-livepatch - 02: canonical-livepatch enable - """ - ), - dedent( - """\ - # Convenience: the snap command can be omitted when specifying commands - # as a list and 'snap' will automatically be prepended. - # The following commands are equivalent: - snap: - commands: - 00: ['install', 'vlc'] - 01: ['snap', 'install', 'vlc'] - 02: snap install vlc - 03: 'snap install vlc' - """ - ), - dedent( - """\ - # You can use a list of commands - snap: - commands: - - ['install', 'vlc'] - - ['snap', 'install', 'vlc'] - - snap install vlc - - 'snap install vlc' - """ - ), - dedent( - """\ - # You can use a list of assertions - snap: - assertions: - - signed_assertion_blob_here - - | - signed_assertion_blob_here - """ - ), - ], + "distros": ["ubuntu"], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["snap"], -} - - -__doc__ = get_meta_doc(meta) +} # type: ignore SNAP_CMD = "snap" def add_assertions(assertions, assertions_file): - """Import list of assertions. + r"""Import list of assertions. Import assertions by concatenating each assertion into a string separated by a '\n'. Write this string to a instance file and diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py index 08514f252..6b364aa93 100644 --- a/cloudinit/config/cc_spacewalk.py +++ b/cloudinit/config/cc_spacewalk.py @@ -2,48 +2,22 @@ """Spacewalk: Install and configure spacewalk""" import logging -from textwrap import dedent from cloudinit import subp from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -This module installs spacewalk and applies basic configuration. If the -``spacewalk`` config key is present spacewalk will be installed. The server to -connect to after installation must be provided in the ``server`` in spacewalk -configuration. A proxy to connect through and a activation key may optionally -be specified. 
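# The add_assertions hunk above turns its docstring into a raw string
# because the text mentions a literal '\n'. A minimal self-contained
# illustration of the difference:
def plain():
    """separated by a '\n'."""


def raw():
    r"""separated by a '\n'."""


assert "\n" in plain.__doc__  # a real newline character
assert "\\n" in raw.__doc__  # a backslash followed by the letter n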
- -For more information about spacewalk see: https://fedorahosted.org/spacewalk/ -""" - meta: MetaSchema = { "id": "cc_spacewalk", - "name": "Spacewalk", - "title": "Install and configure spacewalk", - "description": MODULE_DESCRIPTION, "distros": ["rhel", "fedora", "openeuler"], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - spacewalk: - server: - proxy: - activation_key: - """ - ) - ], "activate_by_schema_keys": ["spacewalk"], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) - distros = ["redhat", "fedora"] required_packages = ["rhn-setup"] def_ca_cert_path = "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT" diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index f69e49c16..00687cf86 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -12,150 +12,27 @@ import os import re import sys -from textwrap import dedent from typing import List, Optional, Sequence from cloudinit import ssh_util, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS, ug_util from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -This module handles most configuration for SSH and both host and authorized SSH -keys. - -**Authorized keys** - -Authorized keys are a list of public SSH keys that are allowed to connect to -a user account on a system. They are stored in `.ssh/authorized_keys` in that -account's home directory. Authorized keys for the default user defined in -``users`` can be specified using ``ssh_authorized_keys``. Keys -should be specified as a list of public keys. - -.. note:: - See the ``cc_set_passwords`` module documentation to enable/disable SSH - password authentication. - -Root login can be enabled/disabled using the ``disable_root`` config key. Root -login options can be manually specified with ``disable_root_opts``. - -Supported public key types for the ``ssh_authorized_keys`` are: - - - rsa - - ecdsa - - ed25519 - - ecdsa-sha2-nistp256-cert-v01@openssh.com - - ecdsa-sha2-nistp256 - - ecdsa-sha2-nistp384-cert-v01@openssh.com - - ecdsa-sha2-nistp384 - - ecdsa-sha2-nistp521-cert-v01@openssh.com - - ecdsa-sha2-nistp521 - - sk-ecdsa-sha2-nistp256-cert-v01@openssh.com - - sk-ecdsa-sha2-nistp256@openssh.com - - sk-ssh-ed25519-cert-v01@openssh.com - - sk-ssh-ed25519@openssh.com - - ssh-ed25519-cert-v01@openssh.com - - ssh-ed25519 - - ssh-rsa-cert-v01@openssh.com - - ssh-rsa - - ssh-xmss-cert-v01@openssh.com - - ssh-xmss@openssh.com - -.. note:: - this list has been filtered out from the supported keytypes of - `OpenSSH`_ source, where the sigonly keys are removed. Please see - ``ssh_util`` for more information. - - ``rsa``, ``ecdsa`` and ``ed25519`` are added for legacy, - as they are valid public keys in some old distros. They can possibly - be removed in the future when support for the older distros are dropped - -.. _OpenSSH: https://github.com/openssh/openssh-portable/blob/master/sshkey.c - -**Host keys** - -Host keys are for authenticating a specific instance. Many images have default -host SSH keys, which can be removed using ``ssh_deletekeys``. - -Host keys can be added using the ``ssh_keys`` configuration key. - -When host keys are generated the output of the ssh-keygen command(s) can be -displayed on the console using the ``ssh_quiet_keygen`` configuration key. - -.. 
note::
-    When specifying private host keys in cloud-config, care should be taken to
-    ensure that the communication between the data source and the instance is
-    secure.
-
-
-If no host keys are specified using ``ssh_keys``, then keys will be generated
-using ``ssh-keygen``. By default one public/private pair of each supported
-host key type will be generated. The key types to generate can be specified
-using the ``ssh_genkeytypes`` config flag, which accepts a list of host key
-types to use. For each host key type for which this module has been instructed
-to create a keypair, if a key of the same type is already present on the
-system (i.e. if ``ssh_deletekeys`` was false), no key will be generated.
-
-Supported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes``
-config flags are:
-
-    - ecdsa
-    - ed25519
-    - rsa
-
-Unsupported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes``
-config flags are:
-
-    - ecdsa-sk
-    - ed25519-sk
-"""
-
 # Note: We do not support *-sk key types because:
 # 1) In the autogeneration case user interaction with the device is needed
 #    which does not fit with a cloud-context.
 # 2) This type of keys are user-based, not hostkeys.
-
 meta: MetaSchema = {
     "id": "cc_ssh",
-    "name": "SSH",
-    "title": "Configure SSH and SSH keys",
-    "description": MODULE_DESCRIPTION,
     "distros": [ALL_DISTROS],
     "frequency": PER_INSTANCE,
-    "examples": [
-        dedent(
-            """\
-            ssh_keys:
-              rsa_private: |
-                -----BEGIN RSA PRIVATE KEY-----
-                MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
-                ...
-                -----END RSA PRIVATE KEY-----
-              rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
-              rsa_certificate: |
-                ssh-rsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...
-            ssh_authorized_keys:
-              - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ...
-              - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...
-            ssh_deletekeys: true
-            ssh_genkeytypes: [rsa, ecdsa, ed25519]
-            disable_root: true
-            disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding
-            allow_public_ssh_keys: true
-            ssh_quiet_keygen: true
-            ssh_publish_hostkeys:
-              enabled: true
-              blacklist: [rsa]
-            """  # noqa: E501
-        )
-    ],
     "activate_by_schema_keys": [],
-}
+} # type:ignore
 
-__doc__ = get_meta_doc(meta)
 LOG = logging.getLogger(__name__)
 
 GENERATE_KEY_NAMES = ["rsa", "ecdsa", "ed25519"]
@@ -184,6 +61,42 @@
 KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
 
 
+def set_redhat_keyfile_perms(keyfile: str) -> None:
+    """
+    For fedora 37, centos 9 stream and below:
+    - sshd version is earlier than version 9.
+    - 'ssh_keys' group is present and owns the private keys.
+    - private keys have permission 0o640.
+    For fedora 38, centos 10 stream and above:
+    - sshd version is at least version 9.
+    - 'ssh_keys' group is absent. 'root' group owns the keys.
+    - private keys have permission 0o600, same as upstream.
+    Public keys in all cases have permission 0o644.
+    """
+    permissions_public = 0o644
+    ssh_version = ssh_util.get_opensshd_upstream_version()
+    if ssh_version and ssh_version < util.Version(9, 0):
+        # fedora 37, centos 9 stream and below have sshd
+        # versions less than 9, and private key permissions are
+        # set to 0o640 from sshd-keygen.
+        # See the "sanitize permissions" section in sshd-keygen.
+        permissions_private = 0o640
+    else:
+        # fedora 38, centos 10 stream and above. sshd-keygen sets
+        # private key permissions to 0o600.
+        permissions_private = 0o600
+
+    gid = util.get_group_id("ssh_keys")
+    if gid != -1:
+        # The 'ssh_keys' group exists for fedora 37, centos 9 stream
+        # and below. On these distros, the 'ssh_keys' group owns the
+        # private keys. When the 'ssh_keys' group is absent on newer
+        # distros, the 'root' group owns the private keys, which is
+        # the default.
+        os.chown(keyfile, -1, gid)
+    os.chmod(keyfile, permissions_private)
+    os.chmod(f"{keyfile}.pub", permissions_public)
+
+
 def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:

     # remove the static keys from the pristine image
@@ -280,16 +193,8 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
                 ):
                     sys.stdout.write(util.decode_binary(out))

-                gid = util.get_group_id("ssh_keys")
-                if gid != -1:
-                    # perform same "sanitize permissions" as sshd-keygen
-                    permissions_private = 0o600
-                    ssh_version = ssh_util.get_opensshd_upstream_version()
-                    if ssh_version and ssh_version < util.Version(9, 0):
-                        permissions_private = 0o640
-                    os.chown(keyfile, -1, gid)
-                    os.chmod(keyfile, permissions_private)
-                    os.chmod(f"{keyfile}.pub", 0o644)
+                if cloud.distro.osfamily == "redhat":
+                    set_redhat_keyfile_perms(keyfile)
             except subp.ProcessExecutionError as e:
                 err = util.decode_binary(e.stderr).lower()
                 if e.exit_code == 1 and err.lower().startswith(
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 31c2717f9..106b3cbd0 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -12,32 +12,18 @@
 from cloudinit import ssh_util, util
 from cloudinit.cloud import Cloud
 from cloudinit.config import Config
-from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.config.schema import MetaSchema
 from cloudinit.distros import ALL_DISTROS, ug_util
 from cloudinit.settings import PER_INSTANCE
 from cloudinit.simpletable import SimpleTable

-MODULE_DESCRIPTION = """\
-Write fingerprints of authorized keys for each user to log. This is enabled by
-default, but can be disabled using ``no_ssh_fingerprints``. The hash type for
-the keys can be specified, but defaults to ``sha256``.
-"""
-
 meta: MetaSchema = {
     "id": "cc_ssh_authkey_fingerprints",
-    "name": "SSH AuthKey Fingerprints",
-    "title": "Log fingerprints of user SSH keys",
-    "description": MODULE_DESCRIPTION,
     "distros": [ALL_DISTROS],
     "frequency": PER_INSTANCE,
-    "examples": [
-        "no_ssh_fingerprints: true",
-        "authkey_hash: sha512",
-    ],
     "activate_by_schema_keys": [],
-}
+} # type: ignore

-__doc__ = get_meta_doc(meta)
 LOG = logging.getLogger(__name__)

diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index a4ca1b981..8abf3914f 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -9,47 +9,25 @@
 import logging
 import pwd
-from textwrap import dedent

 from cloudinit import subp, util
 from cloudinit.cloud import Cloud
 from cloudinit.config import Config
-from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.config.schema import MetaSchema
 from cloudinit.distros import ug_util
 from cloudinit.settings import PER_INSTANCE

 # https://launchpad.net/ssh-import-id
-distros = ["alpine", "cos", "debian", "ubuntu"]
 SSH_IMPORT_ID_BINARY = "ssh-import-id"
-MODULE_DESCRIPTION = """\
-This module imports SSH keys from either a public keyserver, usually launchpad
-or github using ``ssh-import-id``. Keys are referenced by the username they are
-associated with on the keyserver.
The keyserver can be specified by prepending -either ``lp:`` for launchpad or ``gh:`` for github to the username. -""" meta: MetaSchema = { "id": "cc_ssh_import_id", - "name": "SSH Import ID", - "title": "Import SSH id", - "description": MODULE_DESCRIPTION, - "distros": distros, + "distros": ["alpine", "cos", "debian", "ubuntu"], "frequency": PER_INSTANCE, - "examples": [ - dedent( - """\ - ssh_import_id: - - user - - gh:user - - lp:user - """ - ) - ], "activate_by_schema_keys": [], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py index 5d3c04d30..db9d168b1 100644 --- a/cloudinit/config/cc_timezone.py +++ b/cloudinit/config/cc_timezone.py @@ -12,29 +12,20 @@ from cloudinit import util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -Sets the system `timezone `_ based on the -value provided. -""" - meta: MetaSchema = { "id": "cc_timezone", - "name": "Timezone", - "title": "Set the system timezone", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, "examples": [ "timezone: US/Eastern", ], "activate_by_schema_keys": ["timezone"], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_ubuntu_autoinstall.py b/cloudinit/config/cc_ubuntu_autoinstall.py index e9d241075..218f48760 100644 --- a/cloudinit/config/cc_ubuntu_autoinstall.py +++ b/cloudinit/config/cc_ubuntu_autoinstall.py @@ -6,6 +6,7 @@ import re from textwrap import dedent +from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import ( @@ -15,7 +16,6 @@ get_meta_doc, ) from cloudinit.settings import PER_ONCE -from cloudinit.subp import subp LOG = logging.getLogger(__name__) @@ -83,7 +83,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: ) return - snap_list, _ = subp(["snap", "list"]) + snap_list, _ = subp.subp(["snap", "list"]) installer_present = None for snap_name in LIVE_INSTALLER_SNAPS: if re.search(snap_name, snap_list): diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py index faa82a4d7..b4c5e4e1a 100644 --- a/cloudinit/config/cc_ubuntu_drivers.py +++ b/cloudinit/config/cc_ubuntu_drivers.py @@ -4,7 +4,6 @@ import logging import os -from textwrap import dedent from cloudinit.cloud import Cloud from cloudinit.distros import Distro @@ -20,37 +19,17 @@ from cloudinit import subp, temp_utils, type_utils, util from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) -distros = ["ubuntu"] - meta: MetaSchema = { "id": "cc_ubuntu_drivers", - "name": "Ubuntu Drivers", - "title": "Interact with third party drivers in Ubuntu.", - "description": dedent( - """\ - This module interacts with the 'ubuntu-drivers' command to install - third party driver packages.""" - ), - "distros": distros, - "examples": [ - dedent( - """\ - drivers: - nvidia: - license-accepted: true - """ - ) - ], + "distros": ["ubuntu"], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["drivers"], -} - -__doc__ = 
get_meta_doc(meta) +} # type: ignore OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = ( "ubuntu-drivers: error: argument : invalid choice: 'install'" diff --git a/cloudinit/config/cc_ubuntu_pro.py b/cloudinit/config/cc_ubuntu_pro.py index 36bceb9de..a82c1b8d5 100644 --- a/cloudinit/config/cc_ubuntu_pro.py +++ b/cloudinit/config/cc_ubuntu_pro.py @@ -5,134 +5,24 @@ import json import logging import re -from textwrap import dedent from typing import Any, List from urllib.parse import urlparse from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE PRO_URL = "https://ubuntu.com/pro" - -distros = ["ubuntu"] - DEPRECATED_KEYS = set(["ubuntu-advantage", "ubuntu_advantage"]) meta: MetaSchema = { "id": "cc_ubuntu_pro", - "name": "Ubuntu Pro", - "title": "Configure Ubuntu Pro support services", - "description": dedent( - """\ - Attach machine to an existing Ubuntu Pro support contract and - enable or disable support services such as Livepatch, ESM, - FIPS and FIPS Updates. When attaching a machine to Ubuntu Pro, - one can also specify services to enable. When the 'enable' - list is present, only named services will be activated. Whereas - if the 'enable' list is not present, the contract's default - services will be enabled. - - On Pro instances, when ``ubuntu_pro`` config is provided to - cloud-init, Pro's auto-attach feature will be disabled and cloud-init - will perform the Pro auto-attach ignoring the ``token`` key. - The ``enable`` and ``enable_beta`` values will strictly determine what - services will be enabled, ignoring contract defaults. - - Note that when enabling FIPS or FIPS updates you will need to schedule - a reboot to ensure the machine is running the FIPS-compliant kernel. - See `Power State Change`_ for information on how to configure - cloud-init to perform this reboot. - """ - ), - "distros": distros, - "examples": [ - dedent( - """\ - # Attach the machine to an Ubuntu Pro support contract with a - # Pro contract token obtained from %s. - ubuntu_pro: - token: - """ - % PRO_URL - ), - dedent( - """\ - # Attach the machine to an Ubuntu Pro support contract enabling - # only fips and esm services. Services will only be enabled if - # the environment supports said service. Otherwise warnings will - # be logged for incompatible services specified. - ubuntu_pro: - token: - enable: - - fips - - esm - """ - ), - dedent( - """\ - # Attach the machine to an Ubuntu Pro support contract and enable - # the FIPS service. Perform a reboot once cloud-init has - # completed. - power_state: - mode: reboot - ubuntu_pro: - token: - enable: - - fips - """ - ), - dedent( - """\ - # Set a http(s) proxy before attaching the machine to an - # Ubuntu Pro support contract and enabling the FIPS service. - ubuntu_pro: - token: - config: - http_proxy: 'http://some-proxy:8088' - https_proxy: 'https://some-proxy:8088' - global_apt_https_proxy: 'https://some-global-apt-proxy:8088/' - global_apt_http_proxy: 'http://some-global-apt-proxy:8088/' - ua_apt_http_proxy: 'http://10.0.10.10:3128' - ua_apt_https_proxy: 'https://10.0.10.10:3128' - enable: - - fips - """ - ), - dedent( - """\ - # On Ubuntu PRO instances, auto-attach but enable no PRO services. - ubuntu_pro: - enable: [] - enable_beta: [] - """ - ), - dedent( - """\ - # Enable esm and beta realtime-kernel services in Ubuntu Pro instances. 
- ubuntu_pro: - enable: - - esm - enable_beta: - - realtime-kernel - """ - ), - dedent( - """\ - # Disable auto-attach in Ubuntu Pro instances. - ubuntu_pro: - features: - disable_auto_attach: True - """ - ), - ], + "distros": ["ubuntu"], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["ubuntu_pro"] + list(DEPRECATED_KEYS), -} - -__doc__ = get_meta_doc(meta) +} # type: ignore LOG = logging.getLogger(__name__) REDACTED = "REDACTED" @@ -261,7 +151,7 @@ def set_pro_config(pro_config: Any = None): def configure_pro(token, enable=None): - """Call ua commandline client to attach and/or enable services.""" + """Call ua command line client to attach and/or enable services.""" if enable is None: enable = [] elif isinstance(enable, str): diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 7ac0d955e..45bb2df7d 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -9,92 +9,20 @@ """Update Etc Hosts: Update the hosts file (usually ``/etc/hosts``)""" import logging -from textwrap import dedent from cloudinit import templater, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_ALWAYS -MODULE_DESCRIPTION = """\ -This module will update the contents of the local hosts database (hosts file; -usually ``/etc/hosts``) based on the hostname/fqdn specified in config. -Management of the hosts file is controlled using ``manage_etc_hosts``. If this -is set to false, cloud-init will not manage the hosts file at all. This is the -default behavior. - -If set to ``true``, cloud-init will generate the hosts file -using the template located in ``/etc/cloud/templates/hosts.tmpl``. In the -``/etc/cloud/templates/hosts.tmpl`` template, the strings ``$hostname`` and -``$fqdn`` will be replaced with the hostname and fqdn respectively. - -If ``manage_etc_hosts`` is set to ``localhost``, then cloud-init will not -rewrite the hosts file entirely, but rather will ensure that a entry for the -fqdn with a distribution dependent ip is present (i.e. ``ping `` will -ping ``127.0.0.1`` or ``127.0.1.1`` or other ip). - -.. note:: - if ``manage_etc_hosts`` is set ``true``, the contents - of the hosts file will be updated every boot. To make any changes to - the hosts file persistent they must be made in - ``/etc/cloud/templates/hosts.tmpl`` - -.. note:: - for instructions on specifying hostname and fqdn, see documentation for - ``cc_set_hostname`` -""" - -distros = ["all"] - meta: MetaSchema = { "id": "cc_update_etc_hosts", - "name": "Update Etc Hosts", - "title": "Update the hosts file (usually ``/etc/hosts``)", - "description": MODULE_DESCRIPTION, - "distros": distros, - "examples": [ - dedent( - """\ - # Do not update or manage /etc/hosts at all. This is the default behavior. - # - # Whatever is present at instance boot time will be present after boot. - # User changes will not be overwritten. - manage_etc_hosts: false - """ - ), - dedent( - """\ - # Manage /etc/hosts with cloud-init. - # On every boot, /etc/hosts will be re-written from - # ``/etc/cloud/templates/hosts.tmpl``. - # - # The strings '$hostname' and '$fqdn' are replaced in the template - # with the appropriate values either from the config-config ``fqdn`` or - # ``hostname`` if provided. When absent, the cloud metadata will be - # checked for ``local-hostname` which can be split into .. 
- # - # To make modifications persistent across a reboot, you must modify - # ``/etc/cloud/templates/hosts.tmpl``. - manage_etc_hosts: true - """ - ), - dedent( - """\ - # Update /etc/hosts every boot providing a "localhost" 127.0.1.1 entry - # with the latest hostname and fqdn as provided by either IMDS or - # cloud-config. - # All other entries will be left as is. - # 'ping `hostname`' will ping 127.0.1.1 - manage_etc_hosts: localhost - """ - ), - ], + "distros": ["all"], "frequency": PER_ALWAYS, "activate_by_schema_keys": ["manage_etc_hosts"], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index aaeb978c1..eaba9f334 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -10,85 +10,20 @@ import logging import os -from textwrap import dedent from cloudinit import util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_ALWAYS -MODULE_DESCRIPTION = """\ -This module will update the system hostname and fqdn. If ``preserve_hostname`` -is set ``true``, then the hostname will not be altered. - -.. note:: - for instructions on specifying hostname and fqdn, see documentation for - ``cc_set_hostname`` -""" - -distros = ["all"] - meta: MetaSchema = { "id": "cc_update_hostname", - "name": "Update Hostname", - "title": "Update hostname and fqdn", - "description": MODULE_DESCRIPTION, - "distros": distros, - "examples": [ - dedent( - """\ - # By default: when ``preserve_hostname`` is not specified cloud-init - # updates ``/etc/hostname`` per-boot based on the cloud provided - # ``local-hostname`` setting. If you manually change ``/etc/hostname`` - # after boot cloud-init will no longer modify it. - # - # This default cloud-init behavior is equivalent to this cloud-config: - preserve_hostname: false - """ - ), - dedent( - """\ - # Prevent cloud-init from updating the system hostname. - preserve_hostname: true - """ - ), - dedent( - """\ - # Prevent cloud-init from updating ``/etc/hostname`` - preserve_hostname: true - """ - ), - dedent( - """\ - # Set hostname to "external.fqdn.me" instead of "myhost" - fqdn: external.fqdn.me - hostname: myhost - prefer_fqdn_over_hostname: true - create_hostname_file: true - """ - ), - dedent( - """\ - # Set hostname to "external" instead of "external.fqdn.me" when - # cloud metadata provides the ``local-hostname``: "external.fqdn.me". - prefer_fqdn_over_hostname: false - """ - ), - dedent( - """\ - # On a machine without an ``/etc/hostname`` file, don't create it - # In most clouds, this will result in a DHCP-configured hostname - # provided by the cloud - create_hostname_file: false - """ - ), - ], + "distros": ["all"], "frequency": PER_ALWAYS, "activate_by_schema_keys": [], -} +} # type: ignore -__doc__ = get_meta_doc(meta) LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index 0f3791136..ace17733c 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -4,10 +4,9 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
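Across these module diffs the pattern repeats: each ``meta`` dict keeps only the runtime keys (``id``, ``distros``, ``frequency``, ``activate_by_schema_keys``) while the ``MetaSchema`` TypedDict still declares the documentation keys as required, which is why every trimmed dict now ends in ``# type: ignore``. A minimal sketch of the pattern, with field types abridged from the ``MetaSchema`` definition in ``cloudinit/config/schema.py`` and a hypothetical ``cc_example`` module:

    from typing import List

    from typing_extensions import NotRequired, TypedDict


    class MetaSchema(TypedDict):
        id: str
        name: str
        title: str
        description: str
        distros: List[str]
        examples: List[str]
        frequency: str
        activate_by_schema_keys: NotRequired[List[str]]


    # Omitting required TypedDict keys fails mypy, so the slimmed-down
    # dicts silence the checker instead of keeping doc-only fields:
    meta: MetaSchema = {
        "id": "cc_example",
        "distros": ["all"],
        "frequency": "once-per-instance",
    }  # type: ignore

Here ``"once-per-instance"`` stands in for the ``PER_INSTANCE`` constant that the real modules import from ``cloudinit.settings``.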
-"Users and Groups: Configure users and groups" +"""Users and Groups: Configure users and groups""" import logging -from textwrap import dedent from cloudinit.cloud import Cloud @@ -15,175 +14,16 @@ # since the module attribute 'distros' # is a list of distros that are supported, not a sub-module from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.distros import ug_util from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -This module configures users and groups. For more detailed information on user -options, see the :ref:`Including users and groups` config -example. - -Groups to add to the system can be specified under the ``groups`` key as -a string of comma-separated groups to create, or a list. Each item in -the list should either contain a string of a single group to create, -or a dictionary with the group name as the key and string of a single user as -a member of that group or a list of users who should be members of the group. - -.. note:: - Groups are added before users, so any users in a group list must - already exist on the system. - -Users to add can be specified as a string or list under the ``users`` key. -Each entry in the list should either be a string or a dictionary. If a string -is specified, that string can be comma-separated usernames to create or the -reserved string ``default`` which represents the primary admin user used to -access the system. The ``default`` user varies per distribution and is -generally configured in ``/etc/cloud/cloud.cfg`` by the ``default_user`` key. - -Each ``users`` dictionary item must contain either a ``name`` or ``snapuser`` -key, otherwise it will be ignored. Omission of ``default`` as the first item -in the ``users`` list skips creation the default user. If no ``users`` key is -provided the default behavior is to create the default user via this config:: - - users: - - default - -.. note:: - Specifying a hash of a user's password with ``passwd`` is a security risk - if the cloud-config can be intercepted. SSH authentication is preferred. - -.. note:: - If specifying a doas rule for a user, ensure that the syntax for the rule - is valid, as the only checking performed by cloud-init is to ensure that - the user referenced in the rule is the correct user. - -.. note:: - If specifying a sudo rule for a user, ensure that the syntax for the rule - is valid, as it is not checked by cloud-init. - -.. note:: - Most of these configuration options will not be honored if the user - already exists. The following options are the exceptions; they are applied - to already-existing users: ``plain_text_passwd``, ``doas``, - ``hashed_passwd``, ``lock_passwd``, ``sudo``, ``ssh_authorized_keys``, - ``ssh_redirect_user``. - -The ``user`` key can be used to override the ``default_user`` configuration -defined in ``/etc/cloud/cloud.cfg``. The ``user`` value should be a dictionary -which supports the same config keys as the ``users`` dictionary items. -""" - meta: MetaSchema = { "id": "cc_users_groups", - "name": "Users and Groups", - "title": "Configure users and groups", - "description": MODULE_DESCRIPTION, "distros": ["all"], - "examples": [ - dedent( - """\ - # Add the ``default_user`` from /etc/cloud/cloud.cfg. - # This is also the default behavior of cloud-init when no `users` key - # is provided. 
- users: - - default - """ - ), - dedent( - """\ - # Add the 'admingroup' with members 'root' and 'sys' and an empty - # group cloud-users. - groups: - - admingroup: [root,sys] - - cloud-users - """ - ), - dedent( - """\ - # Skip creation of the user and only create newsuper. - # Password-based login is rejected, but the github user TheRealFalcon - # and the launchpad user falcojr can SSH as newsuper. The default - # shell for newsuper is bash instead of system default. - users: - - name: newsuper - gecos: Big Stuff - groups: users, admin - sudo: ALL=(ALL) NOPASSWD:ALL - shell: /bin/bash - lock_passwd: true - ssh_import_id: - - lp:falcojr - - gh:TheRealFalcon - """ - ), - dedent( - """\ - # Skip creation of the user and only create newsuper. - # Password-based login is rejected, but the github user TheRealFalcon - # and the launchpad user falcojr can SSH as newsuper. doas/opendoas - # is configured to permit this user to run commands as other users - # (without being prompted for a password) except not as root. - users: - - name: newsuper - gecos: Big Stuff - groups: users, admin - doas: - - permit nopass newsuper - - deny newsuper as root - lock_passwd: true - ssh_import_id: - - lp:falcojr - - gh:TheRealFalcon - """ - ), - dedent( - """\ - # On a system with SELinux enabled, add youruser and set the - # SELinux user to 'staff_u'. When omitted on SELinux, the system will - # select the configured default SELinux user. - users: - - default - - name: youruser - selinux_user: staff_u - """ - ), - dedent( - """\ - # To redirect a legacy username to the user for a - # distribution, ssh_redirect_user will accept an SSH connection and - # emit a message telling the client to ssh as the user. - # SSH clients will get the message: - users: - - default - - name: nosshlogins - ssh_redirect_user: true - """ - ), - dedent( - """\ - # Override any ``default_user`` config in /etc/cloud/cloud.cfg with - # supplemental config options. - # This config will make the default user to mynewdefault and change - # the user to not have sudo rights. - ssh_import_id: [chad.smith] - user: - name: mynewdefault - sudo: null - """ - ), - dedent( - """\ - # Avoid creating any ``default_user``. - users: [] - """ - ), - ], "frequency": PER_INSTANCE, "activate_by_schema_keys": [], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_wireguard.py b/cloudinit/config/cc_wireguard.py index e78c3e777..0cacd1338 100644 --- a/cloudinit/config/cc_wireguard.py +++ b/cloudinit/config/cc_wireguard.py @@ -5,93 +5,19 @@ import logging import re -from textwrap import dedent from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = dedent( - """\ -Wireguard module provides a dynamic interface for configuring -Wireguard (as a peer or server) in an easy way. - -This module takes care of: - - writing interface configuration files - - enabling and starting interfaces - - installing wireguard-tools package - - loading wireguard kernel module - - executing readiness probes - -What's a readiness probe?\n -The idea behind readiness probes is to ensure Wireguard connectivity -before continuing the cloud-init process. This could be useful if you -need access to specific services like an internal APT Repository Server -(e.g Landscape) to install/update packages. 
- -Example:\n -An edge device can't access the internet but uses cloud-init modules which -will install packages (e.g landscape, packages, ubuntu_advantage). Those -modules will fail due to missing internet connection. The "wireguard" module -fixes that problem as it waits until all readinessprobes (which can be -arbitrary commands - e.g. checking if a proxy server is reachable over -Wireguard network) are finished before continuing the cloud-init -"config" stage. - -.. note:: - In order to use DNS with Wireguard you have to install ``resolvconf`` - package or symlink it to systemd's ``resolvectl``, otherwise ``wg-quick`` - commands will throw an error message that executable ``resolvconf`` is - missing which leads wireguard module to fail. -""" -) - meta: MetaSchema = { "id": "cc_wireguard", - "name": "Wireguard", - "title": "Module to configure Wireguard tunnel", - "description": MODULE_DESCRIPTION, "distros": ["ubuntu"], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["wireguard"], - "examples": [ - dedent( - """\ - # Configure one or more WG interfaces and provide optional readinessprobes - wireguard: - interfaces: - - name: wg0 - config_path: /etc/wireguard/wg0.conf - content: | - [Interface] - PrivateKey = - Address =
- [Peer] - PublicKey = - Endpoint = : - AllowedIPs = , , ... - - name: wg1 - config_path: /etc/wireguard/wg1.conf - content: | - [Interface] - PrivateKey = - Address =
- [Peer] - PublicKey = - Endpoint = : - AllowedIPs = - readinessprobe: - - 'systemctl restart service' - - 'curl https://webhook.endpoint/example' - - 'nc -zv some-service-fqdn 443' - """ - ), - ], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index bcd51c62d..c97d1225a 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -9,12 +9,11 @@ import base64 import logging import os -from textwrap import dedent from cloudinit import util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE DEFAULT_PERMS = 0o644 @@ -25,97 +24,10 @@ meta: MetaSchema = { "id": "cc_write_files", - "name": "Write Files", - "title": "write arbitrary files", - "description": dedent( - """\ - Write out arbitrary content to files, optionally setting permissions. - Parent folders in the path are created if absent. - Content can be specified in plain text or binary. Data encoded with - either base64 or binary gzip data can be specified and will be decoded - before being written. For empty file creation, content can be omitted. - - .. note:: - If multiline data is provided, care should be taken to ensure that it - follows yaml formatting standards. To specify binary data, use the yaml - option ``!!binary`` - - .. note:: - Do not write files under /tmp during boot because of a race with - systemd-tmpfiles-clean that can cause temp files to get cleaned during - the early boot process. Use /run/somedir instead to avoid race - LP:1707222. - - .. warning:: - Existing files will be overridden.""" - ), "distros": ["all"], - "examples": [ - dedent( - """\ - # Write out base64 encoded content to /etc/sysconfig/selinux - write_files: - - encoding: b64 - content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4... 
- owner: root:root - path: /etc/sysconfig/selinux - permissions: '0644' - """ - ), - dedent( - """\ - # Appending content to an existing file - write_files: - - content: | - 15 * * * * root ship_logs - path: /etc/crontab - append: true - """ - ), - dedent( - """\ - # Provide gzipped binary content - write_files: - - encoding: gzip - content: !!binary | - H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA= - path: /usr/bin/hello - permissions: '0755' - """ - ), - dedent( - """\ - # Create an empty file on the system - write_files: - - path: /root/CLOUD_INIT_WAS_HERE - """ - ), - dedent( - """\ - # Defer writing the file until after the package (Nginx) is - # installed and its user is created alongside - write_files: - - path: /etc/nginx/conf.d/example.com.conf - content: | - server { - server_name example.com; - listen 80; - root /var/www; - location / { - try_files $uri $uri/ $uri.html =404; - } - } - owner: 'nginx:nginx' - permissions: '0640' - defer: true - """ - ), - ], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["write_files"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py index d18fee7e5..0dc0662e1 100644 --- a/cloudinit/config/cc_write_files_deferred.py +++ b/cloudinit/config/cc_write_files_deferred.py @@ -14,28 +14,14 @@ from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -This module is based on `'Write Files' `__, and -will handle all files from the write_files list, that have been -marked as deferred and thus are not being processed by the -write_files module. - -*Please note that his module is not exposed to the user through -its own dedicated top-level directive.* -""" meta: MetaSchema = { "id": "cc_write_files_deferred", - "name": "Write Files Deferred", - "title": "Defer writing certain files", - "description": __doc__, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, - "examples": [], "activate_by_schema_keys": ["write_files"], -} +} # type: ignore # This module is undocumented in our schema docs -__doc__ = "" LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 1ab5008f0..9a717af3d 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -4,44 +4,19 @@ # # This file is part of cloud-init. See LICENSE file for license information. -"Yum Add Repo: Add yum repository configuration to the system" +"""Yum Add Repo: Add yum repository configuration to the system""" import io import logging import os from configparser import ConfigParser -from textwrap import dedent from cloudinit import util from cloudinit.cloud import Cloud from cloudinit.config import Config -from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.config.schema import MetaSchema from cloudinit.settings import PER_INSTANCE -MODULE_DESCRIPTION = """\ -Add yum repository configuration to ``/etc/yum.repos.d``. Configuration files -are named based on the opaque dictionary key under the ``yum_repos`` they are -specified with. If a config file already exists with the same name as a config -entry, the config entry will be skipped. 
-""" - -distros = [ - "almalinux", - "centos", - "cloudlinux", - "eurolinux", - "fedora", - "mariner", - "openeuler", - "OpenCloudOS", - "openmandriva", - "photon", - "rhel", - "rocky", - "TencentOS", - "virtuozzo", -] - COPR_BASEURL = ( "https://download.copr.fedorainfracloud.org/results/@cloud-init/" "cloud-init-dev/epel-8-$basearch/" @@ -57,72 +32,28 @@ meta: MetaSchema = { "id": "cc_yum_add_repo", - "name": "Yum Add Repo", - "title": "Add yum repository configuration to the system", - "description": MODULE_DESCRIPTION, - "distros": distros, - "examples": [ - dedent( - """\ - yum_repos: - my_repo: - baseurl: http://blah.org/pub/epel/testing/5/$basearch/ - yum_repo_dir: /store/custom/yum.repos.d - """ - ), - dedent( - f"""\ - # Enable cloud-init upstream's daily testing repo for EPEL 8 to - # install latest cloud-init from tip of `main` for testing. - yum_repos: - cloud-init-daily: - name: Copr repo for cloud-init-dev owned by @cloud-init - baseurl: {COPR_BASEURL} - type: rpm-md - skip_if_unavailable: true - gpgcheck: true - gpgkey: {COPR_GPG_URL} - enabled_metadata: 1 - """ - ), - dedent( - f"""\ - # Add the file /etc/yum.repos.d/epel_testing.repo which can then - # subsequently be used by yum for later operations. - yum_repos: - # The name of the repository - epel-testing: - baseurl: {EPEL_TESTING_BASEURL} - enabled: false - failovermethod: priority - gpgcheck: true - gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL - name: Extra Packages for Enterprise Linux 5 - Testing - """ - ), - dedent( - """\ - # Any yum repo configuration can be passed directly into - # the repository file created. See: man yum.conf for supported - # config keys. - # - # Write /etc/yum.conf.d/my-package-stream.repo with gpgkey checks - # on the repo data of the repository enabled. 
-            yum_repos:
-                my package stream:
-                    baseurl: http://blah.org/pub/epel/testing/5/$basearch/
-                    mirrorlist: http://some-url-to-list-of-baseurls
-                    repo_gpgcheck: 1
-                    enable_gpgcheck: true
-                    gpgkey: https://url.to.ascii-armored-gpg-key
-            """
-        ),
+    "distros": [
+        "almalinux",
+        "azurelinux",
+        "centos",
+        "cloudlinux",
+        "eurolinux",
+        "fedora",
+        "mariner",
+        "openeuler",
+        "OpenCloudOS",
+        "openmandriva",
+        "photon",
+        "rhel",
+        "rocky",
+        "TencentOS",
+        "virtuozzo",
     ],
     "frequency": PER_INSTANCE,
     "activate_by_schema_keys": ["yum_repos"],
-}
+} # type: ignore
+

-__doc__ = get_meta_doc(meta)
 LOG = logging.getLogger(__name__)

@@ -210,24 +141,22 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
                 n_repo_config[k] = v
             repo_config = n_repo_config
             missing_required = 0
-            for req_field in ["baseurl"]:
+            req_fields = ["baseurl", "metalink"]
+            for req_field in req_fields:
                 if req_field not in repo_config:
-                    LOG.warning(
-                        "Repository %s does not contain a %s"
-                        " configuration 'required' entry",
-                        repo_id,
-                        req_field,
-                    )
                     missing_required += 1
-            if not missing_required:
-                repo_configs[canon_repo_id] = repo_config
-                repo_locations[canon_repo_id] = repo_fn_pth
-            else:
+
+            if missing_required == len(req_fields):
                 LOG.warning(
-                    "Repository %s is missing %s required fields, skipping!",
+                    "Repository %s should contain at least one of the"
+                    " following configuration entries: %s, skipping!",
                     repo_id,
-                    missing_required,
+                    ", ".join(req_fields),
                 )
+            else:
+                repo_configs[canon_repo_id] = repo_config
+                repo_locations[canon_repo_id] = repo_fn_pth
+
     for (c_repo_id, path) in repo_locations.items():
         repo_blob = _format_repository_config(
             c_repo_id, repo_configs.get(c_repo_id)
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index c82a1b485..02b372dcc 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -7,76 +7,29 @@
 import logging
 import os
-from textwrap import dedent

 import configobj

 from cloudinit import util
 from cloudinit.cloud import Cloud
 from cloudinit.config import Config
-from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.config.schema import MetaSchema
 from cloudinit.settings import PER_ALWAYS

-distros = [
-    "opensuse",
-    "opensuse-microos",
-    "opensuse-tumbleweed",
-    "opensuse-leap",
-    "sle_hpc",
-    "sle-micro",
-    "sles",
-]
-
-MODULE_DESCRIPTION = """\
-Zypper behavior can be configured using the ``config`` key, which will modify
-``/etc/zypp/zypp.conf``. The configuration writer will only append the
-provided configuration options to the configuration file. Any duplicate
-options will be resolved by the way the zypp.conf INI file is parsed.
-
-.. note::
-    Setting ``configdir`` is not supported and will be skipped.
-
-The ``repos`` key may be used to add repositories to the system. Beyond the
-required ``id`` and ``baseurl`` attributions, no validation is performed
-on the ``repos`` entries. It is assumed the user is familiar with the
-zypper repository file format. This configuration is also applicable for
-systems with transactional-updates.
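The reworked check in ``cc_yum_add_repo.handle`` above treats ``baseurl`` and ``metalink`` as alternatives: a repo definition is skipped only when it supplies neither. A standalone sketch of that acceptance rule (``repo_is_usable`` is a hypothetical helper, not part of the module):

    from typing import Dict

    REQ_FIELDS = ["baseurl", "metalink"]


    def repo_is_usable(repo_config: Dict[str, str]) -> bool:
        """Accept a repo when at least one of baseurl/metalink is present."""
        missing = sum(1 for field in REQ_FIELDS if field not in repo_config)
        return missing != len(REQ_FIELDS)


    assert repo_is_usable({"metalink": "https://example.com/repo.metalink"})
    assert repo_is_usable({"baseurl": "http://blah.org/pub/epel/testing/5/"})
    assert not repo_is_usable({"name": "no-url-keys-here"})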
-""" meta: MetaSchema = { "id": "cc_zypper_add_repo", - "name": "Zypper Add Repo", - "title": "Configure zypper behavior and add zypper repositories", - "description": MODULE_DESCRIPTION, - "distros": distros, - "examples": [ - dedent( - """\ - zypper: - repos: - - id: opensuse-oss - name: os-oss - baseurl: http://dl.opensuse.org/dist/leap/v/repo/oss/ - enabled: 1 - autorefresh: 1 - - id: opensuse-oss-update - name: os-oss-up - baseurl: http://dl.opensuse.org/dist/leap/v/update - # any setting per - # https://en.opensuse.org/openSUSE:Standards_RepoInfo - # enable and autorefresh are on by default - config: - reposdir: /etc/zypp/repos.dir - servicesdir: /etc/zypp/services.d - download.use_deltarpm: true - # any setting in /etc/zypp/zypp.conf - """ - ) + "distros": [ + "opensuse", + "opensuse-microos", + "opensuse-tumbleweed", + "opensuse-leap", + "sle_hpc", + "sle-micro", + "sles", ], "frequency": PER_ALWAYS, "activate_by_schema_keys": ["zypper"], -} - -__doc__ = get_meta_doc(meta) +} # type: ignore LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/modules.py b/cloudinit/config/modules.py index e1fec671e..f775802d7 100644 --- a/cloudinit/config/modules.py +++ b/cloudinit/config/modules.py @@ -208,7 +208,7 @@ def _fixup_modules(self, raw_mods) -> List[ModuleDetails]: util.deprecate( deprecated=( f"Module has been renamed from {mod_name} to " - f"{RENAMED_MODULES[mod_name][1]}. Update any" + f"{RENAMED_MODULES[mod_name]}. Update any" " references in /etc/cloud/cloud.cfg" ), deprecated_version="24.1", diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 54c1708ee..062ab92ec 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. """schema.py: Set of module functions for processing cloud-config schema.""" import argparse +import glob import json import logging import os @@ -30,7 +31,7 @@ import yaml -from cloudinit import importer, safeyaml +from cloudinit import features, importer, safeyaml from cloudinit.cmd.devel import read_cfg_paths from cloudinit.handlers import INCLUSION_TYPES_MAP, type_from_starts_with from cloudinit.helpers import Paths @@ -40,6 +41,8 @@ error, get_modules_from_dir, load_text_file, + load_yaml, + should_log_deprecation, write_file, ) @@ -66,6 +69,7 @@ # 3. 
Add the new version definition to versions.schema.cloud-config.json USERDATA_SCHEMA_FILE = "schema-cloud-config-v1.json" NETWORK_CONFIG_V1_SCHEMA_FILE = "schema-network-config-v1.json" +NETWORK_CONFIG_V2_SCHEMA_FILE = "schema-network-config-v2.json" _YAML_MAP = {True: "true", False: "false", None: "null"} SCHEMA_DOC_TMPL = """ @@ -98,16 +102,12 @@ {examples} """ -SCHEMA_PROPERTY_HEADER = "" SCHEMA_PROPERTY_TMPL = "{prefix}* **{prop_name}:** ({prop_type}){description}" SCHEMA_LIST_ITEM_TMPL = ( "{prefix}* Each object in **{prop_name}** list supports " "the following keys:" ) -SCHEMA_EXAMPLES_HEADER = "" -SCHEMA_EXAMPLES_SPACER_TEMPLATE = "\n # --- Example{example_count} ---\n\n" DEPRECATED_KEY = "deprecated" -DEPRECATED_PREFIX = "DEPRECATED: " # user-data files typically must begin with a leading '#' USERDATA_VALID_HEADERS = sorted( @@ -122,12 +122,12 @@ from typing_extensions import NotRequired, TypedDict class MetaSchema(TypedDict): - name: str id: str + name: str title: str description: str distros: typing.List[str] - examples: typing.List[str] + examples: typing.List[Union[dict, str]] frequency: str activate_by_schema_keys: NotRequired[List[str]] @@ -136,7 +136,14 @@ class MetaSchema(TypedDict): class SchemaDeprecationError(ValidationError): - pass + def __init__( + self, + message: str, + version: str, + **kwargs, + ): + super().__init__(message, **kwargs) + self.version: str = version class SchemaProblem(NamedTuple): @@ -160,6 +167,8 @@ class SchemaType(Enum): CLOUD_CONFIG = "cloud-config" NETWORK_CONFIG = "network-config" + NETWORK_CONFIG_V1 = "network-config-v1" + NETWORK_CONFIG_V2 = "network-config-v2" # Placeholders for versioned schema and schema file locations. @@ -169,8 +178,14 @@ class SchemaType(Enum): "latest": USERDATA_SCHEMA_FILE, }, SchemaType.NETWORK_CONFIG: { + "latest": NETWORK_CONFIG_V2_SCHEMA_FILE, + }, + SchemaType.NETWORK_CONFIG_V1: { "latest": NETWORK_CONFIG_V1_SCHEMA_FILE, }, + SchemaType.NETWORK_CONFIG_V2: { + "latest": NETWORK_CONFIG_V2_SCHEMA_FILE, + }, } @@ -347,14 +362,14 @@ def _validator( ): """Jsonschema validator for `deprecated` items. - It raises a instance of `error_type` if deprecated that must be handled, + It yields an instance of `error_type` if deprecated that must be handled, otherwise the instance is consider faulty. """ if deprecated: msg = _add_deprecated_changed_or_new_msg( schema, annotate=True, filter_key=[filter_key] ) - yield error_type(msg) + yield error_type(msg, schema.get("deprecated_version", "devel")) _validator_deprecated = partial(_validator, filter_key="deprecated") @@ -617,7 +632,9 @@ def netplan_validate_network_schema( try: from netplan import NetplanParserException, Parser # type: ignore except ImportError: - LOG.debug("Skipping netplan schema validation. No netplan available") + LOG.debug( + "Skipping netplan schema validation. No netplan API available" + ) return False # netplan Parser looks at all *.yaml files in the target directory underA @@ -668,15 +685,16 @@ def netplan_validate_network_schema( message = _format_schema_problems( errors, prefix=( - f"Invalid {SchemaType.NETWORK_CONFIG.value} provided:\n" + f"{SchemaType.NETWORK_CONFIG.value} failed " + "schema validation!\n" ), separator="\n", ) else: message = ( - f"Invalid {SchemaType.NETWORK_CONFIG.value} provided: " - "Please run 'sudo cloud-init schema --system' to " - "see the schema errors." + f"{SchemaType.NETWORK_CONFIG.value} failed schema validation! " + "You may run 'sudo cloud-init schema --system' to " + "check the details." 
) LOG.warning(message) return True @@ -699,7 +717,8 @@ def validate_cloudconfig_schema( for the cloud config module (config.cc_*). If None, validate against global schema. @param schema_type: Optional SchemaType. - One of: SchemaType.CLOUD_CONFIG or SchemaType.NETWORK_CONFIG. + One of: SchemaType.CLOUD_CONFIG or SchemaType.NETWORK_CONFIG_V1 or + SchemaType.NETWORK_CONFIG_V2 Default: SchemaType.CLOUD_CONFIG @param strict: Boolean, when True raise SchemaValidationErrors instead of logging warnings. @@ -714,17 +733,31 @@ def validate_cloudconfig_schema( against the provided schema. @raises: RuntimeError when provided config sourced from YAML is not a dict. @raises: ValueError on invalid schema_type not in CLOUD_CONFIG or - NETWORK_CONFIG + NETWORK_CONFIG_V1 or NETWORK_CONFIG_V2 """ + from cloudinit.net.netplan import available as netplan_available + if schema_type == SchemaType.NETWORK_CONFIG: - if network_schema_version(config) == 2: - if netplan_validate_network_schema( - network_config=config, strict=strict, log_details=log_details - ): - # Schema was validated by netplan - return True - # network-config schema version 2 but no netplan. - # TODO(add JSON schema definition for network version 2) + network_version = network_schema_version(config) + if network_version == 2: + schema_type = SchemaType.NETWORK_CONFIG_V2 + elif network_version == 1: + schema_type = SchemaType.NETWORK_CONFIG_V1 + schema = get_schema(schema_type) + + if schema_type == SchemaType.NETWORK_CONFIG_V2: + if netplan_validate_network_schema( + network_config=config, strict=strict, log_details=log_details + ): + # Schema was validated by netplan + return True + elif netplan_available(): + # We found no netplan API on netplan system, do not perform schema + # validation against cloud-init's network v2 schema because netplan + # supports more config keys than in cloud-init's netv2 schema. + # This may result in schema warnings for valid netplan config + # which would be successfully rendered by netplan but doesn't + # adhere to cloud-init's network v2. return False if schema is None: @@ -743,6 +776,7 @@ def validate_cloudconfig_schema( errors: SchemaProblems = [] deprecations: SchemaProblems = [] + info_deprecations: SchemaProblems = [] for schema_error in sorted( validator.iter_errors(config), key=lambda e: e.path ): @@ -758,37 +792,49 @@ def validate_cloudconfig_schema( ) if prop_match: path = prop_match["name"] - problem = (SchemaProblem(path, schema_error.message),) if isinstance( schema_error, SchemaDeprecationError ): # pylint: disable=W1116 - deprecations += problem + if schema_error.version == "devel" or should_log_deprecation( + schema_error.version, features.DEPRECATION_INFO_BOUNDARY + ): + deprecations.append(SchemaProblem(path, schema_error.message)) + else: + info_deprecations.append( + SchemaProblem(path, schema_error.message) + ) else: - errors += problem + errors.append(SchemaProblem(path, schema_error.message)) - if log_deprecations and deprecations: - message = _format_schema_problems( - deprecations, - prefix="Deprecated cloud-config provided:\n", - separator="\n", - ) - # This warning doesn't fit the standardized util.deprecated() utility - # format, but it is a deprecation log, so log it directly. 
- LOG.deprecated(message) # type: ignore - if strict and (errors or deprecations): - raise SchemaValidationError(errors, deprecations) + if log_deprecations: + if info_deprecations: + message = _format_schema_problems( + info_deprecations, + prefix="Deprecated cloud-config provided: ", + ) + LOG.info(message) + if deprecations: + message = _format_schema_problems( + deprecations, + prefix="Deprecated cloud-config provided: ", + ) + # This warning doesn't fit the standardized util.deprecated() + # utility format, but it is a deprecation log, so log it directly. + LOG.deprecated(message) # type: ignore + if strict and (errors or deprecations or info_deprecations): + raise SchemaValidationError(errors, deprecations + info_deprecations) if errors: if log_details: details = _format_schema_problems( errors, - prefix=f"Invalid {schema_type.value} provided:\n", + prefix=f"{schema_type.value} failed schema validation!\n", separator="\n", ) else: details = ( - f"Invalid {schema_type.value} provided: " - "Please run 'sudo cloud-init schema --system' to " - "see the schema errors." + f"{schema_type.value} failed schema validation! " + "You may run 'sudo cloud-init schema --system' to " + "check the details." ) LOG.warning(details) return True @@ -893,16 +939,6 @@ def annotate( if not schema_errors and not schema_deprecations: return self._original_content lines = self._original_content.split("\n") - if not isinstance(self._cloudconfig, dict): - # Return a meaningful message on empty cloud-config - return "\n".join( - lines - + [ - self._build_footer( - "Errors", ["# E1: Cloud-config is not a YAML dict."] - ) - ] - ) errors_by_line = self._build_errors_by_line(schema_errors) deprecations_by_line = self._build_errors_by_line(schema_deprecations) annotated_content = self._annotate_content( @@ -968,7 +1004,7 @@ def process_merged_cloud_config_part_problems( def _get_config_type_and_rendered_userdata( config_path: str, content: str, - instance_data_path: str = None, + instance_data_path: Optional[str] = None, ) -> UserDataTypeAndDecodedContent: """ Return tuple of user-data-type and rendered content. @@ -1039,7 +1075,7 @@ def validate_cloudconfig_file( schema: dict, schema_type: SchemaType = SchemaType.CLOUD_CONFIG, annotate: bool = False, - instance_data_path: str = None, + instance_data_path: Optional[str] = None, ) -> bool: """Validate cloudconfig file adheres to a specific jsonschema. @@ -1056,6 +1092,8 @@ def validate_cloudconfig_file( :raises SchemaValidationError containing any of schema_errors encountered. :raises RuntimeError when config_path does not exist. """ + from cloudinit.net.netplan import available as netplan_available + decoded_content = load_text_file(config_path) if not decoded_content: print( @@ -1083,7 +1121,7 @@ def validate_cloudconfig_file( if annotate: cloudconfig, marks = safeyaml.load_with_marks(content) else: - cloudconfig = safeyaml.load(content) + cloudconfig = yaml.safe_load(content) marks = {} except yaml.YAMLError as e: line = column = 1 @@ -1121,22 +1159,28 @@ def validate_cloudconfig_file( return False network_version = network_schema_version(cloudconfig) if network_version == 2: + schema_type = SchemaType.NETWORK_CONFIG_V2 if netplan_validate_network_schema( network_config=cloudconfig, strict=True, annotate=annotate ): return True # schema validation performed by netplan - if network_version != 1: - # Validation requires JSON schema definition in - # cloudinit/config/schemas/schema-network-config-v1.json - print( - "Skipping network-config schema validation." 
- " No network schema for version:" - f" {network_schema_version(cloudconfig)}" - ) - return False + elif netplan_available(): + print( + "Skipping network-config schema validation for version: 2." + " No netplan API available." + ) + return False + elif network_version == 1: + schema_type = SchemaType.NETWORK_CONFIG_V1 + # refresh schema since NETWORK_CONFIG defaults to V2 + schema = get_schema(schema_type) try: if not validate_cloudconfig_schema( - cloudconfig, schema=schema, strict=True, log_deprecations=False + cloudconfig, + schema=schema, + schema_type=schema_type, + strict=True, + log_deprecations=False, ): print( f"Skipping {schema_type.value} schema validation." @@ -1210,7 +1254,7 @@ def _get_property_type(property_dict: dict, defs: dict) -> str: """Return a string representing a property type from a given jsonschema. """ - _flatten_schema_refs(property_dict, defs) + flatten_schema_refs(property_dict, defs) property_types = property_dict.get("type", []) if not isinstance(property_types, list): property_types = [property_types] @@ -1272,7 +1316,7 @@ def _parse_description(description, prefix) -> str: return description -def _flatten_schema_refs(src_cfg: dict, defs: dict): +def flatten_schema_refs(src_cfg: dict, defs: dict): """Flatten schema: replace $refs in src_cfg with definitions from $defs.""" if "$ref" in src_cfg: reference = src_cfg.pop("$ref").replace("#/$defs/", "") @@ -1298,7 +1342,7 @@ def _flatten_schema_refs(src_cfg: dict, defs: dict): sub_schema.update(defs[reference]) -def _flatten_schema_all_of(src_cfg: dict): +def flatten_schema_all_of(src_cfg: dict): """Flatten schema: Merge allOf. If a schema as allOf, then all of the sub-schemas must hold. Therefore @@ -1368,8 +1412,8 @@ def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str: for prop_schema in property_schemas: for prop_key, prop_config in prop_schema.items(): - _flatten_schema_refs(prop_config, defs) - _flatten_schema_all_of(prop_config) + flatten_schema_refs(prop_config, defs) + flatten_schema_all_of(prop_config) if prop_config.get("hidden") is True: continue # document nothing for this property @@ -1387,7 +1431,7 @@ def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str: ) items = prop_config.get("items") if items: - _flatten_schema_refs(items, defs) + flatten_schema_refs(items, defs) if items.get("properties") or items.get("patternProperties"): properties.append( SCHEMA_LIST_ITEM_TMPL.format( @@ -1425,14 +1469,22 @@ def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str: def _get_examples(meta: MetaSchema) -> str: """Return restructured text describing the meta examples if present.""" + paths = read_cfg_paths() + module_docs_dir = os.path.join(paths.docs_dir, "module-docs") examples = meta.get("examples") if not examples: return "" - rst_content = SCHEMA_EXAMPLES_HEADER - for count, example in enumerate(examples, 1): - rst_content += SCHEMA_EXAMPLES_SPACER_TEMPLATE.format( - example_count=count - ) + rst_content: str = "" + for example in examples: + # FIXME(drop conditional when all mods have rtd/module-doc/*/data.yaml) + if isinstance(example, dict): + if example["comment"]: + comment = f"# {example['comment']}\n" + else: + comment = "" + example = comment + load_text_file( + os.path.join(module_docs_dir, example["file"]) + ) indented_lines = textwrap.indent(example, " ").split("\n") rst_content += "\n".join(indented_lines) return rst_content @@ -1542,14 +1594,21 @@ def load_doc(requested_modules: list) -> str: ), sys_exit=True, ) - for mod_name in all_modules: + 
module_docs = get_module_docs() + schema = get_schema() + for mod_name in sorted(all_modules): if "all" in requested_modules or mod_name in requested_modules: (mod_locs, _) = importer.find_module( mod_name, ["cloudinit.config"], ["meta"] ) if mod_locs: mod = importer.import_module(mod_locs[0]) - docs += mod.__doc__ or "" + if module_docs.get(mod.meta["id"]): + # Include docs only when module id is in module_docs + mod.meta.update(module_docs.get(mod.meta["id"], {})) + docs += get_meta_doc(mod.meta, schema) or "" + else: + docs += mod.__doc__ or "" return docs @@ -1557,6 +1616,16 @@ def get_schema_dir() -> str: return os.path.join(os.path.dirname(os.path.abspath(__file__)), "schemas") +def get_module_docs() -> dict: + """Return a dict keyed on cc_ with documentation info""" + paths = read_cfg_paths() + mod_docs = {} + module_docs_dir = os.path.join(paths.docs_dir, "module-docs") + for mod_doc in glob.glob(f"{module_docs_dir}/*/data.yaml"): + mod_docs.update(load_yaml(load_text_file(mod_doc))) + return mod_docs + + def get_schema(schema_type: SchemaType = SchemaType.CLOUD_CONFIG) -> dict: """Return jsonschema for a specific type. @@ -1671,7 +1740,7 @@ def get_config_paths_from_args( ) -> Tuple[str, List[InstanceDataPart]]: """Return appropriate instance-data.json and instance data parts - Based on commandline args, and user permissions, determine the + Based on command line args, and user permissions, determine the appropriate instance-data.json to source for jinja templates and a list of applicable InstanceDataParts such as user-data, vendor-data and network-config for which to validate schema. Avoid returning any diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index ff61dcaa6..f5609c539 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -267,9 +267,9 @@ "patternProperties": { "^.+$": { "label": "", - "description": "When providing an object for users.groups the ```` keys are the groups to add this user to", "deprecated": true, "deprecated_version": "23.1", + "deprecated_description": "The use of ``object`` type is deprecated. Use ``string`` or ``array`` of ``string`` instead.", "type": [ "null" ], @@ -292,9 +292,7 @@ "type": "string" }, "lock-passwd": { - "default": true, "type": "boolean", - "description": "Default: ``true``", "deprecated": true, "deprecated_version": "22.3", "deprecated_description": "Use ``lock_passwd`` instead." @@ -304,16 +302,34 @@ "description": "Disable password login. Default: ``true``", "type": "boolean" }, + "no-create-home": { + "type": "boolean", + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "Use ``no_create_home`` instead." + }, "no_create_home": { "default": false, "description": "Do not create home directory. Default: ``false``", "type": "boolean" }, + "no-log-init": { + "type": "boolean", + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "Use ``no_log_init`` instead." + }, "no_log_init": { "default": false, "description": "Do not initialize lastlog and faillog for user. Default: ``false``", "type": "boolean" }, + "no-user-group": { + "type": "boolean", + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "Use ``no_user_group`` instead." + }, "no_user_group": { "default": false, "description": "Do not create group named after user. 
Default: ``false``", @@ -323,24 +339,54 @@ "description": "Hash of user password applied when user does not exist. This will NOT be applied if the user already exists. To generate this hash, run: ``mkpasswd --method=SHA-512 --rounds=500000`` **Note:** Your password might possibly be visible to unprivileged users on your system, depending on your cloud's security model. Check if your cloud's IMDS server is visible from an unprivileged user to evaluate risk.", "type": "string" }, + "hashed-passwd": { + "type": "string", + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "Use ``hashed_passwd`` instead." + }, "hashed_passwd": { "description": "Hash of user password to be applied. This will be applied even if the user is preexisting. To generate this hash, run: ``mkpasswd --method=SHA-512 --rounds=500000``. **Note:** Your password might possibly be visible to unprivileged users on your system, depending on your cloud's security model. Check if your cloud's IMDS server is visible from an unprivileged user to evaluate risk.", "type": "string" }, + "plain-text-passwd": { + "type": "string", + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "Use ``plain_text_passwd`` instead." + }, "plain_text_passwd": { "description": "Clear text of user password to be applied. This will be applied even if the user is preexisting. **Note:** SSH keys or certificates are a safer choice for logging in to your system. For local escalation, supplying a hashed password is a safer choice than plain text. Your password might possibly be visible to unprivileged users on your system, depending on your cloud's security model. An exposed plain text password is an immediate security concern. Check if your cloud's IMDS server is visible from an unprivileged user to evaluate risk.", "type": "string" }, + "create-groups": { + "type": "boolean", + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "Use ``create_groups`` instead." + }, "create_groups": { "default": true, "description": "Boolean set ``false`` to disable creation of specified user ``groups``. Default: ``true``.", "type": "boolean" }, + "primary-group": { + "type": "string", + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "Use ``primary_group`` instead." + }, "primary_group": { "default": "````", "description": "Primary group for user. Default: ````", "type": "string" }, + "selinux-user": { + "type": "string", + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "Use ``selinux_user`` instead." + }, "selinux_user": { "description": "SELinux user for user's login. Default: the default SELinux user.", "type": "string" @@ -361,6 +407,26 @@ }, "minItems": 1 }, + "ssh-authorized-keys": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "deprecated": true, + "deprecated_version": "18.3", + "deprecated_description": "Use ``ssh_authorized_keys`` instead." + }, + "ssh-import-id": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "Use ``ssh_import_id`` instead." + }, "ssh_import_id": { "description": "List of ssh ids to import for user. Can not be combined with ``ssh_redirect_user``. See the man page[1] for more details. 
[1] https://manpages.ubuntu.com/manpages/noble/en/man1/ssh-import-id.1.html", "type": "array", @@ -369,6 +435,12 @@ }, "minItems": 1 }, + "ssh-redirect-user": { + "type": "boolean", + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "Use ``ssh_redirect_user`` instead." + }, "ssh_redirect_user": { "type": "boolean", "default": false, @@ -388,6 +460,15 @@ ], "description": "Sudo rule to use or false. Absence of a sudo value or ``null`` will result in no sudo rules added for this user." }, + { + "type": "array", + "items": { + "type": [ + "string", + "null" + ] + } + }, { "type": "boolean", "changed": true, @@ -462,7 +543,6 @@ "properties": { "remove-defaults": { "type": "boolean", - "default": false, "deprecated": true, "deprecated_version": "22.3", "deprecated_description": "Use ``remove_defaults`` instead." @@ -577,6 +657,12 @@ }, "merge_type": { "$ref": "#/$defs/merge_definition" + }, + "system_info": { + "type": "object", + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "System and/or distro specific settings. This is not intended to be overridden by user data or vendor data." } } }, @@ -627,7 +713,7 @@ "distro", "pip" ], - "description": "The type of installation for ansible. It can be one of the following values:\n\n - ``distro``\n - ``pip``" + "description": "The type of installation for ansible. It can be one of the following values:\n- ``distro``\n- ``pip``" }, "run_user": { "type": "string", @@ -663,6 +749,7 @@ "run_ansible": { "type": "array", "items": { + "type": "object", "properties": { "playbook_name": { "type": "string" @@ -835,6 +922,13 @@ "vault_password_file": { "type": "string" }, + "verify_commit": { + "type": "boolean", + "default": false + }, + "inventory": { + "type": "string" + }, "module_name": { "type": "string" }, @@ -867,7 +961,7 @@ "preserve_repositories": { "type": "boolean", "default": false, - "description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``.\n\n The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``." + "description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within an apk_repos section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``.\nThe ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``." }, "alpine_repo": { "type": [ @@ -928,11 +1022,11 @@ }, "minItems": 1, "uniqueItems": true, - "description": "Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the ``disable_suites`` list, it will be replaced with the release name. If a suite specified in ``disable_suites`` is not present in ``sources.list`` it will be ignored. 
For convenience, several aliases are provided for`` disable_suites``:\n\n - ``updates`` => ``$RELEASE-updates``\n - ``backports`` => ``$RELEASE-backports``\n - ``security`` => ``$RELEASE-security``\n - ``proposed`` => ``$RELEASE-proposed``\n - ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it is just commented out." + "description": "Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the ``disable_suites`` list, it will be replaced with the release name. If a suite specified in ``disable_suites`` is not present in ``sources.list`` it will be ignored. For convenience, several aliases are provided for ``disable_suites``:\n- ``updates`` => ``$RELEASE-updates``\n- ``backports`` => ``$RELEASE-backports``\n- ``security`` => ``$RELEASE-security``\n- ``proposed`` => ``$RELEASE-proposed``\n- ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it is just commented out." }, "primary": { "$ref": "#/$defs/apt_configure.mirror", - "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``<distro>-mirror`` in each of the following:\n\n - fqdn of this host per cloud metadata,\n - localdomain,\n - domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``<distro>-mirror``, then it is assumed that there is a distro mirror at ``http://<distro>-mirror.<domain>/``. If the ``primary`` key is defined, but not the ``security`` key, then then configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``<distro>-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n\n - ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n - ``security`` => ``http://security.ubuntu.com/ubuntu``" + "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. 
The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environments, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``<distro>-mirror`` in each of the following:\n- fqdn of this host per cloud metadata,\n- localdomain,\n- domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``<distro>-mirror``, then it is assumed that there is a distro mirror at ``http://<distro>-mirror.<domain>/``. If the ``primary`` key is defined, but not the ``security`` key, then the configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``<distro>-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n- ``keyid``: a key to import via shortid or fingerprint.\n- ``key``: a raw PGP key.\n- ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n- ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n- ``security`` => ``http://security.ubuntu.com/ubuntu``" }, "security": { "$ref": "#/$defs/apt_configure.mirror", @@ -952,11 +1046,11 @@ "type": "string" } }, - "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess for distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n\n - ``pkgname`` is the name of the package.\n - ``question`` the name of the questions.\n - ``type`` is the type of question.\n - ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.01``" + "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess four distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n- ``pkgname`` is the name of the package.\n- ``question`` is the name of the question.\n- ``type`` is the type of question.\n- ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.01``" }, "sources_list": { "type": "string", - "description": "Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. 
Within this template, the following strings will be replaced with the appropriate values:\n\n - ``$MIRROR``\n - ``$RELEASE``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$KEY_FILE``" + "description": "Specifies a custom template for rendering ``sources.list``. If no ``sources_list`` template is given, cloud-init will use a sane default. Within this template, the following strings will be replaced with the appropriate values:\n- ``$MIRROR``\n- ``$RELEASE``\n- ``$PRIMARY``\n- ``$SECURITY``\n- ``$KEY_FILE``" }, "conf": { "type": "string", @@ -1009,7 +1103,7 @@ "minProperties": 1 } }, - "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n - ``source``: a sources.list entry (some variable replacements apply).\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n - ``filename``: specify the name of the list file.\n - ``append``: If ``true``, append to sources file, otherwise overwrite it. Default: ``true``.\n\nThe ``source`` key supports variable replacements for the following strings:\n\n - ``$MIRROR``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$RELEASE``\n - ``$KEY_FILE``" + "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n- ``source``: a sources.list entry (some variable replacements apply).\n- ``keyid``: a key to import via shortid or fingerprint.\n- ``key``: a raw PGP key.\n- ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n- ``filename``: specify the name of the list file.\n- ``append``: If ``true``, append to sources file, otherwise overwrite it. Default: ``true``.\n\nThe ``source`` key supports variable replacements for the following strings:\n- ``$MIRROR``\n- ``$PRIMARY``\n- ``$SECURITY``\n- ``$RELEASE``\n- ``$KEY_FILE``" } } } @@ -1125,7 +1219,7 @@ }, "minItems": 1, "uniqueItems": true, - "description": "Create the necessary directories for chef to run. 
By default, it creates the following directories:\n- ``/etc/chef``\n- ``/var/log/chef``\n- ``/var/lib/chef``\n- ``/var/cache/chef``\n- ``/var/backups/chef``\n- ``/var/run/chef``" }, "validation_cert": { "type": "string", @@ -1221,7 +1315,7 @@ "ssl_verify_mode": { "type": "string", "default": ":verify_none", - "description": "Set the verify mode for HTTPS requests. We can have two possible values for this parameter:\n\n - ``:verify_none``: No validation of SSL certificates.\n - ``:verify_peer``: Validate all SSL certificates.\n\nBy default, the parameter is set as ``:verify_none``." + "description": "Set the verify mode for HTTPS requests. We can have two possible values for this parameter:\n- ``:verify_none``: No validation of SSL certificates.\n- ``:verify_peer``: Validate all SSL certificates.\n\nBy default, the parameter is set as ``:verify_none``." }, "validation_name": { "type": "string", @@ -1247,7 +1341,7 @@ "gems", "omnibus" ], - "description": "The type of installation for chef. It can be one of the following values:\n\n - ``packages``\n - ``gems``\n - ``omnibus``" + "description": "The type of installation for chef. It can be one of the following values:\n- ``packages``\n- ``gems``\n- ``omnibus``" }, "run_list": { "type": "array", @@ -1464,7 +1558,7 @@ "properties": { "mode": { "default": "auto", - "description": "The utility to use for resizing. Default: ``auto``\n\nPossible options:\n\n* ``auto`` - Use any available utility\n\n* ``growpart`` - Use growpart utility\n\n* ``gpart`` - Use BSD gpart utility\n\n* ``off`` - Take no action", + "description": "The utility to use for resizing. Default: ``auto``\n\nPossible options:\n\n* ``auto`` - Use any available utility\n\n* ``growpart`` - Use growpart utility\n\n* ``gpart`` - Use BSD gpart utility\n\n* ``'off'`` - Take no action", "oneOf": [ { "enum": [ @@ -1480,7 +1574,7 @@ ], "changed": true, "changed_version": "22.3", - "changed_description": "Specifying a boolean ``false`` value for ``mode`` is deprecated. Use ``off`` instead." + "changed_description": "Specifying a boolean ``false`` value for ``mode`` is deprecated. Use the string ``'off'`` instead." } ] }, @@ -1541,7 +1635,6 @@ }, "grub-dpkg": { "type": "object", - "description": "An alias for ``grub_dpkg``", "deprecated": true, "deprecated_version": "22.2", "deprecated_description": "Use ``grub_dpkg`` instead." @@ -1924,12 +2017,12 @@ }, "mount_default_fields": { "type": "array", - "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.requires=cloud-init.service,_netdev``", + "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.after=cloud-init.service,_netdev``", "default": [ null, null, "auto", - "defaults,nofail,x-systemd.requires=cloud-init.service", + "defaults,nofail,x-systemd.after=cloud-init.service", "0", "2" ], @@ -2004,7 +2097,7 @@ "format": "hostname" }, "uniqueItems": true, - "description": "List of ntp pools. If both pools and servers are\nempty, 4 default pool servers will be provided of\nthe format ``{0-3}.{distro}.pool.ntp.org``. NOTE:\nfor Alpine Linux when using the Busybox NTP client\nthis setting will be ignored due to the limited\nfunctionality of Busybox's ntpd." + "description": "List of ntp pools. 
If both pools and servers are empty, 4 default pool servers will be provided of the format ``{0-3}.{distro}.pool.ntp.org``. NOTE: for Alpine Linux when using the Busybox NTP client this setting will be ignored due to the limited functionality of Busybox's ntpd." }, "servers": { "type": "array", @@ -2013,7 +2106,7 @@ "format": "hostname" }, "uniqueItems": true, - "description": "List of ntp servers. If both pools and servers are\nempty, 4 default pool servers will be provided with\nthe format ``{0-3}.{distro}.pool.ntp.org``." + "description": "List of ntp servers. If both pools and servers are empty, 4 default pool servers will be provided with the format ``{0-3}.{distro}.pool.ntp.org``." }, "peers": { "type": "array", @@ -2035,26 +2128,26 @@ "ntp_client": { "type": "string", "default": "auto", - "description": "Name of an NTP client to use to configure system NTP.\nWhen unprovided or 'auto' the default client preferred\nby the distribution will be used. The following\nbuilt-in client names can be used to override existing\nconfiguration defaults: chrony, ntp, openntpd,\nntpdate, systemd-timesyncd." + "description": "Name of an NTP client to use to configure system NTP. When unprovided or 'auto', the default client preferred by the distribution will be used. The following built-in client names can be used to override existing configuration defaults: chrony, ntp, openntpd, ntpdate, systemd-timesyncd." }, "enabled": { "type": "boolean", "default": true, - "description": "Attempt to enable ntp clients if set to True. If set\nto False, ntp client will not be configured or\ninstalled" + "description": "Attempt to enable ntp clients if set to ``true``. If set to ``false``, the ntp client will not be configured or installed." }, "config": { - "description": "Configuration settings or overrides for the\n``ntp_client`` specified.", + "description": "Configuration settings or overrides for the ``ntp_client`` specified.", "type": "object", "minProperties": 1, "additionalProperties": false, "properties": { "confpath": { "type": "string", - "description": "The path to where the ``ntp_client``\nconfiguration is written." + "description": "The path to where the ``ntp_client`` configuration is written." }, "check_exe": { "type": "string", - "description": "The executable name for the ``ntp_client``.\nFor example, ntp service ``check_exe`` is\n'ntpd' because it runs the ntpd binary." + "description": "The executable name for the ``ntp_client``. For example, ntp service ``check_exe`` is 'ntpd' because it runs the ntpd binary." }, "packages": { "type": "array", @@ -2062,15 +2155,15 @@ "type": "string" }, "uniqueItems": true, - "description": "List of packages needed to be installed for the\nselected ``ntp_client``." + "description": "List of packages needed to be installed for the selected ``ntp_client``." }, "service_name": { "type": "string", - "description": "The systemd or sysvinit service name used to\nstart and stop the ``ntp_client``\nservice." + "description": "The systemd or sysvinit service name used to start and stop the ``ntp_client`` service." }, "template": { "type": "string", - "description": "Inline template allowing users to customize their ``ntp_client`` configuration with the use of the Jinja templating engine.\nThe template content should start with ``## template:jinja``.\nWithin the template, you can utilize any of the following ntp module config keys: ``servers``, ``pools``, ``allow``, and ``peers``.\nEach cc_ntp schema config key and expected value type is defined above." 
+ "description": "Inline template allowing users to customize their ``ntp_client`` configuration with the use of the Jinja templating engine. The template content should start with ``## template:jinja``. Within the template, you can utilize any of the following ntp module config keys: ``servers``, ``pools``, ``allow``, and ``peers``. Each cc_ntp schema config key and expected value type is defined above." } } } @@ -2128,24 +2221,18 @@ }, "apt_update": { "type": "boolean", - "default": false, - "description": "Default: ``false``.", "deprecated": true, "deprecated_version": "22.2", "deprecated_description": "Use ``package_update`` instead." }, "apt_upgrade": { "type": "boolean", - "default": false, - "description": "Default: ``false``.", "deprecated": true, "deprecated_version": "22.2", "deprecated_description": "Use ``package_upgrade`` instead." }, "apt_reboot_if_required": { "type": "boolean", - "default": false, - "description": "Default: ``false``.", "deprecated": true, "deprecated_version": "22.2", "deprecated_description": "Use ``package_reboot_if_required`` instead." @@ -2448,8 +2535,18 @@ "description": "The activation key to use. Must be used with ``org``. Should not be used with ``username`` or ``password``" }, "org": { - "type": "integer", - "description": "The organization number to use. Must be used with ``activation-key``. Should not be used with ``username`` or ``password``" + "description": "The organization to use. Must be used with ``activation-key``. Should not be used with ``username`` or ``password``", + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer", + "deprecated": true, + "deprecated_version": "24.2", + "deprecated_description": "Use of type integer for this value is deprecated. Use a string instead." + } + ] }, "auto-attach": { "type": "boolean", @@ -2843,7 +2940,6 @@ } ], "minItems": 1, - "description": "List of ``username:password`` pairs. Each user will have the corresponding password set. A password can be randomly generated by specifying ``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool like ``mkpasswd``, can be specified. A regex (``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value should be treated as a hash.", "deprecated": true, "deprecated_version": "22.2", "deprecated_description": "Use ``users`` instead." @@ -3869,6 +3965,7 @@ "ssh_pwauth": {}, "ssh_quiet_keygen": {}, "swap": {}, + "system_info": {}, "timezone": {}, "ubuntu_advantage": {}, "ubuntu_pro": {}, diff --git a/cloudinit/config/schemas/schema-network-config-v1.json b/cloudinit/config/schemas/schema-network-config-v1.json index 78628178e..6f97f5718 100644 --- a/cloudinit/config/schemas/schema-network-config-v1.json +++ b/cloudinit/config/schemas/schema-network-config-v1.json @@ -532,6 +532,14 @@ "items": { "$ref": "#/$defs/anyOf_type_route" } + }, + "ipv4": { + "type": "boolean", + "description": "Indicate if the subnet is IPv4. If not specified, it will be inferred from the subnet type or address. This exists for compatibility with OpenStack's ``network_data.json`` when rendered through sysconfig." + }, + "ipv6": { + "type": "boolean", + "description": "Indicate if the subnet is IPv6. If not specified, it will be inferred from the subnet type or address. This is exists for compatibility with OpenStack's ``network_data.json`` when rendered through sysconfig." 
} } }, diff --git a/cloudinit/config/schemas/schema-network-config-v2.json b/cloudinit/config/schemas/schema-network-config-v2.json new file mode 100644 index 000000000..0a3741d65 --- /dev/null +++ b/cloudinit/config/schemas/schema-network-config-v2.json @@ -0,0 +1,457 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$defs": { + "renderer": { + "type": "string", + "description": "Use the given networking backend for this definition. Default is networkd.", + "enum": [ + "networkd", + "NetworkManager" + ] + }, + "dhcp-overrides": { + "type": "object", + "description": "DHCP behaviour overrides. Overrides will only have an effect if the corresponding DHCP type is enabled.", + "additionalProperties": false, + "properties": { + "hostname": { + "type": "string", + "description": "Unsupported for dhcp6-overrides when used with the networkd renderer." + }, + "route-metric": { + "type": "integer", + "description": "Unsupported for dhcp6-overrides when used with the networkd renderer." + }, + "send-hostname": { + "type": "boolean", + "description": "Unsupported for dhcp6-overrides when used with the networkd renderer." + }, + "use-dns": { + "type": "boolean" + }, + "use-domains": { + "type": "string" + }, + "use-hostname": { + "type": "boolean" + }, + "use-mtu": { + "type": "boolean", + "description": "Unsupported for dhcp6-overrides when used with the networkd renderer." + }, + "use-ntp": { + "type": "boolean" + }, + "use-routes": { + "type": "boolean", + "description": "Unsupported for dhcp6-overrides when used with the networkd renderer." + } + } + }, + "gateway": { + "type": "string", + "description": "Deprecated, see Netplan#default-routes. Set default gateway for IPv4/6, for manual address configuration. This requires setting addresses too. Gateway IPs must be in a form recognised by inet_pton(3)." + }, + "mapping": { + "type": "object", + "properties": { + "renderer": { + "$ref": "#/$defs/renderer" + }, + "dhcp4": { + "type": [ + "boolean", + "string" + ], + "description": "Enable DHCP for IPv4. Off by default.", + "enum": [ + "yes", + "no", + true, + false + ] + }, + "dhcp6": { + "type": [ + "boolean", + "string" + ], + "description": "Enable DHCP for IPv6. Off by default.", + "enum": [ + "yes", + "no", + true, + false + ] + }, + "dhcp4-overrides": { + "$ref": "#/$defs/dhcp-overrides" + }, + "dhcp6-overrides": { + "$ref": "#/$defs/dhcp-overrides" + }, + "addresses": { + "type": "array", + "description": "Add static addresses to the interface in addition to the ones received through DHCP or RA. Each sequence entry is in CIDR notation, i.e., of the form addr/prefixlen. addr is an IPv4 or IPv6 address as recognised by inet_pton(3) and prefixlen the number of bits of the subnet.", + "items": { + "type": "string" + } + }, + "gateway4": { + "$ref": "#/$defs/gateway" + }, + "gateway6": { + "$ref": "#/$defs/gateway" + }, + "mtu": { + "type": "integer", + "description": "The MTU key represents a device\u2019s Maximum Transmission Unit, the largest size packet or frame, specified in octets (eight-bit bytes), that can be sent in a packet- or frame-based network. Specifying mtu is optional." + }, + "nameservers": { + "type": "object", + "additionalProperties": false, + "description": "Set DNS servers and search domains, for manual address configuration. 
There are two supported fields: addresses: is a list of IPv4 or IPv6 addresses similar to gateway*, and search: is a list of search domains.", + "properties": { + "search": { + "type": "array", + "items": { + "type": "string" + } + }, + "addresses": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "routes": { + "type": "array", + "description": "", + "items": { + "type": "object", + "required": [ + "to" + ], + "additionalProperties": false, + "properties": { + "to": { + "type": "string" + }, + "via": { + "type": "string" + }, + "metric": { + "type": "integer" + } + } + } + } + } + }, + "mapping_physical": { + "allOf": [ + { + "$ref": "#/$defs/mapping" + } + ], + "properties": { + "match": { + "type": "object", + "description": "This selects a subset of available physical devices by various hardware properties. The following configuration will then apply to all matching devices, as soon as they appear. All specified properties must match.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "Current interface name. Globs are supported, and the primary use case for matching on names, as selecting one fixed name can be more easily achieved with having no match: at all and just using the ID (see above). Note that currently only networkd supports globbing, NetworkManager does not." + }, + "macaddress": { + "type": "string", + "description": "Device\u2019s MAC address in the form xx:xx:xx:xx:xx:xx. Globs are not allowed. Letters must be lowercase." + }, + "driver": { + "type": "string", + "description": "Kernel driver name, corresponding to the DRIVER udev property. Globs are supported. Matching on driver is only supported with networkd." + } + } + }, + "set-name": { + "type": "string", + "description": "When matching on unique properties such as path or MAC, or with additional assumptions such as ''there will only ever be one wifi device'', match rules can be written so that they only match one device. Then this property can be used to give that device a more specific/desirable/nicer name than the default from udev\u2019s ifnames. Any additional device that satisfies the match rules will then fail to get renamed and keep the original kernel name (and dmesg will show an error)." + }, + "wakeonlan": { + "type": "boolean", + "description": "Enable wake on LAN. Off by default." + } + } + }, + "mapping_bond": { + "allOf": [ + { + "$ref": "#/$defs/mapping" + } + ], + "properties": { + "interfaces": { + "type": "array", + "description": "All devices matching this ID list will be added to the bond.", + "items": { + "type": "string" + } + }, + "parameters": { + "type": "object", + "description": "Customisation parameters for special bonding options. Time values are specified in seconds unless otherwise specified.", + "properties": { + "mode": { + "type": "string", + "description": "Set the bonding mode used for the interfaces. The default is balance-rr (round robin).", + "enum": [ + "balance-rr", + "active-backup", + "balance-xor", + "broadcast", + "802.3ad", + "balance-tlb", + "balance-alb" + ] + }, + "lacp-rate": { + "type": "string", + "description": "Set the rate at which LACPDUs are transmitted. This is only useful in 802.3ad mode. Possible values are slow (30 seconds, default), and fast (every second).", + "enum": [ + "fast", + "slow" + ] + }, + "mii-monitor-interval": { + "type": "string", + "description": "Specifies the interval for MII monitoring (verifying if an interface of the bond has carrier). 
The default is 0; which disables MII monitoring." + }, + "min-links": { + "type": "integer", + "description": "The minimum number of links up in a bond to consider the bond interface to be up." + }, + "transmit-hash-policy": { + "type": "string", + "description": "Specifies the transmit hash policy for the selection of slaves. This is only useful in balance-xor, 802.3ad and balance-tlb modes.", + "enum": [ + "layer2", + "layer3+4", + "layer2+3", + "encap2+3", + "encap3+4" + ] + }, + "ad-select": { + "type": "string", + "description": "Set the aggregation selection mode. This option is only used in 802.3ad mode.", + "enum": [ + "stable", + "bandwidth", + "count" + ] + }, + "all-slaves-active": { + "type": "boolean", + "description": "If the bond should drop duplicate frames received on inactive ports, set this option to false. If they should be delivered, set this option to true. The default value is false, and is the desirable behaviour in most situations." + }, + "arp-interval": { + "type": "integer", + "description": "Set the interval value for how frequently ARP link monitoring should happen. The default value is 0, which disables ARP monitoring." + }, + "arp-ip-targets": { + "type": "array", + "description": "IPs of other hosts on the link which should be sent ARP requests in order to validate that a slave is up. This option is only used when arp-interval is set to a value other than 0. At least one IP address must be given for ARP link monitoring to function. Only IPv4 addresses are supported. You can specify up to 16 IP addresses. The default value is an empty list.", + "items": { + "type": "string" + } + }, + "arp-validate": { + "type": "string", + "description": "Configure how ARP replies are to be validated when using ARP link monitoring.", + "enum": [ + "none", + "active", + "backup", + "all" + ] + }, + "arp-all-targets": { + "type": "string", + "description": "Specify whether to use any ARP IP target being up as sufficient for a slave to be considered up; or if all the targets must be up. This is only used for active-backup mode when arp-validate is enabled.", + "enum": [ + "any", + "all" + ] + }, + "up-delay": { + "type": "integer", + "description": "Specify the delay before enabling a link once the link is physically up. The default value is 0." + }, + "down-delay": { + "type": "integer", + "description": "Specify the delay before enabling a link once the link has been lost. The default value is 0." + }, + "fail-over-mac-policy": { + "type": "string", + "description": "Set whether to set all slaves to the same MAC address when adding them to the bond, or how else the system should handle MAC addresses.", + "enum": [ + "none", + "active", + "follow" + ] + }, + "gratuitous-arp": { + "type": "integer", + "description": "Specify how many ARP packets to send after failover. Once a link is up on a new slave, a notification is sent and possibly repeated if this value is set to a number greater than 1. The default value is 1 and valid values are between 1 and 255. This only affects active-backup mode." + }, + "packets-per-slave": { + "type": "integer", + "description": "In balance-rr mode, specifies the number of packets to transmit on a slave before switching to the next. When this value is set to 0, slaves are chosen at random. Allowable values are between 0 and 65535. The default value is 1. This setting is only used in balance-rr mode." + }, + "primary-reselect-policy": { + "type": "string", + "description": "Set the reselection policy for the primary slave. 
On failure of the active slave, the system will use this policy to decide how the new active slave will be chosen and how recovery will be handled.", + "enum": [ + "always", + "better", + "failure" + ] + }, + "learn-packet-interval": { + "type": "string", + "description": "Specify the interval between sending Learning packets to each slave. The value range is between 1 and 0x7fffffff. The default value is 1. This option only affects balance-tlb and balance-alb modes." + } + } + } + } + }, + "mapping_bridge": { + "allOf": [ + { + "$ref": "#/$defs/mapping" + } + ], + "properties": { + "interfaces": { + "type": "array", + "description": "All devices matching this ID list will be added to the bridge.", + "items": { + "type": "string" + } + }, + "parameters": { + "type": "object", + "description": "Customisation parameters for special bridging options. Time values are specified in seconds unless otherwise stated.", + "properties": { + "ageing-time": { + "type": "integer", + "description": "Set the period of time to keep a MAC address in the forwarding database after a packet is received." + }, + "priority": { + "type": "integer", + "description": "Set the priority value for the bridge. This value should be a number between 0 and 65535. Lower values mean higher priority. The bridge with the higher priority will be elected as the root bridge." + }, + "forward-delay": { + "type": "integer", + "description": "Specify the period of time the bridge will remain in Listening and Learning states before getting to the Forwarding state. This value should be set in seconds for the systemd backend, and in milliseconds for the NetworkManager backend." + }, + "hello-time": { + "type": "integer", + "description": "Specify the interval between two hello packets being sent out from the root and designated bridges. Hello packets communicate information about the network topology." + }, + "max-age": { + "type": "integer", + "description": "Set the maximum age of a hello packet. If the last hello packet is older than that value, the bridge will attempt to become the root bridge." + }, + "path-cost": { + "type": "integer", + "description": "Set the cost of a path on the bridge. Faster interfaces should have a lower cost. This allows a finer control on the network topology so that the fastest paths are available whenever possible." + }, + "stp": { + "type": "boolean", + "description": "Define whether the bridge should use Spanning Tree Protocol. The default value is \u201ctrue\u201d, which means that Spanning Tree should be used." + } + } + } + } + }, + "mapping_vlan": { + "allOf": [ + { + "$ref": "#/$defs/mapping" + } + ], + "properties": { + "id": { + "type": "integer", + "description": "VLAN ID, a number between 0 and 4094." + }, + "link": { + "type": "string", + "description": "ID of the underlying device definition on which this VLAN gets created." 
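(Editor's aside: since schema-network-config-v2.json is a new file, a quick self-check of the definitions above may be worthwhile. The sketch below validates a netplan-style mapping against it with ``jsonschema``; the MAC address, interface IDs, and addresses are placeholders.)

```python
import json

import jsonschema

with open("cloudinit/config/schemas/schema-network-config-v2.json") as f:
    schema_v2 = json.load(f)

# Placeholder v2 config: one matched ethernet renamed to eth0 with
# DHCPv4, plus a VLAN on top of it with a static address.
net_v2 = {
    "version": 2,
    "ethernets": {
        "id0": {
            "match": {"macaddress": "00:11:22:33:44:55"},
            "set-name": "eth0",
            "dhcp4": True,
        }
    },
    "vlans": {
        "vlan15": {"id": 15, "link": "id0", "addresses": ["10.3.99.5/24"]}
    },
}

# The schema's oneOf accepts either this bare form or the same mapping
# nested under a top-level "network" key.
jsonschema.validate(net_v2, schema_v2)
```
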
+ } + } + }, + "network_config_version2": { + "type": "object", + "additionalProperties": false, + "required": [ + "version" + ], + "properties": { + "version": { + "type": "integer", + "enum": [ + 2 + ] + }, + "renderer": { + "$ref": "#/$defs/renderer" + }, + "ethernets": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/mapping_physical" + } + }, + "bonds": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/mapping_bond" + } + }, + "bridges": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/mapping_bridge" + } + }, + "vlans": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/mapping_vlan" + } + } + } + } + }, + "oneOf": [ + { + "$ref": "#/$defs/network_config_version2" + }, + { + "required": [ + "network" + ], + "properties": { + "network": { + "$ref": "#/$defs/network_config_version2" + } + }, + "additionalProperties": false + } + ] +} diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 8c193f2f5..4557d4320 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -50,6 +50,7 @@ from cloudinit.distros.parsers import hosts from cloudinit.features import ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES from cloudinit.net import activators, dhcp, renderers +from cloudinit.net.netops import NetOps from cloudinit.net.network_state import parse_net_config_data from cloudinit.net.renderer import Renderer @@ -68,6 +69,7 @@ "redhat": [ "almalinux", "amazon", + "azurelinux", "centos", "cloudlinux", "eurolinux", @@ -141,7 +143,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): # This is used by self.shutdown_command(), and can be overridden in # subclasses shutdown_options_map = {"halt": "-H", "poweroff": "-P", "reboot": "-r"} - net_ops = iproute2.Iproute2 + net_ops: Type[NetOps] = iproute2.Iproute2 _ci_pkl_version = 1 prefer_fqdn = False @@ -395,7 +397,7 @@ def package_command(self, command, args=None, pkgs=None): # managers. raise NotImplementedError() - def update_package_sources(self): + def update_package_sources(self, *, force=False): for manager in self.package_managers: if not manager.available(): LOG.debug( @@ -404,7 +406,7 @@ def update_package_sources(self): ) continue try: - manager.update_package_sources() + manager.update_package_sources(force=force) except Exception as e: LOG.error( "Failed to update package using %s: %s", manager.name, e @@ -845,7 +847,7 @@ def create_user(self, name, **kwargs): util.deprecate( deprecated=f"The value of 'false' in user {name}'s " "'sudo' config", - deprecated_version="22.3", + deprecated_version="22.2", extra_message="Use 'null' instead.", ) @@ -920,8 +922,8 @@ def set_passwd(self, user, passwd, hashed=False): if hashed: # Need to use the short option name '-e' instead of '--encrypted' - # (which would be more descriptive) since SLES 11 doesn't know - # about long names. + # (which would be more descriptive) since Busybox and SLES 11 + # chpasswd don't know about long names. 
cmd.append("-e") try: @@ -1017,9 +1019,12 @@ def ensure_sudo_dir(self, path, sudo_base="/etc/sudoers"): # it actually exists as a directory sudoers_contents = "" base_exists = False + system_sudo_base = "/usr/etc/sudoers" if os.path.exists(sudo_base): sudoers_contents = util.load_text_file(sudo_base) base_exists = True + elif os.path.exists(system_sudo_base): + sudoers_contents = util.load_text_file(system_sudo_base) found_include = False for line in sudoers_contents.splitlines(): line = line.strip() @@ -1044,7 +1049,9 @@ def ensure_sudo_dir(self, path, sudo_base="/etc/sudoers"): "#includedir %s" % (path), "", ] - sudoers_contents = "\n".join(lines) + if sudoers_contents: + LOG.info("Using content from '%s'", system_sudo_base) + sudoers_contents += "\n".join(lines) util.write_file(sudo_base, sudoers_contents, 0o440) else: lines = [ @@ -1134,9 +1141,10 @@ def create_group(self, name, members=None): subp.subp(["usermod", "-a", "-G", name, member]) LOG.info("Added user '%s' to group '%s'", member, name) - def shutdown_command(self, *, mode, delay, message): + @classmethod + def shutdown_command(cls, *, mode, delay, message): # called from cc_power_state_change.load_power_state - command = ["shutdown", self.shutdown_options_map[mode]] + command = ["shutdown", cls.shutdown_options_map[mode]] try: if delay != "now": delay = "+%d" % int(delay) @@ -1344,6 +1352,60 @@ def eject_media(device: str) -> None: ) subp.subp(cmd) + @staticmethod + def get_mapped_device(blockdev: str) -> Optional[str]: + """Returns underlying block device for a mapped device. + + If it is mapped, blockdev will usually take the form of + /dev/mapper/some_name + + If blockdev is a symlink pointing to a /dev/dm-* device, return + the device pointed to. Otherwise, return None. + """ + realpath = os.path.realpath(blockdev) + if realpath.startswith("/dev/dm-"): + LOG.debug( + "%s is a mapped device pointing to %s", blockdev, realpath + ) + return realpath + return None + + @staticmethod + def device_part_info(devpath: str) -> tuple: + """convert an entry in /dev/ to parent disk and partition number + + input of /dev/vdb or /dev/disk/by-label/foo + rpath is hopefully a real-ish path in /dev (vda, sdb..) 
+ """ + rpath = os.path.realpath(devpath) + + bname = os.path.basename(rpath) + syspath = "/sys/class/block/%s" % bname + + if not os.path.exists(syspath): + raise ValueError("%s had no syspath (%s)" % (devpath, syspath)) + + ptpath = os.path.join(syspath, "partition") + if not os.path.exists(ptpath): + raise TypeError("%s not a partition" % devpath) + + ptnum = util.load_text_file(ptpath).rstrip() + + # for a partition, real syspath is something like: + # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1 + rsyspath = os.path.realpath(syspath) + disksyspath = os.path.dirname(rsyspath) + + diskmajmin = util.load_text_file( + os.path.join(disksyspath, "dev") + ).rstrip() + diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin) + + # diskdevpath has something like 253:0 + # and udev has put links in /dev/block/253:0 to the device + # name in /dev/ + return diskdevpath, ptnum + def _apply_hostname_transformations_to_url(url: str, transformations: list): """ diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py index e4120740d..a1d0d900c 100644 --- a/cloudinit/distros/alpine.py +++ b/cloudinit/distros/alpine.py @@ -8,10 +8,14 @@ import logging import os +import re +import stat +from datetime import datetime +from typing import Any, Dict, Optional from cloudinit import distros, helpers, subp, util from cloudinit.distros.parsers.hostname import HostnameConf -from cloudinit.settings import PER_INSTANCE +from cloudinit.settings import PER_ALWAYS, PER_INSTANCE LOG = logging.getLogger(__name__) @@ -30,6 +34,7 @@ class Distro(distros.Distro): keymap_path = "/usr/share/bkeymaps/" locale_conf_fn = "/etc/profile.d/50-cloud-init-locale.sh" network_conf_fn = "/etc/network/interfaces" + shadow_fn = "/etc/shadow" renderer_configs = { "eni": {"eni_path": network_conf_fn, "eni_header": NETWORK_FILE_HEADER} } @@ -184,12 +189,12 @@ def package_command(self, command, args=None, pkgs=None): # Allow the output of this to flow outwards (ie not be captured) subp.subp(cmd, capture=False) - def update_package_sources(self): + def update_package_sources(self, *, force=False): self._runner.run( "update-sources", self.package_command, ["update"], - freq=PER_INSTANCE, + freq=PER_ALWAYS if force else PER_INSTANCE, ) @property @@ -200,6 +205,339 @@ def preferred_ntp_clients(self): return self._preferred_ntp_clients + def add_user(self, name, **kwargs): + """ + Add a user to the system using standard tools + + On Alpine this may use either 'useradd' or 'adduser' depending + on whether the 'shadow' package is installed. + """ + if util.is_user(name): + LOG.info("User %s already exists, skipping.", name) + return + + if "selinux_user" in kwargs: + LOG.warning("Ignoring selinux_user parameter for Alpine Linux") + del kwargs["selinux_user"] + + # If 'useradd' is available then use the generic + # add_user function from __init__.py instead. + if subp.which("useradd"): + return super().add_user(name, **kwargs) + + create_groups = kwargs.pop("create_groups", True) + + adduser_cmd = ["adduser", "-D"] + + # Since we are creating users, we want to carefully validate + # the inputs. If something goes wrong, we can end up with a + # system that nobody can login to. 
+ adduser_opts = { + "gecos": "-g", + "homedir": "-h", + "primary_group": "-G", + "shell": "-s", + "uid": "-u", + } + + adduser_flags = {"system": "-S"} + + # support kwargs having groups=[list] or groups="g1,g2" + groups = kwargs.get("groups") + if groups: + if isinstance(groups, str): + groups = groups.split(",") + elif isinstance(groups, dict): + util.deprecate( + deprecated=f"The user {name} has a 'groups' config value " + "of type dict", + deprecated_version="22.3", + extra_message="Use a comma-delimited string or " + "array instead: group1,group2.", + ) + + # remove any white spaces in group names, most likely + # that came in as a string like: groups: group1, group2 + groups = [g.strip() for g in groups] + + # kwargs.items loop below wants a comma delimited string + # that can go right through to the command. + kwargs["groups"] = ",".join(groups) + + if kwargs.get("primary_group"): + groups.append(kwargs["primary_group"]) + + if create_groups and groups: + for group in groups: + if not util.is_group(group): + self.create_group(group) + LOG.debug("created group '%s' for user '%s'", group, name) + if "uid" in kwargs: + kwargs["uid"] = str(kwargs["uid"]) + + unsupported_busybox_values: Dict[str, Any] = { + "groups": [], + "expiredate": None, + "inactive": None, + "passwd": None, + } + + # Check the values and create the command + for key, val in sorted(kwargs.items()): + if key in adduser_opts and val and isinstance(val, str): + adduser_cmd.extend([adduser_opts[key], val]) + elif ( + key in unsupported_busybox_values + and val + and isinstance(val, str) + ): + # Busybox's 'adduser' does not support specifying these + # options so store them for use via alternative means. + if key == "groups": + unsupported_busybox_values[key] = val.split(",") + else: + unsupported_busybox_values[key] = val + elif key in adduser_flags and val: + adduser_cmd.append(adduser_flags[key]) + + # Don't create the home directory if directed so + # or if the user is a system user + if kwargs.get("no_create_home") or kwargs.get("system"): + adduser_cmd.append("-H") + + # Busybox's 'adduser' puts username at end of command + adduser_cmd.append(name) + + # Run the command + LOG.debug("Adding user %s", name) + try: + subp.subp(adduser_cmd) + except subp.ProcessExecutionError as e: + LOG.warning("Failed to create user %s", name) + raise e + + # Process remaining options that Busybox's 'adduser' does not support + + # Separately add user to each additional group as Busybox's + # 'adduser' does not support specifying additional groups. + for addn_group in unsupported_busybox_values[ + "groups" + ]: # pylint: disable=E1133 + LOG.debug("Adding user to group %s", addn_group) + try: + subp.subp(["addgroup", name, addn_group]) + except subp.ProcessExecutionError as e: + util.logexc( + LOG, "Failed to add user %s to group %s", name, addn_group + ) + raise e + + if unsupported_busybox_values["passwd"]: + # Separately set password as Busybox's 'adduser' does + # not support passing password as CLI option. + super().set_passwd( + name, unsupported_busybox_values["passwd"], hashed=True + ) + + # Busybox's 'adduser' is hardcoded to always set the following field + # values (numbered from "0") in /etc/shadow unlike 'useradd': + # + # Field Value set + # + # 3 minimum password age 0 (no min age) + # 4 maximum password age 99999 (days) + # 5 warning period 7 (warn days before max age) + # + # so modify these fields to be empty. 
+ # + # Also set expiredate (field '7') and/or inactive (field '6') + # values directly in /etc/shadow file as Busybox's 'adduser' + # does not support passing these as CLI options. + + expiredate = unsupported_busybox_values["expiredate"] + inactive = unsupported_busybox_values["inactive"] + + shadow_contents = None + shadow_file = self.shadow_fn + try: + shadow_contents = util.load_text_file(shadow_file) + except FileNotFoundError as e: + LOG.warning("Failed to read %s file, file not found", shadow_file) + raise e + + # Find the line in /etc/shadow for the user + original_line = None + for line in shadow_contents.splitlines(): + new_line_parts = line.split(":") + if new_line_parts[0] == name: + original_line = line + break + + if original_line: + # Modify field(s) in copy of user's shadow file entry + update_type = "" + + # Minimum password age + new_line_parts[3] = "" + # Maximum password age + new_line_parts[4] = "" + # Password warning period + new_line_parts[5] = "" + update_type = "password aging" + + if expiredate is not None: + # Convert date into number of days since 1st Jan 1970 + days = ( + datetime.fromisoformat(expiredate) + - datetime.fromisoformat("1970-01-01") + ).days + new_line_parts[7] = str(days) + if update_type != "": + update_type = update_type + " & " + update_type = update_type + "acct expiration date" + if inactive is not None: + new_line_parts[6] = inactive + if update_type != "": + update_type = update_type + " & " + update_type = update_type + "inactivity period" + + # Replace existing line for user with modified line + shadow_contents = shadow_contents.replace( + original_line, ":".join(new_line_parts) + ) + LOG.debug("Updating %s for user %s", update_type, name) + try: + util.write_file( + shadow_file, shadow_contents, omode="w", preserve_mode=True + ) + except IOError as e: + util.logexc(LOG, "Failed to update %s file", shadow_file) + raise e + else: + util.logexc( + LOG, "Failed to update %s for user %s", shadow_file, name + ) + + def lock_passwd(self, name): + """ + Lock the password of a user, i.e., disable password logins + """ + + # Check whether Shadow's or Busybox's version of 'passwd'. + # If Shadow's 'passwd' is available then use the generic + # lock_passwd function from __init__.py instead. + if not os.path.islink( + "/usr/bin/passwd" + ) or "bbsuid" not in os.readlink("/usr/bin/passwd"): + return super().lock_passwd(name) + + cmd = ["passwd", "-l", name] + # Busybox's 'passwd', unlike Shadow's 'passwd', errors + # if password is already locked: + # + # "passwd: password for user2 is already locked" + # + # with exit code 1 + try: + (_out, err) = subp.subp(cmd, rcs=[0, 1]) + if re.search(r"is already locked", err): + return True + except subp.ProcessExecutionError as e: + util.logexc(LOG, "Failed to disable password for user %s", name) + raise e + + def expire_passwd(self, user): + # Check whether Shadow's or Busybox's version of 'passwd'. + # If Shadow's 'passwd' is available then use the generic + # expire_passwd function from __init__.py instead. + if not os.path.islink( + "/usr/bin/passwd" + ) or "bbsuid" not in os.readlink("/usr/bin/passwd"): + return super().expire_passwd(user) + + # Busybox's 'passwd' does not provide an expire option + # so have to manipulate the shadow file directly. 
+ shadow_contents = None + shadow_file = self.shadow_fn + try: + shadow_contents = util.load_text_file(shadow_file) + except FileNotFoundError as e: + LOG.warning("Failed to read %s file, file not found", shadow_file) + raise e + + # Find the line in /etc/shadow for the user + original_line = None + for line in shadow_contents.splitlines(): + new_line_parts = line.split(":") + if new_line_parts[0] == user: + LOG.debug("Found /etc/shadow line matching user %s", user) + original_line = line + break + + if original_line: + # Replace existing line for user with modified line + # + # Field '2' (numbered from '0') in /etc/shadow + # is the "date of last password change". + if new_line_parts[2] != "0": + # Busybox's 'adduser' always expires password so only + # need to expire it now if this is not a new user. + new_line_parts[2] = "0" + shadow_contents = shadow_contents.replace( + original_line, ":".join(new_line_parts), 1 + ) + + LOG.debug("Expiring password for user %s", user) + try: + util.write_file( + shadow_file, + shadow_contents, + omode="w", + preserve_mode=True, + ) + except IOError as e: + util.logexc(LOG, "Failed to update %s file", shadow_file) + raise e + else: + LOG.debug("Password for user %s is already expired", user) + else: + util.logexc(LOG, "Failed to set 'expire' for %s", user) + + def create_group(self, name, members=None): + # If 'groupadd' is available then use the generic + # create_group function from __init__.py instead. + if subp.which("groupadd"): + return super().create_group(name, members) + + group_add_cmd = ["addgroup", name] + if not members: + members = [] + + # Check if group exists, and then add if it doesn't + if util.is_group(name): + LOG.warning("Skipping creation of existing group '%s'", name) + else: + try: + subp.subp(group_add_cmd) + LOG.info("Created new group %s", name) + except subp.ProcessExecutionError: + util.logexc(LOG, "Failed to create group %s", name) + + # Add members to the group, if so defined + if len(members) > 0: + for member in members: + if not util.is_user(member): + LOG.warning( + "Unable to add group member '%s' to group '%s'" + "; user does not exist.", + member, + name, + ) + continue + + subp.subp(["addgroup", member, name]) + LOG.info("Added user '%s' to group '%s'", member, name) + def shutdown_command(self, mode="poweroff", delay="now", message=None): # called from cc_power_state_change.load_power_state # Alpine has halt/poweroff/reboot, with the following specifics: @@ -260,3 +598,46 @@ def manage_service( } cmd = list(cmds[action]) return subp.subp(cmd, capture=True, rcs=rcs) + + @staticmethod + def get_mapped_device(blockdev: str) -> Optional[str]: + """Returns underlying block device for a mapped device. + + If it is mapped, blockdev will usually take the form of + /dev/mapper/some_name + + If blockdev is a symlink pointing to a /dev/dm-* device, return + the device pointed to. Otherwise, return None. + """ + realpath = os.path.realpath(blockdev) + + if blockdev.startswith("/dev/mapper"): + # For Alpine systems a /dev/mapper/ entry is *not* a + # symlink to the related /dev/dm-X block device, + # rather it is a block device itself. 
+ + # Get the major/minor of the /dev/mapper block device + major = os.major(os.stat(blockdev).st_rdev) + minor = os.minor(os.stat(blockdev).st_rdev) + + # Find the /dev/dm-X device with the same major/minor + with os.scandir("/dev/") as it: + for deventry in it: + if deventry.name.startswith("dm-"): + res = os.lstat(deventry.path) + if stat.S_ISBLK(res.st_mode): + if ( + os.major(os.stat(deventry.path).st_rdev) + == major + and os.minor(os.stat(deventry.path).st_rdev) + == minor + ): + realpath = os.path.realpath(deventry.path) + break + + if realpath.startswith("/dev/dm-"): + LOG.debug( + "%s is a mapped device pointing to %s", blockdev, realpath + ) + return realpath + return None diff --git a/cloudinit/distros/amazon.py b/cloudinit/distros/amazon.py index 52f47097d..bdb2c08dc 100644 --- a/cloudinit/distros/amazon.py +++ b/cloudinit/distros/amazon.py @@ -20,5 +20,5 @@ class Distro(rhel.Distro): dhclient_lease_directory = "/var/lib/dhcp" dhclient_lease_file_regex = r"dhclient-[\w-]+\.lease" - def update_package_sources(self): + def update_package_sources(self, *, force=False): return None diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 521c014d7..f09d34ca5 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -10,7 +10,7 @@ from cloudinit.distros import PackageList from cloudinit.distros.parsers.hostname import HostnameConf from cloudinit.net.netplan import CLOUDINIT_NETPLAN_FILE -from cloudinit.settings import PER_INSTANCE +from cloudinit.settings import PER_ALWAYS, PER_INSTANCE LOG = logging.getLogger(__name__) @@ -141,7 +141,10 @@ def package_command(self, command, args=None, pkgs=None): # Allow the output of this to flow outwards (ie not be captured) subp.subp(cmd, capture=False) - def update_package_sources(self): + def update_package_sources(self, *, force=False): self._runner.run( - "update-sources", self.package_command, ["-y"], freq=PER_INSTANCE + "update-sources", + self.package_command, + ["-y"], + freq=PER_ALWAYS if force else PER_INSTANCE, ) diff --git a/cloudinit/distros/azurelinux.py b/cloudinit/distros/azurelinux.py new file mode 100644 index 000000000..5098a4594 --- /dev/null +++ b/cloudinit/distros/azurelinux.py @@ -0,0 +1,72 @@ +# Copyright (C) 2024 Microsoft Corporation +# +# Author: Dan Streetman +# +# This file is part of cloud-init. See LICENSE file for license information. + +import logging + +from cloudinit import subp, util +from cloudinit.distros import rhel +from cloudinit.net.netplan import CLOUDINIT_NETPLAN_FILE + +LOG = logging.getLogger(__name__) + +NETWORK_FILE_HEADER = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. 
To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled} +""" + + +class Distro(rhel.Distro): + def __init__(self, name, cfg, paths): + super().__init__(name, cfg, paths) + self.osfamily = "azurelinux" + + self.network_conf_dir = "/etc/systemd/network/" + self.systemd_locale_conf_fn = "/etc/locale.conf" + self.resolve_conf_fn = "/etc/systemd/resolved.conf" + self.init_cmd = ["systemctl"] + + self.network_conf_fn = {"netplan": CLOUDINIT_NETPLAN_FILE} + self.renderer_configs = { + "networkd": { + "resolv_conf_fn": self.resolve_conf_fn, + "network_conf_dir": self.network_conf_dir, + }, + "netplan": { + "netplan_path": self.network_conf_fn["netplan"], + "netplan_header": NETWORK_FILE_HEADER, + "postcmds": "True", + }, + } + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + if subp.which("dnf"): + LOG.debug("Using DNF for package management") + cmd = ["dnf"] + else: + LOG.debug("Using TDNF for package management") + cmd = ["tdnf"] + # Determines whether or not dnf/tdnf prompts for confirmation + # of critical actions. We don't want to prompt... + cmd.append("-y") + + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): + cmd.extend(args) + + cmd.append(command) + + pkglist = util.expand_package_list("%s-%s", pkgs) + cmd.extend(pkglist) + + # Allow the output of this to flow outwards (ie not be captured) + subp.subp(cmd, capture=False) diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py index 995a1ba2a..25b374ba3 100644 --- a/cloudinit/distros/bsd.py +++ b/cloudinit/distros/bsd.py @@ -1,5 +1,6 @@ import logging import platform +import re from typing import List, Optional import cloudinit.net.netops.bsd_netops as bsd_netops @@ -27,7 +28,7 @@ class BSD(distros.Distro): # There is no update/upgrade on OpenBSD pkg_cmd_update_prefix: Optional[List[str]] = None pkg_cmd_upgrade_prefix: Optional[List[str]] = None - net_ops = bsd_netops.BsdNetOps # type: ignore + net_ops = bsd_netops.BsdNetOps def __init__(self, name, cfg, paths): super().__init__(name, cfg, paths) @@ -120,6 +121,8 @@ def package_command(self, command, args=None, pkgs=None): if not self.pkg_cmd_upgrade_prefix: return cmd = self.pkg_cmd_upgrade_prefix + else: + cmd = [] if args and isinstance(args, str): cmd.append(args) @@ -149,3 +152,24 @@ def get_proc_ppid(pid): """ ppid, _ = subp.subp(["ps", "-oppid=", "-p", str(pid)]) return int(ppid.strip()) + + @staticmethod + def get_mapped_device(blockdev: str) -> Optional[str]: + return None + + @staticmethod + def device_part_info(devpath: str) -> tuple: + # FreeBSD doesn't know of sysfs so just get everything we need from + # the device, like /dev/vtbd0p2. 
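    # A worked example of what device_part_info() returns (sketch; assumes
    # util.find_freebsd_part() hands back the bare partition name):
    #
    #   device_part_info("/dev/vtbd0p2")  ->  ("/dev/vtbd0", "2")    # GPT
    #   device_part_info("/dev/ada0s1a")  ->  ("/dev/ada0", "1a")    # MBR slice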
+        part = util.find_freebsd_part(devpath)
+        if part:
+            fpart = f"/dev/{part}"
+            # Handle both GPT partitions and MBR slices with partitions
+            m = re.search(
+                r"^(?P<dev>/dev/.+)[sp](?P<part_slice>\d+[a-z]*)$", fpart
+            )
+            if m:
+                return m["dev"], m["part_slice"]
+
+        # the input is bogus and we need to bail
+        raise ValueError(f"Invalid value for devpath: '{devpath}'")
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 103b8ed58..cef6e1fb8 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -27,7 +27,6 @@
 # network: {config: disabled}
 """
-NETWORK_CONF_FN = "/etc/network/interfaces.d/50-cloud-init"
 LOCALE_CONF_FN = "/etc/default/locale"
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 53b4ac4eb..ba35b2e61 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -12,7 +12,7 @@
 import cloudinit.distros.bsd
 from cloudinit import subp, util
 from cloudinit.distros.networking import FreeBSDNetworking
-from cloudinit.settings import PER_INSTANCE
+from cloudinit.settings import PER_ALWAYS, PER_INSTANCE
 
 LOG = logging.getLogger(__name__)
 
@@ -121,14 +121,10 @@ def add_user(self, name, **kwargs):
             pw_useradd_cmd.append("-d/nonexistent")
             log_pw_useradd_cmd.append("-d/nonexistent")
         else:
-            pw_useradd_cmd.append(
-                "-d{home_dir}/{name}".format(home_dir=self.home_dir, name=name)
-            )
+            homedir = kwargs.get("homedir", f"{self.home_dir}/{name}")
+            pw_useradd_cmd.append("-d" + homedir)
             pw_useradd_cmd.append("-m")
-            log_pw_useradd_cmd.append(
-                "-d{home_dir}/{name}".format(home_dir=self.home_dir, name=name)
-            )
-
+            log_pw_useradd_cmd.append("-d" + homedir)
             log_pw_useradd_cmd.append("-m")
 
         # Run the command
@@ -207,12 +203,12 @@ def _get_pkg_cmd_environ(self):
         operations"""
         return {"ASSUME_ALWAYS_YES": "YES"}
 
-    def update_package_sources(self):
+    def update_package_sources(self, *, force=False):
         self._runner.run(
             "update-sources",
             self.package_command,
             ["update"],
-            freq=PER_INSTANCE,
+            freq=PER_ALWAYS if force else PER_INSTANCE,
         )
 
     @staticmethod
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 37ac7b68c..5ab41bbd9 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -11,7 +11,7 @@
 from cloudinit import distros, helpers, subp, util
 from cloudinit.distros import PackageList
 from cloudinit.distros.parsers.hostname import HostnameConf
-from cloudinit.settings import PER_INSTANCE
+from cloudinit.settings import PER_ALWAYS, PER_INSTANCE
 
 LOG = logging.getLogger(__name__)
 
@@ -132,10 +132,10 @@ def package_command(self, command, args=None, pkgs=None):
         # Allow the output of this to flow outwards (ie not be captured)
         subp.subp(cmd, capture=False)
 
-    def update_package_sources(self):
+    def update_package_sources(self, *, force=False):
         self._runner.run(
             "update-sources",
             self.package_command,
             ["--sync"],
-            freq=PER_INSTANCE,
+            freq=PER_ALWAYS if force else PER_INSTANCE,
         )
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
index e8b9bcd5b..da8c19040 100644
--- a/cloudinit/distros/netbsd.py
+++ b/cloudinit/distros/netbsd.py
@@ -12,7 +12,7 @@
 from cloudinit import subp, util
 
 try:
-    import crypt
+    import crypt  # pylint: disable=W4901
 
     salt = crypt.METHOD_BLOWFISH  # pylint: disable=E1101
     blowfish_hash: Any = functools.partial(
@@ -153,7 +153,7 @@ def _get_pkg_cmd_environ(self):
             )
         }
 
-    def update_package_sources(self):
+    def update_package_sources(self, *, force=False):
         pass
diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py
index e63d2177d..af9584bdf
100644 --- a/cloudinit/distros/networking.py +++ b/cloudinit/distros/networking.py @@ -4,6 +4,7 @@ from cloudinit import net, subp, util from cloudinit.distros.parsers import ifconfig +from cloudinit.net.netops.iproute2 import Iproute2 LOG = logging.getLogger(__name__) @@ -299,5 +300,5 @@ def try_set_link_up(self, devname: DeviceName) -> bool: """Try setting the link to up explicitly and return if it is up. Not guaranteed to bring the interface up. The caller is expected to add wait times before retrying.""" - subp.subp(["ip", "link", "set", devname, "up"]) + Iproute2.link_up(devname) return self.is_up(devname) diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index 8cf41b5d6..91620a98e 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -15,7 +15,7 @@ from cloudinit.distros import PackageList from cloudinit.distros import rhel_util as rhutil from cloudinit.distros.parsers.hostname import HostnameConf -from cloudinit.settings import PER_INSTANCE +from cloudinit.settings import PER_ALWAYS, PER_INSTANCE LOG = logging.getLogger(__name__) @@ -26,8 +26,6 @@ class Distro(distros.Distro): init_cmd = ["service"] locale_conf_fn = "/etc/sysconfig/language" network_conf_fn = "/etc/sysconfig/network/config" - network_script_tpl = "/etc/sysconfig/network/ifcfg-%s" - route_conf_tpl = "/etc/sysconfig/network/ifroute-%s" systemd_hostname_conf_fn = "/etc/hostname" systemd_locale_conf_fn = "/etc/locale.conf" tz_local_fn = "/etc/localtime" @@ -150,12 +148,12 @@ def set_timezone(self, tz): # This ensures that the correct tz will be used for the system util.copy(tz_file, self.tz_local_fn) - def update_package_sources(self): + def update_package_sources(self, *, force=False): self._runner.run( "update-sources", self.package_command, ["refresh"], - freq=PER_INSTANCE, + freq=PER_ALWAYS if force else PER_INSTANCE, ) def _read_hostname(self, filename, default=None): diff --git a/cloudinit/distros/package_management/apt.py b/cloudinit/distros/package_management/apt.py index 16f3b3dca..e8b8a4590 100644 --- a/cloudinit/distros/package_management/apt.py +++ b/cloudinit/distros/package_management/apt.py @@ -12,7 +12,7 @@ PackageManager, UninstalledPackages, ) -from cloudinit.settings import PER_INSTANCE +from cloudinit.settings import PER_ALWAYS, PER_INSTANCE LOG = logging.getLogger(__name__) @@ -108,12 +108,12 @@ def from_config(cls, runner: helpers.Runners, cfg: Mapping) -> "Apt": def available(self) -> bool: return bool(subp.which(self.apt_get_command[0])) - def update_package_sources(self): + def update_package_sources(self, *, force=False): self.runner.run( "update-sources", self.run_package_command, ["update"], - freq=PER_INSTANCE, + freq=PER_ALWAYS if force else PER_INSTANCE, ) @functools.lru_cache(maxsize=1) @@ -182,14 +182,12 @@ def run_package_command(self, command, args=None, pkgs=None): }, ) - def _apt_lock_available(self, lock_files=None): + def _apt_lock_available(self): """Determines if another process holds any apt locks. If all locks are clear, return True else False. 
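        Callers poll this check against a monotonic deadline before running
        apt, along the lines of (sketch of the pattern used by
        _wait_for_apt_command below):

            start_time = time.monotonic()
            while time.monotonic() - start_time < timeout:
                if not self._apt_lock_available():
                    time.sleep(1)
                    continue
                ...  # locks are free: run the apt command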
""" - if lock_files is None: - lock_files = APT_LOCK_FILES - for lock in lock_files: + for lock in APT_LOCK_FILES: if not os.path.exists(lock): # Only wait for lock files that already exist continue @@ -208,9 +206,9 @@ def _wait_for_apt_command( short_cmd: Name of command like "upgrade" or "install" subp_kwargs: kwargs to pass to subp """ - start_time = time.time() + start_time = time.monotonic() LOG.debug("Waiting for APT lock") - while time.time() - start_time < timeout: + while time.monotonic() - start_time < timeout: if not self._apt_lock_available(): time.sleep(1) continue diff --git a/cloudinit/distros/package_management/package_manager.py b/cloudinit/distros/package_management/package_manager.py index d92b11d5a..32c4cac24 100644 --- a/cloudinit/distros/package_management/package_manager.py +++ b/cloudinit/distros/package_management/package_manager.py @@ -22,7 +22,7 @@ def available(self) -> bool: """Return if package manager is installed on system.""" @abstractmethod - def update_package_sources(self): + def update_package_sources(self, *, force=False): ... @abstractmethod diff --git a/cloudinit/distros/package_management/snap.py b/cloudinit/distros/package_management/snap.py index a5fc2a89d..baab9e3ca 100644 --- a/cloudinit/distros/package_management/snap.py +++ b/cloudinit/distros/package_management/snap.py @@ -17,7 +17,7 @@ class Snap(PackageManager): def available(self) -> bool: return bool(subp.which("snap")) - def update_package_sources(self): + def update_package_sources(self, *, force=False): pass def install_packages(self, pkglist: Iterable) -> UninstalledPackages: diff --git a/cloudinit/distros/parsers/ifconfig.py b/cloudinit/distros/parsers/ifconfig.py index d671df1f5..1fc72d9e6 100644 --- a/cloudinit/distros/parsers/ifconfig.py +++ b/cloudinit/distros/parsers/ifconfig.py @@ -14,8 +14,6 @@ LOG = logging.getLogger(__name__) -MAC_RE = r"""([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}""" - class Ifstate: """ @@ -210,14 +208,12 @@ def _parse_inet(self, toks: list) -> Tuple[str, dict]: if "/" in toks[1]: ip = IPv4Interface(toks[1]) netmask = str(ip.netmask) - if "broadcast" in toks: - broadcast = toks[toks.index("broadcast") + 1] else: netmask = str(IPv4Address(int(toks[3], 0))) - if "broadcast" in toks: - broadcast = toks[toks.index("broadcast") + 1] ip = IPv4Interface("%s/%s" % (toks[1], netmask)) + if "broadcast" in toks: + broadcast = toks[toks.index("broadcast") + 1] prefixlen = ip.with_prefixlen.split("/")[1] return ( str(ip.ip), diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py index 2678be0a2..a9b1fc05d 100644 --- a/cloudinit/distros/photon.py +++ b/cloudinit/distros/photon.py @@ -7,7 +7,7 @@ from cloudinit import distros, helpers, net, subp, util from cloudinit.distros import PackageList from cloudinit.distros import rhel_util as rhutil -from cloudinit.settings import PER_INSTANCE +from cloudinit.settings import PER_ALWAYS, PER_INSTANCE LOG = logging.getLogger(__name__) @@ -156,10 +156,10 @@ def package_command(self, command, args=None, pkgs=None): if ret: LOG.error("Error while installing packages: %s", err) - def update_package_sources(self): + def update_package_sources(self, *, force=False): self._runner.run( "update-sources", self.package_command, ["makecache"], - freq=PER_INSTANCE, + freq=PER_ALWAYS if force else PER_INSTANCE, ) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 3254cbe01..0e2141507 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -13,7 +13,7 @@ from cloudinit import distros, helpers, 
subp, util
 from cloudinit.distros import PackageList, rhel_util
 from cloudinit.distros.parsers.hostname import HostnameConf
-from cloudinit.settings import PER_INSTANCE
+from cloudinit.settings import PER_ALWAYS, PER_INSTANCE
 
 LOG = logging.getLogger(__name__)
 
@@ -26,7 +26,6 @@ class Distro(distros.Distro):
     network_conf_fn = "/etc/sysconfig/network"
     hostname_conf_fn = "/etc/sysconfig/network"
     systemd_hostname_conf_fn = "/etc/hostname"
-    network_script_tpl = "/etc/sysconfig/network-scripts/ifcfg-%s"
     tz_local_fn = "/etc/localtime"
     usr_lib_exec = "/usr/libexec"
     # RHEL and derivatives use NetworkManager DHCP client by default.
@@ -209,10 +208,10 @@ def package_command(self, command, args=None, pkgs=None):
         # Allow the output of this to flow outwards (ie not be captured)
         subp.subp(cmd, capture=False)
 
-    def update_package_sources(self):
+    def update_package_sources(self, *, force=False):
         self._runner.run(
             "update-sources",
             self.package_command,
             ["makecache"],
-            freq=PER_INSTANCE,
+            freq=PER_ALWAYS if force else PER_INSTANCE,
         )
diff --git a/cloudinit/features.py b/cloudinit/features.py
index d16eb0a6e..63a4ca516 100644
--- a/cloudinit/features.py
+++ b/cloudinit/features.py
@@ -87,6 +87,35 @@
 to write /etc/apt/sources.list directly.
 """
 
+DEPRECATION_INFO_BOUNDARY = "20.1"
+"""
+DEPRECATION_INFO_BOUNDARY is used by distros to configure at which upstream
+version to start logging deprecations at a level higher than INFO.
+
+The default value "devel" tells cloud-init to log all deprecations higher
+than INFO. This value may be overridden by downstreams in order to maintain
+stable behavior across releases.
+
+Jsonschema key deprecations and inline logger deprecations include a
+deprecated_version key. When the variable below is set to a version,
+cloud-init will use that version as a demarcation point. Deprecations which
+are added after this version will be logged at an INFO level. Deprecations
+which predate this version will be logged at the higher DEPRECATED level.
+Downstreams that want stable log behavior may set the variable below to the
+first version released in their stable distro. By doing this, they can expect
+that newly added deprecations will be logged at INFO level. The implication of
+the different log levels is that logs at DEPRECATED level result in a return
+code of 2 from `cloud-init status`.
+
+format:
+
+    <value> ::= <devel> | <version>
+    <devel> ::= "devel"
+    <version> ::= <major> "." <minor> ["." <patch>]
+
+where <major>, <minor>, and <patch> are positive integers
+"""
+
 
 def get_features() -> Dict[str, bool]:
     """Return a dict of applicable features/overrides and their values."""
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index fca316821..8ade82185 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -8,132 +8,229 @@
 """gpg.py - Collection of gpg key related functions"""
 
 import logging
+import os
+import re
+import signal
 import time
+from tempfile import TemporaryDirectory
+from typing import Dict, Optional
 
 from cloudinit import subp
 
 LOG = logging.getLogger(__name__)
 
-GPG_LIST = [
-    "gpg",
-    "--with-fingerprint",
-    "--no-default-keyring",
-    "--list-keys",
-    "--keyring",
-]
-
-
-def export_armour(key):
-    """Export gpg key, armoured key gets returned"""
-    try:
-        (armour, _) = subp.subp(
-            ["gpg", "--export", "--armour", key], capture=True
-        )
-    except subp.ProcessExecutionError as error:
-        # debug, since it happens for any key not on the system initially
-        LOG.debug('Failed to export armoured key "%s": %s', key, error)
-        armour = None
-    return armour
-
-
-def dearmor(key):
-    """Dearmor gpg key, dearmored key gets returned
-
-    note: man gpg(1) makes no mention of an --armour spelling, only --armor
-    """
-    return subp.subp(["gpg", "--dearmor"], data=key, decode=False).stdout
-
-
-def list(key_file, human_output=False):
-    """List keys from a keyring with fingerprints. Default to a stable machine
-    parseable format.
-
-    @param key_file: a string containing a filepath to a key
-    @param human_output: return output intended for human parsing
-    """
-    cmd = []
-    cmd.extend(GPG_LIST)
-    if not human_output:
-        cmd.append("--with-colons")
-
-    cmd.append(key_file)
-    (stdout, stderr) = subp.subp(cmd, capture=True)
-    if stderr:
-        LOG.warning('Failed to export armoured key "%s": %s', key_file, stderr)
-    return stdout
-
-
-def recv_key(key, keyserver, retries=(1, 1)):
-    """Receive gpg key from the specified keyserver.
-
-    Retries are done by default because keyservers can be unreliable.
-    Additionally, there is no way to determine the difference between
-    a non-existent key and a failure. In both cases gpg (at least 2.2.4)
-    exits with status 2 and stderr: "keyserver receive failed: No data"
-    It is assumed that a key provided to cloud-init exists on the keyserver
-    so re-trying makes better sense than failing.
-
-    @param key: a string key fingerprint (as passed to gpg --recv-keys).
-    @param keyserver: the keyserver to request keys from.
-    @param retries: an iterable of sleep lengths for retries.
-    Use None to indicate no retries."""
-    LOG.debug("Importing key '%s' from keyserver '%s'", key, keyserver)
-    cmd = ["gpg", "--no-tty", "--keyserver=%s" % keyserver, "--recv-keys", key]
-    if retries is None:
-        retries = []
-    trynum = 0
-    error = None
-    sleeps = iter(retries)
-    while True:
-        trynum += 1
+HOME = "GNUPGHOME"
+
+
+class GPG:
+    def __init__(self):
+        self.gpg_started = False
+        self._env = {}
+        self.temp_dir = TemporaryDirectory()
+
+    def __enter__(self):
+        return self
+
+    @property
+    def env(self) -> Dict[str, str]:
+        """when this env property gets invoked, set up our temporary
+        directory, and also set gpg_started to tell the cleanup()
+        method whether or not gpg was used
+
+        why put this here and not in __init__?
pytest seems unhappy
+        and it's not obvious how to work around it
+        """
+        if self._env:
+            return self._env
+        self.gpg_started = True
+        self._env = {HOME: self.temp_dir.name}
+        return self._env
+
+    def __exit__(self, exc_typ, exc_value, traceback):
+        self.cleanup()
+
+    def cleanup(self) -> None:
+        """cleanup the gpg temporary directory and kill gpg"""
+        self.kill_gpg()
+        if self.temp_dir and os.path.isdir(self.temp_dir.name):
+            self.temp_dir.cleanup()
+
+    def export_armour(self, key: str) -> Optional[str]:
+        """Export gpg key, armoured key gets returned"""
         try:
-            subp.subp(cmd, capture=True)
-            LOG.debug(
-                "Imported key '%s' from keyserver '%s' on try %d",
-                key,
-                keyserver,
-                trynum,
+            return subp.subp(
+                ["gpg", "--export", "--armour", key],
+                capture=True,
+                update_env=self.env,
+            ).stdout
+        except subp.ProcessExecutionError as error:
+            # debug, since it happens for any key not on the system initially
+            LOG.debug('Failed to export armoured key "%s": %s', key, error)
+            return None
+
+    def dearmor(self, key: str) -> str:
+        """Dearmor gpg key, dearmored key gets returned
+
+        note: man gpg(1) makes no mention of an --armour spelling, only --armor
+        """
+        return subp.subp(
+            ["gpg", "--dearmor"], data=key, decode=False, update_env=self.env
+        ).stdout
+
+    def list_keys(self, key_file: str, human_output=False) -> str:
+        """List keys from a keyring with fingerprints. Default to a
+        stable machine parseable format.
+
+        @param key_file: a string containing a filepath to a key
+        @param human_output: return output intended for human parsing
+        """
+        cmd = [
+            "gpg",
+            "--no-options",
+            "--with-fingerprint",
+            "--no-default-keyring",
+            "--list-keys",
+            "--keyring",
+        ]
+        if not human_output:
+            cmd.append("--with-colons")
+
+        cmd.append(key_file)
+        stdout, stderr = subp.subp(cmd, update_env=self.env, capture=True)
+        if stderr:
+            LOG.warning(
+                'Failed to list keys from keyring "%s": %s', key_file, stderr
             )
-            return
-        except subp.ProcessExecutionError as e:
-            error = e
+        return stdout
+
+    def recv_key(self, key: str, keyserver: str, retries=(1, 1)) -> None:
+        """Receive gpg key from the specified keyserver.
+
+        Retries are done by default because keyservers can be unreliable.
+        Additionally, there is no way to determine the difference between
+        a non-existent key and a failure. In both cases gpg (at least 2.2.4)
+        exits with status 2 and stderr: "keyserver receive failed: No data"
+        It is assumed that a key provided to cloud-init exists on the keyserver
+        so re-trying makes better sense than failing.
+
+        @param key: a string key fingerprint (as passed to gpg --recv-keys).
+        @param keyserver: the keyserver to request keys from.
+        @param retries: an iterable of sleep lengths for retries.
+ Use None to indicate no retries.""" + LOG.debug("Importing key '%s' from keyserver '%s'", key, keyserver) + trynum = 0 + error = None + sleeps = iter(retries or []) + while True: + trynum += 1 + try: + subp.subp( + [ + "gpg", + "--no-tty", + "--keyserver=%s" % keyserver, + "--recv-keys", + key, + ], + capture=True, + update_env=self.env, + ) + LOG.debug( + "Imported key '%s' from keyserver '%s' on try %d", + key, + keyserver, + trynum, + ) + return + except subp.ProcessExecutionError as e: + error = e + try: + naplen = next(sleeps) + LOG.debug( + "Import failed with exit code %d, will try again in %ss", + error.exit_code, + naplen, + ) + time.sleep(naplen) + except StopIteration as e: + raise ValueError( + "Failed to import key '%s' from keyserver '%s' " + "after %d tries: %s" % (key, keyserver, trynum, error) + ) from e + + def delete_key(self, key: str) -> None: + """Delete the specified key from the local gpg ring""" try: - naplen = next(sleeps) - LOG.debug( - "Import failed with exit code %d, will try again in %ss", - error.exit_code, - naplen, + subp.subp( + ["gpg", "--batch", "--yes", "--delete-keys", key], + capture=True, + update_env=self.env, ) - time.sleep(naplen) - except StopIteration as e: - raise ValueError( - "Failed to import key '%s' from keyserver '%s' " - "after %d tries: %s" % (key, keyserver, trynum, error) - ) from e - - -def delete_key(key): - """Delete the specified key from the local gpg ring""" - try: - subp.subp( - ["gpg", "--batch", "--yes", "--delete-keys", key], capture=True - ) - except subp.ProcessExecutionError as error: - LOG.warning('Failed delete key "%s": %s', key, error) - - -def getkeybyid(keyid, keyserver="keyserver.ubuntu.com"): - """get gpg keyid from keyserver""" - armour = export_armour(keyid) - if not armour: + except subp.ProcessExecutionError as error: + LOG.warning('Failed delete key "%s": %s', key, error) + + def getkeybyid( + self, keyid: str, keyserver: str = "keyserver.ubuntu.com" + ) -> Optional[str]: + """get gpg keyid from keyserver""" + armour = self.export_armour(keyid) + if not armour: + try: + self.recv_key(keyid, keyserver=keyserver) + armour = self.export_armour(keyid) + except ValueError: + LOG.exception("Failed to obtain gpg key %s", keyid) + raise + finally: + # delete just imported key to leave environment as it + # was before + self.delete_key(keyid) + return armour + + def kill_gpg(self) -> None: + """killing with gpgconf is best practice, but when it isn't available + failover is possible + + GH: 4344 - stop gpg-agent/dirmgr daemons spawned by gpg + key imports. 
Daemons spawned by cloud-config.service on systemd
+        v253 report (running)
+        """
         try:
-            recv_key(keyid, keyserver=keyserver)
-            armour = export_armour(keyid)
-        except ValueError:
-            LOG.exception("Failed to obtain gpg key %s", keyid)
-            raise
-        finally:
-            # delete just imported key to leave environment as it was before
-            delete_key(keyid)
-
-    return armour
+            if not self.gpg_started:
+                return
+            if subp.which("gpgconf"):
+                gpg_process_out = subp.subp(
+                    ["gpgconf", "--kill", "all"],
+                    capture=True,
+                    update_env=self.env,
+                ).stdout
+            else:
+                gpg_process_out = subp.subp(
+                    [
+                        "ps",
+                        "-o",
+                        "ppid,pid",
+                        "-C",
+                        "keyboxd",
+                        "-C",
+                        "dirmngr",
+                        "-C",
+                        "gpg-agent",
+                    ],
+                    capture=True,
+                    rcs=[0, 1],
+                ).stdout
+            gpg_pids = re.findall(
+                r"(?P<ppid>\d+)\s+(?P<pid>\d+)", gpg_process_out
+            )
+            root_gpg_pids = [
+                int(pid[1]) for pid in gpg_pids if pid[0] == "1"
+            ]
+            if root_gpg_pids:
+                LOG.debug(
+                    "Killing gpg-agent and dirmngr pids: %s", root_gpg_pids
+                )
+            for gpg_pid in root_gpg_pids:
+                os.kill(gpg_pid, signal.SIGKILL)
+        except subp.ProcessExecutionError as e:
+            LOG.warning("Failed to clean up gpg process: %s", e)
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
index b8de78b70..388588d80 100644
--- a/cloudinit/handlers/jinja_template.py
+++ b/cloudinit/handlers/jinja_template.py
@@ -93,7 +93,7 @@ def handle_part(self, data, ctype, filename, payload, frequency, headers):
 def render_jinja_payload_from_file(
     payload, payload_fn, instance_data_file, debug=False
 ):
-    """Render a jinja template payload sourcing variables from jinja_vars_path.
+    r"""Render a jinja template sourcing variables from jinja_vars_path.
 
     @param payload: String of jinja template content. Should begin with
         ## template: jinja\n.
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 2c25dfc2c..d12944258 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -15,7 +15,7 @@
 from io import StringIO
 from time import time
 
-from cloudinit import persistence, type_utils, util
+from cloudinit import persistence, settings, type_utils, util
 from cloudinit.settings import CFG_ENV_NAME, PER_ALWAYS, PER_INSTANCE, PER_ONCE
 
 LOG = logging.getLogger(__name__)
@@ -307,7 +307,10 @@ def __init__(self, path_cfgs: dict, ds=None):
         self.cfgs = path_cfgs
         # Populate all the initial paths
         self.cloud_dir: str = path_cfgs.get("cloud_dir", "/var/lib/cloud")
-        self.run_dir: str = path_cfgs.get("run_dir", "/run/cloud-init")
+        self.docs_dir: str = path_cfgs.get(
+            "docs_dir", "/usr/share/doc/cloud-init/"
+        )
+        self.run_dir: str = path_cfgs.get("run_dir", settings.DEFAULT_RUN_DIR)
         self.instance_link: str = os.path.join(self.cloud_dir, "instance")
         self.boot_finished: str = os.path.join(
             self.instance_link, "boot-finished"
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 2141dc5fe..3a3838035 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -15,6 +15,7 @@
 from urllib.parse import urlparse
 
 from cloudinit import subp, util
+from cloudinit.net.netops.iproute2 import Iproute2
 from cloudinit.url_helper import UrlError, readurl
 
 LOG = logging.getLogger(__name__)
@@ -242,7 +243,7 @@ def get_dev_features(devname):
 
 
 def has_netfail_standby_feature(devname):
-    """ Return True if VIRTIO_NET_F_STANDBY bit (62) is set.
+    """Return True if VIRTIO_NET_F_STANDBY bit (62) is set.
https://github.com/torvalds/linux/blob/ \
     089cf7f6ecb266b6a4164919a2e69bd2f938374a/ \
@@ -641,7 +642,7 @@ def interface_has_own_mac(ifname, strict=False):
     are bonds or vlans that inherit their mac from another device.
     Possible values are: 0: permanent address 2: stolen from another device
-    1: randomly generated 3: set using dev_set_mac_address"""
+    1: randomly generated 3: set using dev_set_mac_address"""
 
     assign_type = read_sys_net_int(ifname, "addr_assign_type")
     if assign_type is None:
@@ -721,16 +722,7 @@ def _rename_interfaces(
     LOG.debug("Detected interfaces %s", cur_info)
 
     def update_byname(bymac):
-        return dict((data["name"], data) for data in cur_info.values())
-
-    def rename(cur, new):
-        subp.subp(["ip", "link", "set", cur, "name", new], capture=True)
-
-    def down(name):
-        subp.subp(["ip", "link", "set", name, "down"], capture=True)
-
-    def up(name):
-        subp.subp(["ip", "link", "set", name, "up"], capture=True)
+        return dict((data["name"], data) for data in bymac.values())
 
     ops = []
     errors = []
@@ -835,15 +827,23 @@ def find_entry(mac, driver, device_id):
         cur_byname = update_byname(cur_info)
         ops += cur_ops
 
-    opmap = {"rename": rename, "down": down, "up": up}
+    opmap = {
+        "rename": Iproute2.link_rename,
+        "down": Iproute2.link_down,
+        "up": Iproute2.link_up,
+    }
     if len(ops) + len(ups) == 0:
         if len(errors):
-            LOG.debug("unable to do any work for renaming of %s", renames)
+            LOG.warning(
+                "Unable to rename interfaces: %s due to errors: %s",
+                renames,
+                errors,
+            )
         else:
             LOG.debug("no work necessary for renaming of %s", renames)
     else:
-        LOG.debug("achieving renaming of %s with ops %s", renames, ops + ups)
+        LOG.debug("Renamed %s with ops %s", renames, ops + ups)
 
     for op, mac, new_name, params in ops + ups:
         try:
@@ -1283,6 +1283,48 @@ def is_ipv6_network(address: str) -> bool:
     )
 
 
+def is_ip_in_subnet(address: str, subnet: str) -> bool:
+    """Returns a bool indicating if ``address`` is in ``subnet``.
+
+    :param address:
+        The string of IP address.
+
+    :param subnet:
+        The string of subnet.
+
+    :return:
+        A bool indicating if the address is in the subnet.
+    """
+    ip_address = ipaddress.ip_address(address)
+    subnet_network = ipaddress.ip_network(subnet, strict=False)
+    return ip_address in subnet_network
+
+
+def should_add_gateway_onlink_flag(gateway: str, subnet: str) -> bool:
+    """Returns a bool indicating if the gateway should get the on-link flag.
+
+    :param gateway:
+        The string of gateway address.
+
+    :param subnet:
+        The string of subnet.
+
+    :return:
+        A bool indicating if the gateway needs the on-link flag.
+    """
+    try:
+        return not is_ip_in_subnet(gateway, subnet)
+    except ValueError as e:
+        LOG.warning(
+            "Failed to check whether gateway %s"
+            " is contained within subnet %s: %s",
+            gateway,
+            subnet,
+            e,
+        )
+        return False
+
+
 def subnet_is_ipv6(subnet) -> bool:
     """Common helper for checking network_state subnets for ipv6."""
     # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
index a84d965f6..b9f27cff9 100644
--- a/cloudinit/net/activators.py
+++ b/cloudinit/net/activators.py
@@ -1,10 +1,12 @@
 # This file is part of cloud-init. See LICENSE file for license information.
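# The two helpers added to cloudinit/net/__init__.py above reduce to stdlib
# ipaddress membership tests; a minimal sketch of the expected behavior
# (addresses are illustrative):
#
#   import ipaddress
#
#   # is_ip_in_subnet: a plain membership test
#   ipaddress.ip_address("192.168.1.5") in ipaddress.ip_network(
#       "192.168.1.0/24", strict=False
#   )  # True
#
#   # should_add_gateway_onlink_flag: flag only out-of-subnet gateways
#   # should_add_gateway_onlink_flag("10.0.0.1", "192.168.1.0/24")    -> True
#   # should_add_gateway_onlink_flag("192.168.1.1", "192.168.1.0/24") -> False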
import logging from abc import ABC, abstractmethod -from typing import Dict, Iterable, List, Optional, Type, Union +from functools import partial +from typing import Callable, Dict, Iterable, List, Optional, Type, Union from cloudinit import subp, util from cloudinit.net.eni import available as eni_available +from cloudinit.net.netops.iproute2 import Iproute2 from cloudinit.net.netplan import available as netplan_available from cloudinit.net.network_manager import available as nm_available from cloudinit.net.network_state import NetworkState @@ -17,15 +19,29 @@ class NoActivatorException(Exception): pass -def _alter_interface(cmd, device_name) -> bool: - LOG.debug("Attempting command %s for device %s", cmd, device_name) +def _alter_interface( + cmd: list, device_name: str, warn_on_stderr: bool = True +) -> bool: + """Attempt to alter an interface using a command list""" + return _alter_interface_callable(partial(subp.subp, cmd), warn_on_stderr) + + +def _alter_interface_callable( + callable: Callable, warn_on_stderr: bool = True +) -> bool: + """Attempt to alter an interface using a callable + + this function standardizes logging and response to failure for + various activators + """ try: - (_out, err) = subp.subp(cmd) + _out, err = callable() if len(err): - LOG.warning("Running %s resulted in stderr output: %s", cmd, err) + log_stderr = LOG.warning if warn_on_stderr else LOG.debug + log_stderr("Received stderr output: %s", err) return True - except subp.ProcessExecutionError: - util.logexc(LOG, "Running interface command %s failed", cmd) + except subp.ProcessExecutionError as e: + util.logexc(LOG, "Running interface command %s failed", e.cmd) return False @@ -108,7 +124,7 @@ def available(target=None) -> bool: """Return true if ifconfig can be used on this system.""" expected = "ifconfig" search = ["/sbin"] - return subp.which(expected, search=search, target=target) + return bool(subp.which(expected, search=search, target=target)) @staticmethod def bring_up_interface(device_name: str) -> bool: @@ -187,7 +203,9 @@ def bring_up_interface(device_name: str) -> bool: "Calling 'netplan apply' rather than " "altering individual interfaces" ) - return _alter_interface(NetplanActivator.NETPLAN_CMD, "all") + return _alter_interface( + NetplanActivator.NETPLAN_CMD, "all", warn_on_stderr=False + ) @staticmethod def bring_up_interfaces(device_names: Iterable[str]) -> bool: @@ -199,7 +217,9 @@ def bring_up_interfaces(device_names: Iterable[str]) -> bool: "Calling 'netplan apply' rather than " "altering individual interfaces" ) - return _alter_interface(NetplanActivator.NETPLAN_CMD, "all") + return _alter_interface( + NetplanActivator.NETPLAN_CMD, "all", warn_on_stderr=False + ) @staticmethod def bring_up_all_interfaces(network_state: NetworkState) -> bool: @@ -207,7 +227,9 @@ def bring_up_all_interfaces(network_state: NetworkState) -> bool: Return True is successful, otherwise return False """ - return _alter_interface(NetplanActivator.NETPLAN_CMD, "all") + return _alter_interface( + NetplanActivator.NETPLAN_CMD, "all", warn_on_stderr=False + ) @staticmethod def bring_down_interface(device_name: str) -> bool: @@ -219,7 +241,9 @@ def bring_down_interface(device_name: str) -> bool: "Calling 'netplan apply' rather than " "altering individual interfaces" ) - return _alter_interface(NetplanActivator.NETPLAN_CMD, "all") + return _alter_interface( + NetplanActivator.NETPLAN_CMD, "all", warn_on_stderr=False + ) class NetworkdActivator(NetworkActivator): @@ -231,8 +255,9 @@ def available(target=None) -> bool: 
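# For reference, the callable-based error handling that the NetworkdActivator
# methods below rely on boils down to this pattern (sketch; run_guarded is a
# hypothetical stand-in for _alter_interface_callable):
#
#   from functools import partial
#
#   def run_guarded(call) -> bool:
#       try:
#           _out, err = call()  # subp.SubpResult unpacks to (stdout, stderr)
#           if len(err):
#               LOG.warning("Received stderr output: %s", err)
#           return True
#       except subp.ProcessExecutionError as e:
#           util.logexc(LOG, "Running interface command %s failed", e.cmd)
#           return False
#
#   run_guarded(partial(Iproute2.link_up, "eth0"))  # illustrative device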
@staticmethod def bring_up_interface(device_name: str) -> bool: """Return True is successful, otherwise return False""" - cmd = ["ip", "link", "set", "up", device_name] - return _alter_interface(cmd, device_name) + return _alter_interface_callable( + partial(Iproute2.link_up, device_name) + ) @staticmethod def bring_up_all_interfaces(network_state: NetworkState) -> bool: @@ -243,8 +268,9 @@ def bring_up_all_interfaces(network_state: NetworkState) -> bool: @staticmethod def bring_down_interface(device_name: str) -> bool: """Return True is successful, otherwise return False""" - cmd = ["ip", "link", "set", "down", device_name] - return _alter_interface(cmd, device_name) + return _alter_interface_callable( + partial(Iproute2.link_down, device_name) + ) # This section is mostly copied and pasted from renderers.py. An abstract diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py index 0b0ff1592..450a6668f 100644 --- a/cloudinit/net/bsd.py +++ b/cloudinit/net/bsd.py @@ -18,6 +18,7 @@ class BSDRenderer(renderer.Renderer): rc_conf_fn = "etc/rc.conf" interface_routes = "" route_names = "" + route6_names = "" def get_rc_config_value(self, key): fn = subp.target_path(self.target, self.rc_conf_fn) diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 903300fbb..c2c1d5af4 100644 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -101,9 +101,9 @@ def _klibc_to_config_entry(content, mac_addrs=None): provided here. There is no good documentation on this unfortunately. DEVICE= is expected/required and PROTO should indicate if - this is 'none' (static) or 'dhcp' or 'dhcp6' (LP: #1621507). - note that IPV6PROTO is also written by newer code to address the - possibility of both ipv4 and ipv6 getting addresses. + this is 'none' (static) or 'dhcp' or 'dhcp6' (LP: #1621507) or 'static' + or 'off' (LP: 2065787). Note that IPV6PROTO is also written to address + the possibility of both ipv4 and ipv6 getting addresses. Full syntax is documented at: https://git.kernel.org/pub/scm/libs/klibc/klibc.git/plain/usr/kinit/ipconfig/README.ipconfig @@ -127,6 +127,9 @@ def _klibc_to_config_entry(content, mac_addrs=None): else: proto = "none" + if proto in ("static", "off"): + proto = "none" + if proto not in ("none", "dhcp", "dhcp6"): raise ValueError("Unexpected value for PROTO: %s" % proto) @@ -261,7 +264,7 @@ def _b64dgz(data): blob = base64.b64decode(data) except (TypeError, ValueError): LOG.error( - "Expected base64 encoded kernel commandline parameter" + "Expected base64 encoded kernel command line parameter" " network-config. Ignoring network-config=%s.", data, ) diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 83b99803b..6b3aabc86 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -82,7 +82,9 @@ class NoDHCPLeaseMissingDhclientError(NoDHCPLeaseError): """Raised when unable to find dhclient.""" -def maybe_perform_dhcp_discovery(distro, nic=None, dhcp_log_func=None): +def maybe_perform_dhcp_discovery( + distro, nic=None, dhcp_log_func=None +) -> Dict[str, Any]: """Perform dhcp discovery if nic valid and dhclient command exists. 
If the nic is invalid or undiscoverable or dhclient command is not found, @@ -453,14 +455,27 @@ def parse_static_routes(routes: str) -> List[Tuple[str, str]]: ("0.0.0.0/0", "192.168.128.1") ] + # unknown-121 option format + sr3 = parse_static_routes(\ + "0:a:0:0:1:20:a8:3f:81:10:a:0:0:1:20:a9:fe:a9:fe:a:0:0:1") + sr3 = [ + ("0.0.0.0/0", "10.0.0.1"), + ("168.63.129.16/32", "10.0.0.1"), + ("169.254.169.254/32", "10.0.0.1"), + ] + Python version of isc-dhclient's hooks: /etc/dhcp/dhclient-exit-hooks.d/rfc3442-classless-routes """ # raw strings from dhcp lease may end in semi-colon rfc3442 = routes.rstrip(";") - tokens = [tok for tok in re.split(r"[, .]", rfc3442) if tok] + tokens = [tok for tok in re.split(r"[, . :]", rfc3442) if tok] static_routes: List[Tuple[str, str]] = [] + # Handle unknown-121 format by converting hex to base 10. + if ":" in rfc3442: + tokens = [str(int(tok, 16)) for tok in tokens] + def _trunc_error(cidr, required, remain): msg = ( "RFC3442 string malformed. Current route has CIDR of %s " @@ -596,6 +611,7 @@ def get_key_from_latest_lease(self, distro, key: str): class Dhcpcd(DhcpClient): client_name = "dhcpcd" + timeout = 300 def dhcp_discovery( self, @@ -782,14 +798,29 @@ def parse_dhcpcd_lease(lease_dump: str, interface: str) -> Dict: subnet_cidr='20' subnet_mask='255.255.240.0' """ + LOG.debug( + "Parsing dhcpcd lease for interface %s: %r", interface, lease_dump + ) # create a dict from dhcpcd dump output - remove single quotes - lease = dict( - [ - a.split("=") - for a in lease_dump.strip().replace("'", "").split("\n") - ] - ) + try: + lease = dict( + [ + a.split("=", maxsplit=1) + for a in lease_dump.strip().replace("'", "").split("\n") + if "=" in a + ] + ) + if not lease: + msg = ( + "No valid DHCP lease configuration " + "found in dhcpcd lease: %r" + ) + LOG.error(msg, lease_dump) + raise InvalidDHCPLeaseFileError(msg % lease_dump) + except ValueError as error: + LOG.error("Error parsing dhcpcd lease: %r", lease_dump) + raise InvalidDHCPLeaseFileError from error # this is expected by cloud-init's code lease["interface"] = interface diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py index 46b1c653b..c8730fb1e 100644 --- a/cloudinit/net/ephemeral.py +++ b/cloudinit/net/ephemeral.py @@ -8,11 +8,8 @@ from typing import Any, Callable, Dict, List, Optional import cloudinit.net as net -from cloudinit.net.dhcp import ( - Dhcpcd, - NoDHCPLeaseError, - maybe_perform_dhcp_discovery, -) +import cloudinit.netinfo as netinfo +from cloudinit.net.dhcp import NoDHCPLeaseError, maybe_perform_dhcp_discovery from cloudinit.subp import ProcessExecutionError LOG = logging.getLogger(__name__) @@ -36,8 +33,8 @@ def __init__( ip, prefix_or_mask, broadcast, + interface_addrs_before_dhcp: dict, router=None, - connectivity_url_data: Optional[Dict[str, Any]] = None, static_routes=None, ): """Setup context manager and validate call signature. @@ -48,8 +45,6 @@ def __init__( prefix. @param broadcast: Broadcast address for the IPv4 network. @param router: Optionally the default gateway IP. - @param connectivity_url_data: Optionally, a URL to verify if a usable - connection already exists. @param static_routes: Optionally a list of static routes from DHCP """ if not all([interface, ip, prefix_or_mask, broadcast]): @@ -66,7 +61,6 @@ def __init__( "netmask: {0}".format(e) ) from e - self.connectivity_url_data = connectivity_url_data self.interface = interface self.ip = ip self.broadcast = broadcast @@ -75,20 +69,26 @@ def __init__( # List of commands to run to cleanup state. 
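        # Sketch of the cleanup pattern used here: every setup step that
        # actually changed state appends its inverse as a functools.partial,
        # and __exit__ replays the queued callables, e.g.
        #
        #   self.cleanup_cmds.append(
        #       partial(self.distro.net_ops.del_addr, self.interface, cidr)
        #   )
        #   ...
        #   for cmd in self.cleanup_cmds:   # in __exit__
        #       cmd()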
self.cleanup_cmds: List[Callable] = []
         self.distro = distro
+        self.cidr = f"{self.ip}/{self.prefix}"
+        self.interface_addrs_before_dhcp = interface_addrs_before_dhcp.get(
+            self.interface, {}
+        )
 
     def __enter__(self):
-        """Perform ephemeral network setup if interface is not connected."""
-        if self.connectivity_url_data:
-            if net.has_url_connectivity(self.connectivity_url_data):
-                LOG.debug(
-                    "Skip ephemeral network setup, instance has connectivity"
-                    " to %s",
-                    self.connectivity_url_data["url"],
-                )
-                return
+        """Set up ephemeral network if interface is not connected.
+
+        This context manager handles the lifecycle of the network interface,
+        addresses, routes, etc.
+        """
         try:
-            self._bringup_device()
+            try:
+                self._bringup_device()
+            except ProcessExecutionError as e:
+                if "File exists" not in str(
+                    e.stderr
+                ) and "Address already assigned" not in str(e.stderr):
+                    raise
 
             # rfc3442 requires us to ignore the router config *if*
             # classless static routes are provided.
@@ -116,27 +116,58 @@ def __exit__(self, excp_type, excp_value, excp_traceback):
             cmd()
 
     def _bringup_device(self):
-        """Perform the ip commands to fully setup the device."""
-        cidr = "{0}/{1}".format(self.ip, self.prefix)
+        """Perform the ip commands to fully set up the device.
+
+        Dhcp clients behave differently in how they leave link state and ip
+        address assignment.
+
+        Attempt to assign the address and bring up the link, as needed.
+        Set cleanup_cmds to return the interface state to how it was prior
+        to execution of the dhcp client.
+        """
         LOG.debug(
             "Attempting setup of ephemeral network on %s with %s brd %s",
             self.interface,
-            cidr,
+            self.cidr,
             self.broadcast,
         )
-        try:
-            self.distro.net_ops.add_addr(self.interface, cidr, self.broadcast)
-        except ProcessExecutionError as e:
-            if "File exists" not in str(e.stderr):
-                raise
+        interface_addrs_after_dhcp = netinfo.netdev_info().get(
+            self.interface, {}
+        )
+        has_link = interface_addrs_after_dhcp.get("up")
+        had_link = self.interface_addrs_before_dhcp.get("up")
+        has_ip = self.ip in [
+            ip.get("ip") for ip in interface_addrs_after_dhcp.get("ipv4", {})
+        ]
+        had_ip = self.ip in [
+            ip.get("ip")
+            for ip in self.interface_addrs_before_dhcp.get("ipv4", {})
+        ]
+
+        if has_ip:
             LOG.debug(
-                "Skip ephemeral network setup, %s already has address %s",
+                "Skip adding ip address: %s already has address %s",
                 self.interface,
                 self.ip,
             )
         else:
-            # Address creation success, bring up device and queue cleanup
+            self.distro.net_ops.add_addr(
+                self.interface, self.cidr, self.broadcast
+            )
+        if has_link:
+            LOG.debug(
+                "Skip bringing up network link: interface %s is already up",
+                self.interface,
+            )
+        else:
             self.distro.net_ops.link_up(self.interface, family="inet")
+        if had_link:
+            LOG.debug(
+                "Not queueing link down: link [%s] was up prior to "
+                "receiving a dhcp lease",
+                self.interface,
+            )
+        else:
             self.cleanup_cmds.append(
                 partial(
                     self.distro.net_ops.link_down,
@@ -144,8 +175,17 @@ def _bringup_device(self):
                     family="inet",
                 )
             )
+        if had_ip:
+            LOG.debug(
+                "Not queueing address removal: address %s was assigned before "
+                "receiving a dhcp lease",
+                self.ip,
+            )
+        else:
             self.cleanup_cmds.append(
-                partial(self.distro.net_ops.del_addr, self.interface, cidr)
+                partial(
+                    self.distro.net_ops.del_addr, self.interface, self.cidr
+                )
             )
 
     def _bringup_static_routes(self):
@@ -245,11 +285,12 @@ def __init__(
         dhcp_log_func=None,
     ):
         self.iface = iface
-        self._ephipv4 = None
-        self.lease = None
+        self._ephipv4: Optional[EphemeralIPv4Network] = None
+        self.lease: Optional[Dict[str, Any]] = None
self.dhcp_log_func = dhcp_log_func self.connectivity_url_data = connectivity_url_data self.distro = distro + self.interface_addrs_before_dhcp = netinfo.netdev_info() def __enter__(self): """Setup sandboxed dhcp context, unless connectivity_url can already be @@ -305,6 +346,7 @@ def obtain_lease(self): "rfc3442-classless-static-routes", "classless-static-routes", "static_routes", + "unknown-121", ], "router": "routers", } @@ -319,12 +361,11 @@ def obtain_lease(self): ] = self.distro.dhcp_client.parse_static_routes( kwargs["static_routes"] ) - if self.connectivity_url_data: - kwargs["connectivity_url_data"] = self.connectivity_url_data - if isinstance(self.distro.dhcp_client, Dhcpcd): - ephipv4 = DhcpcdEphemeralIPv4Network(self.distro, **kwargs) - else: - ephipv4 = EphemeralIPv4Network(self.distro, **kwargs) + ephipv4 = EphemeralIPv4Network( + self.distro, + interface_addrs_before_dhcp=self.interface_addrs_before_dhcp, + **kwargs, + ) ephipv4.__enter__() self._ephipv4 = ephipv4 return self.lease @@ -348,16 +389,6 @@ def get_first_option_value( result[internal_mapping] = self.lease.get(different_names) -class DhcpcdEphemeralIPv4Network(EphemeralIPv4Network): - """dhcpcd sets up its own ephemeral network and routes""" - - def __enter__(self): - return - - def __exit__(self, excp_type, excp_value, excp_traceback): - return - - class EphemeralIPNetwork: """Combined ephemeral context manager for IPv4 and IPv6 diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py index 75189018f..d47b28bda 100644 --- a/cloudinit/net/freebsd.py +++ b/cloudinit/net/freebsd.py @@ -76,10 +76,20 @@ def set_route(self, network, netmask, gateway): self.set_rc_config_value("ipv6_defaultrouter", gateway) else: route_name = f"net{self._route_cpt}" - route_cmd = f"-net {network} -netmask {netmask} {gateway}" - self.set_rc_config_value("route_" + route_name, route_cmd) - self.route_names = f"{self.route_names} {route_name}" - self.set_rc_config_value("static_routes", self.route_names.strip()) + if ":" in network: + route_cmd = f"-net {network}/{netmask} {gateway}" + self.set_rc_config_value("ipv6_route_" + route_name, route_cmd) + self.route6_names = f"{self.route6_names} {route_name}" + self.set_rc_config_value( + "ipv6_static_routes", self.route6_names.strip() + ) + else: + route_cmd = f"-net {network} -netmask {netmask} {gateway}" + self.set_rc_config_value("route_" + route_name, route_cmd) + self.route_names = f"{self.route_names} {route_name}" + self.set_rc_config_value( + "static_routes", self.route_names.strip() + ) self._route_cpt += 1 diff --git a/cloudinit/net/netops/__init__.py b/cloudinit/net/netops/__init__.py index 0a862918e..7b9591787 100644 --- a/cloudinit/net/netops/__init__.py +++ b/cloudinit/net/netops/__init__.py @@ -1,13 +1,19 @@ from typing import Optional +from cloudinit.subp import SubpResult + class NetOps: @staticmethod - def link_up(interface: str): + def link_up(interface: str) -> SubpResult: + pass + + @staticmethod + def link_down(interface: str) -> SubpResult: pass @staticmethod - def link_down(interface: str): + def link_rename(current_name: str, new_name: str): pass @staticmethod @@ -39,9 +45,15 @@ def get_default_route() -> str: pass @staticmethod - def add_addr(interface: str, address: str, broadcast: str): + def add_addr( + interface: str, address: str, broadcast: Optional[str] = None + ): pass @staticmethod def del_addr(interface: str, address: str): pass + + @staticmethod + def flush_addr(interface: str): + pass diff --git a/cloudinit/net/netops/bsd_netops.py 
b/cloudinit/net/netops/bsd_netops.py index fd9ea8ca5..55f6fae52 100644 --- a/cloudinit/net/netops/bsd_netops.py +++ b/cloudinit/net/netops/bsd_netops.py @@ -6,12 +6,12 @@ class BsdNetOps(netops.NetOps): @staticmethod - def link_up(interface: str): - subp.subp(["ifconfig", interface, "up"]) + def link_up(interface: str) -> subp.SubpResult: + return subp.subp(["ifconfig", interface, "up"]) @staticmethod - def link_down(interface: str): - subp.subp(["ifconfig", interface, "down"]) + def link_down(interface: str) -> subp.SubpResult: + return subp.subp(["ifconfig", interface, "down"]) @staticmethod def add_route( @@ -50,14 +50,18 @@ def get_default_route() -> str: return std.splitlines()[-1].strip() @staticmethod - def add_addr(interface: str, address: str, broadcast: str): + def add_addr( + interface: str, address: str, broadcast: Optional[str] = None + ): + broadcast_args = [] + if broadcast: + broadcast_args = ["broadcast", broadcast] subp.subp( [ "ifconfig", interface, address, - "broadcast", - broadcast, + *broadcast_args, "alias", ], ) diff --git a/cloudinit/net/netops/iproute2.py b/cloudinit/net/netops/iproute2.py index 08d79b187..46633b6d5 100644 --- a/cloudinit/net/netops/iproute2.py +++ b/cloudinit/net/netops/iproute2.py @@ -1,26 +1,36 @@ from typing import Optional -import cloudinit.net.netops as netops from cloudinit import subp +from cloudinit.net.netops import NetOps -class Iproute2(netops.NetOps): +class Iproute2(NetOps): @staticmethod - def link_up(interface: str, family: Optional[str] = None): - subp.subp( - ["ip"] - + (["-family", family] if family else []) - + ["link", "set", "dev", interface, "up"] + def link_up( + interface: str, family: Optional[str] = None + ) -> subp.SubpResult: + family_args = [] + if family: + family_args = ["-family", family] + return subp.subp( + ["ip", *family_args, "link", "set", "dev", interface, "up"] ) @staticmethod - def link_down(interface: str, family: Optional[str] = None): - subp.subp( - ["ip"] - + (["-family", family] if family else []) - + ["link", "set", "dev", interface, "down"] + def link_down( + interface: str, family: Optional[str] = None + ) -> subp.SubpResult: + family_args = [] + if family: + family_args = ["-family", family] + return subp.subp( + ["ip", *family_args, "link", "set", "dev", interface, "down"] ) + @staticmethod + def link_rename(current_name: str, new_name: str): + subp.subp(["ip", "link", "set", current_name, "name", new_name]) + @staticmethod def add_route( interface: str, @@ -29,22 +39,42 @@ def add_route( gateway: Optional[str] = None, source_address: Optional[str] = None, ): + gateway_args = [] + source_args = [] + if gateway and gateway != "0.0.0.0": + gateway_args = ["via", gateway] + if source_address: + source_args = ["src", source_address] subp.subp( - ["ip", "-4", "route", "add", route] - + (["via", gateway] if gateway and gateway != "0.0.0.0" else []) - + [ + [ + "ip", + "-4", + "route", + "replace", + route, + *gateway_args, "dev", interface, + *source_args, ] - + (["src", source_address] if source_address else []), ) @staticmethod def append_route(interface: str, address: str, gateway: str): + gateway_args = [] + if gateway and gateway != "0.0.0.0": + gateway_args = ["via", gateway] subp.subp( - ["ip", "-4", "route", "append", address] - + (["via", gateway] if gateway and gateway != "0.0.0.0" else []) - + ["dev", interface] + [ + "ip", + "-4", + "route", + "append", + address, + *gateway_args, + "dev", + interface, + ] ) @staticmethod @@ -55,11 +85,24 @@ def del_route( gateway: Optional[str] = None, 
source_address: Optional[str] = None,
     ):
+        gateway_args = []
+        source_args = []
+        if gateway and gateway != "0.0.0.0":
+            gateway_args = ["via", gateway]
+        if source_address:
+            source_args = ["src", source_address]
         subp.subp(
-            ["ip", "-4", "route", "del", address]
-            + (["via", gateway] if gateway and gateway != "0.0.0.0" else [])
-            + ["dev", interface]
-            + (["src", source_address] if source_address else [])
+            [
+                "ip",
+                "-4",
+                "route",
+                "del",
+                address,
+                *gateway_args,
+                "dev",
+                interface,
+                *source_args,
+            ]
         )
 
     @staticmethod
@@ -69,7 +112,12 @@ def get_default_route() -> str:
         ).stdout
 
     @staticmethod
-    def add_addr(interface: str, address: str, broadcast: str):
+    def add_addr(
+        interface: str, address: str, broadcast: Optional[str] = None
+    ):
+        broadcast_args = []
+        if broadcast:
+            broadcast_args = ["broadcast", broadcast]
         subp.subp(
             [
                 "ip",
@@ -78,8 +126,7 @@ def add_addr(interface: str, address: str, broadcast: str):
                 "addr",
                 "add",
                 address,
-                "broadcast",
-                broadcast,
+                *broadcast_args,
                 "dev",
                 interface,
             ],
@@ -91,3 +138,7 @@ def del_addr(interface: str, address: str):
         subp.subp(
             ["ip", "-family", "inet", "addr", "del", address, "dev", interface]
         )
+
+    @staticmethod
+    def flush_addr(interface: str):
+        subp.subp(["ip", "addr", "flush", "dev", interface])
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 6730e3a48..84a8b112c 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -2,11 +2,11 @@
 
 import copy
 import io
-import ipaddress
 import logging
 import os
 import textwrap
-from typing import Optional, cast
+from tempfile import SpooledTemporaryFile
+from typing import Callable, List, Optional
 
 from cloudinit import features, safeyaml, subp, util
 from cloudinit.net import (
@@ -14,6 +14,7 @@
     SYS_CLASS_NET,
     get_devicelist,
     renderer,
+    should_add_gateway_onlink_flag,
     subnet_is_ipv6,
 )
 from cloudinit.net.network_state import NET_CONFIG_TO_V2, NetworkState
@@ -47,7 +48,7 @@ def _get_params_dict_by_match(config, match):
     )
 
 
-def _extract_addresses(config: dict, entry: dict, ifname, features=None):
+def _extract_addresses(config: dict, entry: dict, ifname, features: Callable):
     """This method parse a cloudinit.net.network_state dictionary (config) and
     maps netstate keys/values into a dictionary (entry) to represent
     netplan yaml. (config v1 -> netplan)
@@ -97,8 +98,6 @@ def _listify(obj, token=" "):
             obj,
         ]
 
-    if features is None:
-        features = []
     addresses = []
     routes = []
     nameservers = []
@@ -123,28 +122,17 @@ def _listify(obj, token=" "):
                     "via": subnet.get("gateway"),
                     "to": "::/0" if ":" in subnet["gateway"] else "0.0.0.0/0",
                 }
-                try:
-                    subnet_gateway = ipaddress.ip_address(subnet["gateway"])
-                    subnet_network = ipaddress.ip_network(addr, strict=False)
-                    # If the gateway is not contained within the subnet's
-                    # network, mark it as on-link so that it can still be
-                    # reached.
-                    if subnet_gateway not in subnet_network:
-                        LOG.debug(
-                            "Gateway %s is not contained within subnet %s,"
-                            " adding on-link flag",
-                            subnet["gateway"],
-                            addr,
-                        )
-                        new_route["on-link"] = True
-                except ValueError as e:
-                    LOG.warning(
-                        "Failed to check whether gateway %s"
-                        " is contained within subnet %s: %s",
+                # If the gateway is not contained within the subnet's
+                # network, mark it as on-link so that it can still be
+                # reached.
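                # e.g. (illustrative values): a gateway of 10.0.0.1 for a
                # 192.168.1.0/24 address fails the subnet-membership check,
                # so the emitted netplan route becomes
                #   {"via": "10.0.0.1", "to": "0.0.0.0/0", "on-link": True}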
+                if should_add_gateway_onlink_flag(subnet["gateway"], addr):
+                    LOG.debug(
+                        "Gateway %s is not contained within subnet %s,"
+                        " adding on-link flag",
                         subnet["gateway"],
                         addr,
-                        e,
                     )
+                    new_route["on-link"] = True
                 routes.append(new_route)
             if "dns_nameservers" in subnet:
                 nameservers += _listify(subnet.get("dns_nameservers", []))
@@ -152,7 +140,7 @@ def _listify(obj, token=" "):
                 searchdomains += _listify(subnet.get("dns_search", []))
             if "mtu" in subnet:
                 mtukey = "mtu"
-                if subnet_is_ipv6(subnet) and "ipv6-mtu" in features:
+                if subnet_is_ipv6(subnet) and "ipv6-mtu" in features():
                     mtukey = "ipv6-mtu"
                 entry.update({mtukey: subnet.get("mtu")})
             for route in subnet.get("routes", []):
@@ -235,6 +223,79 @@ def _clean_default(target=None):
         os.unlink(f)
 
 
+def netplan_api_write_yaml_file(net_config_content: str) -> bool:
+    """Use netplan.State._write_yaml_file to write netplan config
+
+    Where the netplan python API exists, prefer use of the private
+    _write_yaml_file to ensure proper permissions and file locations
+    are chosen by the netplan python bindings in the environment.
+
+    By calling the netplan API, allow netplan versions to change behavior
+    related to file permissions and treatment of sensitive configuration
+    under the API call to _write_yaml_file.
+
+    In future netplan releases, security-sensitive config may be written to
+    separate file or directory paths from world-readable configuration parts.
+    """
+    try:
+        from netplan.parser import Parser  # type: ignore
+        from netplan.state import State  # type: ignore
+    except ImportError:
+        LOG.debug(
+            "No netplan python module. Fallback to write %s",
+            CLOUDINIT_NETPLAN_FILE,
+        )
+        return False
+    try:
+        with SpooledTemporaryFile(mode="w") as f:
+            f.write(net_config_content)
+            f.flush()
+            f.seek(0, io.SEEK_SET)
+            parser = Parser()
+            parser.load_yaml(f)
+            state_output_file = State()
+            state_output_file.import_parser_results(parser)
+
+            # Write our desired basename 50-cloud-init.yaml, allow netplan to
+            # determine default root-dir /etc/netplan and/or specialized
+            # filenames or read permissions based on whether this config
+            # contains secrets.
+            state_output_file._write_yaml_file(
+                os.path.basename(CLOUDINIT_NETPLAN_FILE)
+            )
+    except Exception as e:
+        LOG.warning(
+            "Unable to render network config using netplan python module."
+            " Fallback to write %s. %s",
+            CLOUDINIT_NETPLAN_FILE,
+            e,
+        )
+        return False
+    LOG.debug("Rendered netplan config using netplan python API")
+    return True
+
+
+def has_netplan_config_changed(cfg_file: str, content: str) -> bool:
+    """Return True when new netplan config has changed vs previous."""
+    if not os.path.exists(cfg_file):
+        # This is our first write of netplan's cfg_file, representing change.
+        return True
+    # Check prev cfg vs current cfg.
Ignore comments + prior_cfg = util.load_yaml(util.load_text_file(cfg_file)) + return prior_cfg != util.load_yaml(content) + + +def fallback_write_netplan_yaml(cfg_file: str, content: str): + """Write netplan config to cfg_file because python API was unavailable.""" + mode = 0o600 if features.NETPLAN_CONFIG_ROOT_READ_ONLY else 0o644 + if os.path.exists(cfg_file): + current_mode = util.get_permissions(cfg_file) + if current_mode & mode == current_mode: + # preserve mode if existing perms are more strict + mode = current_mode + util.write_file(cfg_file, content, mode=mode) + + class Renderer(renderer.Renderer): """Renders network information in a /etc/netplan/network.yaml format.""" @@ -248,11 +309,10 @@ def __init__(self, config=None): self.netplan_header = config.get("netplan_header", None) self._postcmds = config.get("postcmds", False) self.clean_default = config.get("clean_default", True) - self._features = config.get("features", None) + self._features = config.get("features") or [] - @property - def features(self): - if self._features is None: + def features(self) -> List[str]: + if not self._features: try: info_blob, _err = subp.subp(self.NETPLAN_INFO, capture=True) info = util.load_yaml(info_blob) @@ -287,33 +347,22 @@ def render_network_state( header += "\n" content = header + content - # determine if existing config files have the same content - same_content = False - if os.path.exists(fpnplan): - hashed_content = util.hash_buffer(io.BytesIO(content.encode())) - with open(fpnplan, "rb") as f: - hashed_original_content = util.hash_buffer(f) - if hashed_content == hashed_original_content: - same_content = True - - mode = 0o600 if features.NETPLAN_CONFIG_ROOT_READ_ONLY else 0o644 - if not same_content and os.path.exists(fpnplan): - current_mode = util.get_permissions(fpnplan) - if current_mode & mode == current_mode: - # preserve mode if existing perms are more strict than default - mode = current_mode - util.write_file(fpnplan, content, mode=mode) + netplan_config_changed = has_netplan_config_changed(fpnplan, content) + if not netplan_api_write_yaml_file(content): + fallback_write_netplan_yaml(fpnplan, content) if self.clean_default: _clean_default(target=target) - self._netplan_generate(run=self._postcmds, same_content=same_content) + self._netplan_generate( + run=self._postcmds, config_changed=netplan_config_changed + ) self._net_setup_link(run=self._postcmds) - def _netplan_generate(self, run: bool = False, same_content: bool = False): + def _netplan_generate(self, run: bool, config_changed: bool): if not run: - LOG.debug("netplan generate postcmd disabled") + LOG.debug("netplan generate postcmds disabled") return - if same_content: + if not config_changed: LOG.debug( "skipping call to `netplan generate`." 
" reason: identical netplan config" @@ -329,6 +378,9 @@ def _net_setup_link(self, run=False): if not run: LOG.debug("netplan net_setup_link postcmd disabled") return + elif "net.ifnames=0" in util.get_cmdline(): + LOG.debug("Predictable interface names disabled.") + return setup_lnk = ["udevadm", "test-builtin", "net_setup_link"] # It's possible we can race a udev rename and attempt to run @@ -353,7 +405,6 @@ def _net_setup_link(self, run=False): ) from last_exception def _render_content(self, network_state: NetworkState) -> str: - # if content already in netplan format, pass it back if network_state.version == 2: LOG.debug("V2 to V2 passthrough") @@ -403,13 +454,10 @@ def _render_content(self, network_state: NetworkState) -> str: bond_config = {} # extract bond params and drop the bond_ prefix as it's # redundant in v2 yaml format - v2_bond_map = cast(dict, NET_CONFIG_TO_V2.get("bond")) - # Previous cast is needed to help mypy to know that the key is - # present in `NET_CONFIG_TO_V2`. This could probably be removed - # by using `Literal` when supported. + v2_bond_map = NET_CONFIG_TO_V2["bond"] for match in ["bond_", "bond-"]: bond_params = _get_params_dict_by_match(ifcfg, match) - for (param, value) in bond_params.items(): + for param, value in bond_params.items(): newname = v2_bond_map.get(param.replace("_", "-")) if newname is None: continue @@ -427,9 +475,18 @@ def _render_content(self, network_state: NetworkState) -> str: elif if_type == "bridge": # required_keys = ['name', 'bridge_ports'] + # + # Rather than raise an exception on `sorted(None)`, log a + # warning and skip this interface when invalid configuration is + # received. bridge_ports = ifcfg.get("bridge_ports") - # mypy wrong error. `copy(None)` is supported: - ports = sorted(copy.copy(bridge_ports)) # type: ignore + if bridge_ports is None: + LOG.warning( + "Invalid config. The key", + f"'bridge_ports' is required in {config}.", + ) + continue + ports = sorted(copy.copy(bridge_ports)) bridge: dict = { "interfaces": ports, } @@ -441,11 +498,8 @@ def _render_content(self, network_state: NetworkState) -> str: # v2 yaml uses different names for the keys # and at least one value format change - v2_bridge_map = cast(dict, NET_CONFIG_TO_V2.get("bridge")) - # Previous cast is needed to help mypy to know that the key is - # present in `NET_CONFIG_TO_V2`. This could probably be removed - # by using `Literal` when supported. 
- for (param, value) in params.items(): + v2_bridge_map = NET_CONFIG_TO_V2["bridge"] + for param, value in params.items(): newname = v2_bridge_map.get(param) if newname is None: continue diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py index 29f466eda..7a5112880 100644 --- a/cloudinit/net/networkd.py +++ b/cloudinit/net/networkd.py @@ -9,7 +9,7 @@ from typing import Optional from cloudinit import subp, util -from cloudinit.net import renderer +from cloudinit.net import renderer, should_add_gateway_onlink_flag from cloudinit.net.network_state import NetworkState LOG = logging.getLogger(__name__) @@ -68,7 +68,7 @@ def get_final_conf(self): contents += "[" + k + "]\n" for e in sorted(v[n]): contents += e + "\n" - contents += "\n" + contents += "\n" else: contents += "[" + k + "]\n" for e in sorted(v): @@ -169,6 +169,9 @@ def parse_subnets(self, iface, cfg: CfgParser): self.parse_routes(f"r{rid}", i, cfg) rid = rid + 1 if "address" in e: + addr = e["address"] + if "prefix" in e: + addr += "/" + str(e["prefix"]) subnet_cfg_map = { "address": "Address", "gateway": "Gateway", @@ -177,24 +180,30 @@ def parse_subnets(self, iface, cfg: CfgParser): } for k, v in e.items(): if k == "address": - if "prefix" in e: - v += "/" + str(e["prefix"]) - cfg.update_section("Address", subnet_cfg_map[k], v) + cfg.update_section("Address", subnet_cfg_map[k], addr) elif k == "gateway": # Use "a" as a dict key prefix for this route to # isolate it from other sources of routes cfg.update_route_section( "Route", f"a{rid}", subnet_cfg_map[k], v ) + if should_add_gateway_onlink_flag(v, addr): + LOG.debug( + "Gateway %s is not contained within subnet %s," + " adding GatewayOnLink flag", + v, + addr, + ) + cfg.update_route_section( + "Route", f"a{rid}", "GatewayOnLink", "yes" + ) rid = rid + 1 elif k == "dns_nameservers" or k == "dns_search": cfg.update_section(sec, subnet_cfg_map[k], " ".join(v)) cfg.update_section(sec, "DHCP", dhcp) - if dhcp in ["ipv6", "yes"] and isinstance( - iface.get("accept-ra", ""), bool - ): + if isinstance(iface.get("accept-ra", ""), bool): cfg.update_section(sec, "IPv6AcceptRA", iface["accept-ra"]) return dhcp diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 622b8fafa..015c44949 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -317,7 +317,6 @@ class Renderer(renderer.Renderer): "rhel": { "ONBOOT": True, "USERCTL": False, - "NM_CONTROLLED": False, "BOOTPROTO": "none", }, "suse": {"BOOTPROTO": "static", "STARTMODE": "auto"}, @@ -825,11 +824,53 @@ def _render_vlan_interfaces(cls, network_state, iface_contents, flavor): @staticmethod def _render_dns(network_state, existing_dns_path=None): - # skip writing resolv.conf if network_state doesn't include any input. + + found_nameservers = [] + found_dns_search = [] + + for iface in network_state.iter_interfaces(): + for subnet in iface["subnets"]: + # Add subnet-level DNS + if "dns_nameservers" in subnet: + found_nameservers.extend(subnet["dns_nameservers"]) + if "dns_search" in subnet: + found_dns_search.extend(subnet["dns_search"]) + + # Add interface-level DNS + if "dns" in iface: + found_nameservers += [ + dns + for dns in iface["dns"]["nameservers"] + if dns not in found_nameservers + ] + found_dns_search += [ + search + for search in iface["dns"]["search"] + if search not in found_dns_search + ] + + # When both global and interface specific entries are present, + # use them both to generate /etc/resolv.conf eliminating duplicate + # entries. 
Otherwise use global or interface specific entries whichever + # is provided. + if network_state.dns_nameservers: + found_nameservers += [ + nameserver + for nameserver in network_state.dns_nameservers + if nameserver not in found_nameservers + ] + if network_state.dns_searchdomains: + found_dns_search += [ + search + for search in network_state.dns_searchdomains + if search not in found_dns_search + ] + + # skip writing resolv.conf if no dns information is provided in conf. if not any( [ - len(network_state.dns_nameservers), - len(network_state.dns_searchdomains), + len(found_nameservers), + len(found_dns_search), ] ): return None @@ -838,9 +879,9 @@ def _render_dns(network_state, existing_dns_path=None): content = resolv_conf.ResolvConf( util.load_text_file(existing_dns_path) ) - for nameserver in network_state.dns_nameservers: + for nameserver in found_nameservers: content.add_nameserver(nameserver) - for searchdomain in network_state.dns_searchdomains: + for searchdomain in found_dns_search: content.add_search_domain(searchdomain) header = _make_header(";") content_str = str(content) diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 9478efc35..8b3db6200 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -13,6 +13,7 @@ import re from copy import copy, deepcopy from ipaddress import IPv4Network +from typing import Dict, List, Union from cloudinit import subp, util from cloudinit.net.network_state import net_prefix_to_ipv4_mask @@ -282,7 +283,45 @@ def _netdev_info_ifconfig(ifconfig_data): return devs -def netdev_info(empty=""): +def netdev_info( + empty="", +) -> Dict[str, Dict[str, Union[str, List[Dict[str, str]]]]]: + """return the instance's interfaces and interface data + + includes, interface name, link state, hardware address, and lists of ipv4 + and ipv6 addresses + + example output: + { + 'lo': { + 'up': True, + 'hwaddr': '', + 'ipv4': [ + { + 'bcast': '', + 'ip': '127.0.0.1', + 'mask': '255.0.0.0', + 'scope': 'host', + }], + 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], + }, + 'lxdbr0': { + 'up': True + 'hwaddr': '00:16:3e:fa:84:30', + 'ipv4': [{ + 'bcast': '', + 'ip': '10.161.80.1', + 'mask': '255.255.255.0', + 'scope': 'global', + }], + 'ipv6': [ + {'ip': 'fd42:80e2:4695:1e96::1/64', 'scope6': 'global'}, + {'ip': 'fe80::216:3eff:fefa:8430/64', 'scope6': 'link'}, + ] + }, + } + + """ devs = {} if util.is_NetBSD(): (ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1]) diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py index 8178d4887..2d4612e96 100644 --- a/cloudinit/safeyaml.py +++ b/cloudinit/safeyaml.py @@ -10,7 +10,6 @@ import yaml -YAMLError = yaml.YAMLError # SchemaPathMarks track the path to an element within a loaded YAML file. 
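# Illustrative use of these marks (example values, assuming the
# Dict[str, int] shape that load_with_marks below is annotated with):
#
#     from cloudinit.safeyaml import load_with_marks
#
#     data, marks = load_with_marks("a:\n  b: c\n")
#     # data  -> {"a": {"b": "c"}}
#     # marks -> {"a": 1, "a.b": 2}, i.e. dot-path to 1-based line number,
#     # which lets schema validation point errors at source lines.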
# The start_mark and end_mark contain the row and column indicators @@ -49,11 +48,6 @@ def __eq__(self, other): ) -class _CustomSafeLoader(yaml.SafeLoader): - def construct_python_unicode(self, node): - return super().construct_scalar(node) - - def _find_closest_parent(child_mark, marks): for mark in marks[::-1]: if child_mark in mark and not child_mark == mark: @@ -236,12 +230,6 @@ def get_single_data(self): return data -_CustomSafeLoader.add_constructor( - "tag:yaml.org,2002:python/unicode", - _CustomSafeLoader.construct_python_unicode, -) - - class NoAliasSafeDumper(yaml.dumper.SafeDumper): """A class which avoids constructing anchors/aliases on yaml dump""" @@ -270,10 +258,6 @@ def load_with_marks(blob) -> Tuple[Any, Dict[str, int]]: return result, schemamarks -def load(blob): - return yaml.load(blob, Loader=_CustomSafeLoader) - - def dumps(obj, explicit_start=True, explicit_end=True, noalias=False): """Return data in nicely formatted yaml.""" diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 6f06ea3ac..6d98d0795 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -16,7 +16,7 @@ CLEAN_RUNPARTS_DIR = "/etc/cloud/clean.d" -RUN_CLOUD_CONFIG = "/run/cloud-init/cloud.cfg" +DEFAULT_RUN_DIR = "/run/cloud-init" # What u get if no config is provided CFG_BUILTIN = { @@ -59,6 +59,7 @@ "system_info": { "paths": { "cloud_dir": "/var/lib/cloud", + "docs_dir": "/usr/share/doc/cloud-init/", "templates_dir": "/etc/cloud/templates/", }, "distro": "ubuntu", diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py index 63744178f..00eb4f4af 100644 --- a/cloudinit/signal_handler.py +++ b/cloudinit/signal_handler.py @@ -44,7 +44,7 @@ def _handle_exit(signum, frame): contents = StringIO() contents.write("%s\n" % (msg)) _pprint_frame(frame, 1, BACK_FRAME_TRACE_DEPTH, contents) - util.multi_log(contents.getvalue(), console=True, stderr=False, log=LOG) + util.multi_log(contents.getvalue(), log=LOG, log_level=logging.ERROR) sys.exit(rc) diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 8db89e467..727477df4 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -7,7 +7,7 @@ from cloudinit import dmi, sources from cloudinit.event import EventScope, EventType from cloudinit.sources import DataSourceEc2 as EC2 -from cloudinit.sources import DataSourceHostname +from cloudinit.sources import DataSourceHostname, NicOrder LOG = logging.getLogger(__name__) @@ -32,6 +32,11 @@ def __init__(self, sys_cfg, distro, paths): super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths) self.default_update_events = copy.deepcopy(self.default_update_events) self.default_update_events[EventScope.NETWORK].add(EventType.BOOT) + self._fallback_nic_order = NicOrder.NIC_NAME + + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + self._fallback_nic_order = NicOrder.NIC_NAME def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): hostname = self.metadata.get("hostname") diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 418e4ac69..aeecd15d0 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -77,6 +77,12 @@ def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None self.supported_seed_starts = ("/", "file://") + self.source = sources.METADATA_UNKNOWN + + def _unpickle(self, ci_pkl_version: int) 
-> None: + super()._unpickle(ci_pkl_version) + if not hasattr(self, "source"): + self.source = sources.METADATA_UNKNOWN def __str__(self): root = sources.DataSource.__str__(self) @@ -167,8 +173,6 @@ def _get_data(self): def _get_subplatform(self): """Return the subplatform metadata details.""" cloud_type = self.get_cloud_type() - if not hasattr(self, "source"): - self.source = sources.METADATA_UNKNOWN if cloud_type == "RHEV": self.source = "/dev/fd0" return "%s (%s)" % (cloud_type.lower(), self.source) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index c4eb24f67..d7f521f57 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -11,10 +11,10 @@ import os.path import re import socket -import xml.etree.ElementTree as ET +import xml.etree.ElementTree as ET # nosec B405 from enum import Enum from pathlib import Path -from time import sleep, time +from time import monotonic, sleep, time from typing import Any, Dict, List, Optional import requests @@ -49,7 +49,7 @@ from cloudinit.url_helper import UrlError try: - import crypt + import crypt # pylint: disable=W4901 blowfish_hash: Any = functools.partial( crypt.crypt, salt=f"$6${util.rand_str(strlen=16)}" @@ -303,7 +303,6 @@ def get_resource_disk_on_freebsd(port_id) -> Optional[str]: DS_CFG_PATH = ["datasource", DS_NAME] DS_CFG_KEY_PRESERVE_NTFS = "never_destroy_ntfs" -DEF_EPHEMERAL_LABEL = "Temporary Storage" # The redacted password fails to meet password complexity requirements # so we can safely use this to mask/redact the password in the ovf-env.xml @@ -332,7 +331,7 @@ def __init__(self, sys_cfg, distro, paths): ) self._iso_dev = None self._network_config = None - self._ephemeral_dhcp_ctx = None + self._ephemeral_dhcp_ctx: Optional[EphemeralDHCPv4] = None self._route_configured_for_imds = False self._route_configured_for_wireserver = False self._wireserver_endpoint = DEFAULT_WIRESERVER_ENDPOINT @@ -427,8 +426,8 @@ def _setup_ephemeral_networking( dhcp_log_func=dhcp_log_cb, ) - lease = None - start_time = time() + lease: Optional[Dict[str, Any]] = None + start_time = monotonic() deadline = start_time + timeout_minutes * 60 with events.ReportEventStack( name="obtain-dhcp-lease", @@ -445,7 +444,7 @@ def _setup_ephemeral_networking( ) self._report_failure( errors.ReportableErrorDhcpInterfaceNotFound( - duration=time() - start_time + duration=monotonic() - start_time ), host_only=True, ) @@ -464,7 +463,7 @@ def _setup_ephemeral_networking( ) self._report_failure( errors.ReportableErrorDhcpLease( - duration=time() - start_time, interface=iface + duration=monotonic() - start_time, interface=iface ), host_only=True, ) @@ -483,7 +482,7 @@ def _setup_ephemeral_networking( ) # Sleep before retrying, otherwise break if past deadline. 
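# The time() -> monotonic() swap throughout this series makes retry
# deadlines immune to wall-clock steps (NTP sync, manual changes). A
# self-contained sketch of the pattern used here:
#
#     from time import monotonic, sleep
#
#     deadline = monotonic() + timeout_minutes * 60
#     while True:
#         lease = try_dhcp()  # hypothetical fetch attempt
#         if lease is None and monotonic() + retry_sleep < deadline:
#             sleep(retry_sleep)
#         else:
#             break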
- if lease is None and time() + retry_sleep < deadline: + if lease is None and monotonic() + retry_sleep < deadline: sleep(retry_sleep) else: break @@ -766,7 +765,7 @@ def crawl_metadata(self): @azure_ds_telemetry_reporter def get_metadata_from_imds(self, report_failure: bool) -> Dict: - start_time = time() + start_time = monotonic() retry_deadline = start_time + 300 # As a temporary workaround to support Azure Stack implementations @@ -785,7 +784,7 @@ def get_metadata_from_imds(self, report_failure: bool) -> Dict: ) except UrlError as error: error_string = str(error) - duration = time() - start_time + duration = monotonic() - start_time error_report = errors.ReportableErrorImdsUrlError( exception=error, duration=duration ) @@ -1253,7 +1252,7 @@ def _wait_for_pps_unknown_reuse(self): def _poll_imds(self) -> bytes: """Poll IMDs for reprovisiondata XML document data.""" dhcp_attempts = 0 - reprovision_data = None + reprovision_data: Optional[bytes] = None while not reprovision_data: if not self._is_ephemeral_networking_up(): dhcp_attempts += 1 @@ -1799,7 +1798,7 @@ def write_files(datadir, files, dirmode=None): def _redact_password(cnt, fname): """Azure provides the UserPassword in plain text. So we redact it""" try: - root = ET.fromstring(cnt) + root = ET.fromstring(cnt) # nosec B314 for elem in root.iter(): if ( "UserPassword" in elem.tag @@ -1965,6 +1964,9 @@ def generate_network_config_from_instance_network_metadata( # addresses. nicname = "eth{idx}".format(idx=idx) dhcp_override = {"route-metric": (idx + 1) * 100} + # DNS resolution through secondary NICs is not supported, disable it. + if idx > 0: + dhcp_override["use-dns"] = False dev_config: Dict[str, Any] = { "dhcp4": True, "dhcp4-overrides": dhcp_override, diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 2fafde13e..61bf94f51 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -177,7 +177,7 @@ def wait_for_metadata_service(self): self.metadata_address, "latest/meta-data/instance-id" ) ] - start_time = time.time() + start_time = time.monotonic() url, _response = uhelp.wait_for_url( urls=urls, max_wait=url_params.max_wait_seconds, @@ -192,7 +192,7 @@ def wait_for_metadata_service(self): "Giving up on waiting for the metadata from %s" " after %s seconds", urls, - int(time.time() - start_time), + int(time.monotonic() - start_time), ) return bool(url) @@ -210,7 +210,7 @@ def _get_data(self): try: if not self.wait_for_metadata_service(): return False - start_time = time.time() + start_time = time.monotonic() self.userdata_raw = ec2.get_instance_userdata( self.api_ver, self.metadata_address ) @@ -219,7 +219,7 @@ def _get_data(self): ) LOG.debug( "Crawl of metadata service took %s seconds", - int(time.time() - start_time), + int(time.monotonic() - start_time), ) password_client = CloudStackPasswordServerClient(self.vr_addr) try: diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 057daea77..3f82c89ea 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -19,7 +19,6 @@ # Various defaults/constants... 
DEFAULT_IID = "iid-dsconfigdrive" -DEFAULT_MODE = "pass" DEFAULT_METADATA = { "instance-id": DEFAULT_IID, } @@ -142,7 +141,6 @@ def _get_data(self): self.files.update(results.get("files", {})) vd = results.get("vendordata") - self.vendordata_pure = vd try: self.vendordata_raw = sources.convert_vendordata(vd) except ValueError as e: @@ -150,7 +148,6 @@ def _get_data(self): self.vendordata_raw = None vd2 = results.get("vendordata2") - self.vendordata2_pure = vd2 try: self.vendordata2_raw = sources.convert_vendordata(vd2) except ValueError as e: diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 5145bde7b..951006ed8 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -48,6 +48,7 @@ def __init__(self, sys_cfg, distro, paths): self.use_ip4LL = self.ds_cfg.get("use_ip4LL", MD_USE_IPV4LL) self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY) self._network_config = None + self.metadata_full = None def _unpickle(self, ci_pkl_version: int) -> None: super()._unpickle(ci_pkl_version) @@ -93,7 +94,7 @@ def _get_data(self): self.userdata_raw = md.get("user_data", None) if ipv4LL_nic: - do_helper.del_ipv4_link_local(ipv4LL_nic) + do_helper.del_ipv4_link_local(self.distro, ipv4LL_nic) return True diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 81e7ab419..430002243 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -12,6 +12,8 @@ import logging import os import time +import uuid +from contextlib import suppress from typing import Dict, List from cloudinit import dmi, net, sources @@ -22,12 +24,11 @@ from cloudinit.net import netplan from cloudinit.net.dhcp import NoDHCPLeaseError from cloudinit.net.ephemeral import EphemeralIPNetwork +from cloudinit.sources import NicOrder from cloudinit.sources.helpers import ec2 LOG = logging.getLogger(__name__) -SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) - STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") STRICT_ID_DEFAULT = "warn" @@ -110,10 +111,13 @@ class DataSourceEc2(sources.DataSource): def __init__(self, sys_cfg, distro, paths): super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) self.metadata_address = None + self.identity = None + self._fallback_nic_order = NicOrder.MAC def _unpickle(self, ci_pkl_version: int) -> None: super()._unpickle(ci_pkl_version) self.extra_hotplug_udev_rules = _EXTRA_HOTPLUG_UDEV_RULES + self._fallback_nic_order = NicOrder.MAC def _get_cloud_name(self): """Return the cloud name as identified during _get_data.""" @@ -196,9 +200,6 @@ def launch_index(self): @property def platform(self): - # Handle upgrade path of pickled ds - if not hasattr(self, "_platform_type"): - self._platform_type = DataSourceEc2.dsname.lower() if not self._platform_type: self._platform_type = DataSourceEc2.dsname.lower() return self._platform_type @@ -326,6 +327,8 @@ def _maybe_fetch_api_token(self, mdurls): return None def wait_for_metadata_service(self): + urls = [] + start_time = 0 mcfg = self.ds_cfg url_params = self.get_url_params() @@ -359,7 +362,6 @@ def wait_for_metadata_service(self): and self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS ): # if we can't get a token, use instance-id path - urls = [] url2base = {} url_path = "{ver}/meta-data/instance-id".format( ver=self.min_metadata_version @@ -370,7 +372,7 @@ def wait_for_metadata_service(self): urls.append(cur) url2base[cur] = url - start_time = time.time() + start_time = 
time.monotonic() url, _ = uhelp.wait_for_url( urls=urls, max_wait=url_params.max_wait_seconds, @@ -393,7 +395,7 @@ def wait_for_metadata_service(self): LOG.critical( "Giving up on md from %s after %s seconds", urls, - int(time.time() - start_time), + int(time.monotonic() - start_time), ) return bool(metadata_address) @@ -527,6 +529,7 @@ def network_config(self): full_network_config=util.get_cfg_option_bool( self.ds_cfg, "apply_full_imds_network_config", True ), + fallback_nic_order=self._fallback_nic_order, ) # Non-VPC (aka Classic) Ec2 instances need to rewrite the @@ -785,11 +788,17 @@ def identify_aliyun(data): def identify_aws(data): # data is a dictionary returned by _collect_platform_data. - if data["uuid"].startswith("ec2") and ( - data["uuid_source"] == "hypervisor" or data["uuid"] == data["serial"] - ): + uuid_str = data["uuid"] + if uuid_str.startswith("ec2"): + # example same-endian uuid: + # EC2E1916-9099-7CAF-FD21-012345ABCDEF return CloudNames.AWS - + with suppress(ValueError): + if uuid.UUID(uuid_str).bytes_le.hex().startswith("ec2"): + # check for other endianness + # example other-endian uuid: + # 45E12AEC-DCD1-B213-94ED-012345ABCDEF + return CloudNames.AWS return None @@ -844,7 +853,6 @@ def _collect_platform_data(): Keys in the dictionary are as follows: uuid: system-uuid from dmi or /sys/hypervisor - uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' serial: dmi 'system-serial-number' (/sys/.../product_serial) asset_tag: 'dmidecode -s chassis-asset-tag' vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor) @@ -853,44 +861,32 @@ def _collect_platform_data(): On Ec2 instances experimentation is that product_serial is upper case, and product_uuid is lower case. This returns lower case values for both. """ - data = {} - try: + uuid = None + with suppress(OSError, UnicodeDecodeError): uuid = util.load_text_file("/sys/hypervisor/uuid").strip() - data["uuid_source"] = "hypervisor" - except Exception: - uuid = dmi.read_dmi_data("system-uuid") - data["uuid_source"] = "dmi" - - if uuid is None: - uuid = "" - data["uuid"] = uuid.lower() - - serial = dmi.read_dmi_data("system-serial-number") - if serial is None: - serial = "" - data["serial"] = serial.lower() + uuid = uuid or dmi.read_dmi_data("system-uuid") or "" + serial = dmi.read_dmi_data("system-serial-number") or "" + asset_tag = dmi.read_dmi_data("chassis-asset-tag") or "" + vendor = dmi.read_dmi_data("system-manufacturer") or "" + product_name = dmi.read_dmi_data("system-product-name") or "" - asset_tag = dmi.read_dmi_data("chassis-asset-tag") - if asset_tag is None: - asset_tag = "" - - data["asset_tag"] = asset_tag.lower() - - vendor = dmi.read_dmi_data("system-manufacturer") - data["vendor"] = (vendor if vendor else "").lower() - - product_name = dmi.read_dmi_data("system-product-name") - data["product_name"] = (product_name if product_name else "").lower() - - return data + return { + "uuid": uuid.lower(), + "serial": serial.lower(), + "asset_tag": asset_tag.lower(), + "vendor": vendor.lower(), + "product_name": product_name.lower(), + } def _build_nic_order( - macs_metadata: Dict[str, Dict], macs: List[str] + macs_metadata: Dict[str, Dict], + macs_to_nics: Dict[str, str], + fallback_nic_order: NicOrder = NicOrder.MAC, ) -> Dict[str, int]: """ - Builds a dictionary containing macs as keys nad nic orders as values, + Builds a dictionary containing macs as keys and nic orders as values, taking into account `network-card` and `device-number` if present. 
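    For example, entries whose ("network-card", "device-number") metadata
    is (0, 1), (1, 0) and (0, 0) sort to (0, 0), (0, 1), (1, 0); a missing
    key sorts as +infinity, and remaining ties fall back to the MAC or the
    NIC name per fallback_nic_order (example values only).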
Note that the first NIC will be the primary NIC as it will be the one with @@ -898,19 +894,22 @@ def _build_nic_order( @param macs_metadata: dictionary with mac address as key and contents like: {"device-number": "0", "interface-id": "...", "local-ipv4s": ...} - @macs: list of macs to consider + @macs_to_nics: dictionary with mac address as key and nic name as value @return: Dictionary with macs as keys and nic orders as values. """ nic_order: Dict[str, int] = {} - if len(macs) == 0 or len(macs_metadata) == 0: + if len(macs_to_nics) == 0 or len(macs_metadata) == 0: return nic_order valid_macs_metadata = filter( # filter out nics without metadata (not a physical nic) lambda mmd: mmd[1] is not None, # filter by macs - map(lambda mac: (mac, macs_metadata.get(mac)), macs), + map( + lambda mac: (mac, macs_metadata.get(mac), macs_to_nics[mac]), + macs_to_nics.keys(), + ), ) def _get_key_as_int_or(dikt, key, alt_value): @@ -927,7 +926,7 @@ def _get_key_as_int_or(dikt, key, alt_value): # function. return { mac: i - for i, (mac, _mac_metadata) in enumerate( + for i, (mac, _mac_metadata, _nic_name) in enumerate( sorted( valid_macs_metadata, key=lambda mmd: ( @@ -937,6 +936,9 @@ def _get_key_as_int_or(dikt, key, alt_value): _get_key_as_int_or( mmd[1], "device-number", float("infinity") ), + mmd[2] + if fallback_nic_order == NicOrder.NIC_NAME + else mmd[0], ), ) ) @@ -1034,6 +1036,7 @@ def convert_ec2_metadata_network_config( macs_to_nics=None, fallback_nic=None, full_network_config=True, + fallback_nic_order=NicOrder.MAC, ): """Convert ec2 metadata to network config version 2 data dict. @@ -1076,8 +1079,10 @@ def convert_ec2_metadata_network_config( return netcfg # Apply network config for all nics and any secondary IPv4/v6 addresses is_netplan = isinstance(distro.network_renderer, netplan.Renderer) + nic_order = _build_nic_order( + macs_metadata, macs_to_nics, fallback_nic_order + ) macs = sorted(macs_to_nics.keys()) - nic_order = _build_nic_order(macs_metadata, macs) for mac in macs: nic_name = macs_to_nics[mac] nic_metadata = macs_metadata.get(mac) diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 69b93d576..1e2a422cd 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -18,7 +18,6 @@ MD_V1_URL = "http://metadata.google.internal/computeMetadata/v1/" BUILTIN_DS_CONFIG = {"metadata_url": MD_V1_URL} -REQUIRED_FIELDS = ("instance-id", "availability-zone", "local-hostname") GUEST_ATTRIBUTES_URL = ( "http://metadata.google.internal/computeMetadata/" "v1/instance/guest-attributes" diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py index 110f7ebef..7ef5a5c31 100644 --- a/cloudinit/sources/DataSourceHetzner.py +++ b/cloudinit/sources/DataSourceHetzner.py @@ -4,7 +4,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
# """Hetzner Cloud API Documentation - https://docs.hetzner.cloud/""" +https://docs.hetzner.cloud/""" import logging @@ -48,6 +48,7 @@ def __init__(self, sys_cfg, distro, paths): self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY) self._network_config = sources.UNSET self.dsmode = sources.DSMODE_NETWORK + self.metadata_full = None def _get_data(self): (on_hetzner, serial) = get_hcloud_data() diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index 366f73ef9..89edd79f7 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -1,5 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. -"""Datasource for IBMCloud. +r"""Datasource for IBMCloud. IBMCloud is also know as SoftLayer or BlueMix. IBMCloud hypervisor is xen (2018-03-10). @@ -107,7 +107,6 @@ class Platforms: TEMPLATE_LIVE_METADATA = "Template/Live/Metadata" - TEMPLATE_LIVE_NODATA = "UNABLE TO BE IDENTIFIED." TEMPLATE_PROVISIONING_METADATA = "Template/Provisioning/Metadata" TEMPLATE_PROVISIONING_NODATA = "Template/Provisioning/No-Metadata" OS_CODE = "OS-Code/Live" @@ -147,7 +146,6 @@ def _get_data(self): self.userdata_raw = results.get("userdata") self.network_json = results.get("networkdata") vd = results.get("vendordata") - self.vendordata_pure = vd self.system_uuid = results["system-uuid"] try: self.vendordata_raw = sources.convert_vendordata(vd) @@ -297,7 +295,7 @@ def read_md(): @return: None if not running on IBM Cloud. dictionary with guaranteed fields: metadata, version and optional fields: userdata, vendordata, networkdata. - Also includes the system uuid from /sys/hypervisor/uuid.""" + Also includes the system uuid from /sys/hypervisor/uuid.""" platform, path = get_ibm_platform() if platform is None: LOG.debug("This is not an IBMCloud platform.") diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py index cd3161016..4f69d90eb 100644 --- a/cloudinit/sources/DataSourceLXD.py +++ b/cloudinit/sources/DataSourceLXD.py @@ -112,6 +112,7 @@ class SocketHTTPConnection(HTTPConnection): def __init__(self, socket_path): super().__init__("localhost") self.socket_path = socket_path + self.sock = None def connect(self): self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) @@ -131,6 +132,13 @@ class LXDSocketAdapter(HTTPAdapter): def get_connection(self, url, proxies=None): return SocketConnectionPool(LXD_SOCKET_PATH) + # Fix for requests 2.32.2+: + # https://github.com/psf/requests/pull/6710 + def get_connection_with_tls_context( + self, request, verify, proxies=None, cert=None + ): + return self.get_connection(request.url, proxies) + def _raw_instance_data_to_dict(metadata_type: str, metadata_value) -> dict: """Convert raw instance data from str, bytes, YAML to dict @@ -166,7 +174,7 @@ class DataSourceLXD(sources.DataSource): dsname = "LXD" _network_config: Union[Dict, str] = sources.UNSET - _crawled_metadata: Union[Dict, str] = sources.UNSET + _crawled_metadata: Optional[Union[Dict, str]] = sources.UNSET sensitive_metadata_keys: Tuple[ str, ... 
@@ -205,10 +213,6 @@ def _get_data(self) -> bool: user_metadata = _raw_instance_data_to_dict( "user.meta-data", user_metadata ) - if not isinstance(self.metadata, dict): - self.metadata = util.mergemanydict( - [util.load_yaml(self.metadata), user_metadata] - ) if "user-data" in self._crawled_metadata: self.userdata_raw = self._crawled_metadata["user-data"] if "network-config" in self._crawled_metadata: @@ -283,7 +287,7 @@ def _get_json_response( "Skipping %s on [HTTP:%d]:%s", url, url_response.status_code, - url_response.text, + url_response.content.decode("utf-8"), ) return {} try: @@ -292,7 +296,7 @@ def _get_json_response( raise sources.InvalidMetaDataException( "Unable to process LXD config at {url}." " Expected JSON but found: {resp}".format( - url=url, resp=url_response.text + url=url, resp=url_response.content.decode("utf-8") ) ) from exc @@ -320,7 +324,7 @@ def _do_request( "Invalid HTTP response [{code}] from {route}: {resp}".format( code=response.status_code, route=url, - resp=response.text, + resp=response.content.decode("utf-8"), ) ) return response @@ -331,7 +335,7 @@ class MetaDataKeys(Flag): CONFIG = auto() DEVICES = auto() META_DATA = auto() - ALL = CONFIG | DEVICES | META_DATA + ALL = CONFIG | DEVICES | META_DATA # pylint: disable=E1131 class _MetaDataReader: @@ -363,12 +367,13 @@ def _process_config(self, session: requests.Session) -> dict: config_route_response = _do_request( session, config_route_url, do_raise=False ) + response_text = config_route_response.content.decode("utf-8") if not config_route_response.ok: LOG.debug( "Skipping %s on [HTTP:%d]:%s", config_route_url, config_route_response.status_code, - config_route_response.text, + response_text, ) continue @@ -376,16 +381,14 @@ def _process_config(self, session: requests.Session) -> dict: # Leave raw data values/format unchanged to represent it in # instance-data.json for cloud-init query or jinja template # use. - config["config"][cfg_key] = config_route_response.text + config["config"][cfg_key] = response_text # Promote common CONFIG_KEY_ALIASES to top-level keys. if cfg_key in CONFIG_KEY_ALIASES: # Due to sort of config_routes, promote cloud-init.* # aliases before user.*. This allows user.* keys to act as # fallback config on old LXD, with new cloud-init images. if CONFIG_KEY_ALIASES[cfg_key] not in config: - config[ - CONFIG_KEY_ALIASES[cfg_key] - ] = config_route_response.text + config[CONFIG_KEY_ALIASES[cfg_key]] = response_text else: LOG.warning( "Ignoring LXD config %s in favor of %s value.", @@ -403,7 +406,9 @@ def __call__(self, *, metadata_keys: MetaDataKeys) -> dict: md_route = url_helper.combine_url( self._version_url, "meta-data" ) - md["meta-data"] = _do_request(session, md_route).text + md["meta-data"] = _do_request( + session, md_route + ).content.decode("utf-8") if MetaDataKeys.CONFIG in metadata_keys: md.update(self._process_config(session)) if MetaDataKeys.DEVICES in metadata_keys: diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index b33121d98..136cea824 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -43,6 +43,10 @@ class DataSourceMAAS(sources.DataSource): id_hash = None _oauth_helper = None + # Setup read_url parameters per get_url_params. 
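# With these class attributes, the generic DataSource.get_url_params()
# supplies the defaults, and the values remain overridable from datasource
# config, e.g. (illustrative cloud-config, key names as in the removed
# ds_cfg handling below):
#
#     datasource:
#       MAAS:
#         max_wait: 120
#         timeout: 50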
+ url_max_wait = 120 + url_timeout = 50 + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.base_url = None @@ -104,7 +108,6 @@ def _set_data(self, url, data): ud, md, vd = data self.userdata_raw = ud self.metadata = md - self.vendordata_pure = vd if vd: try: self.vendordata_raw = sources.convert_vendordata(vd) @@ -117,30 +120,18 @@ def _get_subplatform(self): return "seed-dir (%s)" % self.base_url def wait_for_metadata_service(self, url): - mcfg = self.ds_cfg - max_wait = 120 - try: - max_wait = int(mcfg.get("max_wait", max_wait)) - except Exception: - util.logexc(LOG, "Failed to get max wait. using %s", max_wait) - - if max_wait == 0: + url_params = self.get_url_params() + if url_params.max_wait_seconds == 0: return False - timeout = 50 - try: - if timeout in mcfg: - timeout = int(mcfg.get("timeout", timeout)) - except Exception: - LOG.warning("Failed to get timeout, using %s", timeout) - - starttime = time.time() - if url.endswith("/"): - url = url[:-1] + starttime = time.monotonic() + url = url.rstrip("/") check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) urls = [check_url] url, _response = self.oauth_helper.wait_for_url( - urls=urls, max_wait=max_wait, timeout=timeout + urls=urls, + max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, ) if url: @@ -149,7 +140,7 @@ def wait_for_metadata_service(self, url): LOG.critical( "Giving up on md from %s after %i seconds", urls, - int(time.time() - starttime), + int(time.monotonic() - starttime), ) return bool(url) @@ -188,7 +179,7 @@ def get_oauth_helper(cfg): """Return an oauth helper instance for values in cfg. @raises ValueError from OauthUrlHelper if some required fields have - true-ish values but others do not.""" + true-ish values but others do not.""" keys = ("consumer_key", "consumer_secret", "token_key", "token_secret") kwargs = dict([(r, cfg.get(r)) for r in keys]) return url_helper.OauthUrlHelper(**kwargs) diff --git a/cloudinit/sources/DataSourceNWCS.py b/cloudinit/sources/DataSourceNWCS.py index 1ebd6e821..7c89713cb 100644 --- a/cloudinit/sources/DataSourceNWCS.py +++ b/cloudinit/sources/DataSourceNWCS.py @@ -43,6 +43,12 @@ def __init__(self, sys_cfg, distro, paths): self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY) self._network_config = sources.UNSET self.dsmode = sources.DSMODE_NETWORK + self.metadata_full = None + + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + if not self._network_config: + self._network_config = sources.UNSET def _get_data(self): md = self.get_metadata() @@ -94,13 +100,6 @@ def get_metadata(self): def network_config(self): LOG.debug("Attempting network configuration") - if self._network_config is None: - LOG.warning( - "Found None as cached _network_config, resetting to %s", - sources.UNSET, - ) - self._network_config = sources.UNSET - if self._network_config != sources.UNSET: return self._network_config diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 12b18d5f6..1bb347bf4 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -32,6 +32,8 @@ def __init__(self, sys_cfg, distro, paths): ] self.seed_dir = None self.supported_seed_starts = ("/", "file://") + self._network_config = None + self._network_eni = None def __str__(self): root = sources.DataSource.__str__(self) @@ -65,7 +67,7 @@ def _get_data(self): try: # Parse the system serial label from dmi. 
If not empty, try parsing - # like the commandline + # like the command line md = {} serial = dmi.read_dmi_data("system-serial-number") if serial and load_cmdline_data(md, serial): @@ -226,9 +228,6 @@ def _pp2d_callback(mp, data): @property def platform_type(self): - # Handle upgrade path of pickled ds - if not hasattr(self, "_platform_type"): - self._platform_type = None if not self._platform_type: self._platform_type = "lxd" if util.is_lxd() else "nocloud" return self._platform_type @@ -312,7 +311,9 @@ def load_cmdline_data(fill, cmdline=None): seedfrom = fill.get("seedfrom") if seedfrom: - if seedfrom.startswith(("http://", "https://")): + if seedfrom.startswith( + ("http://", "https://", "ftp://", "ftps://") + ): fill["dsmode"] = sources.DSMODE_NETWORK elif seedfrom.startswith(("file://", "/")): fill["dsmode"] = sources.DSMODE_LOCAL @@ -389,10 +390,15 @@ def _merge_new_seed(cur, seeded): class DataSourceNoCloudNet(DataSourceNoCloud): def __init__(self, sys_cfg, distro, paths): DataSourceNoCloud.__init__(self, sys_cfg, distro, paths) - self.supported_seed_starts = ("http://", "https://") + self.supported_seed_starts = ( + "http://", + "https://", + "ftp://", + "ftps://", + ) def ds_detect(self): - """Check dmi and kernel commandline for dsname + """Check dmi and kernel command line for dsname NoCloud historically used "nocloud-net" as its dsname for network timeframe (DEP_NETWORK), which supports http(s) urls. @@ -438,3 +444,14 @@ def ds_detect(self): # Return a list of data sources that match this set of dependencies def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) + + +if __name__ == "__main__": + from sys import argv + + logging.basicConfig(level=logging.DEBUG) + seedfrom = argv[1] + md_seed, ud, vd = util.read_seeded(seedfrom) + print(f"seeded: {md_seed}") + print(f"ud: {ud}") + print(f"vd: {vd}") diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index 40e89ae8a..4f774d599 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -32,10 +32,6 @@ def _get_subplatform(self): def get_instance_id(self): return "iid-datasource-none" - @property - def is_disconnected(self): - return True - # Used to match classes to dependencies datasources = [ diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 04cb2629f..e2e2d31a6 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -17,9 +17,11 @@ import logging import os import re -from xml.dom import minidom +from xml.dom import minidom # nosec B408 -from cloudinit import safeyaml, sources, subp, util +import yaml + +from cloudinit import sources, subp, util LOG = logging.getLogger(__name__) @@ -361,7 +363,7 @@ def find_child(node, filter_func): def get_properties(contents): - dom = minidom.parseString(contents) + dom = minidom.parseString(contents) # nosec B318 if dom.documentElement.localName != "Environment": raise XmlError("No Environment Node") @@ -416,4 +418,4 @@ def safeload_yaml_or_dict(data): """ if not data: return {} - return safeyaml.load(data) + return yaml.safe_load(data) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index dd9304731..5ad3b6bc6 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -18,7 +18,8 @@ import os import pwd import re -import string +import shlex +import textwrap from cloudinit import atomic_helper, net, sources, subp, 
util @@ -27,6 +28,16 @@ DEFAULT_IID = "iid-dsopennebula" DEFAULT_PARSEUSER = "nobody" CONTEXT_DISK_FILES = ["context.sh"] +EXCLUDED_VARS = ( + "EPOCHREALTIME", + "EPOCHSECONDS", + "RANDOM", + "LINENO", + "SECONDS", + "_", + "SRANDOM", + "__v", +) class DataSourceOpenNebula(sources.DataSource): @@ -37,6 +48,7 @@ def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None self.seed_dir = os.path.join(paths.seed_dir, "opennebula") + self.network = None def __str__(self): root = sources.DataSource.__str__(self) @@ -298,109 +310,86 @@ def switch_user_cmd(user): return ["sudo", "-u", user] -def parse_shell_config( - content, keylist=None, bash=None, asuser=None, switch_user_cb=None -): - - if isinstance(bash, str): - bash = [bash] - elif bash is None: - bash = ["bash", "-e"] +def varprinter(): + """print the shell environment variables within delimiters to be parsed""" + return textwrap.dedent( + """ + printf "%s\\0" _start_ + [ $0 != 'sh' ] && set -o posix + set + [ $0 != 'sh' ] && set +o posix + printf "%s\\0" _start_ + """ + ) - if switch_user_cb is None: - switch_user_cb = switch_user_cmd - # allvars expands to all existing variables by using '${!x*}' notation - # where x is lower or upper case letters or '_' - allvars = ["${!%s*}" % x for x in string.ascii_letters + "_"] +def parse_shell_config(content, asuser=None): + """run content and return environment variables which changed - keylist_in = keylist - if keylist is None: - keylist = allvars - keylist_in = [] + WARNING: the special variable _start_ is used to delimit content - setup = "\n".join( - ( - '__v="";', - "", - ) - ) + a context.sh that defines this variable might break in unexpected + ways - def varprinter(vlist): - # output '\0'.join(['_start_', key=value NULL for vars in vlist] - return "\n".join( - ( - 'printf "%s\\0" _start_', - "for __v in %s; do" % " ".join(vlist), - ' printf "%s=%s\\0" "$__v" "${!__v}";', - "done", - "", - ) + compatible with posix shells such as dash and ash and any shell + which supports `set -o posix` + """ + if b"_start_\x00" in content.encode(): + LOG.warning( + "User defined _start_ variable in context.sh, this may break" + "cloud-init in unexpected ways." 
) - # the rendered 'bcmd' is bash syntax that does + # the rendered 'bcmd' does: + # # setup: declare variables we use (so they show up in 'all') # varprinter(allvars): print all variables known at beginning # content: execute the provided content # varprinter(keylist): print all variables known after content # - # output is then a null terminated array of: - # literal '_start_' - # key=value (for each preset variable) - # literal '_start_' - # key=value (for each post set variable) + # output is then a newline terminated array of: + # [0] unwanted content before first _start_ + # [1] key=value (for each preset variable) + # [2] unwanted content between second and third _start_ + # [3] key=value (for each post set variable) bcmd = ( - "unset IFS\n" - + setup - + varprinter(allvars) + varprinter() + "{\n%s\n\n:\n} > /dev/null\n" % content - + "unset IFS\n" - + varprinter(keylist) + + varprinter() + "\n" ) cmd = [] if asuser is not None: - cmd = switch_user_cb(asuser) - - cmd.extend(bash) - - (output, _error) = subp.subp(cmd, data=bcmd) - - # exclude vars in bash that change on their own or that we used - excluded = ( - "EPOCHREALTIME", - "EPOCHSECONDS", - "RANDOM", - "LINENO", - "SECONDS", - "_", - "SRANDOM", - "__v", - ) - preset = {} + cmd = switch_user_cmd(asuser) + cmd.extend(["sh", "-e"]) + + output = subp.subp(cmd, data=bcmd).stdout + + # exclude vars that change on their own or that we used ret = {} - target = None - output = output[0:-1] # remove trailing null - # go through output. First _start_ is for 'preset', second for 'target'. # Add to ret only things were changed and not in excluded. - for line in output.split("\x00"): - try: - (key, val) = line.split("=", 1) - if target is preset: - preset[key] = val - elif key not in excluded and ( - key in keylist_in or preset.get(key) != val - ): - ret[key] = val - except ValueError: - if line != "_start_": - raise - if target is None: - target = preset - elif target is preset: - target = ret + # skip all content before initial _start_\x00 pair + sections = output.split("_start_\x00")[1:] + + # store env variables prior to content run + # skip all content before second _start\x00 pair + # store env variables prior to content run + before, after = sections[0], sections[2] + + pre_env = dict( + variable.split("=", maxsplit=1) for variable in shlex.split(before) + ) + post_env = dict( + variable.split("=", maxsplit=1) for variable in shlex.split(after) + ) + for key in set(pre_env.keys()).union(set(post_env.keys())): + if key in EXCLUDED_VARS: + continue + value = post_env.get(key) + if value is not None and value != pre_env.get(key): + ret[key] = value return ret diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index ef407bd31..2c8a2b7bb 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -17,7 +17,6 @@ LOG = logging.getLogger(__name__) # Various defaults/constants... 
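# In the URL built below, "%25" is the URL-encoded "%" that separates an
# IPv6 link-local address from its zone identifier (RFC 6874), pinning the
# metadata address to one interface:
#
#     iface = "eth0"  # illustrative fallback interface name
#     url = "http://[fe80::a9fe:a9fe%25{iface}]".format(iface=iface)
#     # -> "http://[fe80::a9fe:a9fe%25eth0]", i.e. fe80::a9fe:a9fe%eth0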
-DEF_MD_URLS = ["http://[fe80::a9fe:a9fe]", "http://169.254.169.254"] DEFAULT_IID = "iid-dsopenstack" DEFAULT_METADATA = { "instance-id": DEFAULT_IID, @@ -73,6 +72,12 @@ def __str__(self): return mstr def wait_for_metadata_service(self): + DEF_MD_URLS = [ + "http://[fe80::a9fe:a9fe%25{iface}]".format( + iface=self.distro.fallback_interface + ), + "http://169.254.169.254", + ] urls = self.ds_cfg.get("metadata_urls", DEF_MD_URLS) filtered = [x for x in urls if util.is_resolvable_url(x)] if set(filtered) != set(urls): @@ -94,7 +99,7 @@ def wait_for_metadata_service(self): url2base[md_url] = url url_params = self.get_url_params() - start_time = time.time() + start_time = time.monotonic() avail_url, _response = url_helper.wait_for_url( urls=md_urls, max_wait=url_params.max_wait_seconds, @@ -107,7 +112,7 @@ def wait_for_metadata_service(self): LOG.debug( "Giving up on OpenStack md from %s after %s seconds", md_urls, - int(time.time() - start_time), + int(time.monotonic() - start_time), ) self.metadata_address = url2base.get(avail_url) @@ -184,7 +189,6 @@ def _get_data(self): self.files.update(results.get("files", {})) vd = results.get("vendordata") - self.vendordata_pure = vd try: self.vendordata_raw = sources.convert_vendordata(vd) except ValueError as e: @@ -192,7 +196,6 @@ def _get_data(self): self.vendordata_raw = None vd2 = results.get("vendordata2") - self.vendordata2_pure = vd2 try: self.vendordata2_raw = sources.convert_vendordata(vd2) except ValueError as e: diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index 1885f9762..8369487bd 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -116,7 +116,6 @@ class DataSourceOracle(sources.DataSource): dsname = "Oracle" system_uuid = None - vendordata_pure = None network_config_sources: Tuple[sources.NetworkConfigSource, ...] = ( sources.NetworkConfigSource.CMD_LINE, sources.NetworkConfigSource.SYSTEM_CFG, @@ -124,7 +123,6 @@ class DataSourceOracle(sources.DataSource): sources.NetworkConfigSource.INITRAMFS, ) - _network_config: dict = {"config": [], "version": 1} perform_dhcp_setup = True # Careful...these can be overridden in __init__ @@ -142,6 +140,7 @@ def __init__(self, sys_cfg, *args, **kwargs): ] ) self._network_config_source = KlibcOracleNetworkConfigSource() + self._network_config: dict = {"config": [], "version": 1} url_params = self.get_url_params() self.url_max_wait = url_params.max_wait_seconds @@ -157,6 +156,8 @@ def _unpickle(self, ci_pkl_version: int) -> None: "_network_config_source", KlibcOracleNetworkConfigSource(), ) + if not hasattr(self, "_network_config"): + self._network_config = {"config": [], "version": 1} def _has_network_config(self) -> bool: return bool(self._network_config.get("config", [])) @@ -408,7 +409,7 @@ def read_opc_metadata( METADATA_PATTERN.format(version=2, path="instance"), METADATA_PATTERN.format(version=1, path="instance"), ] - start_time = time.time() + start_time = time.monotonic() instance_url, instance_response = wait_for_url( urls, max_wait=max_wait, @@ -430,7 +431,7 @@ def read_opc_metadata( # like a worthwhile tradeoff rather than having incomplete metadata. 
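# The vnics fetch below shares one overall deadline with the instance
# fetch above by shrinking max_wait by the time already spent; a sketch
# of the budget arithmetic (names as in this function):
#
#     from time import monotonic
#
#     start_time = monotonic()
#     # ... first wait_for_url() call ...
#     remaining = max_wait - (monotonic() - start_time)
#     # the second wait_for_url() receives max_wait=remaining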
vnics_url, vnics_response = wait_for_url( [METADATA_PATTERN.format(version=metadata_version, path="vnics")], - max_wait=max_wait - (time.time() - start_time), + max_wait=max_wait - (time.monotonic() - start_time), timeout=timeout, headers_cb=_headers_cb, sleep_time=0, diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py index 6729e8460..2fba1149d 100644 --- a/cloudinit/sources/DataSourceRbxCloud.py +++ b/cloudinit/sources/DataSourceRbxCloud.py @@ -220,6 +220,8 @@ class DataSourceRbxCloud(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None + self.gratuitous_arp = None + self.cfg = None def __str__(self): root = sources.DataSource.__str__(self) diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 5ebe13fbd..05b50b98e 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -173,6 +173,7 @@ def __init__(self, sys_cfg, distro, paths): self.max_wait = int(self.ds_cfg.get("max_wait", DEF_MD_MAX_WAIT)) self._network_config = sources.UNSET self.metadata_urls = DS_BASE_URLS + self.metadata_url = None self.userdata_url = None self.vendordata_url = None self.ephemeral_fixed_address = None @@ -199,7 +200,7 @@ def _set_metadata_url(self, urls): Define metadata_url based upon api-metadata URL availability. """ - start_time = time.time() + start_time = time.monotonic() avail_url, _ = url_helper.wait_for_url( urls=urls, max_wait=self.max_wait, @@ -216,7 +217,7 @@ def _set_metadata_url(self, urls): LOG.debug( "Unable to reach api-metadata at %s after %s seconds", urls, - int(time.time() - start_time), + int(time.monotonic() - start_time), ) raise ConnectionError diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 7c526a160..8d6b8c0a2 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -190,6 +190,7 @@ def __init__(self, sys_cfg, distro, paths): self.metadata = {} self.network_data = None self._network_config = None + self.routes_data = None self.script_base_d = os.path.join(self.paths.get_cpath("scripts")) @@ -423,7 +424,7 @@ def _get_value_from_frame(self, expected_request_id, frame): return value def _readline(self): - """ + r""" Reads a line a byte at a time until \n is encountered. Returns an ascii string with the trailing newline removed. @@ -643,7 +644,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient): b.) base64_all: string interpreted as a boolean that indicates if all keys are base64 encoded. c.) 
set a key named b64- with a boolean indicating that - is base64 encoded.""" + is base64 encoded.""" def __init__(self, device, timeout=10, smartos_type=None): s = super(JoyentMetadataLegacySerialClient, self) diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py index cb47ca7df..c69678378 100644 --- a/cloudinit/sources/DataSourceUpCloud.py +++ b/cloudinit/sources/DataSourceUpCloud.py @@ -46,6 +46,7 @@ def __init__(self, sys_cfg, distro, paths): self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT) self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY) self._network_config = None + self.metadata_full = None def _get_sysinfo(self): return uc_helper.read_sysinfo() diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py index 77a2de6cb..888060c9b 100644 --- a/cloudinit/sources/DataSourceVMware.py +++ b/cloudinit/sources/DataSourceVMware.py @@ -16,51 +16,6 @@ * EnvVars * GuestInfo * IMC (Guest Customization) - -Netifaces (https://github.com/al45tair/netifaces) - - Please note this module relies on the netifaces project to introspect the - runtime, network configuration of the host on which this datasource is - running. This is in contrast to the rest of cloud-init which uses the - cloudinit/netinfo module. - - The reasons for using netifaces include: - - * Netifaces is built in C and is more portable across multiple systems - and more deterministic than shell exec'ing local network commands and - parsing their output. - - * Netifaces provides a stable way to determine the view of the host's - network after DHCP has brought the network online. Unlike most other - datasources, this datasource still provides support for JINJA queries - based on networking information even when the network is based on a - DHCP lease. While this does not tie this datasource directly to - netifaces, it does mean the ability to consistently obtain the - correct information is paramount. - - * It is currently possible to execute this datasource on macOS - (which many developers use today) to print the output of the - get_host_info function. This function calls netifaces to obtain - the same runtime network configuration that the datasource would - persist to the local system's instance data. - - However, the netinfo module fails on macOS. The result is either a - hung operation that requires a SIGINT to return control to the user, - or, if brew is used to install iproute2mac, the ip commands are used - but produce output the netinfo module is unable to parse. - - While macOS is not a target of cloud-init, this feature is quite - useful when working on this datasource. - - For more information about this behavior, please see the following - PR comment, https://bit.ly/3fG7OVh. - - The authors of this datasource are not opposed to moving away from - netifaces. The goal may be to eventually do just that. This proviso was - added to the top of this module as a way to remind future-us and others - why netifaces was used in the first place in order to either smooth the - transition away from netifaces or embrace it further up the cloud-init - stack. 
""" import collections @@ -72,9 +27,7 @@ import socket import time -import netifaces - -from cloudinit import atomic_helper, dmi, log, net, sources, util +from cloudinit import atomic_helper, dmi, log, net, netinfo, sources, util from cloudinit.sources.helpers.vmware.imc import guestcust_util from cloudinit.subp import ProcessExecutionError, subp, which @@ -223,7 +176,7 @@ def _get_data(self): break if not self.data_access_method: - LOG.error("failed to find a valid data access method") + LOG.debug("failed to find a valid data access method") return False LOG.info("using data access method %s", self._get_subplatform()) @@ -317,6 +270,18 @@ def get_instance_id(self): self.metadata["instance-id"] = str(id_file.read()).rstrip().lower() return self.metadata["instance-id"] + def check_if_fallback_is_allowed(self): + if ( + self.data_access_method + and self.data_access_method == DATA_ACCESS_METHOD_IMC + and is_vmware_platform() + ): + LOG.debug( + "Cache fallback is allowed for : %s", self._get_subplatform() + ) + return True + return False + def get_public_ssh_keys(self): for key_name in ( "public-keys-data", @@ -802,91 +767,64 @@ def get_default_ip_addrs(): addresses associated with the device used by the default route for a given address. """ - # TODO(promote and use netifaces in cloudinit.net* modules) - gateways = netifaces.gateways() - if "default" not in gateways: - return None, None - - default_gw = gateways["default"] - if ( - netifaces.AF_INET not in default_gw - and netifaces.AF_INET6 not in default_gw - ): - return None, None + # Get ipv4 and ipv6 interfaces associated with default routes + ipv4_if = None + ipv6_if = None + routes = netinfo.route_info() + for route in routes["ipv4"]: + if route["destination"] == "0.0.0.0": + ipv4_if = route["iface"] + break + for route in routes["ipv6"]: + if route["destination"] == "::/0": + ipv6_if = route["iface"] + break + + # Get ip address associated with default interface ipv4 = None ipv6 = None - - gw4 = default_gw.get(netifaces.AF_INET) - if gw4: - _, dev4 = gw4 - addr4_fams = netifaces.ifaddresses(dev4) - if addr4_fams: - af_inet4 = addr4_fams.get(netifaces.AF_INET) - if af_inet4: - if len(af_inet4) > 1: - LOG.debug( - "device %s has more than one ipv4 address: %s", - dev4, - af_inet4, - ) - elif "addr" in af_inet4[0]: - ipv4 = af_inet4[0]["addr"] - - # Try to get the default IPv6 address by first seeing if there is a default - # IPv6 route. - gw6 = default_gw.get(netifaces.AF_INET6) - if gw6: - _, dev6 = gw6 - addr6_fams = netifaces.ifaddresses(dev6) - if addr6_fams: - af_inet6 = addr6_fams.get(netifaces.AF_INET6) - if af_inet6: - if len(af_inet6) > 1: - LOG.debug( - "device %s has more than one ipv6 address: %s", - dev6, - af_inet6, - ) - elif "addr" in af_inet6[0]: - ipv6 = af_inet6[0]["addr"] + netdev = netinfo.netdev_info() + if ipv4_if in netdev: + addrs = netdev[ipv4_if]["ipv4"] + if len(addrs) > 1: + LOG.debug( + "device %s has more than one ipv4 address: %s", ipv4_if, addrs + ) + elif len(addrs) == 1 and "ip" in addrs[0]: + ipv4 = addrs[0]["ip"] + if ipv6_if in netdev: + addrs = netdev[ipv6_if]["ipv6"] + if len(addrs) > 1: + LOG.debug( + "device %s has more than one ipv6 address: %s", ipv6_if, addrs + ) + elif len(addrs) == 1 and "ip" in addrs[0]: + ipv6 = addrs[0]["ip"] # If there is a default IPv4 address but not IPv6, then see if there is a # single IPv6 address associated with the same device associated with the # default IPv4 address. 
- if ipv4 and not ipv6: - af_inet6 = addr4_fams.get(netifaces.AF_INET6) - if af_inet6: - if len(af_inet6) > 1: - LOG.debug( - "device %s has more than one ipv6 address: %s", - dev4, - af_inet6, - ) - elif "addr" in af_inet6[0]: - ipv6 = af_inet6[0]["addr"] + if ipv4 is not None and ipv6 is None: + for dev_name in netdev: + for addr in netdev[dev_name]["ipv4"]: + if addr["ip"] == ipv4 and len(netdev[dev_name]["ipv6"]) == 1: + ipv6 = netdev[dev_name]["ipv6"][0]["ip"] + break # If there is a default IPv6 address but not IPv4, then see if there is a # single IPv4 address associated with the same device associated with the # default IPv6 address. - if not ipv4 and ipv6: - af_inet4 = addr6_fams.get(netifaces.AF_INET) - if af_inet4: - if len(af_inet4) > 1: - LOG.debug( - "device %s has more than one ipv4 address: %s", - dev6, - af_inet4, - ) - elif "addr" in af_inet4[0]: - ipv4 = af_inet4[0]["addr"] + if ipv4 is None and ipv6 is not None: + for dev_name in netdev: + for addr in netdev[dev_name]["ipv6"]: + if addr["ip"] == ipv6 and len(netdev[dev_name]["ipv4"]) == 1: + ipv4 = netdev[dev_name]["ipv4"][0]["ip"] + break return ipv4, ipv6 -# patched socket.getfqdn() - see https://bugs.python.org/issue5004 - - def getfqdn(name=""): """Get fully qualified domain name from name. An empty argument is interpreted as meaning the local host. @@ -921,11 +859,63 @@ def is_valid_ip_addr(val): ) +def convert_to_netifaces_ipv4_format(addr: dict) -> dict: + """ + Takes a cloudinit.netinfo formatted address and converts to netifaces + format, since this module was originally written with netifaces as the + network introspection module. + netifaces ipv4 format: + { + "broadcast": "10.15.255.255", + "netmask": "255.240.0.0", + "addr": "10.0.1.4" + } + cloudinit.netinfo ipv4 format: + { + "ip": "10.0.1.4", + "mask": "255.240.0.0", + "bcast": "10.15.255.255", + "scope": "global", + } + """ + if not addr.get("ip"): + return {} + return { + "broadcast": addr.get("bcast"), + "netmask": addr.get("mask"), + "addr": addr.get("ip"), + } + + +def convert_to_netifaces_ipv6_format(addr: dict) -> dict: + """ + Takes a cloudinit.netinfo formatted address and converts to netifaces + format, since this module was originally written with netifaces as the + network introspection module. + netifaces ipv6 format: + { + "netmask": "ffff:ffff:ffff:ffff::/64", + "addr": "2001:db8:abcd:1234::1" + } + cloudinit.netinfo ipv6 format: + { + "ip": "2001:db8:abcd:1234::1/64", + "scope6": "global", + } + """ + if not addr.get("ip"): + return {} + ipv6 = ipaddress.IPv6Interface(addr.get("ip")) + return { + "netmask": f"{ipv6.netmask}/{ipv6.network.prefixlen}", + "addr": str(ipv6.ip), + } + + def get_host_info(): """ Returns host information such as the host name and network interfaces. 
""" - # TODO(look to promote netifices use up in cloud-init netinfo funcs) host_info = { "network": { "interfaces": { @@ -951,16 +941,16 @@ def get_host_info(): by_ipv4 = host_info["network"]["interfaces"]["by-ipv4"] by_ipv6 = host_info["network"]["interfaces"]["by-ipv6"] - ifaces = netifaces.interfaces() + ifaces = netinfo.netdev_info() for dev_name in ifaces: - addr_fams = netifaces.ifaddresses(dev_name) - af_link = addr_fams.get(netifaces.AF_LINK) - af_inet4 = addr_fams.get(netifaces.AF_INET) - af_inet6 = addr_fams.get(netifaces.AF_INET6) - - mac = None - if af_link and "addr" in af_link[0]: - mac = af_link[0]["addr"] + af_inet4 = [] + af_inet6 = [] + for addr in ifaces[dev_name]["ipv4"]: + af_inet4.append(convert_to_netifaces_ipv4_format(addr)) + for addr in ifaces[dev_name]["ipv6"]: + af_inet6.append(convert_to_netifaces_ipv6_format(addr)) + + mac = ifaces[dev_name].get("hwaddr") # Do not bother recording localhost if mac == "00:00:00:00:00:00": diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py index d03ae5882..2d7f1f31a 100644 --- a/cloudinit/sources/DataSourceVultr.py +++ b/cloudinit/sources/DataSourceVultr.py @@ -42,6 +42,7 @@ def __init__(self, sys_cfg, distro, paths): BUILTIN_DS_CONFIG, ] ) + self.netcfg = None @staticmethod def ds_detect(): diff --git a/cloudinit/sources/DataSourceWSL.py b/cloudinit/sources/DataSourceWSL.py index 0fccf61f7..b81298927 100644 --- a/cloudinit/sources/DataSourceWSL.py +++ b/cloudinit/sources/DataSourceWSL.py @@ -3,41 +3,40 @@ # Author: Carlos Nihelton # # This file is part of cloud-init. See LICENSE file for license information. -""" Datasource to support the Windows Subsystem for Linux platform. """ +"""Datasource to support the Windows Subsystem for Linux platform.""" import logging import os +import typing from pathlib import PurePath -from typing import List, cast +from typing import Any, List, Optional, Tuple, Union, cast + +import yaml from cloudinit import sources, subp, util +from cloudinit.distros import Distro +from cloudinit.helpers import Paths LOG = logging.getLogger(__name__) WSLPATH_CMD = "/usr/bin/wslpath" - -def wsl_path_2_win(path: str) -> PurePath: - """ - Translates a path inside the current WSL instance's filesystem to a - Windows accessible path. - - Example: - # Running under an instance named "CoolInstance" - root = wslpath2win("/") # root == "//wsl.localhost/CoolInstance/" - - :param path: string representing a Linux path, whether existing or not. - """ - out, _ = subp.subp([WSLPATH_CMD, "-am", path]) - return PurePath(out.rstrip()) +DEFAULT_INSTANCE_ID = "iid-datasource-wsl" +LANDSCAPE_DATA_FILE = "%s.user-data" +AGENT_DATA_FILE = "agent.yaml" def instance_name() -> str: """ Returns the name of the current WSL instance as seen from outside. """ - root_net_path = wsl_path_2_win("/") - return root_net_path.name + # Translates a path inside the current WSL instance's filesystem to a + # Windows accessible path. + # Example: + # Running under an instance named "CoolInstance" + # WSLPATH_CMD -am "/" == "//wsl.localhost/CoolInstance/" + root_net_path, _ = subp.subp([WSLPATH_CMD, "-am", "/"]) + return PurePath(root_net_path.rstrip()).name def mounted_win_drives() -> List[str]: @@ -56,26 +55,6 @@ def mounted_win_drives() -> List[str]: return mounted -def win_path_2_wsl(path: str) -> PurePath: - """ - Returns a translation of a Windows path to a Linux path that can be - accessed inside the current instance filesystem. 
- - It requires the Windows drive mounting feature to be enabled and the - disk drive must be muonted for this to succeed. - - Example: - # Assuming Windows drives are mounted under /mnt/ and "S:" doesn't exist: - p = winpath2wsl("C:\\ProgramData") # p == "/mnt/c/ProgramData/" - n = winpath2wsl("S:\\CoolFolder") # Exception! S: is not mounted. - - :param path: string representing a Windows path. The root drive must exist, - although the path is not required to. - """ - out, _ = subp.subp([WSLPATH_CMD, "-au", path]) - return PurePath(out.rstrip()) - - def cmd_executable() -> PurePath: """ Returns the Linux path to the Windows host's cmd.exe. @@ -100,10 +79,13 @@ def cmd_executable() -> PurePath: ) -def cloud_init_data_dir() -> PurePath: +def find_home() -> PurePath: """ - Returns the Windows user profile directory translated as a Linux path - accessible inside the current WSL instance. + Finds the user's home directory path as a WSL path. + + raises: IOError when no mountpoint with cmd.exe is found + ProcessExecutionError when either cmd.exe is unable to retrieve + the user's home directory """ cmd = cmd_executable() @@ -117,11 +99,26 @@ def cloud_init_data_dir() -> PurePath: raise subp.ProcessExecutionError( "No output from cmd.exe to show the user profile dir." ) + # Returns a translation of a Windows path to a Linux path that can be + # accessed inside the current instance filesystem. + # Example: + # Assuming Windows drives are mounted under /mnt/ and "S:" doesn't exist: + # WSLPATH_CMD -au "C:\\ProgramData" == "/mnt/c/ProgramData/" + # WSLPATH_CMD -au "S:\\Something" # raises exception S: is not mounted. + out, _ = subp.subp([WSLPATH_CMD, "-au", home]) + return PurePath(out.rstrip()) + - win_profile_dir = win_path_2_wsl(home) - seed_dir = os.path.join(win_profile_dir, ".cloud-init") +def cloud_init_data_dir(user_home: PurePath) -> Optional[PurePath]: + """ + Returns the Windows user profile .cloud-init directory translated as a + Linux path accessible inside the current WSL instance, or None if not + found. + """ + seed_dir = os.path.join(user_home, ".cloud-init") if not os.path.isdir(seed_dir): - raise FileNotFoundError("%s directory doesn't exist." % seed_dir) + LOG.debug("cloud-init user data dir %s doesn't exist.", seed_dir) + return None return PurePath(seed_dir) @@ -146,18 +143,38 @@ def candidate_user_data_file_names(instance_name) -> List[str]: ] -DEFAULT_INSTANCE_ID = "iid-datasource-wsl" +def load_yaml_or_bin(data_path: str) -> Optional[Union[dict, bytes]]: + """ + Tries to load a YAML file as a dict, otherwise returns the file's raw + binary contents as `bytes`. Returns `None` if no file is found. 
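The new `load_yaml_or_bin()` helper described here tries YAML first, falls back to raw bytes, and returns None for a missing file. A simplified stand-in using plain PyYAML and `pathlib` (not the cloud-init helpers themselves) shows the contract:

```python
from pathlib import Path
from typing import Optional, Union

import yaml


def load_yaml_or_bin(path: str) -> Optional[Union[dict, bytes]]:
    """Return parsed YAML when the file holds a mapping, the raw bytes
    when it does not, and None when the file does not exist."""
    try:
        raw = Path(path).read_bytes()
    except FileNotFoundError:
        return None
    try:
        parsed = yaml.safe_load(raw)
    except yaml.YAMLError:
        return raw
    return parsed if isinstance(parsed, dict) else raw
```

Callers then branch on `isinstance(..., bytes)`, which is exactly what the WSL `_get_data()` further down does before deciding whether it can merge configs itself.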
+ """ + try: + bin_data = util.load_binary_file(data_path) + dict_data = util.load_yaml(bin_data) + if dict_data is None: + return bin_data + return dict_data + except FileNotFoundError: + LOG.debug("No data found at %s, ignoring.", data_path) -def load_instance_metadata(cloudinitdir: PurePath, instance_name: str) -> dict: + return None + + +def load_instance_metadata( + cloudinitdir: Optional[PurePath], instance_name: str +) -> dict: """ Returns the relevant metadata loaded from cloudinit dir based on the instance name """ metadata = {"instance-id": DEFAULT_INSTANCE_ID} + if cloudinitdir is None: + return metadata metadata_path = os.path.join( cloudinitdir.as_posix(), "%s.meta-data" % instance_name ) + try: metadata = util.load_yaml(util.load_binary_file(metadata_path)) except FileNotFoundError: @@ -177,9 +194,31 @@ def load_instance_metadata(cloudinitdir: PurePath, instance_name: str) -> dict: return metadata +def load_ubuntu_pro_data( + user_home: PurePath, +) -> Tuple[Union[dict, bytes, None], Union[dict, bytes, None]]: + """ + Read .ubuntupro user-data if present and return a tuple of agent and + landscape user-data. + """ + pro_dir = os.path.join(user_home, ".ubuntupro/.cloud-init") + if not os.path.isdir(pro_dir): + return None, None + + landscape_data = load_yaml_or_bin( + os.path.join(pro_dir, LANDSCAPE_DATA_FILE % instance_name()) + ) + agent_data = load_yaml_or_bin(os.path.join(pro_dir, AGENT_DATA_FILE)) + return agent_data, landscape_data + + class DataSourceWSL(sources.DataSource): dsname = "WSL" + def __init__(self, sys_cfg, distro: Distro, paths: Paths, ud_proc=None): + super().__init__(sys_cfg, distro, paths, ud_proc) + self.instance_name = "" + def find_user_data_file(self, seed_dir: PurePath) -> PurePath: """ Finds the most precendent of the candidate files that may contain @@ -218,9 +257,8 @@ def check_instance_id(self, sys_cfg) -> bool: return False try: - metadata = load_instance_metadata( - cloud_init_data_dir(), self.instance_name - ) + data_dir = cloud_init_data_dir(find_home()) + metadata = load_instance_metadata(data_dir, instance_name()) return current == metadata.get("instance-id") except (IOError, ValueError) as err: @@ -231,24 +269,86 @@ def check_instance_id(self, sys_cfg) -> bool: return False def _get_data(self) -> bool: - self.vendordata_raw = None - seed_dir = cloud_init_data_dir() + if not subp.which(WSLPATH_CMD): + LOG.debug( + "No WSL command %s found. 
Cannot detect WSL datasource",
+                WSLPATH_CMD,
+            )
+            return False
         self.instance_name = instance_name()
+        try:
+            user_home = find_home()
+        except IOError as e:
+            LOG.debug("Unable to detect WSL datasource: %s", e)
+            return False
+
+        seed_dir = cloud_init_data_dir(user_home)
+        agent_data = None
+        user_data: Optional[Union[dict, bytes]] = None
+
+        # Load any metadata
         try:
             self.metadata = load_instance_metadata(
                 seed_dir, self.instance_name
             )
-            file = self.find_user_data_file(seed_dir)
-            self.userdata_raw = cast(
-                str, util.load_binary_file(file.as_posix())
-            )
-            return True
+        except (ValueError, IOError) as err:
+            LOG.error("Unable to load metadata: %s", str(err))
+            return False
+
+        # Load Ubuntu Pro configs only on Ubuntu distros
+        if self.distro.name == "ubuntu":
+            agent_data, user_data = load_ubuntu_pro_data(user_home)
+        # Load regular user configs
+        try:
+            if user_data is None and seed_dir is not None:
+                file = self.find_user_data_file(seed_dir)
+                user_data = load_yaml_or_bin(file.as_posix())
         except (ValueError, IOError) as err:
-            LOG.error("Unable to setup WSL datasource: %s", str(err))
+            LOG.error(
+                "Unable to load any user-data file in %s: %s",
+                seed_dir,
+                str(err),
+            )
+
+        # No configs were found
+        if not any([user_data, agent_data]):
             return False
+        # If we cannot reliably model data files as dicts, then we cannot merge
+        # ourselves, so we can pass the data in ascending order as a list for
+        # cloud-init to handle internally
+        if isinstance(agent_data, bytes) or isinstance(user_data, bytes):
+            self.userdata_raw = cast(Any, [user_data, agent_data])
+            return True
+
+        # We only care about overriding modules entirely, so we can just
+        # iterate over the top level keys and write over them if the agent
+        # provides them instead.
+        # That's the reason for not using util.mergemanydict().
+        merged: dict = {}
+        overridden_keys: typing.List[str] = []
+        if user_data:
+            merged = user_data
+        if agent_data:
+            if user_data:
+                LOG.debug("Merging both user_data and agent.yaml configs.")
+            for key in agent_data:
+                if key in merged:
+                    overridden_keys.append(key)
+                merged[key] = agent_data[key]
+            if overridden_keys:
+                LOG.debug(
+                    " agent.yaml overrides config keys: %s",
+                    ", ".join(overridden_keys),
+                )
+
+        self.userdata_raw = "#cloud-config\n%s" % yaml.dump(merged)
+        return True
+
 
 # Used to match classes to dependencies
 datasources = [
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 4ea1fc561..27c37ee1e 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -17,7 +17,7 @@
 import re
 from collections import namedtuple
 from enum import Enum, unique
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 from cloudinit import atomic_helper, dmi, importer, net, type_utils
 from cloudinit import user_data as ud
@@ -83,6 +83,16 @@ def __str__(self) -> str:
         return self.value
 
 
+class NicOrder(Enum):
+    """Represents ways to sort NICs"""
+
+    MAC = "mac"
+    NIC_NAME = "nic_name"
+
+    def __str__(self) -> str:
+        return self.value
+
+
 class DatasourceUnpickleUserDataError(Exception):
     """Raised when userdata is unable to be unpickled due to python
     upgrades"""
@@ -195,6 +205,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
     # - seed-dir ()
     _subplatform = None
 
+    _crawled_metadata: Optional[Union[Dict, str]] = None
+
     # The network configuration sources that should be considered for this data
     # source. 
(The first source in this list that provides network # configuration will be used without considering any that follow.) This @@ -305,13 +317,16 @@ def __init__(self, sys_cfg, distro: Distro, paths: Paths, ud_proc=None): self.sys_cfg = sys_cfg self.distro = distro self.paths = paths - self.userdata = None + self.userdata: Optional[Any] = None self.metadata: dict = {} self.userdata_raw: Optional[str] = None self.vendordata = None self.vendordata2 = None self.vendordata_raw = None self.vendordata2_raw = None + self.metadata_address = None + self.network_json = UNSET + self.ec2_metadata = UNSET self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {} @@ -326,12 +341,24 @@ def __init__(self, sys_cfg, distro: Distro, paths: Paths, ud_proc=None): def _unpickle(self, ci_pkl_version: int) -> None: """Perform deserialization fixes for Paths.""" - if not hasattr(self, "vendordata2"): - self.vendordata2 = None - if not hasattr(self, "vendordata2_raw"): - self.vendordata2_raw = None - if not hasattr(self, "skip_hotplug_detect"): - self.skip_hotplug_detect = False + expected_attrs = { + "_crawled_metadata": None, + "_platform_type": None, + "_subplatform": None, + "ec2_metadata": UNSET, + "extra_hotplug_udev_rules": None, + "metadata_address": None, + "network_json": UNSET, + "skip_hotplug_detect": False, + "vendordata2": None, + "vendordata2_raw": None, + } + for key, value in expected_attrs.items(): + if not hasattr(self, key): + setattr(self, key, value) + + if not hasattr(self, "check_if_fallback_is_allowed"): + setattr(self, "check_if_fallback_is_allowed", lambda: False) if hasattr(self, "userdata") and self.userdata is not None: # If userdata stores MIME data, on < python3.6 it will be # missing the 'policy' attribute that exists on >=python3.6. @@ -347,8 +374,6 @@ def _unpickle(self, ci_pkl_version: int) -> None: e, ) raise DatasourceUnpickleUserDataError() from e - if not hasattr(self, "extra_hotplug_udev_rules"): - self.extra_hotplug_udev_rules = None def __str__(self): return type_utils.obj_name(self) @@ -360,7 +385,7 @@ def ds_detect(self) -> bool: def override_ds_detect(self) -> bool: """Override if either: - only a single datasource defined (nothing to fall back to) - - commandline argument is used (ci.ds=OpenStack) + - command line argument is used (ci.ds=OpenStack) Note: get_cmdline() is required for the general case - when ds-identify does not run, _something_ needs to detect the kernel command line @@ -368,7 +393,7 @@ def override_ds_detect(self) -> bool: """ if self.dsname.lower() == parse_cmdline().lower(): LOG.debug( - "Machine is configured by the kernel commandline to run on " + "Machine is configured by the kernel command line to run on " "single datasource %s.", self, ) @@ -458,6 +483,12 @@ def get_data(self) -> bool: """ self._dirty_cache = True return_value = self._check_and_get_data() + # TODO: verify that datasource types are what they are expected to be + # each datasource uses different logic to get userdata, metadata, etc + # and then the rest of the codebase assumes the types of this data + # it would be prudent to have a type check here that warns, when the + # datatype is incorrect, rather than assuming types and throwing + # exceptions later if/when they get used incorrectly. 
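The `_unpickle()` rewrite visible above replaces a chain of one-off `hasattr` checks with a table of expected attributes, backfilling anything an older pickle never stored. The same upgrade-in-place pattern in a self-contained toy (names are illustrative, not cloud-init API):

```python
import pickle


class Cache:
    def __init__(self):
        self.old_field = 1
        self.new_field = "added in a later release"

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Backfill attributes that pickles from older releases never
        # stored, so new code can rely on their presence.
        expected_attrs = {"new_field": None}
        for key, value in expected_attrs.items():
            if not hasattr(self, key):
                setattr(self, key, value)


obj = Cache()
del obj.new_field  # simulate a pickle written by an older release
restored = pickle.loads(pickle.dumps(obj))
assert restored.new_field is None  # backfilled, not AttributeError
```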
if not return_value: return return_value self.persist_instance_data() @@ -476,25 +507,19 @@ def persist_instance_data(self, write_cache=True): """ if write_cache and os.path.lexists(self.paths.instance_link): pkl_store(self, self.paths.get_ipath_cur("obj_pkl")) - if hasattr(self, "_crawled_metadata"): + if self._crawled_metadata is not None: # Any datasource with _crawled_metadata will best represent # most recent, 'raw' metadata - crawled_metadata = copy.deepcopy( - getattr(self, "_crawled_metadata") - ) + crawled_metadata = copy.deepcopy(self._crawled_metadata) crawled_metadata.pop("user-data", None) crawled_metadata.pop("vendor-data", None) instance_data = {"ds": crawled_metadata} else: instance_data = {"ds": {"meta_data": self.metadata}} - if hasattr(self, "network_json"): - network_json = getattr(self, "network_json") - if network_json != UNSET: - instance_data["ds"]["network_json"] = network_json - if hasattr(self, "ec2_metadata"): - ec2_metadata = getattr(self, "ec2_metadata") - if ec2_metadata != UNSET: - instance_data["ds"]["ec2_metadata"] = ec2_metadata + if self.network_json != UNSET: + instance_data["ds"]["network_json"] = self.network_json + if self.ec2_metadata != UNSET: + instance_data["ds"]["ec2_metadata"] = self.ec2_metadata instance_data["ds"]["_doc"] = EXPERIMENTAL_TEXT # Add merged cloud.cfg and sys info for jinja templates and cli query instance_data["merged_cfg"] = copy.deepcopy(self.sys_cfg) @@ -631,9 +656,6 @@ def get_vendordata2(self): @property def platform_type(self): - if not hasattr(self, "_platform_type"): - # Handle upgrade path where pickled datasource has no _platform. - self._platform_type = self.dsname.lower() if not self._platform_type: self._platform_type = self.dsname.lower() return self._platform_type @@ -650,17 +672,14 @@ def subplatform(self): nocloud: seed-dir (/seed/dir/path) lxd: nocloud (/seed/dir/path) """ - if not hasattr(self, "_subplatform"): - # Handle upgrade path where pickled datasource has no _platform. - self._subplatform = self._get_subplatform() if not self._subplatform: self._subplatform = self._get_subplatform() return self._subplatform def _get_subplatform(self): """Subclasses should implement to return a "slug (detail)" string.""" - if hasattr(self, "metadata_address"): - return "metadata (%s)" % getattr(self, "metadata_address") + if self.metadata_address: + return f"metadata ({self.metadata_address})" return METADATA_UNKNOWN @property @@ -712,10 +731,6 @@ def _filter_xdata(self, processed_ud): new_ud = f.apply(new_ud) return new_ud - @property - def is_disconnected(self): - return False - def get_userdata_raw(self): return self.userdata_raw @@ -925,6 +940,16 @@ def check_instance_id(self, sys_cfg): # quickly (local check only) if self.instance_id is still return False + def check_if_fallback_is_allowed(self): + """check_if_fallback_is_allowed() + Checks if a cached ds is allowed to be restored when no valid ds is + found in local mode by checking instance-id and searching valid data + through ds list. + + @return True if a ds allows fallback, False otherwise. 
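The `check_if_fallback_is_allowed()` hook documented here is consumed by the stages.py hunk further down: when datasource detection fails in "check" mode, the cached pickle is restored only if the datasource opts in (the VMware override earlier in this patch does, for IMC). The consumer's control flow, condensed into a hypothetical helper:

```python
def load_datasource(detect, restore_from_cache, existing="check"):
    """Hypothetical condensation of Init._get_data_source() below.

    ``detect`` raises when no datasource is found (a stand-in for
    DataSourceNotFoundException); ``restore_from_cache`` returns a
    previously pickled datasource or None.
    """
    try:
        return detect()
    except LookupError as e:
        if existing != "check":
            raise
        ds = restore_from_cache()
        if ds is not None and ds.check_if_fallback_is_allowed():
            return ds  # restored fallback datasource from checked cache
        raise e
```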
+ """ + return False + @staticmethod def _determine_dsmode(candidates, default=None, valid=None): # return the first candidate that is non None, warn if not valid @@ -1208,7 +1233,7 @@ def parse_cmdline_or_dmi(input: str) -> str: dsname = deprecated.group(1).strip() util.deprecate( deprecated=( - f"Defining the datasource on the commandline using " + f"Defining the datasource on the command line using " f"ci.ds={dsname} or " f"ci.datasource={dsname}" ), diff --git a/cloudinit/sources/azure/errors.py b/cloudinit/sources/azure/errors.py index 299a21fdd..851a9b6f9 100644 --- a/cloudinit/sources/azure/errors.py +++ b/cloudinit/sources/azure/errors.py @@ -9,7 +9,7 @@ from datetime import datetime from io import StringIO from typing import Any, Dict, List, Optional, Tuple -from xml.etree import ElementTree +from xml.etree import ElementTree # nosec B405 import requests diff --git a/cloudinit/sources/azure/imds.py b/cloudinit/sources/azure/imds.py index 0487275ba..4f9ec2339 100644 --- a/cloudinit/sources/azure/imds.py +++ b/cloudinit/sources/azure/imds.py @@ -3,7 +3,8 @@ # This file is part of cloud-init. See LICENSE file for license information. import logging -from time import time +import uuid +from time import monotonic from typing import Dict, Optional, Type, Union import requests @@ -17,13 +18,20 @@ IMDS_URL = "http://169.254.169.254/metadata" +def headers_cb(_url): + return { + "Metadata": "true", + "x-ms-client-request-id": str(uuid.uuid4()), + } + + class ReadUrlRetryHandler: """Manager for readurl retry behavior using exception_callback(). :param logging_backoff: Backoff to limit logging. :param max_connection_errors: Number of connection errors to retry on. :param retry_codes: Set of http codes to retry on. - :param retry_deadline: Optional time()-based deadline to retry until. + :param retry_deadline: Optional monotonic()-based deadline to retry until. """ def __init__( @@ -58,7 +66,10 @@ def exception_callback(self, req_args, exception) -> bool: return False log = True - if self.retry_deadline is not None and time() >= self.retry_deadline: + if ( + self.retry_deadline is not None + and monotonic() >= self.retry_deadline + ): retry = False else: retry = True @@ -129,7 +140,7 @@ def _fetch_url( response = readurl( url, exception_cb=retry_handler.exception_callback, - headers={"Metadata": "true"}, + headers_cb=headers_cb, infinite=True, log_req_resp=log_response, timeout=timeout, @@ -221,7 +232,7 @@ def fetch_reprovision_data() -> bytes: response = readurl( url, exception_cb=handler.exception_callback, - headers={"Metadata": "true"}, + headers_cb=headers_cb, infinite=True, log_req_resp=False, timeout=30, diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 1af5f13bd..1493afc51 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -1,4 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. + +# TODO: Importing this file without first importing +# cloudinit.sources.azure.errors will result in a circular import. 
import base64 import json import logging @@ -10,8 +13,8 @@ from datetime import datetime from time import sleep, time from typing import Callable, List, Optional, TypeVar, Union -from xml.etree import ElementTree -from xml.sax.saxutils import escape +from xml.etree import ElementTree # nosec B405 +from xml.sax.saxutils import escape # nosec B406 from cloudinit import distros, subp, temp_utils, url_helper, util, version from cloudinit.reporting import events @@ -360,7 +363,7 @@ def __init__( self.azure_endpoint_client = azure_endpoint_client try: - self.root = ElementTree.fromstring(unparsed_xml) + self.root = ElementTree.fromstring(unparsed_xml) # nosec B314 except ElementTree.ParseError as e: report_diagnostic_event( "Failed to parse GoalState XML: %s" % e, @@ -476,7 +479,7 @@ def _get_ssh_key_from_cert(self, certificate): @azure_ds_telemetry_reporter def _get_fingerprint_from_cert(self, certificate): - """openssl x509 formats fingerprints as so: + r"""openssl x509 formats fingerprints as so: 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ B6:A8:BF:27:D4:73\n' @@ -493,7 +496,9 @@ def _decrypt_certs_from_xml(self, certificates_xml): """Decrypt the certificates XML document using the our private key; return the list of certs and private keys contained in the doc. """ - tag = ElementTree.fromstring(certificates_xml).find(".//Data") + tag = ElementTree.fromstring(certificates_xml).find( # nosec B314 + ".//Data" + ) certificates_content = tag.text lines = [ b"MIME-Version: 1.0", @@ -1001,7 +1006,7 @@ def parse_text(cls, ovf_env_xml: str) -> "OvfEnvXml": unparsable or invalid. """ try: - root = ElementTree.fromstring(ovf_env_xml) + root = ElementTree.fromstring(ovf_env_xml) # nosec B314 except ElementTree.ParseError as e: raise errors.ReportableErrorOvfParsingException(exception=e) from e @@ -1134,6 +1139,7 @@ def _parse_platform_settings_section(self, root): self.provision_guest_proxy_agent = self._parse_property( platform_settings, "ProvisionGuestProxyAgent", + parse_bool=True, default=False, required=False, ) diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py index f9fd683c7..4d0dd363b 100644 --- a/cloudinit/sources/helpers/digitalocean.py +++ b/cloudinit/sources/helpers/digitalocean.py @@ -8,7 +8,7 @@ from cloudinit import dmi from cloudinit import net as cloudnet -from cloudinit import subp, url_helper, util +from cloudinit import url_helper, util NIC_MAP = {"public": "eth0", "private": "eth1"} @@ -36,19 +36,10 @@ def assign_ipv4_link_local(distro, nic=None): random.randint(1, 168), random.randint(0, 255) ) - ip_addr_cmd = ["ip", "addr", "add", addr, "dev", nic] - ip_link_cmd = ["ip", "link", "set", "dev", nic, "up"] - - if not subp.which("ip"): - raise RuntimeError( - "No 'ip' command available to configure ip4LL address" - ) - try: - subp.subp(ip_addr_cmd) - LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic) - subp.subp(ip_link_cmd) - LOG.debug("brought device '%s' up", nic) + distro.net_ops.add_addr(nic, addr) + distro.net_ops.link_up(nic) + LOG.debug("brought device '%s' up with address %s", nic, addr) except Exception: util.logexc( LOG, @@ -73,7 +64,7 @@ def get_link_local_nic(distro): return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, "ifindex")) -def del_ipv4_link_local(nic=None): +def del_ipv4_link_local(distro, nic=None): """Remove the ip4LL address. While this is not necessary, the ip4LL address is extraneous and confusing to users. 
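The DigitalOcean helper above stops shelling out to `ip` and delegates to the distro's `net_ops` layer, which also removes the hard dependency on iproute2. Roughly what the call site now amounts to (a sketch; `distro` is the object the helper already receives):

```python
def bring_up_link_local(distro, nic: str, addr: str) -> None:
    """What assign_ipv4_link_local() now boils down to.

    Previously this ran, via subp:
        ip addr add <addr> dev <nic>
        ip link set dev <nic> up
    and raised outright when no `ip` binary was available. The
    net_ops layer picks the right tool for the platform instead.
    """
    distro.net_ops.add_addr(nic, addr)  # e.g. "169.254.4.5/16"
    distro.net_ops.link_up(nic)
```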
""" @@ -86,10 +77,8 @@ def del_ipv4_link_local(nic=None): LOG.debug("cleaning up ipv4LL address") - ip_addr_cmd = ["ip", "addr", "flush", "dev", nic] - try: - subp.subp(ip_addr_cmd) + distro.net_ops.flush_addr(nic) LOG.debug("removed ip4LL addresses from %s", nic) except Exception as e: diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py index 37c4a2e1a..544ca70ed 100644 --- a/cloudinit/sources/helpers/netlink.py +++ b/cloudinit/sources/helpers/netlink.py @@ -15,15 +15,11 @@ # http://man7.org/linux/man-pages/man7/netlink.7.html RTMGRP_LINK = 1 -NLMSG_NOOP = 1 -NLMSG_ERROR = 2 -NLMSG_DONE = 3 RTM_NEWLINK = 16 RTM_DELLINK = 17 RTM_GETLINK = 18 RTM_SETLINK = 19 MAX_SIZE = 65535 -RTA_DATA_OFFSET = 32 MSG_TYPE_OFFSET = 16 SELECT_TIMEOUT = 60 diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 031ac8c93..70998dda2 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -667,12 +667,19 @@ def convert_net_json(network_json=None, known_macs=None): if link["type"] in ["bond"]: params = {} if link_mac_addr: - params["mac_address"] = link_mac_addr + cfg.update({"mac_address": link_mac_addr}) for k, v in link.items(): if k == "bond_links": continue elif k.startswith("bond"): - params.update({k: v}) + # There is a difference in key name formatting for + # bond parameters in the cloudinit and OpenStack + # network schemas. The keys begin with 'bond-' in the + # cloudinit schema but 'bond_' in OpenStack + # network_data.json schema. Translate them to what + # is expected by cloudinit. + translated_key = "bond-{}".format(k.split("bond_", 1)[-1]) + params.update({translated_key: v}) # openstack does not provide a name for the bond. # they do provide an 'id', but that is possibly non-sensical. 
@@ -700,7 +707,6 @@ def convert_net_json(network_json=None, known_macs=None): { "name": name, "vlan_id": link["vlan_id"], - "mac_address": link["vlan_mac_address"], } ) link_updates.append((cfg, "vlan_link", "%s", link["vlan_link"])) @@ -763,7 +769,7 @@ def convert_net_json(network_json=None, known_macs=None): cfg["type"] = "infiniband" for service in services: - cfg = service + cfg = copy.deepcopy(service) cfg.update({"type": "nameserver"}) config.append(cfg) diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py index 68198cf71..1a406eb84 100644 --- a/cloudinit/sources/helpers/vmware/imc/config.py +++ b/cloudinit/sources/helpers/vmware/imc/config.py @@ -24,12 +24,11 @@ class Config: RESETPASS = "PASSWORD|RESET" SUFFIX = "DNS|SUFFIX|" TIMEZONE = "DATETIME|TIMEZONE" - UTC = "DATETIME|UTC" POST_GC_STATUS = "MISC|POST-GC-STATUS" DEFAULT_RUN_POST_SCRIPT = "MISC|DEFAULT-RUN-POST-CUST-SCRIPT" CLOUDINIT_META_DATA = "CLOUDINIT|METADATA" CLOUDINIT_USER_DATA = "CLOUDINIT|USERDATA" - CLOUDINIT_INSTANCE_ID = "CLOUDINIT|INSTANCE-ID" + CLOUDINIT_INSTANCE_ID = "MISC|INSTANCE-ID" def __init__(self, configFile): self._configFile = configFile diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index ce54f871e..a3aba8f10 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -11,7 +11,9 @@ import re import time -from cloudinit import safeyaml, subp, util +import yaml + +from cloudinit import subp, util from .config import Config from .config_custom_script import PostCustomScript, PreCustomScript @@ -263,8 +265,8 @@ def get_data_from_imc_raw_data_cust_cfg(cust_cfg): try: logger.debug("Validating if meta data is valid or not") - md = safeyaml.load(md) - except safeyaml.YAMLError as e: + md = yaml.safe_load(md) + except yaml.YAMLError as e: set_cust_error_status( "Error parsing the cloud-init meta data", str(e), diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 94caa9c4d..52876e724 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -11,7 +11,7 @@ import sys from collections import namedtuple from contextlib import suppress -from typing import Dict, Iterable, List, Optional, Set +from typing import Dict, Iterable, List, Optional, Set, Tuple, Union from cloudinit import ( atomic_helper, @@ -26,6 +26,7 @@ type_utils, util, ) +from cloudinit.config import Netv1, Netv2 from cloudinit.event import EventScope, EventType, userdata_to_events # Default handlers (used if not overridden) @@ -40,10 +41,10 @@ from cloudinit.reporting import events from cloudinit.settings import ( CLOUD_CONFIG, + DEFAULT_RUN_DIR, PER_ALWAYS, PER_INSTANCE, PER_ONCE, - RUN_CLOUD_CONFIG, ) from cloudinit.sources import NetworkConfigSource @@ -275,7 +276,26 @@ def read_cfg(self, extra_fns=None): self._cfg = self._read_cfg(extra_fns) def _read_cfg(self, extra_fns): - no_cfg_paths = helpers.Paths({}, self.datasource) + """read and merge our configuration""" + # No config is passed to Paths() here because we don't yet have a + # config to pass. We must bootstrap a config to identify + # distro-specific run_dir locations. Once we have the run_dir + # we re-read our config with a valid Paths() object. 
This code has to + # assume the location of /etc/cloud/cloud.cfg && /etc/cloud/cloud.cfg.d + + initial_config = self._read_bootstrap_cfg(extra_fns, {}) + paths = initial_config.get("system_info", {}).get("paths", {}) + + # run_dir hasn't changed so we can safely return the config + if paths.get("run_dir") in (DEFAULT_RUN_DIR, None): + return initial_config + + # run_dir has changed so re-read the config to get a valid one + # using the new location of run_dir + return self._read_bootstrap_cfg(extra_fns, paths) + + def _read_bootstrap_cfg(self, extra_fns, bootstrapped_config: dict): + no_cfg_paths = helpers.Paths(bootstrapped_config, self.datasource) instance_data_file = no_cfg_paths.get_runpath( "instance_data_sensitive" ) @@ -283,7 +303,9 @@ def _read_cfg(self, extra_fns): paths=no_cfg_paths, datasource=self.datasource, additional_fns=extra_fns, - base_cfg=fetch_base_config(instance_data_file=instance_data_file), + base_cfg=fetch_base_config( + no_cfg_paths.run_dir, instance_data_file=instance_data_file + ), ) return merger.cfg @@ -359,20 +381,32 @@ def _get_data_source(self, existing) -> sources.DataSource: LOG.debug(myrep.description) if not ds: - util.del_file(self.paths.instance_link) - (cfg_list, pkg_list) = self._get_datasources() - # Deep copy so that user-data handlers can not modify - # (which will affect user-data handlers down the line...) - (ds, dsname) = sources.find_source( - self.cfg, - self.distro, - self.paths, - copy.deepcopy(self.ds_deps), - cfg_list, - pkg_list, - self.reporter, - ) - LOG.info("Loaded datasource %s - %s", dsname, ds) + try: + cfg_list, pkg_list = self._get_datasources() + # Deep copy so that user-data handlers can not modify + # (which will affect user-data handlers down the line...) + ds, dsname = sources.find_source( + self.cfg, + self.distro, + self.paths, + copy.deepcopy(self.ds_deps), + cfg_list, + pkg_list, + self.reporter, + ) + util.del_file(self.paths.instance_link) + LOG.info("Loaded datasource %s - %s", dsname, ds) + except sources.DataSourceNotFoundException as e: + if existing != "check": + raise e + ds = self._restore_from_cache() + if ds and ds.check_if_fallback_is_allowed(): + LOG.info( + "Restored fallback datasource from checked cache: %s", + ds, + ) + else: + raise e self.datasource = ds # Ensure we adjust our path members datasource # now that we have one (thus allowing ipath to be used) @@ -498,6 +532,9 @@ def is_new_instance(self): return ret def fetch(self, existing="check"): + """optionally load datasource from cache, otherwise discover + datasource + """ return self._get_data_source(existing=existing) def instancify(self): @@ -908,7 +945,7 @@ def _consume_userdata(self, frequency=PER_INSTANCE): # Run the handlers self._do_handlers(user_data_msg, c_handlers_list, frequency) - def _get_network_key_contents(self, cfg) -> dict: + def _get_network_key_contents(self, cfg) -> Union[Netv1, Netv2, None]: """ Network configuration can be passed as a dict under a "network" key, or optionally at the top level. In both cases, return the config. 
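The `_read_cfg()` split above is a two-pass bootstrap: read configuration once assuming the default run_dir, then re-read only when that first pass reveals a customised run_dir (whose cloud.cfg may differ). The control flow, with a hypothetical `read_with_paths` callable standing in for `_read_bootstrap_cfg()`:

```python
DEFAULT_RUN_DIR = "/run/cloud-init"


def read_cfg(read_with_paths) -> dict:
    """Two-pass bootstrap, shaped like the _read_cfg() change above."""
    initial = read_with_paths({})
    paths = initial.get("system_info", {}).get("paths", {})
    if paths.get("run_dir") in (DEFAULT_RUN_DIR, None):
        # run_dir hasn't changed, the first read is already valid
        return initial
    # run_dir was customised: re-read so runtime config comes from
    # <run_dir>/cloud.cfg instead of the default location.
    return read_with_paths(paths)


# Toy demonstration with a fake reader:
print(read_cfg(
    lambda p: {"system_info": {"paths": p or {"run_dir": "/custom/run"}}}
))
```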
@@ -917,7 +954,9 @@ def _get_network_key_contents(self, cfg) -> dict: return cfg["network"] return cfg - def _find_networking_config(self): + def _find_networking_config( + self, + ) -> Tuple[Union[Netv1, Netv2, None], Union[NetworkConfigSource, str]]: disable_file = os.path.join( self.paths.get_cpath("data"), "upgraded-network" ) @@ -942,7 +981,9 @@ def _find_networking_config(self): order = sources.DataSource.network_config_sources for cfg_source in order: if not isinstance(cfg_source, NetworkConfigSource): - LOG.warning( + # This won't happen in the cloud-init codebase, but out-of-tree + # datasources might have an invalid type that mypy cannot know. + LOG.warning( # type: ignore "data source specifies an invalid network cfg_source: %s", cfg_source, ) @@ -1076,11 +1117,11 @@ def should_run_on_boot_event(): return -def read_runtime_config(): - return util.read_conf(RUN_CLOUD_CONFIG) +def read_runtime_config(run_dir: str): + return util.read_conf(os.path.join(run_dir, "cloud.cfg")) -def fetch_base_config(*, instance_data_file=None) -> dict: +def fetch_base_config(run_dir: str, *, instance_data_file=None) -> dict: return util.mergemanydict( [ # builtin config, hardcoded in settings.py. @@ -1090,7 +1131,7 @@ def fetch_base_config(*, instance_data_file=None) -> dict: CLOUD_CONFIG, instance_data_file=instance_data_file ), # runtime config. I.e., /run/cloud-init/cloud.cfg - read_runtime_config(), + read_runtime_config(run_dir), # Kernel/cmdline parameters override system config util.read_conf_from_cmdline(), ], diff --git a/cloudinit/subp.py b/cloudinit/subp.py index c94b44e7d..443073439 100644 --- a/cloudinit/subp.py +++ b/cloudinit/subp.py @@ -8,7 +8,7 @@ import time from errno import ENOEXEC from io import TextIOWrapper -from typing import List, Union +from typing import List, Optional, Union LOG = logging.getLogger(__name__) @@ -218,21 +218,14 @@ def subp( if update_env: env.update(update_env) - if not logstring: - LOG.debug( - "Running command %s with allowed return codes %s" - " (shell=%s, capture=%s)", - args, - rcs, - shell, - capture, - ) - else: - LOG.debug( - "Running hidden command to protect sensitive " - "input/output logstring: %s", - logstring, - ) + LOG.debug( + "Running command %s with allowed return codes %s" + " (shell=%s, capture=%s)", + logstring if logstring else args, + rcs, + shell, + capture, + ) stdin: Union[TextIOWrapper, int] stdout = None @@ -263,7 +256,7 @@ def subp( x if isinstance(x, bytes) else x.encode("utf-8") for x in args ] try: - before = time.time() + before = time.monotonic() sp = subprocess.Popen( bytes_args, stdout=stdout, @@ -274,9 +267,13 @@ def subp( cwd=cwd, ) out, err = sp.communicate(data, timeout=timeout) - total = time.time() - before + total = time.monotonic() - before if total > 0.1: - LOG.debug("command %s took %.3ss to run", args, total) + LOG.debug( + "%s took %.3ss to run", + logstring if logstring else args, + total, + ) except OSError as e: raise ProcessExecutionError( cmd=args, @@ -322,7 +319,7 @@ def target_path(target=None, path=None): return os.path.join(target, path) -def which(program, search=None, target=None): +def which(program, search=None, target=None) -> Optional[str]: target = target_path(target) if os.path.sep in program and is_exe(target_path(target, program)): diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py index 957433474..faa4aaa28 100644 --- a/cloudinit/temp_utils.py +++ b/cloudinit/temp_utils.py @@ -10,7 +10,6 @@ from cloudinit import util LOG = logging.getLogger(__name__) -_TMPDIR = None _ROOT_TMPDIR 
= "/run/cloud-init/tmp" _EXE_ROOT_TMPDIR = "/var/tmp/cloud-init" @@ -20,8 +19,6 @@ def get_tmp_ancestor(odir=None, needs_exe: bool = False): return odir if needs_exe: return _EXE_ROOT_TMPDIR - if _TMPDIR: - return _TMPDIR if os.getuid() == 0: return _ROOT_TMPDIR return os.environ.get("TMPDIR", "/tmp") @@ -53,18 +50,11 @@ def _tempfile_dir_arg(odir=None, needs_exe: bool = False): " mounted as noexec", tdir, ) - - if odir is None and not needs_exe: - global _TMPDIR - _TMPDIR = tdir - return tdir def ExtendedTemporaryFile(**kwargs): - kwargs["dir"] = _tempfile_dir_arg( - kwargs.pop("dir", None), kwargs.pop("needs_exe", False) - ) + kwargs["dir"] = _tempfile_dir_arg() fh = tempfile.NamedTemporaryFile(**kwargs) # Replace its unlink with a quiet version # that does not raise errors when the diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 7e322266b..d409e3228 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -9,6 +9,8 @@ # This file is part of cloud-init. See LICENSE file for license information. import copy +import ftplib +import io import json import logging import os @@ -16,17 +18,17 @@ import time from concurrent.futures import ThreadPoolExecutor, TimeoutError, as_completed from email.utils import parsedate -from errno import ENOENT from functools import partial from http.client import NOT_FOUND from itertools import count +from ssl import create_default_context from typing import Any, Callable, Iterator, List, Optional, Tuple, Union -from urllib.parse import quote, urlparse, urlunparse +from urllib.parse import quote, urlparse, urlsplit, urlunparse import requests from requests import exceptions -from cloudinit import version +from cloudinit import util, version LOG = logging.getLogger(__name__) @@ -59,7 +61,164 @@ def combine_single(url, add_on): return url -def read_file_or_url(url, **kwargs) -> Union["FileResponse", "UrlResponse"]: +def ftp_get_return_code_from_exception(exc) -> int: + """helper for read_ftps to map return codes to a number""" + # ftplib doesn't expose error codes, so use this lookup table + ftp_error_codes = { + ftplib.error_reply: 300, # unexpected [123]xx reply + ftplib.error_temp: 400, # 4xx errors + ftplib.error_perm: 500, # 5xx errors + ftplib.error_proto: 600, # response does not begin with [1-5] + EOFError: 700, # made up + # OSError is also possible. Use OSError.errno for that. + } + code = ftp_error_codes.get(type(exc)) # pyright: ignore + if not code: + if isinstance(exc, OSError): + code = exc.errno + else: + LOG.warning( + "Unexpected exception type while connecting to ftp server." + ) + code = -99 + return code + + +def read_ftps(url: str, timeout: float = 5.0, **kwargs: dict) -> "FtpResponse": + """connect to URL using ftp over TLS and read a file + + when using strict mode (ftps://), raise exception in event of failure + when not using strict mode (ftp://), fall back to using unencrypted ftp + + url: string containing the desination to read a file from. 
The url is + parsed with urllib.urlsplit to identify username, password, host, + path, and port in the following format: + ftps://[username:password@]host[:port]/[path] + host is the only required component + timeout: maximum time for the connection to take + kwargs: unused, for compatibility with read_url + returns: UrlResponse + """ + + url_parts = urlsplit(url) + if not url_parts.hostname: + raise UrlError( + cause="Invalid url provided", code=NOT_FOUND, headers=None, url=url + ) + with io.BytesIO() as buffer: + port = url_parts.port or 21 + user = url_parts.username or "anonymous" + if "ftps" == url_parts.scheme: + try: + ftp_tls = ftplib.FTP_TLS( + context=create_default_context(), + ) + LOG.debug( + "Attempting to connect to %s via port [%s] over tls.", + url, + port, + ) + ftp_tls.connect( + host=url_parts.hostname, + port=port, + timeout=timeout or 5.0, # uses float internally + ) + except ftplib.all_errors as e: + code = ftp_get_return_code_from_exception(e) + raise UrlError( + cause=( + "Reading file from server over tls " + f"failed for url {url} [{code}]" + ), + code=code, + headers=None, + url=url, + ) from e + LOG.debug("Attempting to login with user [%s]", user) + try: + ftp_tls.login( + user=user, + passwd=url_parts.password or "", + ) + except ftplib.error_perm as e: + LOG.warning( + "Attempted to connect to an insecure ftp server but used " + "a scheme of ftps://, which is not allowed. Use ftp:// " + "to allow connecting to insecure ftp servers." + ) + raise UrlError( + cause=( + "Attempted to connect to an insecure ftp server but " + "used a scheme of ftps://, which is not allowed. Use " + "ftp:// to allow connecting to insecure ftp servers." + ), + code=500, + headers=None, + url=url, + ) from e + LOG.debug("Creating a secure connection") + ftp_tls.prot_p() + LOG.debug("Reading file: %s", url_parts.path) + ftp_tls.retrbinary(f"RETR {url_parts.path}", callback=buffer.write) + + response = FtpResponse(buffer.getvalue(), url) + LOG.debug("Closing connection") + ftp_tls.close() + return response + else: + try: + ftp = ftplib.FTP() + LOG.debug( + "Attempting to connect to %s via port %s.", url, port + ) + ftp.connect( + host=url_parts.hostname, + port=port, + timeout=timeout or 5.0, # uses float internally + ) + except ftplib.all_errors as e: + code = ftp_get_return_code_from_exception(e) + raise UrlError( + cause=( + "Reading file from ftp server" + f" failed for url {url} [{code}]" + ), + code=code, + headers=None, + url=url, + ) from e + LOG.debug("Attempting to login with user [%s]", user) + ftp.login( + user=user, + passwd=url_parts.password or "", + ) + LOG.debug("Reading file: %s", url_parts.path) + ftp.retrbinary(f"RETR {url_parts.path}", callback=buffer.write) + response = FtpResponse(buffer.getvalue(), url) + LOG.debug("Closing connection") + ftp.close() + return response + + +def _read_file(path: str, **kwargs) -> "FileResponse": + """read a binary file and return a FileResponse + + matches function signature with read_ftps and read_url + """ + if kwargs.get("data"): + LOG.warning("Unable to post data to file resource %s", path) + try: + contents = util.load_binary_file(path) + return FileResponse(contents, path) + except FileNotFoundError as e: + raise UrlError(cause=e, code=NOT_FOUND, headers=None, url=path) from e + except IOError as e: + raise UrlError(cause=e, code=e.errno, headers=None, url=path) from e + + +def read_file_or_url( + url, **kwargs +) -> Union["FileResponse", "UrlResponse", "FtpResponse"]: """Wrapper function around readurl to allow passing a 
file path as url. When url is not a local file path, passthrough any kwargs to readurl. @@ -68,22 +227,19 @@ def read_file_or_url(url, **kwargs) -> Union["FileResponse", "UrlResponse"]: parameters. See: call-signature of readurl in this module for param docs. """ url = url.lstrip() - if url.startswith("/"): - url = "file://%s" % url - if url.lower().startswith("file://"): - if kwargs.get("data"): - LOG.warning("Unable to post data to file resource %s", url) - file_path = url[len("file://") :] - try: - with open(file_path, "rb") as fp: - contents = fp.read() - except IOError as e: - code = e.errno - if e.errno == ENOENT: - code = NOT_FOUND - raise UrlError(cause=e, code=code, headers=None, url=url) from e - return FileResponse(file_path, contents=contents) + try: + parsed = urlparse(url) + except ValueError as e: + raise UrlError(cause=e, url=url) from e + scheme = parsed.scheme + if scheme == "file" or (url and "/" == url[0]): + return _read_file(parsed.path, **kwargs) + elif scheme in ("ftp", "ftps"): + return read_ftps(url, **kwargs) + elif scheme in ("http", "https"): + return readurl(url, **kwargs) else: + LOG.warning("Attempting unknown protocol %s", scheme) return readurl(url, **kwargs) @@ -91,11 +247,11 @@ def read_file_or_url(url, **kwargs) -> Union["FileResponse", "UrlResponse"]: # read_file_or_url can return this or that object and the # 'user' of those objects will not need to know the difference. class StringResponse: - def __init__(self, contents, code=200): + def __init__(self, contents, url, code=200): self.code = code self.headers = {} self.contents = contents - self.url = None + self.url = url def ok(self, *args, **kwargs): if self.code != 200: @@ -107,9 +263,13 @@ def __str__(self): class FileResponse(StringResponse): - def __init__(self, path, contents, code=200): - StringResponse.__init__(self, contents, code=code) - self.url = path + def __init__(self, contents: bytes, url: str, code=200): + super().__init__(contents, url, code=code) + + +class FtpResponse(StringResponse): + def __init__(self, contents: bytes, url: str): + super().__init__(contents, url) class UrlResponse: @@ -119,14 +279,15 @@ def __init__(self, response: requests.Response): @property def contents(self) -> bytes: if self._response.content is None: - return b"" + # typeshed bug: https://github.com/python/typeshed/pull/12180 + return b"" # type: ignore return self._response.content @property - def url(self): + def url(self) -> str: return self._response.url - def ok(self, redirects_ok=False): + def ok(self, redirects_ok=False) -> bool: upper = 300 if redirects_ok: upper = 400 @@ -140,7 +301,7 @@ def headers(self): return self._response.headers @property - def code(self): + def code(self) -> int: return self._response.status_code def __str__(self): @@ -263,30 +424,30 @@ def readurl( if retries: manual_tries = max(int(retries) + 1, 1) - def_headers = { - "User-Agent": "Cloud-Init/%s" % (version.version_string()), - } - if headers: - def_headers.update(headers) - headers = def_headers - - if not headers_cb: - - def _cb(url): - return headers + user_agent = "Cloud-Init/%s" % (version.version_string()) + if headers is not None: + headers = headers.copy() + else: + headers = {} - headers_cb = _cb if data: req_args["data"] = data if sec_between is None: sec_between = -1 - excps = [] + if session is None: + session = requests.Session() + # Handle retrying ourselves since the built-in support # doesn't handle sleeping between tries... 
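With FTP support added above, `read_file_or_url()` now dispatches on the parsed scheme rather than on string prefixes, and URLs take the form `ftps://[username:password@]host[:port]/[path]` documented in the read_ftps docstring. A toy router showing the mapping (labels only; the real readers are `_read_file`, `read_ftps`, and `readurl`):

```python
from urllib.parse import urlparse


def dispatch(url: str) -> str:
    """Return which reader the new read_file_or_url() routes to."""
    parsed = urlparse(url.lstrip())
    scheme = parsed.scheme
    if scheme == "file" or url.startswith("/"):
        return "_read_file"
    if scheme in ("ftp", "ftps"):
        return "read_ftps"
    if scheme in ("http", "https"):
        return "readurl"
    return "readurl"  # unknown schemes are warned about, then attempted


assert dispatch("/etc/hosts") == "_read_file"
assert dispatch("ftps://user:secret@host:990/seed/user-data") == "read_ftps"
assert dispatch("https://example.com/meta") == "readurl"
```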
- # Infinitely retry if infinite is True - for i in count() if infinite else range(manual_tries): - req_args["headers"] = headers_cb(url) + for i in count(): + if headers_cb: + headers = headers_cb(url) + + if "User-Agent" not in headers: + headers["User-Agent"] = user_agent + + req_args["headers"] = headers filtered_req_args = {} for (k, v) in req_args.items(): if k == "data": @@ -300,7 +461,6 @@ def _cb(url): else: filtered_req_args[k] = v try: - if log_req_resp: LOG.debug( "[%s/%s] open '%s' with %s configuration", @@ -310,11 +470,7 @@ def _cb(url): filtered_req_args, ) - if session is None: - session = requests.Session() - - with session as sess: - r = sess.request(**req_args) + r = session.request(**req_args) if check_status: r.raise_for_status() @@ -329,6 +485,10 @@ def _cb(url): # subclass for responses, so add our own backward-compat # attrs return UrlResponse(r) + except exceptions.SSLError as e: + # ssl exceptions are not going to get fixed by waiting a + # few seconds + raise UrlError(e, url=url) from e except exceptions.RequestException as e: if ( isinstance(e, (exceptions.HTTPError)) @@ -337,29 +497,26 @@ def _cb(url): e.response, "status_code" ) ): - excps.append( - UrlError( - e, - code=e.response.status_code, - headers=e.response.headers, - url=url, - ) + url_error = UrlError( + e, + code=e.response.status_code, + headers=e.response.headers, + url=url, ) else: - excps.append(UrlError(e, url=url)) - if isinstance(e, exceptions.SSLError): - # ssl exceptions are not going to get fixed by waiting a - # few seconds - break - if exception_cb and not exception_cb(req_args.copy(), excps[-1]): + url_error = UrlError(e, url=url) + + if exception_cb and not exception_cb(req_args.copy(), url_error): # if an exception callback was given, it should return True # to continue retrying and False to break and re-raise the # exception - break - if (infinite and sec_between > 0) or ( - i + 1 < manual_tries and sec_between > 0 - ): + raise url_error from e + + will_retry = infinite or (i + 1 < manual_tries) + if not will_retry: + raise url_error from e + if sec_between > 0: if log_req_resp: LOG.debug( "Please wait %s seconds while we wait to try again", @@ -367,7 +524,7 @@ def _cb(url): ) time.sleep(sec_between) - raise excps[-1] + raise RuntimeError("This path should be unreachable...") def _run_func_with_delay( @@ -403,7 +560,7 @@ def dual_stack( """ return_result = None returned_address = None - last_exception = None + last_exception: Optional[BaseException] = None exceptions = [] is_done = threading.Event() @@ -463,7 +620,7 @@ def dual_stack( "Timed out waiting for addresses: %s, " "exception(s) raised while waiting: %s", " ".join(addresses), - " ".join(exceptions), # type: ignore + " ".join(map(str, exceptions)), ) finally: executor.shutdown(wait=False) @@ -536,7 +693,7 @@ def timeup(max_wait: float, start_time: float, sleep_time: float = 0): if max_wait in (float("inf"), None): return False return (max_wait <= 0) or ( - time.time() - start_time + sleep_time > max_wait + time.monotonic() - start_time + sleep_time > max_wait ) def handle_url_response(response, url): @@ -579,7 +736,7 @@ def read_url_handle_exceptions( except Exception as e: reason = "unexpected error [%s]" % e url_exc = e - time_taken = int(time.time() - start_time) + time_taken = int(time.monotonic() - start_time) max_wait_str = "%ss" % max_wait if max_wait else "unlimited" status_msg = "Calling '%s' failed [%s/%s]: %s" % ( url or getattr(url_exc, "url", "url ? 
None"), @@ -613,7 +770,7 @@ def url_reader_serial(url): return (url, read_url_cb(url, timeout)) for url in urls: - now = time.time() + now = time.monotonic() if loop_n != 0: if timeup(max_wait, start_time): return @@ -647,7 +804,7 @@ def read_url_parallel(start_time, timeout, exc_cb, log_cb): if out: return out - start_time = time.time() + start_time = time.monotonic() if sleep_time and sleep_time_cb: raise ValueError("sleep_time and sleep_time_cb are mutually exclusive") @@ -683,7 +840,7 @@ def read_url_parallel(start_time, timeout, exc_cb, log_cb): time.sleep(current_sleep_time) # shorten timeout to not run way over max_time - current_time = time.time() + current_time = time.monotonic() if timeout and current_time + timeout > start_time + max_wait: timeout = max_wait - (current_time - start_time) if timeout <= 0: diff --git a/cloudinit/util.py b/cloudinit/util.py index 87dbfb0e1..459e3e1b1 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -49,6 +49,7 @@ Generator, List, Mapping, + NamedTuple, Optional, Sequence, TypeVar, @@ -56,12 +57,14 @@ ) from urllib import parse +import yaml + from cloudinit import ( features, importer, + log, mergers, net, - safeyaml, settings, subp, temp_utils, @@ -88,6 +91,11 @@ FALSE_STRINGS = ("off", "0", "no", "false") +class DeprecationLog(NamedTuple): + log_level: int + message: str + + def kernel_version(): return tuple(map(int, os.uname().release.split(".")[:2])) @@ -349,8 +357,6 @@ def read_conf(fname, *, instance_data_file=None) -> Dict: config_file, repr(e), ) - if config_file is None: - return {} return load_yaml(config_file, default={}) # pyright: ignore @@ -395,13 +401,13 @@ def clean_filename(fn): def decomp_gzip(data, quiet=True, decode=True): try: - buf = io.BytesIO(encode_text(data)) - with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh: - # E1101 is https://github.com/PyCQA/pylint/issues/1444 + with io.BytesIO(encode_text(data)) as buf, gzip.GzipFile( + None, "rb", 1, buf + ) as gh: if decode: - return decode_binary(gh.read()) # pylint: disable=E1101 + return decode_binary(gh.read()) else: - return gh.read() # pylint: disable=E1101 + return gh.read() except Exception as e: if quiet: return data @@ -651,6 +657,7 @@ def _get_variant(info): "almalinux", "alpine", "arch", + "azurelinux", "centos", "cloudlinux", "debian", @@ -751,7 +758,7 @@ def get_cfg_by_path(yobj, keyp, default=None): or an iterable. @param default: The default to return if the path does not exist. @return: The value of the item at keyp." 
-        is not found."""
+    is not found."""
 
     if isinstance(keyp, str):
         keyp = keyp.split("/")
@@ -948,13 +955,14 @@ def del_dir(path):
     shutil.rmtree(path)
 
 
-# read_optional_seed
-# returns boolean indicating success or failure (presence of files)
-# if files are present, populates 'fill' dictionary with 'user-data' and
-# 'meta-data' entries
 def read_optional_seed(fill, base="", ext="", timeout=5):
+    """
+    returns boolean indicating success or failure (presence of files)
+    if files are present, populates 'fill' dictionary with 'user-data' and
+    'meta-data' entries
+    """
     try:
-        (md, ud, vd) = read_seeded(base, ext, timeout)
+        md, ud, vd = read_seeded(base=base, ext=ext, timeout=timeout)
         fill["user-data"] = ud
         fill["vendor-data"] = vd
         fill["meta-data"] = md
@@ -1008,7 +1016,7 @@ def load_yaml(blob, default=None, allowed=(dict,)):
             len(blob),
             allowed,
         )
-        converted = safeyaml.load(blob)
+        converted = yaml.safe_load(blob)
         if converted is None:
             LOG.debug("loaded blob returned None, returning default.")
             converted = default
@@ -1019,7 +1027,7 @@ def load_yaml(blob, default=None, allowed=(dict,)):
                 % (allowed, type_utils.obj_name(converted))
             )
         loaded = converted
-    except (safeyaml.YAMLError, TypeError, ValueError) as e:
+    except (yaml.YAMLError, TypeError, ValueError) as e:
         msg = "Failed loading yaml blob"
         mark = None
         if hasattr(e, "context_mark") and getattr(e, "context_mark"):
@@ -1038,7 +1046,7 @@ def load_yaml(blob, default=None, allowed=(dict,)):
     return loaded
 
 
-def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
+def read_seeded(base="", ext="", timeout=5, retries=10):
     if base.find("%s") >= 0:
         ud_url = base.replace("%s", "user-data" + ext)
         vd_url = base.replace("%s", "vendor-data" + ext)
@@ -1410,20 +1418,6 @@ def search_for_mirror(candidates):
     return None
 
 
-def close_stdin():
-    """
-    reopen stdin as /dev/null so even subprocesses or other os level things get
-    /dev/null as input.
-
-    if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true
-    value then input will not be closed (useful for debugging).
-    """
-    if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDIN")):
-        return
-    with open(os.devnull) as fp:
-        os.dup2(fp.fileno(), sys.stdin.fileno())
-
-
 def find_devs_with_freebsd(
     criteria=None, oformat="device", tag=None, no_cache=False, path=None
 ):
@@ -1618,14 +1612,14 @@ def load_binary_file(
     quiet: bool = False,
 ) -> bytes:
     LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
-    ofh = io.BytesIO()
-    try:
-        with open(fname, "rb") as ifh:
-            pipe_in_out(ifh, ofh, chunk_cb=read_cb)
-    except FileNotFoundError:
-        if not quiet:
-            raise
-    contents = ofh.getvalue()
+    with io.BytesIO() as ofh:
+        try:
+            with open(fname, "rb") as ifh:
+                pipe_in_out(ifh, ofh, chunk_cb=read_cb)
+        except FileNotFoundError:
+            if not quiet:
+                raise
+        contents = ofh.getvalue()
     LOG.debug("Read %s bytes from %s", len(contents), fname)
     return contents
 
@@ -1816,21 +1810,10 @@ def get_config_logfiles(cfg):
     return list(set(logs + rotated_logs))
 
 
-def logexc(log, msg, *args):
-    # Setting this here allows this to change
-    # levels easily (not always error level)
-    # or even desirable to have that much junk
-    # coming out to a non-debug stream
-    if msg:
-        log.warning(msg, *args)
-    # Debug gets the full trace.  However, nose has a bug whereby its
-    # logcapture plugin doesn't properly handle the case where there is no
-    # actual exception.  To avoid tracebacks during the test suite then, we'll
-    # do the actual exc_info extraction here, and if there is no exception in
-    # flight, we'll just pass in None.
-    exc_info = sys.exc_info()
-    if exc_info == (None, None, None):
-        exc_info = None
+def logexc(
+    log, msg, *args, log_level: int = logging.WARNING, exc_info=True
+) -> None:
+    log.log(log_level, msg, *args)
     log.debug(msg, exc_info=exc_info, *args)
 
@@ -2651,7 +2634,7 @@ def find_freebsd_part(fs):
             return splitted[0]
         elif len(splitted) == 3:
             return splitted[2]
-        elif splitted[2] in ["label", "gpt", "ufs"]:
+        elif splitted[2] in ["label", "gpt", "gptid", "ufs", "ufsid"]:
             target_label = fs[5:]
             (part, _err) = subp.subp(["glabel", "status", "-s"])
             for labels in part.split("\n"):
@@ -2835,7 +2818,7 @@ def log_time(
     if kwargs is None:
         kwargs = {}
 
-    start = time.time()
+    start = time.monotonic()
 
     ustart = None
     if get_uptime:
@@ -2847,7 +2830,7 @@ def log_time(
     try:
         ret = func(*args, **kwargs)
     finally:
-        delta = time.time() - start
+        delta = time.monotonic() - start
         udelta = None
         if ustart is not None:
             try:
@@ -2978,8 +2961,6 @@ def is_x86(uname_arch=None):
 
 
 def message_from_string(string):
-    if sys.version_info[:2] < (2, 7):
-        return email.message_from_file(io.StringIO(string))
     return email.message_from_string(string)
 
@@ -3054,7 +3035,7 @@ def rootdev_from_cmdline(cmdline):
 
 
 def load_shell_content(content, add_empty=False, empty_val=None):
-    """Given shell like syntax (key=value\nkey2=value2\n) in content
+    r"""Given shell like syntax (key=value\nkey2=value2\n) in content
     return the data in dictionary form.  If 'add_empty' is True
     then add entries in to the returned dictionary for 'VAR='
     variables.  Set their value to empty_val."""
@@ -3130,7 +3111,7 @@ def udevadm_settle(exists=None, timeout=None):
 
 
 def error(msg, rc=1, fmt="Error:\n{}", sys_exit=False):
-    """
+    r"""
     Print error to stderr and return or exit
 
     @param msg: message to print
@@ -3146,13 +3127,49 @@ def error(msg, rc=1, fmt="Error:\n{}", sys_exit=False):
 
 @total_ordering
 class Version(namedtuple("Version", ["major", "minor", "patch", "rev"])):
-    def __new__(cls, major=-1, minor=-1, patch=-1, rev=-1):
+    """A class for comparing versions.
+
+    Implemented as a named tuple with all ordering methods. Comparisons
+    between X.Y.N and X.Y always treat the more specific number as larger.
+
+    :param major: the most significant number in a version
+    :param minor: next greatest significant number after major
+    :param patch: next greatest significant number after minor
+    :param rev: the least significant number in a version
+
+    :raises TypeError: If invalid arguments are given.
+    :raises ValueError: If invalid arguments are given.
+
+    Examples:
+        >>> Version(2, 9) == Version.from_str("2.9")
+        True
+        >>> Version(2, 9, 1) > Version.from_str("2.9.1")
+        False
+        >>> Version(3, 10) > Version.from_str("3.9.9.9")
+        True
+        >>> Version(3, 7) >= Version.from_str("3.7")
+        True
+
+    """
+
+    def __new__(
+        cls, major: int = -1, minor: int = -1, patch: int = -1, rev: int = -1
+    ) -> "Version":
        """Default of -1 allows us to tiebreak in favor of the most
        specific number"""
        return super(Version, cls).__new__(cls, major, minor, patch, rev)
 
     @classmethod
-    def from_str(cls, version: str):
+    def from_str(cls, version: str) -> "Version":
+        """Create a Version object from a string.
+
+        :param version: A period-delimited version string, max 4 segments.
+
+        :raises TypeError: Raised if invalid arguments are given.
+        :raises ValueError: Raised if invalid arguments are given.
+
+        :return: A Version object.
+        """
         return cls(*(list(map(int, version.split(".")))))
 
     def __gt__(self, other):
@@ -3177,15 +3194,15 @@ def __iter__(self):
     def __str__(self):
         return ".".join(self)
 
-    def _compare_version(self, other) -> int:
-        """
-        return values:
-        1: self > v2
-        -1: self < v2
-        0: self == v2
+    def __hash__(self):
+        return hash(str(self))
+
+    def _compare_version(self, other: "Version") -> int:
+        """Compare this Version to another.
 
-        to break a tie between 3.1.N and 3.1, always treat the more
-        specific number as larger
+        :param other: A Version object.
+
+        :return: 1 if self > other, -1 if self < other, else 0
         """
         if self == other:
             return 0
@@ -3200,14 +3217,27 @@ def _compare_version(self, other) -> int:
         return -1
 
 
+def should_log_deprecation(version: str, boundary_version: str) -> bool:
+    """Determine if a deprecation message should be logged.
+
+    :param version: The version in which the thing was deprecated.
+    :param boundary_version: The version at which deprecation level is logged.
+
+    :return: True if the message should be logged, else False.
+    """
+    return boundary_version == "devel" or Version.from_str(
+        version
+    ) <= Version.from_str(boundary_version)
+
+
 def deprecate(
     *,
     deprecated: str,
     deprecated_version: str,
     extra_message: Optional[str] = None,
     schedule: int = 5,
-    return_log: bool = False,
-):
+    skip_log: bool = False,
+) -> DeprecationLog:
     """Mark a "thing" as deprecated. Deduplicated deprecations are
     logged.
 
@@ -3223,13 +3253,15 @@ def deprecate(
     @param schedule: Manually set the deprecation schedule. Defaults to
         5 years. Leave a comment explaining your reason for deviation if
         setting this value.
-    @param return_log: Return log text rather than logging it. Useful for
+    @param skip_log: Skip logging and only return the log text. Useful for
         running prior to logging setup.
+    @return: NamedTuple containing log level and log message
+        DeprecationLog(level: int, message: str)
 
     Note: uses keyword-only arguments to improve legibility
     """
-    if not hasattr(deprecate, "_log"):
-        deprecate._log = set()  # type: ignore
+    if not hasattr(deprecate, "log"):
+        setattr(deprecate, "log", set())
     message = extra_message or ""
     dedup = hash(deprecated + message + deprecated_version + str(schedule))
     version = Version.from_str(deprecated_version)
@@ -3239,14 +3271,19 @@ def deprecate(
         f"{deprecated_version} and scheduled to be removed in "
         f"{version_removed}. {message}"
     ).rstrip()
-    if return_log:
-        return deprecate_msg
-    if dedup not in deprecate._log:  # type: ignore
-        deprecate._log.add(dedup)  # type: ignore
-        if hasattr(LOG, "deprecated"):
-            LOG.deprecated(deprecate_msg)  # type: ignore
-        else:
-            LOG.warning(deprecate_msg)
+    if not should_log_deprecation(
+        deprecated_version, features.DEPRECATION_INFO_BOUNDARY
+    ):
+        level = logging.INFO
+    elif hasattr(LOG, "deprecated"):
+        level = log.DEPRECATED
+    else:
+        level = logging.WARN
+    log_cache = getattr(deprecate, "log")
+    if not skip_log and dedup not in log_cache:
+        log_cache.add(dedup)
+        LOG.log(level, deprecate_msg)
+    return DeprecationLog(level, deprecate_msg)
 
 
 def deprecate_call(
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 3a781d6d1..b6bc8227d 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "24.1.3" +__VERSION__ = "24.2" _PACKAGED_VERSION = "@@PACKAGED_VERSION@@" FEATURES = [ diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 1c0efc5d9..8585c184c 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -4,14 +4,15 @@ {% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %} {% set is_rhel = variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "miraclelinux", "rhel", "rocky", "virtuozzo"] %} -{% set gecos = ({"amazon": "EC2 Default User", "centos": "Cloud User", - "debian": "Debian", "dragonfly": "DragonFly", - "freebsd": "FreeBSD", "mariner": "MarinerOS", - "rhel": "Cloud User", "netbsd": "NetBSD", - "openbsd": "openBSD", "openmandriva": "OpenMandriva admin", - "photon": "PhotonOS", "ubuntu": "Ubuntu", - "unknown": "Ubuntu"}) %} +{% set gecos = ({"amazon": "EC2 Default User", "azurelinux": "Azure Linux", + "centos": "Cloud User", "debian": "Debian", + "dragonfly": "DragonFly", "freebsd": "FreeBSD", + "mariner": "MarinerOS", "rhel": "Cloud User", + "netbsd": "NetBSD", "openbsd": "openBSD", + "openmandriva": "OpenMandriva admin", "photon": "PhotonOS", + "ubuntu": "Ubuntu", "unknown": "Ubuntu"}) %} {% set groups = ({"alpine": "adm, wheel", "arch": "wheel, users", + "azurelinux": "wheel", "debian": "adm, audio, cdrom, dialout, dip, floppy, netdev, plugdev, sudo, video", "gentoo": "users, wheel", "mariner": "wheel", "photon": "wheel", @@ -61,7 +62,7 @@ disable_root: true "openmandriva", "photon", "TencentOS"] or is_rhel %} {% if is_rhel %} -mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2'] +mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.after=cloud-init.service,_netdev', '0', '2'] {% else %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% endif %} @@ -136,6 +137,7 @@ cloud_init_modules: - rsyslog - users_groups - ssh + - set_passwords # The modules that run in the 'config' stage cloud_config_modules: @@ -155,7 +157,6 @@ cloud_config_modules: {% endif %} - locale {% endif %} - - set_passwords {% if variant == "alpine" %} - apk_configure {% elif variant in ["debian", "ubuntu", "unknown"] %} @@ -165,8 +166,8 @@ cloud_config_modules: {% if variant == "ubuntu" %} - ubuntu_pro {% endif %} -{% elif variant in ["fedora", "mariner", "openeuler", "openmandriva", - "photon"] or is_rhel %} +{% elif variant in ["azurelinux", "fedora", "mariner", "openeuler", + "openmandriva", "photon"] or is_rhel %} {% if is_rhel %} - rh_subscription {% endif %} @@ -219,10 +220,10 @@ cloud_final_modules: # (not accessible to handlers/transforms) system_info: # This will affect which distro class gets used -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "freebsd", - "gentoo", "mariner", "netbsd", "openbsd", "OpenCloudOS", - "openeuler", "openmandriva", "photon", "suse", "TencentOS", - "ubuntu"] or is_rhel %} +{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", + "freebsd", "gentoo", "mariner", "netbsd", "openbsd", + "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", + "TencentOS", "ubuntu"] or is_rhel %} distro: {{ variant }} {% elif variant == "dragonfly" %} distro: dragonflybsd @@ -237,9 +238,10 @@ system_info: {% else %} name: {{ variant }} {% endif %} -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS", "ubuntu", "unknown"] +{% if variant in ["alpine", "amazon", "arch", 
"azurelinux", "debian", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS", "ubuntu", + "unknown"] or is_bsd or is_rhel %} lock_passwd: True {% endif %} @@ -292,7 +294,7 @@ system_info: {% elif variant in ["freebsd", "netbsd", "openbsd"] %} network: renderers: ['{{ variant }}'] -{% elif variant in ["mariner", "photon"] %} +{% elif variant in ["azurelinux", "mariner", "photon"] %} network: renderers: ['networkd'] {% elif variant == "openmandriva" %} @@ -306,7 +308,7 @@ system_info: activators: ['netplan', 'eni', 'network-manager', 'networkd'] {% elif is_rhel %} network: - renderers: ['sysconfig', 'eni', 'netplan', 'network-manager', 'networkd'] + renderers: ['eni', 'netplan', 'network-manager', 'sysconfig', 'networkd'] {% endif %} {% if variant == "photon" %} # If set to true, cloud-init will not use fallback network config. @@ -318,9 +320,10 @@ system_info: # Automatically discover the best ntp_client ntp_client: auto {% endif %} -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS", "ubuntu", "unknown"] +{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS", "ubuntu", + "unknown"] or is_rhel %} # Other config here will be given to the distro class and/or path classes paths: @@ -328,7 +331,7 @@ system_info: templates_dir: /etc/cloud/templates/ {% elif is_bsd %} paths: - run_dir: /var/run/ + run_dir: /var/run/cloud-init/ {% endif %} {% if variant == "debian" %} package_mirrors: @@ -365,8 +368,9 @@ system_info: {% endif %} {% if variant in ["debian", "ubuntu", "unknown"] %} ssh_svcname: ssh -{% elif variant in ["alpine", "amazon", "arch", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS"] or is_rhel %} +{% elif variant in ["alpine", "amazon", "arch", "azurelinux", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS"] + or is_rhel %} ssh_svcname: sshd {% endif %} diff --git a/conftest.py b/conftest.py index ca4743e23..11aeae8fa 100644 --- a/conftest.py +++ b/conftest.py @@ -205,6 +205,7 @@ def paths(tmpdir): """ dirs = { "cloud_dir": tmpdir.mkdir("cloud_dir").strpath, + "docs_dir": tmpdir.mkdir("docs_dir").strpath, "run_dir": tmpdir.mkdir("run_dir").strpath, } return helpers.Paths(dirs) diff --git a/debian/changelog b/debian/changelog index b2245c951..2917cf71b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,31 @@ +cloud-init (24.2-0ubuntu1~20.04.1) focal; urgency=medium + + * d/control: remove netifaces due to GH-4634 + * drop d/p/do-not-block-user-login.patch: + Upstream now has "Before=systemd-user-sessions" in cloud-init.service + * d/p/drop-unsupported-systemd-condition-environment.patch: + drop ConditionEnvironment from unit files because systemd 245.4 ignores + those keys and emits warnings at systemctl status + * d/p/add-deprecation-info-boundary.patch: Update + DEPRECATION_INFO_BOUNDARY to ensure new deprecations don't trigger + warnings. 
+  * refresh patches:
+    - d/p/cli-retain-file-argument-as-main-cmd-arg.patch
+    - d/p/keep-dhclient-as-priority-client.patch
+    - d/p/netplan99-cannot-use-default.patch
+    - d/p/retain-ec2-default-net-update-events.patch
+    - d/p/retain-netplan-world-readable.patch
+    - d/p/retain-old-groups.patch
+    - d/p/status-do-not-remove-duplicated-data.patch
+    - d/p/status-retain-recoverable-error-exit-code.patch
+    - d/p/revert-551f560d-cloud-config-after-snap-seeding.patch
+  * Upstream snapshot based on 24.2. (LP: #2071762).
+    List of changes from upstream can be found at
+    https://raw.githubusercontent.com/canonical/cloud-init/24.2/ChangeLog
+  * drop all d/p/cpick-* files as they are included in upstream snapshot
+
+ -- James Falcon <james.falcon@canonical.com>  Thu, 11 Jul 2024 16:36:14 -0500
+
 cloud-init (24.1.3-0ubuntu1~20.04.5) focal; urgency=medium
 
   * Upstream bug fix release based on 24.1.7
diff --git a/debian/control b/debian/control
index 63f728144..9065d1ea3 100644
--- a/debian/control
+++ b/debian/control
@@ -14,7 +14,6 @@ Build-Depends: debhelper (>= 9.20160709),
                python3-jsonpatch,
                python3-jsonschema,
                python3-mock,
-               python3-netifaces,
                python3-oauthlib,
                python3-pytest,
                python3-pytest-mock,
@@ -39,7 +38,6 @@ Depends: cloud-guest-utils | cloud-utils,
          procps,
          python3,
          python3-debconf,
-         python3-netifaces,
          python3-requests,
          python3-serial,
          ${misc:Depends},
diff --git a/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch b/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch
index d6af741b8..98a3f1cd6 100644
--- a/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch
+++ b/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch
@@ -9,7 +9,7 @@ Bug: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/2064300
 Last-Update: 2024-04-30
 --- a/cloudinit/cmd/main.py
 +++ b/cloudinit/cmd/main.py
-@@ -107,6 +107,11 @@ def extract_fns(args):
+@@ -121,6 +121,11 @@ def extract_fns(args):
      # since it would of broke if it couldn't have
      # read that file already...
      fn_cfgs = []
@@ -21,7 +21,7 @@ Last-Update: 2024-04-30
diff --git a/debian/patches/cpick-417ee551-fix-ec2-Ensure-metadata-exists-before-configuring-PBR b/debian/patches/cpick-417ee551-fix-ec2-Ensure-metadata-exists-before-configuring-PBR
deleted file mode 100644
--- a/debian/patches/cpick-417ee551-fix-ec2-Ensure-metadata-exists-before-configuring-PBR
+++ /dev/null
-Date: Thu, 23 May 2024 15:30:04 -0500
-Subject: [PATCH] fix(ec2): Ensure metadata exists before configuring PBR
- (#5287)
-
-Fixes GH-5283
----
- cloudinit/sources/DataSourceEc2.py  | 19 +++++--
- tests/unittests/sources/test_ec2.py | 88 +++++++++++++++++++++++++++++
- 2 files changed, 102 insertions(+), 5 deletions(-)
-
---- a/cloudinit/sources/DataSourceEc2.py
-+++ b/cloudinit/sources/DataSourceEc2.py
-@@ -963,11 +963,23 @@ def _configure_policy_routing(
-     @param: is_ipv4: Boolean indicating if we are acting over ipv4 or not.
-     @param: table: Routing table id.
-     """
-+    if is_ipv4:
-+        subnet_prefix_routes = nic_metadata.get("subnet-ipv4-cidr-block")
-+        ips = nic_metadata.get("local-ipv4s")
-+    else:
-+        subnet_prefix_routes = nic_metadata.get("subnet-ipv6-cidr-blocks")
-+        ips = nic_metadata.get("ipv6s")
-+    if not (subnet_prefix_routes and ips):
-+        LOG.debug(
-+            "Not enough IMDS information to configure policy routing "
-+            "for IPv%s",
-+            "4" if is_ipv4 else "6",
-+        )
-+        return
-+
-     if not dev_config.get("routes"):
-         dev_config["routes"] = []
-     if is_ipv4:
--        subnet_prefix_routes = nic_metadata["subnet-ipv4-cidr-block"]
--        ips = nic_metadata["local-ipv4s"]
-         try:
-             lease = distro.dhcp_client.dhcp_discovery(nic_name, distro=distro)
-             gateway = lease["routers"]
-@@ -988,9 +1000,6 @@ def _configure_policy_routing(
-                 "table": table,
-             },
-         )
--    else:
--        subnet_prefix_routes = nic_metadata["subnet-ipv6-cidr-blocks"]
--        ips = nic_metadata["ipv6s"]
- 
-     subnet_prefix_routes = (
-         [subnet_prefix_routes]
---- a/tests/unittests/sources/test_ec2.py
-+++ b/tests/unittests/sources/test_ec2.py
-@@ -194,6 +194,43 @@ NIC2_MD_IPV4_IPV6_MULTI_IP = {
-     "vpc-ipv6-cidr-blocks": "2600:1f16:292:100::/56",
- }
- 
-+MULTI_NIC_V6_ONLY_MD = {
-+    "macs": {
-+        "02:6b:df:a2:4b:2b": {
-+            "device-number": "1",
-+            "interface-id": "eni-0669816d0cf606123",
-+            "ipv6s": "2600:1f16:67f:f201:8d2e:4d1f:9e80:4ab9",
-+            "local-hostname": "i-0951b6d0b66337123.us-east-2.compute.internal",
-+            "mac": "02:6b:df:a2:4b:2b",
-+            "owner-id": "483410185123",
-+            "security-group-ids": "sg-0bf34e5c3cde1d123",
-+            "security-groups": "default",
-+            "subnet-id": "subnet-0903f279682c66123",
-+            "subnet-ipv6-cidr-blocks": "2600:1f16:67f:f201:0:0:0:0/64",
-+            "vpc-id": "vpc-0ac1befb8c824a123",
-+            "vpc-ipv4-cidr-block": "192.168.0.0/20",
-+            "vpc-ipv4-cidr-blocks": "192.168.0.0/20",
-+            "vpc-ipv6-cidr-blocks": "2600:1f16:67f:f200:0:0:0:0/56",
-+        },
-+        "02:7c:03:b8:5c:af": {
-+            "device-number": "0",
-+            "interface-id": "eni-0f3cddb84c16e1123",
-+            "ipv6s": "2600:1f16:67f:f201:6613:29a2:dbf7:2f1f",
-+            "local-hostname": "i-0951b6d0b66337123.us-east-2.compute.internal",
-+            "mac": "02:7c:03:b8:5c:af",
-+            "owner-id": "483410185123",
-+            "security-group-ids": "sg-0bf34e5c3cde1d123",
-+            "security-groups": "default",
-+            "subnet-id": "subnet-0903f279682c66123",
-+            "subnet-ipv6-cidr-blocks": "2600:1f16:67f:f201:0:0:0:0/64",
-+            "vpc-id": "vpc-0ac1befb8c824a123",
-+            "vpc-ipv4-cidr-block": "192.168.0.0/20",
-+            "vpc-ipv4-cidr-blocks": "192.168.0.0/20",
-+            "vpc-ipv6-cidr-blocks": "2600:1f16:67f:f200:0:0:0:0/56",
-+        },
-+    }
-+}
-+
- SECONDARY_IP_METADATA_2018_09_24 = {
-     "ami-id": "ami-0986c2ac728528ac2",
-     "ami-launch-index": "0",
-@@ -1396,6 +1433,57 @@ class TestConvertEc2MetadataNetworkConfi
-             ),
-         )
- 
-+    def test_convert_ec2_metadata_network_config_multi_nics_ipv6_only(self):
-+        """Like above, but only ipv6s are present in metadata."""
-+        macs_to_nics = {
-+            "02:7c:03:b8:5c:af": "eth0",
-+            "02:6b:df:a2:4b:2b": "eth1",
-+        }
-+        mac_data = copy.deepcopy(MULTI_NIC_V6_ONLY_MD)
-+        network_metadata = {"interfaces": mac_data}
-+        expected = {
-+            "version": 2,
-+            "ethernets": {
-+                "eth0": {
-+                    "dhcp4": True,
-+                    "dhcp4-overrides": {"route-metric": 100},
-+                    "dhcp6": True,
-+                    "match": {"macaddress": "02:7c:03:b8:5c:af"},
-+                    "set-name": "eth0",
-+                    "dhcp6-overrides": {"route-metric": 100},
-+                },
-+                "eth1": {
-+                    "dhcp4": True,
-+                    "dhcp4-overrides": {
-+                        "route-metric": 200,
-+                        "use-routes": True,
-+                    },
-+                    "dhcp6": True,
-+                    "match": {"macaddress": "02:6b:df:a2:4b:2b"},
-+                    "set-name": "eth1",
-+                    "routes": [
-+                        {"to": "2600:1f16:67f:f201:0:0:0:0/64", "table": 101},
-+                    ],
-+                    "routing-policy": [
-+                        {
-+                            "from": "2600:1f16:67f:f201:8d2e:4d1f:9e80:4ab9",
-+                            "table": 101,
-+                        },
-+                    ],
-+                    "dhcp6-overrides": {
-+                        "route-metric": 200,
-+                        "use-routes": True,
-+                    },
-+                },
-+            },
-+        }
-+        distro = mock.Mock()
-+        distro.network_activator = activators.NetplanActivator
-+        assert expected == ec2.convert_ec2_metadata_network_config(
-+            network_metadata, distro, macs_to_nics
-+        )
-+        distro.dhcp_client.dhcp_discovery.assert_not_called()
-+
-     def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self):
-         """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists."""
-         macs_to_nics = {self.mac1: "eth9"}
diff --git a/debian/patches/cpick-51c6569f-fix-snapd-ubuntu-do-not-snap-refresh-when-snap-absent b/debian/patches/cpick-51c6569f-fix-snapd-ubuntu-do-not-snap-refresh-when-snap-absent
deleted file mode 100644
index ffdc36972..000000000
--- a/debian/patches/cpick-51c6569f-fix-snapd-ubuntu-do-not-snap-refresh-when-snap-absent
+++ /dev/null
@@ -1,63 +0,0 @@
-From 51c6569f96bee4f91aad5db58765c7abab7ffcdf Mon Sep 17 00:00:00 2001
-From: Chad Smith <chad.smith@canonical.com>
-Date: Fri, 3 May 2024 14:58:01 -0600
-Subject: [PATCH] fix(snapd): ubuntu do not snap refresh when snap absent
-
-No longer call snap refresh when cloud-config user-data
-specifies upgade_packages:true and custom Ubuntu images
-do not have snapd package installed
-
-LP: #2064132
----
- cloudinit/distros/ubuntu.py            |  3 ++-
- tests/unittests/distros/test_ubuntu.py | 32 ++++++++++++++++++++++++++
- 2 files changed, 34 insertions(+), 1 deletion(-)
- create mode 100644 tests/unittests/distros/test_ubuntu.py
-
---- a/cloudinit/distros/ubuntu.py
-+++ b/cloudinit/distros/ubuntu.py
-@@ -40,7 +40,8 @@ class Distro(debian.Distro):
- 
-     def package_command(self, command, args=None, pkgs=None):
-         super().package_command(command, args, pkgs)
--        self.snap.upgrade_packages()
-+        if self.snap.available():
-+            self.snap.upgrade_packages()
- 
-     @property
-     def preferred_ntp_clients(self):
---- /dev/null
-+++ b/tests/unittests/distros/test_ubuntu.py
-@@ -0,0 +1,32 @@
-+# This file is part of cloud-init. See LICENSE file for license information.
-+import pytest
-+
-+from cloudinit.distros import fetch
-+
-+
-+class TestPackageCommand:
-+    @pytest.mark.parametrize("snap_available", (True, False))
-+    def test_package_command_only_refresh_snap_when_available(
-+        self, snap_available, mocker
-+    ):
-+        """Avoid calls to snap refresh when snap command not available."""
-+        m_snap_available = mocker.patch(
-+            "cloudinit.distros.ubuntu.Snap.available",
-+            return_value=snap_available,
-+        )
-+        m_snap_upgrade_packges = mocker.patch(
-+            "cloudinit.distros.ubuntu.Snap.upgrade_packages",
-+            return_value=snap_available,
-+        )
-+        m_apt_run_package_command = mocker.patch(
-+            "cloudinit.distros.package_management.apt.Apt.run_package_command",
-+        )
-+        cls = fetch("ubuntu")
-+        distro = cls("ubuntu", {}, None)
-+        distro.package_command("upgrade")
-+        m_apt_run_package_command.assert_called_once_with("upgrade")
-+        m_snap_available.assert_called_once()
-+        if snap_available:
-+            m_snap_upgrade_packges.assert_called_once()
-+        else:
-+            m_snap_upgrade_packges.assert_not_called()
diff --git a/debian/patches/cpick-74dc7cce-test-Fix-failing-test_ec2.py-test-5324 b/debian/patches/cpick-74dc7cce-test-Fix-failing-test_ec2.py-test-5324
deleted file mode 100644
index 290714033..000000000
--- a/debian/patches/cpick-74dc7cce-test-Fix-failing-test_ec2.py-test-5324
+++ /dev/null
@@ -1,21 +0,0 @@
-From 74dc7cce0ef828632bf15172a38927a757a43607 Mon Sep 17 00:00:00 2001
-From: James Falcon <james.falcon@canonical.com>
-Date: Fri, 24 May 2024 09:33:23 -0500
-Subject: [PATCH] test: Fix failing test_ec2.py test (#5324)
-
-#5321 missed one test update. Fix it.
----
- tests/unittests/sources/test_ec2.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/tests/unittests/sources/test_ec2.py
-+++ b/tests/unittests/sources/test_ec2.py
-@@ -1478,7 +1478,7 @@ class TestConvertEc2MetadataNetworkConfi
-             },
-         }
-         distro = mock.Mock()
--        distro.network_activator = activators.NetplanActivator
-+        distro.network_renderer = netplan.Renderer
-         assert expected == ec2.convert_ec2_metadata_network_config(
-             network_metadata, distro, macs_to_nics
-         )
diff --git a/debian/patches/cpick-a6f7577d-bug-package_update-avoid-snap-refresh-in-images-without b/debian/patches/cpick-a6f7577d-bug-package_update-avoid-snap-refresh-in-images-without
deleted file mode 100644
index 417f421b7..000000000
--- a/debian/patches/cpick-a6f7577d-bug-package_update-avoid-snap-refresh-in-images-without
+++ /dev/null
@@ -1,233 +0,0 @@
-From a6f7577d582aa51d51ca129fcff65313e210d47b Mon Sep 17 00:00:00 2001
-From: Chad Smith <chad.smith@canonical.com>
-Date: Sun, 28 Apr 2024 21:53:22 -0600
-Subject: [PATCH] bug(package_update): avoid snap refresh in images without
- snap command
-
-When package_update or package_upgrade: true are provided in
-cloud-config do not call snap refresh on systems that do not have
-the snap command.
-
-This was intended as fixed in cbe5f3a119 but the fix only avoided
-snap refresh during package install, not the generic package update
-operation.
-
-Fixes: GH-5143
-LP: #2064132
----
- cloudinit/distros/__init__.py        |   6 ++
- tests/unittests/distros/test_init.py | 134 +++++++++++++++++++++------
- 2 files changed, 114 insertions(+), 26 deletions(-)
-
---- a/cloudinit/distros/__init__.py
-+++ b/cloudinit/distros/__init__.py
-@@ -396,6 +396,12 @@ class Distro(persistence.CloudInitPickle
- 
-     def update_package_sources(self):
-         for manager in self.package_managers:
-+            if not manager.available():
-+                LOG.debug(
-+                    "Skipping update for package manager '%s': not available.",
-+                    manager.name,
-+                )
-+                continue
-             try:
-                 manager.update_package_sources()
-             except Exception as e:
---- a/tests/unittests/distros/test_init.py
-+++ b/tests/unittests/distros/test_init.py
-@@ -33,6 +33,8 @@ for separator in [":", ".", "/", "#", "?
-     # unable to parse URLs ("[", "]").
-     INVALID_URL_CHARS.remove(separator)
- 
-+M_PATH = "cloudinit.distros.package_management."
-+
- 
- class TestGetPackageMirrorInfo:
-     """
-@@ -253,31 +255,120 @@ class TestGetPackageMirrorInfo:
-         assert {"primary": expected} == ret
- 
- 
--class TestInstall:
--    """Tests for cloudinit.distros.Distro.install_packages."""
-+class TestUpdatePackageSources:
-+    """Tests for cloudinit.distros.Distro.update_package_sources."""
- 
--    @pytest.fixture(autouse=True)
--    def ensure_available(self, mocker):
-+    @pytest.mark.parametrize(
-+        "apt_error,snap_error,expected_logs",
-+        [
-+            pytest.param(
-+                RuntimeError("fail to find 'apt' command"),
-+                None,
-+                [
-+                    "Failed to update package using apt: fail to find 'apt'"
-+                    " command"
-+                ],
-+            ),
-+            pytest.param(
-+                None,
-+                RuntimeError("fail to find 'snap' command"),
-+                [
-+                    "Failed to update package using snap: fail to find 'snap'"
-+                    " command"
-+                ],
-+            ),
-+        ],
-+    )
-+    def test_log_errors_with_updating_package_source(
-+        self, apt_error, snap_error, expected_logs, mocker, caplog
-+    ):
-+        """Log error raised from any package_manager.update_package_sources."""
-+        mocker.patch(M_PATH + "apt.Apt.available", return_value=True)
-+        mocker.patch(M_PATH + "snap.Snap.available", return_value=True)
-         mocker.patch(
--            "cloudinit.distros.package_management.apt.Apt.available",
--            return_value=True,
-+            M_PATH + "apt.Apt.update_package_sources",
-+            side_effect=apt_error,
-         )
-         mocker.patch(
--            "cloudinit.distros.package_management.snap.Snap.available",
--            return_value=True,
-+            M_PATH + "snap.Snap.update_package_sources",
-+            side_effect=snap_error,
-         )
-+        _get_distro("ubuntu").update_package_sources()
-+        for log in expected_logs:
-+            assert log in caplog.text
-+
-+    @pytest.mark.parametrize(
-+        "apt_available,snap_available,expected_logs",
-+        [
-+            pytest.param(
-+                True,
-+                False,
-+                ["Skipping update for package manager 'snap': not available."],
-+            ),
-+            pytest.param(
-+                False,
-+                True,
-+                ["Skipping update for package manager 'apt': not available."],
-+            ),
-+            pytest.param(
-+                False,
-+                False,
-+                [
-+                    "Skipping update for package manager 'apt': not"
-+                    " available.",
-+                    "Skipping update for package manager 'snap': not"
-+                    " available.",
-+                ],
-+            ),
-+        ],
-+    )
-+    def test_run_available_package_managers(
-+        self, apt_available, snap_available, expected_logs, mocker, caplog
-+    ):
-+        """Avoid update_package_sources on unavailable package managers"""
-+
-+        mocker.patch(M_PATH + "apt.Apt.available", return_value=apt_available)
-+        mocker.patch(
-+            M_PATH + "snap.Snap.available",
-+            return_value=snap_available,
-+        )
-+
-+        m_apt_update = mocker.patch(M_PATH + "apt.Apt.update_package_sources")
-+        m_snap_update = mocker.patch(
-+            M_PATH + "snap.Snap.update_package_sources"
-+        )
-+        _get_distro("ubuntu").update_package_sources()
-+        if not snap_available:
-+            m_snap_update.assert_not_called()
-+        else:
-+            m_snap_update.assert_called_once()
-+        if not apt_available:
-+            m_apt_update.assert_not_called()
-+        else:
-+            m_apt_update.assert_called_once()
-+        for log in expected_logs:
-+            assert log in caplog.text
-+
-+
-+class TestInstall:
-+    """Tests for cloudinit.distros.Distro.install_packages."""
-+
-+    @pytest.fixture(autouse=True)
-+    def ensure_available(self, mocker):
-+        mocker.patch(M_PATH + "apt.Apt.available", return_value=True)
-+        mocker.patch(M_PATH + "snap.Snap.available", return_value=True)
- 
-     @pytest.fixture
-     def m_apt_install(self, mocker):
-         return mocker.patch(
--            "cloudinit.distros.package_management.apt.Apt.install_packages",
-+            M_PATH + "apt.Apt.install_packages",
-             return_value=[],
-         )
- 
-     @pytest.fixture
-     def m_snap_install(self, mocker):
-         return mocker.patch(
--            "cloudinit.distros.package_management.snap.Snap.install_packages",
-+            M_PATH + "snap.Snap.install_packages",
-             return_value=[],
-         )
- 
-@@ -324,7 +415,7 @@ class TestInstall:
-     ):
-         """Test fail from package manager not supported by distro."""
-         m_snap_install = mocker.patch(
--            "cloudinit.distros.package_management.snap.Snap.install_packages",
-+            M_PATH + "snap.Snap.install_packages",
-             return_value=["pkg3"],
-         )
-         with pytest.raises(
-@@ -356,7 +447,7 @@ class TestInstall:
-     ):
-         """Test fail from package manager doesn't retry as generic."""
-         m_apt_install = mocker.patch(
--            "cloudinit.distros.package_management.apt.Apt.install_packages",
-+            M_PATH + "apt.Apt.install_packages",
-             return_value=["pkg1"],
-         )
-         with pytest.raises(PackageInstallerError):
-@@ -369,14 +460,8 @@ class TestInstall:
-         self, mocker, m_apt_install, m_snap_install, caplog
-     ):
-         """Test that no attempt is made if there are no package manager."""
--        mocker.patch(
--            "cloudinit.distros.package_management.apt.Apt.available",
--            return_value=False,
--        )
--        mocker.patch(
--            "cloudinit.distros.package_management.snap.Snap.available",
--            return_value=False,
--        )
-+        mocker.patch(M_PATH + "apt.Apt.available", return_value=False)
-+        mocker.patch(M_PATH + "snap.Snap.available", return_value=False)
-         with pytest.raises(PackageInstallerError):
-             _get_distro("ubuntu").install_packages(
-                 ["pkg1", "pkg2", {"other": "pkg3"}]
-@@ -449,16 +534,13 @@ class TestInstall:
- 
-         So test various combinations of these scenarios.
- """ -+ mocker.patch(M_PATH + "apt.Apt.available", return_value=apt_available) - mocker.patch( -- "cloudinit.distros.package_management.apt.Apt.available", -- return_value=apt_available, -- ) -- mocker.patch( -- "cloudinit.distros.package_management.apt.Apt.install_packages", -+ M_PATH + "apt.Apt.install_packages", - return_value=apt_failed, - ) - mocker.patch( -- "cloudinit.distros.package_management.snap.Snap.install_packages", -+ M_PATH + "snap.Snap.install_packages", - return_value=snap_failed, - ) - with pytest.raises(PackageInstallerError) as exc: diff --git a/debian/patches/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321 b/debian/patches/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321 deleted file mode 100644 index a4f99186e..000000000 --- a/debian/patches/cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321 +++ /dev/null @@ -1,88 +0,0 @@ -From d677663231baea15248867140aa00e01bdafd58c Mon Sep 17 00:00:00 2001 -From: James Falcon -Date: Fri, 24 May 2024 09:10:24 -0500 -Subject: [PATCH] fix: Check renderer for netplan-specific code (#5321) - -DataSourceEc2 has netplan-specific code that was gated by the -selected activator. However, in practice, the activators are rarely -used. Somebody may have selected a non-netplan renderer without -selecting the proper activator with it. Since this involves rendering -configuration, we should gate on the renderer instead. - -Fixes GH-5318 ---- - cloudinit/distros/__init__.py | 5 +++-- - cloudinit/sources/DataSourceEc2.py | 4 ++-- - tests/unittests/sources/test_ec2.py | 6 +++--- - 3 files changed, 8 insertions(+), 7 deletions(-) - ---- a/cloudinit/distros/__init__.py -+++ b/cloudinit/distros/__init__.py -@@ -348,7 +348,8 @@ class Distro(persistence.CloudInitPickle - except activators.NoActivatorException: - return None - -- def _get_renderer(self) -> Renderer: -+ @property -+ def network_renderer(self) -> Renderer: - priority = util.get_cfg_by_path( - self._cfg, ("network", "renderers"), None - ) -@@ -441,7 +442,7 @@ class Distro(persistence.CloudInitPickle - - Returns True if any devices failed to come up, otherwise False. 
- """ -- renderer = self._get_renderer() -+ renderer = self.network_renderer - network_state = parse_net_config_data(netconfig, renderer=renderer) - self._write_network_state(network_state, renderer) - ---- a/cloudinit/sources/DataSourceEc2.py -+++ b/cloudinit/sources/DataSourceEc2.py -@@ -19,7 +19,7 @@ from cloudinit import url_helper as uhel - from cloudinit import util, warnings - from cloudinit.distros import Distro - from cloudinit.event import EventScope, EventType --from cloudinit.net import activators -+from cloudinit.net import netplan - from cloudinit.net.dhcp import NoDHCPLeaseError - from cloudinit.net.ephemeral import EphemeralIPNetwork - from cloudinit.sources.helpers import ec2 -@@ -1075,7 +1075,7 @@ def convert_ec2_metadata_network_config( - netcfg["ethernets"][nic_name] = dev_config - return netcfg - # Apply network config for all nics and any secondary IPv4/v6 addresses -- is_netplan = distro.network_activator == activators.NetplanActivator -+ is_netplan = distro.network_renderer == netplan.Renderer - macs = sorted(macs_to_nics.keys()) - nic_order = _build_nic_order(macs_metadata, macs) - for mac in macs: ---- a/tests/unittests/sources/test_ec2.py -+++ b/tests/unittests/sources/test_ec2.py -@@ -10,7 +10,7 @@ import requests - import responses - - from cloudinit import helpers --from cloudinit.net import activators -+from cloudinit.net import netplan - from cloudinit.sources import DataSourceEc2 as ec2 - from tests.unittests import helpers as test_helpers - from tests.unittests.util import MockDistro -@@ -1345,7 +1345,7 @@ class TestConvertEc2MetadataNetworkConfi - }, - } - distro = mock.Mock() -- distro.network_activator = activators.NetplanActivator -+ distro.network_renderer = netplan.Renderer - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } -@@ -1422,7 +1422,7 @@ class TestConvertEc2MetadataNetworkConfi - }, - } - distro = mock.Mock() -- distro.network_activator = activators.NetplanActivator -+ distro.network_renderer = netplan.Renderer - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } diff --git a/debian/patches/cpick-d771d1f4-fix-ec2-Correctly-identify-netplan-renderer-5361 b/debian/patches/cpick-d771d1f4-fix-ec2-Correctly-identify-netplan-renderer-5361 deleted file mode 100644 index 0ce03e6bb..000000000 --- a/debian/patches/cpick-d771d1f4-fix-ec2-Correctly-identify-netplan-renderer-5361 +++ /dev/null @@ -1,50 +0,0 @@ -From d771d1f4ba74daed2e82bd3e5d8232e11f52e29a Mon Sep 17 00:00:00 2001 -From: James Falcon -Date: Mon, 3 Jun 2024 21:53:06 -0500 -Subject: [PATCH] fix(ec2): Correctly identify netplan renderer (#5361) - ---- - cloudinit/sources/DataSourceEc2.py | 2 +- - tests/unittests/sources/test_ec2.py | 6 +++--- - 2 files changed, 4 insertions(+), 4 deletions(-) - ---- a/cloudinit/sources/DataSourceEc2.py -+++ b/cloudinit/sources/DataSourceEc2.py -@@ -1075,7 +1075,7 @@ def convert_ec2_metadata_network_config( - netcfg["ethernets"][nic_name] = dev_config - return netcfg - # Apply network config for all nics and any secondary IPv4/v6 addresses -- is_netplan = distro.network_renderer == netplan.Renderer -+ is_netplan = isinstance(distro.network_renderer, netplan.Renderer) - macs = sorted(macs_to_nics.keys()) - nic_order = _build_nic_order(macs_metadata, macs) - for mac in macs: ---- a/tests/unittests/sources/test_ec2.py -+++ b/tests/unittests/sources/test_ec2.py -@@ -1345,7 +1345,7 @@ class TestConvertEc2MetadataNetworkConfi - }, - } - distro = mock.Mock() -- distro.network_renderer = netplan.Renderer 
-+ distro.network_renderer = netplan.Renderer() - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } -@@ -1422,7 +1422,7 @@ class TestConvertEc2MetadataNetworkConfi - }, - } - distro = mock.Mock() -- distro.network_renderer = netplan.Renderer -+ distro.network_renderer = netplan.Renderer() - distro.dhcp_client.dhcp_discovery.return_value = { - "routers": "172.31.1.0" - } -@@ -1478,7 +1478,7 @@ class TestConvertEc2MetadataNetworkConfi - }, - } - distro = mock.Mock() -- distro.network_renderer = netplan.Renderer -+ distro.network_renderer = netplan.Renderer() - assert expected == ec2.convert_ec2_metadata_network_config( - network_metadata, distro, macs_to_nics - ) diff --git a/debian/patches/deprecation-version-boundary.patch b/debian/patches/deprecation-version-boundary.patch new file mode 100644 index 000000000..1e2a7804a --- /dev/null +++ b/debian/patches/deprecation-version-boundary.patch @@ -0,0 +1,18 @@ +Description: Add deprecation boundary version + This version makes any deprecations added after this version + to be considered "new" deprecations and therefore get logged + at an INFO log level and not cause exit 2. + deprecations don't trigger warnings. +Author: James Falcon +Last-Update: 2024-06-28 +--- a/cloudinit/features.py ++++ b/cloudinit/features.py +@@ -87,7 +87,7 @@ On Debian and Ubuntu systems, cc_apt_con + to write /etc/apt/sources.list directly. + """ + +-DEPRECATION_INFO_BOUNDARY = "devel" ++DEPRECATION_INFO_BOUNDARY = "20.1" + """ + DEPRECATION_INFO_BOUNDARY is used by distros to configure at which upstream + version to start logging deprecations at a level higher than INFO. diff --git a/debian/patches/do-not-block-user-login.patch b/debian/patches/do-not-block-user-login.patch deleted file mode 100644 index 09681c18c..000000000 --- a/debian/patches/do-not-block-user-login.patch +++ /dev/null @@ -1,28 +0,0 @@ -Description: Do not block user login -Author: Brett Holman -Origin: other -Bug: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/2039505 -Last-Update: 2023-10-16 ---- -This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ - ---- a/systemd/cloud-config.service.tmpl -+++ b/systemd/cloud-config.service.tmpl -@@ -3,7 +3,6 @@ - Description=Apply the settings specified in cloud-config - After=network-online.target cloud-config.target - After=snapd.seeded.service --Before=systemd-user-sessions.service - Wants=network-online.target cloud-config.target - ConditionPathExists=!/etc/cloud/cloud-init.disabled - ConditionKernelCommandLine=!cloud-init=disabled ---- a/systemd/cloud-init.service.tmpl -+++ b/systemd/cloud-init.service.tmpl -@@ -38,6 +38,7 @@ Conflicts=shutdown.target - Before=shutdown.target - Conflicts=shutdown.target - {% endif %} -+Before=systemd-user-sessions.service - ConditionPathExists=!/etc/cloud/cloud-init.disabled - ConditionKernelCommandLine=!cloud-init=disabled - ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled diff --git a/debian/patches/drop-unsupported-systemd-condition-environment.patch b/debian/patches/drop-unsupported-systemd-condition-environment.patch new file mode 100644 index 000000000..4f4eabc35 --- /dev/null +++ b/debian/patches/drop-unsupported-systemd-condition-environment.patch @@ -0,0 +1,77 @@ +Description: Drop systemd ignored ConditionEnvironment keys from units on Focal + The systemd ConditionEnvironment config key was introduced in systemd + version 246, yet Ubuntu Focal contains only systemd v. 245.4. 
+ is otherwise ignored by systemd on Focal, but drop the config to avoid
+ warnings from systemctl status cloud-init.
+Origin: backport
+Author: Chad Smith <chad.smith@canonical.com>
+Last-Update: 2024-05-21
+---
+This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
+--- a/systemd/cloud-config.service.tmpl
++++ b/systemd/cloud-config.service.tmpl
+@@ -6,7 +6,6 @@ After=network-online.target cloud-config
+ Wants=network-online.target cloud-config.target
+ ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ ConditionKernelCommandLine=!cloud-init=disabled
+-ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
+ 
+ [Service]
+ Type=oneshot
+--- a/systemd/cloud-final.service.tmpl
++++ b/systemd/cloud-final.service.tmpl
+@@ -10,7 +10,6 @@ Before=apt-daily.service
+ Wants=network-online.target cloud-config.service
+ ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ ConditionKernelCommandLine=!cloud-init=disabled
+-ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
+ 
+ 
+ [Service]
+--- a/systemd/cloud-init-local.service.tmpl
++++ b/systemd/cloud-init-local.service.tmpl
+@@ -29,7 +29,6 @@ Conflicts=shutdown.target
+ RequiresMountsFor=/var/lib/cloud
+ ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ ConditionKernelCommandLine=!cloud-init=disabled
+-ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
+ 
+ [Service]
+ Type=oneshot
+--- a/systemd/cloud-init.service.tmpl
++++ b/systemd/cloud-init.service.tmpl
+@@ -42,7 +42,6 @@ Conflicts=shutdown.target
+ {% endif %}
+ ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ ConditionKernelCommandLine=!cloud-init=disabled
+-ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
+ 
+ [Service]
+ Type=oneshot
+--- a/systemd/cloud-init.target
++++ b/systemd/cloud-init.target
+@@ -12,4 +12,3 @@ Description=Cloud-init target
+ After=multi-user.target
+ ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ ConditionKernelCommandLine=!cloud-init=disabled
+-ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
+--- a/systemd/cloud-init-hotplugd.service
++++ b/systemd/cloud-init-hotplugd.service
+@@ -16,7 +16,6 @@ After=cloud-init.target
+ Requires=cloud-init-hotplugd.socket
+ ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ ConditionKernelCommandLine=!cloud-init=disabled
+-ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
+ 
+ [Service]
+ Type=oneshot
+--- a/systemd/cloud-init-hotplugd.socket
++++ b/systemd/cloud-init-hotplugd.socket
+@@ -8,7 +8,6 @@ Description=cloud-init hotplug hook sock
+ After=cloud-config.target
+ ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ ConditionKernelCommandLine=!cloud-init=disabled
+-ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
+ 
+ [Socket]
+ ListenFIFO=/run/cloud-init/hook-hotplug-cmd
diff --git a/debian/patches/expire-on-hashed-users.patch b/debian/patches/expire-on-hashed-users.patch
index 7e2468f04..a2ae50ce8 100644
--- a/debian/patches/expire-on-hashed-users.patch
+++ b/debian/patches/expire-on-hashed-users.patch
@@ -18,7 +18,7 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
 in cc_set_passwords, hashed passwords will be expired. Previous to 22.3,
 --- a/tests/unittests/config/test_cc_set_passwords.py
 +++ b/tests/unittests/config/test_cc_set_passwords.py
-@@ -194,61 +194,6 @@ class TestSetPasswordsHandle:
+@@ -195,61 +195,6 @@ class TestSetPasswordsHandle:
     @pytest.mark.parametrize(
         "user_cfg",
         [
-
         {"expire": "false", "list": ["root:R", "ubuntu:RANDOM"]},
         {
             "expire": "false",
-@@ -508,6 +453,7 @@ expire_cases = [
+@@ -509,6 +454,9 @@ expire_cases = [
  class TestExpire:
      @pytest.mark.parametrize("cfg", expire_cases)
      def test_expire(self, cfg, mocker, caplog):
-+        features.EXPIRE_APPLIES_TO_HASHED_USERS = True
++        # Stable release sets EXPIRE_APPLIES_TO_HASHED_USERS=False
++        # This test wants True
++        mocker.patch.object(features, "EXPIRE_APPLIES_TO_HASHED_USERS", True)
+         cfg = copy.deepcopy(cfg)
          cloud = get_cloud()
          mocker.patch(f"{MODPATH}subp.subp")
-         mocker.patch.object(cloud.distro, "chpasswd")
diff --git a/debian/patches/keep-dhclient-as-priority-client.patch b/debian/patches/keep-dhclient-as-priority-client.patch
index d1c97e795..515b3d3ec 100644
--- a/debian/patches/keep-dhclient-as-priority-client.patch
+++ b/debian/patches/keep-dhclient-as-priority-client.patch
@@ -4,7 +4,7 @@ Last-Update: 2024-02-20
 
 --- a/config/cloud.cfg.tmpl
 +++ b/config/cloud.cfg.tmpl
-@@ -301,7 +301,7 @@ system_info:
+@@ -303,7 +303,7 @@ system_info:
  {% elif variant in ["ubuntu", "unknown"] %}
  {# SRU_BLOCKER: do not ship network renderers on Xenial, Bionic or Eoan #}
     network:
diff --git a/debian/patches/netplan99-cannot-use-default.patch b/debian/patches/netplan99-cannot-use-default.patch
index 81d8ee04b..4f8e101b7 100644
--- a/debian/patches/netplan99-cannot-use-default.patch
+++ b/debian/patches/netplan99-cannot-use-default.patch
@@ -11,18 +11,18 @@ Last-Update: 2023-05-19
 This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
 --- a/cloudinit/net/netplan.py
 +++ b/cloudinit/net/netplan.py
-@@ -121,7 +121,7 @@ def _extract_addresses(config: dict, ent
+@@ -120,7 +120,7 @@ def _extract_addresses(config: dict, ent
          if subnet.get("gateway"):
              new_route = {
                  "via": subnet.get("gateway"),
 -                "to": "default",
 +                "to": "::/0" if ":" in subnet["gateway"] else "0.0.0.0/0",
              }
-             try:
-                 subnet_gateway = ipaddress.ip_address(subnet["gateway"])
+             # If the gateway is not contained within the subnet's
+             # network, mark it as on-link so that it can still be
 --- a/tests/unittests/test_net.py
 +++ b/tests/unittests/test_net.py
-@@ -403,7 +403,7 @@ network:
+@@ -433,7 +433,7 @@ network:
                  transmit-hash-policy: layer3+4
                  up-delay: 0
              routes:
@@ -31,25 +31,7 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
 -            -   to: default
 +            -   to: 0.0.0.0/0
                  via: 10.101.11.254
     vlans:
         bond0.3502:
-@@ -2505,7 +2505,7 @@ pre-down route del -net 10.0.0.0/8 gw 11
-                 - sacchromyces.maas
-                 - brettanomyces.maas
-             routes:
--            -   to: default
-+            -   to: 0.0.0.0/0
-                 via: 192.168.0.1
-         """
-     ).rstrip(" "),
-@@ -3255,7 +3255,7 @@ pre-down route del -net 10.0.0.0/8 gw 11
-                 transmit-hash-policy: layer3+4
-                 up-delay: 20
-             routes:
--            -   to: default
-+            -   to: 0.0.0.0/0
-                 via: 192.168.0.1
-             -   to: 10.1.3.0/24
-                 via: 192.168.0.3
-@@ -6966,9 +6966,9 @@ class TestNetplanNetRendering:
+@@ -3239,9 +3239,9 @@ class TestNetplanNetRendering:
                  macaddress: 00:11:22:33:44:55
              set-name: interface0
              routes:
-            -   to: default
+            -   to: 0.0.0.0/0
                 via: 11.0.0.1
-            -   to: default
+            -   to: ::/0
                 via: 11.0.0.1
             """,
             id="physical_gateway46",
-@@ -7005,9 +7005,9 @@ class TestNetplanNetRendering:
+@@ -3278,9 +3278,9 @@ class TestNetplanNetRendering:
             - eth0
             - eth1
             routes:
-            -   to: default
+            -   to: 0.0.0.0/0
                 via: 11.0.0.1
-            -   to: default
+            -   to: ::/0
                 via: 11.0.0.1
         eth0: {}
         eth1: {}
-@@ -7044,9 +7044,9 @@ class TestNetplanNetRendering:
+@@ -3317,9 +3317,9 @@ class TestNetplanNetRendering:
             interfaces:
             - eth0
             routes:
-            -   to: default
+            -   to: 0.0.0.0/0
                 via: 11.0.0.1
-            -   to: default
+            -   to: ::/0
                 via: 11.0.0.1
             """,
             id="bridge_gateway46",
-@@ -7080,9 +7080,9 @@ class TestNetplanNetRendering:
+@@ -3353,9 +3353,9 @@ class TestNetplanNetRendering:
             id: 101
             link: eth0
             routes:
-            -   to: default
+            -   to: 0.0.0.0/0
                 via: 11.0.0.1
-            -   to: default
+            -   to: ::/0
                 via: 11.0.0.1
             """,
             id="vlan_gateway46",
-@@ -7131,7 +7131,7 @@ class TestNetplanNetRendering:
+@@ -3404,7 +3404,7 @@ class TestNetplanNetRendering:
                 - exemplary
             set-name: interface0
             routes:
-            -   to: default
+            -   to: 0.0.0.0/0
                 via: 192.168.23.1
             """,
             id="nameserver_gateway4",
-@@ -7166,7 +7166,7 @@ class TestNetplanNetRendering:
+@@ -3439,7 +3439,7 @@ class TestNetplanNetRendering:
             match:
                 macaddress: 00:11:22:33:44:55
             routes:
-            -   to: default
+            -   to: 0.0.0.0/0
                 via: 192.168.23.1
             -   to: 10.176.0.0/24
                 via: 10.184.225.121
-@@ -7201,7 +7201,7 @@ class TestNetplanNetRendering:
+@@ -3474,7 +3474,7 @@ class TestNetplanNetRendering:
             match:
                 macaddress: 00:11:22:33:44:55
             routes:
-            -   to: default
+            -   to: 0.0.0.0/0
                 via: 192.168.23.1
             -   to: 192.167.225.122/24
                 via: 192.168.23.1
-@@ -7237,10 +7237,10 @@ class TestNetplanNetRendering:
+@@ -3510,10 +3510,10 @@ class TestNetplanNetRendering:
             match:
                 macaddress: 00:11:22:33:44:55
             routes:
-            -   to: default
+            -   to: 0.0.0.0/0
                 via: 192.168.23.1
-            -   to: default
+            -   to: ::/0
                 via: 192.168.23.1
             set-name: interface0
 --- a/tests/unittests/distros/test_netconfig.py
 +++ b/tests/unittests/distros/test_netconfig.py
-@@ -199,7 +199,7 @@ network:
+@@ -193,7 +193,7 @@ network:
              addresses:
              - 192.168.1.5/24
             routes:
-            -   to: default
+            -   to: 0.0.0.0/0
                 via: 192.168.1.254
         eth1:
             dhcp4: true
-@@ -218,7 +218,7 @@ network:
+@@ -212,7 +212,7 @@ network:
             addresses:
             - 2607:f0d0:1002:0011::2/64
             routes:
-            -   to: default
+            -   to: ::/0
                 via: 2607:f0d0:1002:0011::1
         eth1:
             dhcp4: true
-@@ -1036,7 +1036,7 @@ class TestNetCfgDistroArch(TestNetCfgDis
+@@ -1022,7 +1022,7 @@ class TestNetCfgDistroArch(TestNetCfgDis
             addresses:
             - 192.168.1.5/24
             routes:
-            -   to: default
+            -   to: 0.0.0.0/0
                 via: 192.168.1.254
         eth1:
             dhcp4: true
+--- a/tests/unittests/net/network_configs.py
++++ b/tests/unittests/net/network_configs.py
+@@ -1684,7 +1684,7 @@ pre-down route del -net 10.0.0.0/8 gw 11
+                 - sacchromyces.maas
+                 - brettanomyces.maas
+             routes:
+-            -   to: default
++            -   to: 0.0.0.0/0
+                 via: 192.168.0.1
+         """
+     ).rstrip(" "),
+@@ -3119,7 +3119,7 @@ pre-down route del -net 10.0.0.0/8 gw 11
+                 transmit-hash-policy: layer3+4
+                 up-delay: 20
+             routes:
+-            -   to: default
++            -   to: 0.0.0.0/0
+                 via: 192.168.0.1
+             -   to: 10.1.3.0/24
+                 via: 192.168.0.3
diff --git a/debian/patches/retain-ec2-default-net-update-events.patch b/debian/patches/retain-ec2-default-net-update-events.patch
index 81404f9a6..20387bce2 100644
--- a/debian/patches/retain-ec2-default-net-update-events.patch
+++ b/debian/patches/retain-ec2-default-net-update-events.patch
@@ -7,7 +7,7 @@ Last-Update: 2024-02-15
 This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
 --- a/cloudinit/sources/DataSourceEc2.py
 +++ b/cloudinit/sources/DataSourceEc2.py
-@@ -105,13 +105,6 @@ class DataSourceEc2(sources.DataSource):
+@@ -106,13 +106,6 @@ class DataSourceEc2(sources.DataSource):
          }
      }
diff --git a/debian/patches/retain-netplan-world-readable.patch b/debian/patches/retain-netplan-world-readable.patch
index 78c765cf3..4be7dba2f 100644
--- a/debian/patches/retain-netplan-world-readable.patch
+++ b/debian/patches/retain-netplan-world-readable.patch
@@ -20,7 +20,7 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
  be written as a single root read-only file /etc/netplan/50-cloud-init.yaml.
 --- a/tests/unittests/distros/test_netconfig.py
 +++ b/tests/unittests/distros/test_netconfig.py
-@@ -608,32 +608,41 @@ class TestNetCfgDistroUbuntuNetplan(Test
+@@ -602,32 +602,41 @@ class TestNetCfgDistroUbuntuNetplan(Test
              (self.netplan_path(), V1_TO_V2_NET_CFG_OUTPUT, 0o600),
          )
 
     def test_apply_network_config_v2_passthrough_retain_orig_perms(self):
         """Custom permissions on existing netplan is kept when more strict."""
-@@ -673,11 +682,14 @@ class TestNetCfgDistroUbuntuNetplan(Test
         expected_cfgs = (
             (self.netplan_path(), V2_PASSTHROUGH_NET_CFG_OUTPUT, 0o600),
         )
         self.assertIn("Passthrough netplan v2 config", self.logs.getvalue())
         self.assertIn(
             "Selected renderer 'netplan' from priority list: ['netplan']",
-@@ -1035,12 +1047,16 @@ class TestNetCfgDistroArch(TestNetCfgDis
+@@ -1021,12 +1033,16 @@ class TestNetCfgDistroArch(TestNetCfgDis
         with mock.patch(
             "cloudinit.net.netplan.get_devicelist", return_value=[]
         ):
diff --git a/debian/patches/retain-old-groups.patch b/debian/patches/retain-old-groups.patch
index 2cc33f86a..84c4cb0a9 100644
--- a/debian/patches/retain-old-groups.patch
+++ b/debian/patches/retain-old-groups.patch
@@ -7,7 +7,7 @@ Last-Update: 2023-07-24
 This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
 --- a/config/cloud.cfg.tmpl
 +++ b/config/cloud.cfg.tmpl
-@@ -17,7 +17,7 @@
+@@ -18,7 +18,7 @@
                    "photon": "wheel",
                    "openmandriva": "wheel, users, systemd-journal",
                    "suse": "cdrom, users",
diff --git a/debian/patches/revert-551f560d-cloud-config-after-snap-seeding.patch b/debian/patches/revert-551f560d-cloud-config-after-snap-seeding.patch
index f97fb288d..a338d91f2 100644
--- a/debian/patches/revert-551f560d-cloud-config-after-snap-seeding.patch
+++ b/debian/patches/revert-551f560d-cloud-config-after-snap-seeding.patch
@@ -27,7 +27,7 @@ Last-Update: 2024-02-14
      def get_template_filename(self, name):
 --- a/cloudinit/config/cc_lxd.py
 +++ b/cloudinit/config/cc_lxd.py
-@@ -210,7 +210,6 @@ def handle(name: str, cfg: Config, cloud
+@@ -79,7 +79,6 @@ def handle(name: str, cfg: Config, cloud
              f" '{type(lxd_cfg).__name__}'"
          )
 
     preseed_str = lxd_cfg.get("preseed", "")
 --- a/cloudinit/config/cc_snap.py
 +++ b/cloudinit/config/cc_snap.py
-@@ -191,7 +191,7 @@ def handle(name: str, cfg: Config, cloud
+@@ -102,7 +102,7 @@ def handle(name: str, cfg: Config, cloud
              "Skipping module named %s, no 'snap' key in configuration", name
          )
          return
         os.path.join(cloud.paths.get_ipath_cur(), "snapd.assertions"),
 --- a/cloudinit/config/cc_ubuntu_autoinstall.py
 +++ b/cloudinit/config/cc_ubuntu_autoinstall.py
-@@ -6,7 +6,6 @@ import logging
- import re
- from textwrap import dedent
- 
--from cloudinit import util
- from cloudinit.cloud import Cloud
- from cloudinit.config import Config
- from cloudinit.config.schema import (
-@@ -84,7 +83,6 @@ def handle(name: str, cfg: Config, cloud
+@@ -83,7 +83,6 @@ def handle(name: str, cfg: Config, cloud
          )
          return
 
-    util.wait_for_snap_seeded(cloud)
-    snap_list, _ = subp(["snap", "list"])
+    snap_list, _ = subp.subp(["snap", "list"])
     installer_present = None
     for snap_name in LIVE_INSTALLER_SNAPS:
 --- a/cloudinit/util.py
 +++ b/cloudinit/util.py
-@@ -69,7 +69,7 @@ from cloudinit import (
+@@ -72,7 +72,7 @@ from cloudinit import (
      url_helper,
      version,
  )
-@@ -3083,18 +3083,6 @@ def wait_for_files(flist, maxwait, naple
+@@ -3064,18 +3064,6 @@ def wait_for_files(flist, maxwait, naple
      return need
 
     result = get_mount_info(mount_point, get_mnt_opts=True)
 --- a/systemd/cloud-config.service.tmpl
 +++ b/systemd/cloud-config.service.tmpl
-@@ -2,6 +2,7 @@
+@@ -2,7 +2,7 @@
  [Unit]
- Description=Apply the settings specified in cloud-config
- After=network-online.target cloud-config.target
-+After=snapd.seeded.service
- Before=systemd-user-sessions.service
+ # https://cloudinit.readthedocs.io/en/latest/explanation/boot.html
+ Description=Cloud-init: Config Stage
+-After=network-online.target cloud-config.target
++After=network-online.target cloud-config.target snapd.seeded.service
  Wants=network-online.target cloud-config.target
  ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ ConditionKernelCommandLine=!cloud-init=disabled
 --- a/tests/integration_tests/modules/test_frequency_override.py
 +++ b/tests/integration_tests/modules/test_frequency_override.py
 @@ -1,7 +1,6 @@
-@mock.patch(MODPATH + "util.wait_for_snap_seeded")
- @mock.patch(MODPATH + "subp")
+ @mock.patch(MODPATH + "subp.subp")
  class TestHandleAutoinstall:
      """Test cc_ubuntu_autoinstall handling of config."""
diff --git a/debian/patches/series b/debian/patches/series
index 5aa214b34..7dffcbd01 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -5,15 +5,10 @@ netplan99-cannot-use-default.patch
 retain-old-groups.patch
 keep-dhclient-as-priority-client.patch
 revert-551f560d-cloud-config-after-snap-seeding.patch
-do-not-block-user-login.patch
 status-do-not-remove-duplicated-data.patch
 retain-apt-pre-deb822.patch
 status-retain-recoverable-error-exit-code.patch
 retain-ec2-default-net-update-events.patch
-cpick-a6f7577d-bug-package_update-avoid-snap-refresh-in-images-without
 cli-retain-file-argument-as-main-cmd-arg.patch
-cpick-51c6569f-fix-snapd-ubuntu-do-not-snap-refresh-when-snap-absent
-cpick-417ee551-fix-ec2-Ensure-metadata-exists-before-configuring-PBR
-cpick-d6776632-fix-Check-renderer-for-netplan-specific-code-5321
-cpick-74dc7cce-test-Fix-failing-test_ec2.py-test-5324
-cpick-d771d1f4-fix-ec2-Correctly-identify-netplan-renderer-5361
+drop-unsupported-systemd-condition-environment.patch
+deprecation-version-boundary.patch
diff --git a/debian/patches/status-do-not-remove-duplicated-data.patch b/debian/patches/status-do-not-remove-duplicated-data.patch
index 14bc47cd1..3a55f09b7 100644
--- a/debian/patches/status-do-not-remove-duplicated-data.patch
+++ b/debian/patches/status-do-not-remove-duplicated-data.patch
@@ -24,7 +24,7 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
          """\
          ---
 +        _schema_version: '1'
-+        boot_status_code: enabled-by-kernel-cmdline
++        boot_status_code: enabled-by-kernel-command-line
          datasource: ''
          detail: 'Running in stage: init'
@@ -520,6 +521,23 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
          recoverable_errors: {}
 +        schemas:
 +          '1':
-+            boot_status_code: enabled-by-kernel-cmdline
++            boot_status_code: enabled-by-kernel-command-line
 +            datasource: ''
 +            detail: 'Running in stage: init'
 +            errors: []
          stage: init
          status: running
          ...
-@@ -552,6 +570,25 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
+@@ -552,6 +570,27 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
                  "init-local": {"finished": 123.46, "start": 123.45},
                  "last_update": "Thu, 01 Jan 1970 00:02:04 +0000",
                  "recoverable_errors": {},
 +                "_schema_version": "1",
 +                "schemas": {
 +                    "1": {
-+                        "boot_status_code": "enabled-by-kernel-cmdline",
++                        "boot_status_code": (
++                            "enabled-by-kernel-command-line"
++                        ),
 +                        "datasource": "",
 +                        "detail": "Running in stage: init",
 +                        "errors": [],
                  "stage": "init",
              },
              id="running_json_format",
-@@ -583,6 +620,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
+@@ -583,6 +622,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
              MyArgs(long=False, wait=False, format="json"),
              1,
              {
 +                "_schema_version": "1",
-                 "boot_status_code": "enabled-by-kernel-cmdline",
+                 "boot_status_code": "enabled-by-kernel-command-line",
                  "datasource": "nocloud",
                  "detail": (
-@@ -604,6 +642,30 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
+@@ -604,6 +644,32 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
                  },
                  "last_update": "Thu, 01 Jan 1970 00:02:05 +0000",
                  "recoverable_errors": {},
 +                "schemas": {
 +                    "1": {
-+                        "boot_status_code": "enabled-by-kernel-cmdline",
++                        "boot_status_code": (
++                            "enabled-by-kernel-command-line"
++                        ),
 +                        "datasource": "nocloud",
 +                        "detail": "DataSourceNoCloud "
 +                        "[seed=/var/.../seed/nocloud-net][dsmode=net]",
                  "stage": None,
              },
              id="running_json_format_with_errors",
-@@ -666,6 +728,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
+@@ -666,6 +732,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
              MyArgs(long=False, wait=False, format="json"),
              2,
              {
 +                "_schema_version": "1",
-                 "boot_status_code": "enabled-by-kernel-cmdline",
+                 "boot_status_code": "enabled-by-kernel-command-line",
                  "datasource": "nocloud",
                  "detail": (
-@@ -725,6 +788,89 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
+@@ -725,6 +792,89 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
                      "don't try to open the hatch or we'll all be soup"
                  ],
              },
 +                "schemas": {
 +                    "1": {
-+                        "boot_status_code": "enabled-by-kernel-cmdline",
++                        "boot_status_code": "enabled-by-kernel-command-line",
 +                        "datasource": "nocloud",
 +                        "detail": "DataSourceNoCloud "
 +                        "[seed=/var/.../seed/nocloud-net][dsmode=net]",
diff --git a/debian/patches/status-retain-recoverable-error-exit-code.patch b/debian/patches/status-retain-recoverable-error-exit-code.patch
index d5d839060..37eb3f3bb 100644
--- a/debian/patches/status-retain-recoverable-error-exit-code.patch
+++ b/debian/patches/status-retain-recoverable-error-exit-code.patch
@@ -17,7 +17,7 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
 
 --- a/tests/unittests/cmd/test_status.py
 +++ b/tests/unittests/cmd/test_status.py
-@@ -726,7 +726,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
+@@ -730,7 +730,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr
          },
          None,
          MyArgs(long=False, wait=False,
format="json"), @@ -25,4 +25,4 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ + 0, { "_schema_version": "1", - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", diff --git a/doc-requirements.txt b/doc-requirements.txt index fccef7820..beb14dd90 100644 --- a/doc-requirements.txt +++ b/doc-requirements.txt @@ -1,9 +1,12 @@ +-r requirements.txt doc8 furo m2r2 pyyaml +setuptools sphinx==7.1.2 sphinx-design sphinx-copybutton sphinx-notfound-page +sphinxcontrib.datatemplates sphinxcontrib-spelling diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt index dd6a0f6aa..049680358 100644 --- a/doc/examples/cloud-config-apt.txt +++ b/doc/examples/cloud-config-apt.txt @@ -8,29 +8,6 @@ # Number: Set pipelining to some number (not recommended) apt_pipelining: False -## apt config via system_info: -# under the 'system_info', you can customize cloud-init's interaction -# with apt. -# system_info: -# apt_get_command: [command, argument, argument] -# apt_get_upgrade_subcommand: dist-upgrade -# -# apt_get_command: -# To specify a different 'apt-get' command, set 'apt_get_command'. -# This must be a list, and the subcommand (update, upgrade) is appended to it. -# default is: -# ['apt-get', '--option=Dpkg::Options::=--force-confold', -# '--option=Dpkg::options::=--force-unsafe-io', '--assume-yes', '--quiet'] -# -# apt_get_upgrade_subcommand: "dist-upgrade" -# Specify a different subcommand for 'upgrade. The default is 'dist-upgrade'. -# This is the subcommand that is invoked for package_upgrade. -# -# apt_get_wrapper: -# command: eatmydata -# enabled: [True, False, "auto"] -# - # Install additional packages on first boot # # Default: none diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 87fc52e8b..2cafef88e 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -35,6 +35,10 @@ users: lock_passwd: true ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB csmith@fringe + - name: testuser + gecos: Mr. Test + homedir: /local/testdir + sudo: ["ALL=(ALL) NOPASSWD:ALL"] - name: cloudy gecos: Magic Cloud App Daemon User inactive: '5' @@ -100,6 +104,8 @@ users: # # Allow a user unrestricted sudo access. # sudo: ALL=(ALL) NOPASSWD:ALL +# or +# sudo: ["ALL=(ALL) NOPASSWD:ALL"] # # Adding multiple sudo rule strings. # sudo: @@ -137,13 +143,5 @@ users: # # users[0] (the first user in users) overrides the user directive. # -# The 'default' user above references the distro's config: -# system_info: -# default_user: -# name: Ubuntu -# plain_text_passwd: 'ubuntu' -# home: /home/ubuntu -# shell: /bin/bash -# lock_passwd: True -# gecos: Ubuntu -# groups: [adm, cdrom, dip, lxd, sudo] +# The 'default' user above references the distro's config set in +# /etc/cloud/cloud.cfg. 
diff --git a/doc/examples/cloud-config-yum-repo.txt b/doc/examples/cloud-config-yum-repo.txt index e8f2bbb41..6a4037e24 100644 --- a/doc/examples/cloud-config-yum-repo.txt +++ b/doc/examples/cloud-config-yum-repo.txt @@ -11,8 +11,9 @@ yum_repos: # Any repository configuration options # See: man yum.conf # - # This one is required! + # At least one of 'baseurl' or 'metalink' is required! baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch + metalink: https://mirrors.fedoraproject.org/metalink?repo=epel-$releasever&arch=$basearch&infra=$infra&content=$contentdir enabled: false failovermethod: priority gpgcheck: true diff --git a/doc/examples/kernel-cmdline.txt b/doc/examples/kernel-command-line.txt similarity index 100% rename from doc/examples/kernel-cmdline.txt rename to doc/examples/kernel-command-line.txt diff --git a/doc/module-docs/cc_ansible/data.yaml b/doc/module-docs/cc_ansible/data.yaml new file mode 100644 index 000000000..ca5ac3bfb --- /dev/null +++ b/doc/module-docs/cc_ansible/data.yaml @@ -0,0 +1,16 @@ +cc_ansible: + description: > + This module provides Ansible integration for augmenting cloud-init's + configuration of the local node. + + This module installs ``ansible`` during boot and then uses ``ansible-pull`` + to run the playbook repository at the remote URL. + examples: + - comment: | + Example 1: + file: cc_ansible/example1.yaml + - comment: | + Example 2: + file: cc_ansible/example2.yaml + name: Ansible + title: Configure Ansible for instance diff --git a/doc/module-docs/cc_ansible/example1.yaml b/doc/module-docs/cc_ansible/example1.yaml new file mode 100644 index 000000000..9fd5e5204 --- /dev/null +++ b/doc/module-docs/cc_ansible/example1.yaml @@ -0,0 +1,7 @@ +#cloud-config +ansible: + package_name: ansible-core + install_method: distro + pull: + url: https://github.com/holmanb/vmboot.git + playbook_name: ubuntu.yml diff --git a/doc/module-docs/cc_ansible/example2.yaml b/doc/module-docs/cc_ansible/example2.yaml new file mode 100644 index 000000000..2adb491d0 --- /dev/null +++ b/doc/module-docs/cc_ansible/example2.yaml @@ -0,0 +1,7 @@ +#cloud-config +ansible: + package_name: ansible-core + install_method: pip + pull: + url: https://github.com/holmanb/vmboot.git + playbook_name: ubuntu.yml diff --git a/doc/module-docs/cc_apk_configure/data.yaml b/doc/module-docs/cc_apk_configure/data.yaml new file mode 100644 index 000000000..893856166 --- /dev/null +++ b/doc/module-docs/cc_apk_configure/data.yaml @@ -0,0 +1,23 @@ +cc_apk_configure: + description: | + This module handles configuration of the Alpine Package Keeper (APK) + ``/etc/apk/repositories`` file. + + .. note:: + To ensure that APK configuration is valid YAML, any strings + containing special characters, especially colons, should be quoted + (":"). + examples: + - comment: > + Example 1: Keep the existing ``/etc/apk/repositories`` file unaltered. + file: cc_apk_configure/example1.yaml + - comment: > + Example 2: Create repositories file for Alpine v3.12 main and community + using default mirror site. + file: cc_apk_configure/example2.yaml + - comment: > + Example 3: Create repositories file for Alpine Edge main, community, and + testing using a specified mirror site and also a local repo. 
+    file: cc_apk_configure/example3.yaml
+  name: APK Configure
+  title: Configure APK repositories file
diff --git a/doc/module-docs/cc_apk_configure/example1.yaml b/doc/module-docs/cc_apk_configure/example1.yaml
new file mode 100644
index 000000000..b680c9fe0
--- /dev/null
+++ b/doc/module-docs/cc_apk_configure/example1.yaml
@@ -0,0 +1,3 @@
+#cloud-config
+apk_repos:
+  preserve_repositories: true
diff --git a/doc/module-docs/cc_apk_configure/example2.yaml b/doc/module-docs/cc_apk_configure/example2.yaml
new file mode 100644
index 000000000..7ab1c58bc
--- /dev/null
+++ b/doc/module-docs/cc_apk_configure/example2.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+apk_repos:
+  alpine_repo:
+    community_enabled: true
+    version: 'v3.12'
diff --git a/doc/module-docs/cc_apk_configure/example3.yaml b/doc/module-docs/cc_apk_configure/example3.yaml
new file mode 100644
index 000000000..d2496f1b0
--- /dev/null
+++ b/doc/module-docs/cc_apk_configure/example3.yaml
@@ -0,0 +1,8 @@
+#cloud-config
+apk_repos:
+  alpine_repo:
+    base_url: https://some-alpine-mirror/alpine
+    community_enabled: true
+    testing_enabled: true
+    version: edge
+  local_repo_base_url: https://my-local-server/local-alpine
diff --git a/doc/module-docs/cc_apt_configure/data.yaml b/doc/module-docs/cc_apt_configure/data.yaml
new file mode 100644
index 000000000..9d5509d1f
--- /dev/null
+++ b/doc/module-docs/cc_apt_configure/data.yaml
@@ -0,0 +1,34 @@
+cc_apt_configure:
+  description: |
+    This module handles configuration of advanced package tool (APT) options
+    and adding source lists. There are configuration options such as
+    ``apt_get_wrapper`` and ``apt_get_command`` that control how cloud-init
+    invokes ``apt-get``. These configuration options are handled on a
+    per-distro basis, so consult documentation for cloud-init's distro support
+    for instructions on using these config options.
+
+    By default, cloud-init will generate default APT sources information in
+    ``deb822`` format at :file:`/etc/apt/sources.list.d/<distro>.sources`.
+    When the value of ``sources_list`` does not appear to be ``deb822``
+    format, or stable distribution releases disable ``deb822`` format,
+    :file:`/etc/apt/sources.list` will be written instead.
+
+    .. note::
+       To ensure that APT configuration is valid YAML, any strings containing
+       special characters, especially colons, should be quoted (":").
+
+    .. note::
+       For more information about APT configuration, see the "Additional APT
+       configuration" example.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_apt_configure/example1.yaml
+  - comment: >
+      Example 2: Cloud-init version 23.4 will generate a ``deb822``-formatted
+      ``sources`` file at ``/etc/apt/sources.list.d/<distro>.sources`` instead
+      of ``/etc/apt/sources.list`` when ``sources_list`` content is in
+      ``deb822`` format.
+    file: cc_apt_configure/example2.yaml
+  name: Apt Configure
+  title: Configure APT for the user
diff --git a/doc/module-docs/cc_apt_configure/example1.yaml b/doc/module-docs/cc_apt_configure/example1.yaml
new file mode 100644
index 000000000..3e7238b0b
--- /dev/null
+++ b/doc/module-docs/cc_apt_configure/example1.yaml
@@ -0,0 +1,65 @@
+#cloud-config
+apt:
+  preserve_sources_list: false
+  disable_suites:
+  - $RELEASE-updates
+  - backports
+  - $RELEASE
+  - mysuite
+  primary:
+  - arches:
+    - amd64
+    - i386
+    - default
+    uri: http://us.archive.ubuntu.com/ubuntu
+    search:
+    - http://cool.but-sometimes-unreachable.com/ubuntu
+    - http://us.archive.ubuntu.com/ubuntu
+    search_dns: false
+  - arches:
+    - s390x
+    - arm64
+    uri: http://archive-to-use-for-arm64.example.com/ubuntu
+
+  security:
+  - arches:
+    - default
+    search_dns: true
+  sources_list: |
+    deb $MIRROR $RELEASE main restricted
+    deb-src $MIRROR $RELEASE main restricted
+    deb $PRIMARY $RELEASE universe restricted
+    deb $SECURITY $RELEASE-security multiverse
+  debconf_selections:
+    set1: the-package the-package/some-flag boolean true
+  conf: |
+    APT {
+      Get {
+        Assume-Yes 'true';
+        Fix-Broken 'true';
+      }
+    }
+  proxy: http://[[user][:pass]@]host[:port]/
+  http_proxy: http://[[user][:pass]@]host[:port]/
+  ftp_proxy: ftp://[[user][:pass]@]host[:port]/
+  https_proxy: https://[[user][:pass]@]host[:port]/
+  sources:
+    source1:
+      keyid: keyid
+      keyserver: keyserverurl
+      source: deb [signed-by=$KEY_FILE] http://<url>/ bionic main
+    source2:
+      source: ppa:<ppa-name>
+    source3:
+      source: deb $MIRROR $RELEASE multiverse
+      key: |
+        ------BEGIN PGP PUBLIC KEY BLOCK-------
+
+        ------END PGP PUBLIC KEY BLOCK-------
+    source4:
+      source: deb $MIRROR $RELEASE multiverse
+      append: false
+      key: |
+        ------BEGIN PGP PUBLIC KEY BLOCK-------
+
+        ------END PGP PUBLIC KEY BLOCK-------
diff --git a/doc/module-docs/cc_apt_configure/example2.yaml b/doc/module-docs/cc_apt_configure/example2.yaml
new file mode 100644
index 000000000..920dc2fc1
--- /dev/null
+++ b/doc/module-docs/cc_apt_configure/example2.yaml
@@ -0,0 +1,7 @@
+#cloud-config
+apt:
+  sources_list: |
+    Types: deb
+    URIs: http://archive.ubuntu.com/ubuntu/
+    Suites: $RELEASE
+    Components: main
diff --git a/doc/module-docs/cc_apt_pipelining/data.yaml b/doc/module-docs/cc_apt_pipelining/data.yaml
new file mode 100644
index 000000000..fb0d95c5f
--- /dev/null
+++ b/doc/module-docs/cc_apt_pipelining/data.yaml
@@ -0,0 +1,24 @@
+cc_apt_pipelining:
+  description: |
+    This module configures APT's ``Acquire::http::Pipeline-Depth`` option,
+    which controls how APT handles HTTP pipelining. It may be useful to
+    disable pipelining, because some web servers (such as S3) do not
+    pipeline properly (LP: #948461).
+
+    Valid configuration options for this module are:
+
+    - ``os``: (Default) use distro default
+    - ``false``: Disable pipelining altogether
+    - ``<number>``: Manually specify pipeline depth. This is not recommended.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_apt_pipelining/example1.yaml
+  - comment: |
+      Example 2:
+    file: cc_apt_pipelining/example2.yaml
+  - comment: |
+      Example 3:
+    file: cc_apt_pipelining/example3.yaml
+  name: Apt Pipelining
+  title: Configure APT pipelining
diff --git a/doc/module-docs/cc_apt_pipelining/example1.yaml b/doc/module-docs/cc_apt_pipelining/example1.yaml
new file mode 100644
index 000000000..758cf60f6
--- /dev/null
+++ b/doc/module-docs/cc_apt_pipelining/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+apt_pipelining: false
diff --git a/doc/module-docs/cc_apt_pipelining/example2.yaml b/doc/module-docs/cc_apt_pipelining/example2.yaml
new file mode 100644
index 000000000..1890fe196
--- /dev/null
+++ b/doc/module-docs/cc_apt_pipelining/example2.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+apt_pipelining: os
diff --git a/doc/module-docs/cc_apt_pipelining/example3.yaml b/doc/module-docs/cc_apt_pipelining/example3.yaml
new file mode 100644
index 000000000..b4677a04c
--- /dev/null
+++ b/doc/module-docs/cc_apt_pipelining/example3.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+apt_pipelining: 3
diff --git a/doc/module-docs/cc_bootcmd/data.yaml b/doc/module-docs/cc_bootcmd/data.yaml
new file mode 100644
index 000000000..552e16887
--- /dev/null
+++ b/doc/module-docs/cc_bootcmd/data.yaml
@@ -0,0 +1,22 @@
+cc_bootcmd:
+  description: |
+    This module runs arbitrary commands very early in the boot process, only
+    slightly after a boothook would run. This is very similar to a boothook,
+    but more user-friendly. The environment variable ``INSTANCE_ID`` will be
+    set to the current instance ID for all run commands. Commands can be
+    specified either as lists or strings. For invocation details, see
+    ``runcmd``.
+
+    .. note::
+       ``bootcmd`` should only be used for things that could not be done later
+       in the boot process.
+
+    .. note::
+       When writing files, do not use the ``/tmp`` dir as it races with
+       ``systemd-tmpfiles-clean`` (LP: #1707222). Use ``/run/somedir`` instead.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_bootcmd/example1.yaml
+  name: Bootcmd
+  title: Run arbitrary commands early in the boot process
diff --git a/doc/module-docs/cc_bootcmd/example1.yaml b/doc/module-docs/cc_bootcmd/example1.yaml
new file mode 100644
index 000000000..92106c74a
--- /dev/null
+++ b/doc/module-docs/cc_bootcmd/example1.yaml
@@ -0,0 +1,4 @@
+#cloud-config
+bootcmd:
+- echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
+- [cloud-init-per, once, mymkfs, mkfs, /dev/vdb]
diff --git a/doc/module-docs/cc_byobu/data.yaml b/doc/module-docs/cc_byobu/data.yaml
new file mode 100644
index 000000000..c9a0fa1da
--- /dev/null
+++ b/doc/module-docs/cc_byobu/data.yaml
@@ -0,0 +1,26 @@
+cc_byobu:
+  description: |
+    This module controls whether Byobu is enabled or disabled system-wide and
+    for the default system user. If Byobu is to be enabled, this module will
+    ensure it is installed. Likewise, if Byobu is to be disabled, it will be
+    removed (if installed).
+
+    Valid configuration options for this module are:
+
+    - ``enable-system``: enable Byobu system-wide
+    - ``enable-user``: enable Byobu for the default user
+    - ``disable-system``: disable Byobu system-wide
+    - ``disable-user``: disable Byobu for the default user
+    - ``enable``: enable Byobu both system-wide and for the default user
+    - ``disable``: disable Byobu for all users
+    - ``user``: alias for ``enable-user``
+    - ``system``: alias for ``enable-system``
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_byobu/example1.yaml
+  - comment: |
+      Example 2:
+    file: cc_byobu/example2.yaml
+  name: Byobu
+  title: Enable/disable Byobu system-wide and for the default user
diff --git a/doc/module-docs/cc_byobu/example1.yaml b/doc/module-docs/cc_byobu/example1.yaml
new file mode 100644
index 000000000..c14532495
--- /dev/null
+++ b/doc/module-docs/cc_byobu/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+byobu_by_default: enable-user
diff --git a/doc/module-docs/cc_byobu/example2.yaml b/doc/module-docs/cc_byobu/example2.yaml
new file mode 100644
index 000000000..381c409a4
--- /dev/null
+++ b/doc/module-docs/cc_byobu/example2.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+byobu_by_default: disable-system
diff --git a/doc/module-docs/cc_ca_certs/data.yaml b/doc/module-docs/cc_ca_certs/data.yaml
new file mode 100644
index 000000000..d18451a29
--- /dev/null
+++ b/doc/module-docs/cc_ca_certs/data.yaml
@@ -0,0 +1,20 @@
+cc_ca_certs:
+  description: |
+    This module adds CA certificates to the system's CA store and updates any
+    related files using the appropriate OS-specific utility. The default CA
+    certificates can be disabled or removed from use by the system with the
+    configuration option ``remove_defaults``.
+
+    .. note::
+       Certificates must be specified using valid YAML. To specify a
+       multi-line certificate, the YAML multi-line list syntax must be used.
+
+    .. note::
+       Alpine Linux requires the ``ca-certificates`` package to be installed
+       in order to provide the ``update-ca-certificates`` command.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_ca_certs/example1.yaml
+  name: CA Certificates
+  title: Add CA certificates
diff --git a/doc/module-docs/cc_ca_certs/example1.yaml b/doc/module-docs/cc_ca_certs/example1.yaml
new file mode 100644
index 000000000..a958ddb91
--- /dev/null
+++ b/doc/module-docs/cc_ca_certs/example1.yaml
@@ -0,0 +1,9 @@
+#cloud-config
+ca_certs:
+  remove_defaults: true
+  trusted:
+  - single_line_cert
+  - |
+    -----BEGIN CERTIFICATE-----
+    YOUR-ORGS-TRUSTED-CA-CERT-HERE
+    -----END CERTIFICATE-----
diff --git a/doc/module-docs/cc_chef/data.yaml b/doc/module-docs/cc_chef/data.yaml
new file mode 100644
index 000000000..f78158f18
--- /dev/null
+++ b/doc/module-docs/cc_chef/data.yaml
@@ -0,0 +1,17 @@
+cc_chef:
+  description: |
+    This module enables Chef to be installed (from packages, gems, or
+    omnibus). Before this occurs, Chef configuration is written to disk
+    (``validation.pem``, ``client.pem``, ``firstboot.json``, ``client.rb``),
+    and required directories are created (``/etc/chef``, ``/var/log/chef``,
+    and so on).
+
+    If configured, Chef will be installed and started in either daemon or
+    non-daemon mode. If run in non-daemon mode, post-run actions are executed
+    to do finishing activities such as removing ``validation.pem``.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_chef/example1.yaml
+  name: Chef
+  title: Module that installs, configures, and starts Chef
diff --git a/doc/module-docs/cc_chef/example1.yaml b/doc/module-docs/cc_chef/example1.yaml
new file mode 100644
index 000000000..d71ff3dfa
--- /dev/null
+++ b/doc/module-docs/cc_chef/example1.yaml
@@ -0,0 +1,17 @@
+#cloud-config
+chef:
+  directories: [/etc/chef, /var/log/chef]
+  encrypted_data_bag_secret: /etc/chef/encrypted_data_bag_secret
+  environment: _default
+  initial_attributes:
+    apache:
+      keepalive: false
+      prefork: {maxclients: 100}
+  install_type: omnibus
+  log_level: :auto
+  omnibus_url_retries: 2
+  run_list: ['recipe[apache2]', 'role[db]']
+  server_url: https://chef.yourorg.com:4000
+  ssl_verify_mode: :verify_peer
+  validation_cert: system
+  validation_name: yourorg-validator
diff --git a/doc/module-docs/cc_disable_ec2_metadata/data.yaml b/doc/module-docs/cc_disable_ec2_metadata/data.yaml
new file mode 100644
index 000000000..ce47a30bd
--- /dev/null
+++ b/doc/module-docs/cc_disable_ec2_metadata/data.yaml
@@ -0,0 +1,11 @@
+cc_disable_ec2_metadata:
+  description: |
+    This module can disable the EC2 datasource by rejecting the route to
+    ``169.254.169.254``, the usual route to the datasource. This module is
+    disabled by default.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_disable_ec2_metadata/example1.yaml
+  name: Disable EC2 Metadata
+  title: Disable AWS EC2 Metadata
diff --git a/doc/module-docs/cc_disable_ec2_metadata/example1.yaml b/doc/module-docs/cc_disable_ec2_metadata/example1.yaml
new file mode 100644
index 000000000..e43aa00c1
--- /dev/null
+++ b/doc/module-docs/cc_disable_ec2_metadata/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+disable_ec2_metadata: true
diff --git a/doc/module-docs/cc_disk_setup/data.yaml b/doc/module-docs/cc_disk_setup/data.yaml
new file mode 100644
index 000000000..abc56e643
--- /dev/null
+++ b/doc/module-docs/cc_disk_setup/data.yaml
@@ -0,0 +1,31 @@
+cc_disk_setup:
+  description: |
+    This module configures simple partition tables and filesystems.
+
+    .. note::
+       For more detail about configuration options for disk setup, see the
+       disk setup example.
+
+    .. note::
+       If a swap partition is being created via ``disk_setup``, then an
+       ``fs_entry`` entry is also needed in order for ``mkswap`` to be run;
+       otherwise, when swap activation is later attempted, it will fail.
+
+    For convenience, aliases can be specified for disks using the
+    ``device_aliases`` config key, which takes a dictionary of ``alias: path``
+    mappings. There are automatic aliases for ``swap`` and ``ephemeral``,
+    where ``swap`` will always refer to the active swap partition and
+    ``ephemeral`` will refer to the block device of the ephemeral image.
+
+    Disk partitioning is done using the ``disk_setup`` directive. This config
+    directive accepts a dictionary where each key is either a path to a
+    block device or an alias specified in ``device_aliases``, and each value
+    holds the configuration options for the device. File system configuration
+    is done using the ``fs_setup`` directive. This config directive accepts a
+    list of filesystem configs; a minimal sketch of that shape is shown below.
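+
+    The following sketch illustrates the dictionary-of-devices form of
+    ``disk_setup`` and the list form of ``fs_setup`` just described (the
+    device name and label are illustrative, not defaults):
+
+    .. code-block:: yaml
+
+      disk_setup:
+        /dev/sdb: {table_type: gpt, layout: true, overwrite: false}
+      fs_setup:
+      - {label: data, filesystem: ext4, device: /dev/sdb1}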
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_disk_setup/example1.yaml
+  name: Disk Setup
+  title: Configure partitions and filesystems
diff --git a/doc/module-docs/cc_disk_setup/example1.yaml b/doc/module-docs/cc_disk_setup/example1.yaml
new file mode 100644
index 000000000..fe2fe6e02
--- /dev/null
+++ b/doc/module-docs/cc_disk_setup/example1.yaml
@@ -0,0 +1,24 @@
+#cloud-config
+device_aliases: {my_alias: /dev/sdb, swap_disk: /dev/sdc}
+disk_setup:
+  /dev/sdd: {layout: true, overwrite: true, table_type: mbr}
+  my_alias:
+    layout: [50, 50]
+    overwrite: true
+    table_type: gpt
+  swap_disk:
+    layout:
+    - [100, 82]
+    overwrite: true
+    table_type: gpt
+fs_setup:
+- {cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s, device: my_alias.1, filesystem: ext4,
+  label: fs1}
+- {device: my_alias.2, filesystem: ext4, label: fs2}
+- {device: swap_disk.1, filesystem: swap, label: swap}
+- {device: /dev/sdd1, filesystem: ext4, label: fs3}
+mounts:
+- [my_alias.1, /mnt1]
+- [my_alias.2, /mnt2]
+- [swap_disk.1, none, swap, sw, '0', '0']
+- [/dev/sdd1, /mnt3]
diff --git a/doc/module-docs/cc_fan/data.yaml b/doc/module-docs/cc_fan/data.yaml
new file mode 100644
index 000000000..eba08fcb2
--- /dev/null
+++ b/doc/module-docs/cc_fan/data.yaml
@@ -0,0 +1,19 @@
+cc_fan:
+  description: |
+    This module installs, configures and starts the Ubuntu fan network
+    system (`Read more about Ubuntu Fan <https://wiki.ubuntu.com/FanNetworking>`_).
+
+    If cloud-init sees a ``fan`` entry in cloud-config it will:
+
+    - Write ``config_path`` with the contents of the ``config`` key
+    - Install the package ``ubuntu-fan`` if it is not installed
+    - Ensure the service is started (or restarted if it was previously
+      running)
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_fan/example1.yaml
+  name: Fan
+  title: Configure Ubuntu fan networking
diff --git a/doc/module-docs/cc_fan/example1.yaml b/doc/module-docs/cc_fan/example1.yaml
new file mode 100644
index 000000000..ee2816d8a
--- /dev/null
+++ b/doc/module-docs/cc_fan/example1.yaml
@@ -0,0 +1,9 @@
+#cloud-config
+fan:
+  config: |
+    # fan 240
+    10.0.0.0/8 eth0/16 dhcp
+    10.0.0.0/8 eth1/16 dhcp off
+    # fan 241
+    241.0.0.0/8 eth0/16 dhcp
+  config_path: /etc/network/fan
diff --git a/doc/module-docs/cc_final_message/data.yaml b/doc/module-docs/cc_final_message/data.yaml
new file mode 100644
index 000000000..1759909a5
--- /dev/null
+++ b/doc/module-docs/cc_final_message/data.yaml
@@ -0,0 +1,23 @@
+cc_final_message:
+  description: |
+    This module configures the final message that cloud-init writes. The
+    message is specified as a Jinja template with the following variables set:
+
+    - ``version``: cloud-init version
+    - ``timestamp``: time at cloud-init finish
+    - ``datasource``: cloud-init data source
+    - ``uptime``: system uptime
+
+    This message is written to the cloud-init log (usually
+    ``/var/log/cloud-init.log``) as well as stderr (which usually redirects to
+    ``/var/log/cloud-init-output.log``).
+
+    Upon exit, this module writes the system uptime, timestamp, and cloud-init
+    version to ``/var/lib/cloud/instance/boot-finished`` independent of any
+    user data specified for this module.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_final_message/example1.yaml
+  name: Final Message
+  title: Output final message when cloud-init has finished
diff --git a/doc/module-docs/cc_final_message/example1.yaml b/doc/module-docs/cc_final_message/example1.yaml
new file mode 100644
index 000000000..749d7c977
--- /dev/null
+++ b/doc/module-docs/cc_final_message/example1.yaml
@@ -0,0 +1,7 @@
+#cloud-config
+final_message: |
+  cloud-init has finished
+  version: $version
+  timestamp: $timestamp
+  datasource: $datasource
+  uptime: $uptime
diff --git a/doc/module-docs/cc_growpart/data.yaml b/doc/module-docs/cc_growpart/data.yaml
new file mode 100644
index 000000000..a6fc63771
--- /dev/null
+++ b/doc/module-docs/cc_growpart/data.yaml
@@ -0,0 +1,56 @@
+cc_growpart:
+  description: |
+    Growpart resizes partitions to fill the available disk space. This is
+    useful for cloud instances with a larger amount of disk space available
+    than the pristine image uses, as it allows the instance to automatically
+    make use of the extra space.
+
+    Note that this only works if the partition to be resized is the last one
+    on a disk with a classic partitioning scheme (MBR, BSD, GPT). LVM, Btrfs
+    and ZFS have no such restrictions.
+
+    The devices on which to run growpart are specified as a list under the
+    ``devices`` key.
+
+    There is some functionality overlap between this module and the
+    ``growroot`` functionality of ``cloud-initramfs-tools``. However, there
+    are some situations where one tool is able to function and the other is
+    not. The default configuration for both should work for most cloud
+    instances. To explicitly prevent ``cloud-initramfs-tools`` from running
+    ``growroot``, the file ``/etc/growroot-disabled`` can be created.
+
+    By default, both ``growroot`` and ``cc_growpart`` will check for the
+    existence of this file and will not run if it is present. However, this
+    file can be ignored for ``cc_growpart`` by setting
+    ``ignore_growroot_disabled`` to ``true``. `Read more about
+    cloud-initramfs-tools <https://launchpad.net/cloud-initramfs-tools>`_.
+
+    On FreeBSD, there is also the ``growfs`` service, which has a lot of
+    overlap with ``cc_growpart`` and ``cc_resizefs``, but only works on the
+    root partition. In that configuration we use it; otherwise, we fall back
+    to ``gpart``.
+
+    .. note::
+       ``growfs`` may insert a swap partition, if none is present, unless
+       instructed not to via ``growfs_swap_size=0`` in either ``kenv(1)``, or
+       ``rc.conf(5)``.
+
+    Growpart is enabled by default on the root partition. The default config
+    for growpart is:
+
+    .. code-block:: yaml
+
+      growpart:
+        mode: auto
+        devices: ["/"]
+        ignore_growroot_disabled: false
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_growpart/example1.yaml
+  - comment: |
+      Example 2:
+    file: cc_growpart/example2.yaml
+  name: Growpart
+  title: Grow partitions
diff --git a/doc/module-docs/cc_growpart/example1.yaml b/doc/module-docs/cc_growpart/example1.yaml
new file mode 100644
index 000000000..0d09125a0
--- /dev/null
+++ b/doc/module-docs/cc_growpart/example1.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+growpart:
+  devices: [/]
+  ignore_growroot_disabled: false
+  mode: auto
diff --git a/doc/module-docs/cc_growpart/example2.yaml b/doc/module-docs/cc_growpart/example2.yaml
new file mode 100644
index 000000000..045a78463
--- /dev/null
+++ b/doc/module-docs/cc_growpart/example2.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+growpart:
+  devices: [/, /dev/vdb1]
+  ignore_growroot_disabled: true
+  mode: growpart
diff --git a/doc/module-docs/cc_grub_dpkg/data.yaml b/doc/module-docs/cc_grub_dpkg/data.yaml
new file mode 100644
index 000000000..953002468
--- /dev/null
+++ b/doc/module-docs/cc_grub_dpkg/data.yaml
@@ -0,0 +1,20 @@
+cc_grub_dpkg:
+  description: |
+    Configure which device is used as the target for GRUB installation. This
+    module can be enabled/disabled using the ``enabled`` config key in the
+    ``grub_dpkg`` config dict. This module automatically selects a disk using
+    ``grub-probe`` if no installation device is specified.
+
+    The value placed into the debconf database is in the format the GRUB
+    post-install script expects. Normally, this is a ``/dev/disk/by-id/``
+    value, but we do fall back to the plain disk name if a ``by-id`` name is
+    not present.
+
+    If this module is executed inside a container, then the debconf database
+    is seeded with empty values, and ``install_devices_empty`` is set to
+    ``true``.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_grub_dpkg/example1.yaml
+  name: GRUB dpkg
+  title: Configure GRUB debconf installation device
diff --git a/doc/module-docs/cc_grub_dpkg/example1.yaml b/doc/module-docs/cc_grub_dpkg/example1.yaml
new file mode 100644
index 000000000..91ead616d
--- /dev/null
+++ b/doc/module-docs/cc_grub_dpkg/example1.yaml
@@ -0,0 +1,9 @@
+#cloud-config
+grub_dpkg:
+  enabled: true
+  # BIOS mode (install_devices needs disk)
+  grub-pc/install_devices: /dev/sda
+  grub-pc/install_devices_empty: false
+  # EFI mode (install_devices needs partition)
+  grub-efi/install_devices: /dev/sda
+
diff --git a/doc/module-docs/cc_install_hotplug/data.yaml b/doc/module-docs/cc_install_hotplug/data.yaml
new file mode 100644
index 000000000..32369719b
--- /dev/null
+++ b/doc/module-docs/cc_install_hotplug/data.yaml
@@ -0,0 +1,21 @@
+cc_install_hotplug:
+  description: |
+    This module will install the udev rules to enable hotplug if supported by
+    the datasource and enabled in the userdata. The udev rules will be
+    installed as ``/etc/udev/rules.d/90-cloud-init-hook-hotplug.rules``.
+
+    When hotplug is enabled, newly added network devices will be added to the
+    system by cloud-init. After udev detects the event, cloud-init will
+    refresh the instance metadata from the datasource, detect the device in
+    the updated metadata, then apply the updated network configuration.
+
+    Currently supported datasources: OpenStack, EC2
+  examples:
+  - comment: |
+      Example 1: Enable hotplug of network devices
+    file: cc_install_hotplug/example1.yaml
+  - comment: |
+      Example 2: Enable network hotplug alongside boot event
+    file: cc_install_hotplug/example2.yaml
+  name: Install Hotplug
+  title: Install hotplug udev rules if supported and enabled
diff --git a/doc/module-docs/cc_install_hotplug/example1.yaml b/doc/module-docs/cc_install_hotplug/example1.yaml
new file mode 100644
index 000000000..9e49a6acd
--- /dev/null
+++ b/doc/module-docs/cc_install_hotplug/example1.yaml
@@ -0,0 +1,4 @@
+#cloud-config
+updates:
+  network:
+    when: [hotplug]
diff --git a/doc/module-docs/cc_install_hotplug/example2.yaml b/doc/module-docs/cc_install_hotplug/example2.yaml
new file mode 100644
index 000000000..dba1ca0f2
--- /dev/null
+++ b/doc/module-docs/cc_install_hotplug/example2.yaml
@@ -0,0 +1,4 @@
+#cloud-config
+updates:
+  network:
+    when: [boot, hotplug]
diff --git a/doc/module-docs/cc_keyboard/data.yaml b/doc/module-docs/cc_keyboard/data.yaml
new file mode 100644
index 000000000..4e1e5da79
--- /dev/null
+++ b/doc/module-docs/cc_keyboard/data.yaml
@@ -0,0 +1,16 @@
+cc_keyboard:
+  description: |
+    Handle keyboard configuration.
+  examples:
+  - comment: |
+      Example 1: Set keyboard layout to "us"
+    file: cc_keyboard/example1.yaml
+  - comment: |
+      Example 2: Set specific keyboard layout, model, variant, options
+    file: cc_keyboard/example2.yaml
+  - comment: >
+      Example 3: For Alpine Linux, set specific keyboard layout and variant,
+      as used by ``setup-keymap``. Model and options are ignored.
+    file: cc_keyboard/example3.yaml
+  name: Keyboard
+  title: Set keyboard layout
diff --git a/doc/module-docs/cc_keyboard/example1.yaml b/doc/module-docs/cc_keyboard/example1.yaml
new file mode 100644
index 000000000..c74fa3571
--- /dev/null
+++ b/doc/module-docs/cc_keyboard/example1.yaml
@@ -0,0 +1,3 @@
+#cloud-config
+keyboard:
+  layout: us
diff --git a/doc/module-docs/cc_keyboard/example2.yaml b/doc/module-docs/cc_keyboard/example2.yaml
new file mode 100644
index 000000000..0b606eece
--- /dev/null
+++ b/doc/module-docs/cc_keyboard/example2.yaml
@@ -0,0 +1,6 @@
+#cloud-config
+keyboard:
+  layout: de
+  model: pc105
+  variant: nodeadkeys
+  options: compose:rwin
diff --git a/doc/module-docs/cc_keyboard/example3.yaml b/doc/module-docs/cc_keyboard/example3.yaml
new file mode 100644
index 000000000..9cfbb0cb0
--- /dev/null
+++ b/doc/module-docs/cc_keyboard/example3.yaml
@@ -0,0 +1,4 @@
+#cloud-config
+keyboard:
+  layout: gb
+  variant: gb-extd
diff --git a/doc/module-docs/cc_keys_to_console/data.yaml b/doc/module-docs/cc_keys_to_console/data.yaml
new file mode 100644
index 000000000..8e1b9a879
--- /dev/null
+++ b/doc/module-docs/cc_keys_to_console/data.yaml
@@ -0,0 +1,26 @@
+cc_keys_to_console:
+  description: |
+    For security reasons it may be desirable not to write SSH host keys and
+    their fingerprints to the console. To avoid either of them being written
+    to the console, the ``emit_keys_to_console`` config key under the main
+    ``ssh`` config key can be used.
+
+    To avoid the fingerprints of specific SSH host key types being written to
+    the console, the ``ssh_fp_console_blacklist`` config key can be used. By
+    default, all types of keys will have their fingerprints written to the
+    console.
+
+    To avoid host keys of certain types being written to the console, the
+    ``ssh_key_console_blacklist`` config key can be used. By default, all
+    supported host keys are written to the console.
+  examples:
+  - comment: |
+      Example 1: Do not print any SSH keys to system console
+    file: cc_keys_to_console/example1.yaml
+  - comment: |
+      Example 2: Do not print certain SSH key types to console
+    file: cc_keys_to_console/example2.yaml
+  - comment: |
+      Example 3: Do not print specific SSH key fingerprints to console
+    file: cc_keys_to_console/example3.yaml
+  name: Keys to Console
+  title: Control which SSH host keys may be written to console
diff --git a/doc/module-docs/cc_keys_to_console/example1.yaml b/doc/module-docs/cc_keys_to_console/example1.yaml
new file mode 100644
index 000000000..3c42d21fd
--- /dev/null
+++ b/doc/module-docs/cc_keys_to_console/example1.yaml
@@ -0,0 +1,3 @@
+#cloud-config
+ssh:
+  emit_keys_to_console: false
diff --git a/doc/module-docs/cc_keys_to_console/example2.yaml b/doc/module-docs/cc_keys_to_console/example2.yaml
new file mode 100644
index 000000000..3b82b0161
--- /dev/null
+++ b/doc/module-docs/cc_keys_to_console/example2.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+ssh_key_console_blacklist: [rsa]
diff --git a/doc/module-docs/cc_keys_to_console/example3.yaml b/doc/module-docs/cc_keys_to_console/example3.yaml
new file mode 100644
index 000000000..71127c0c7
--- /dev/null
+++ b/doc/module-docs/cc_keys_to_console/example3.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+ssh_fp_console_blacklist:
+- E25451E0221B5773DEBFF178ECDACB160995AA89
+- FE76292D55E8B28EE6DB2B34B2D8A784F8C0AAB0
+
diff --git a/doc/module-docs/cc_landscape/data.yaml b/doc/module-docs/cc_landscape/data.yaml
new file mode 100644
index 000000000..bec53109b
--- /dev/null
+++ b/doc/module-docs/cc_landscape/data.yaml
@@ -0,0 +1,46 @@
+cc_landscape:
+  description: |
+    This module installs and configures ``landscape-client``. The Landscape
+    client will only be installed if the key ``landscape`` is present in
+    config.
+
+    Landscape client configuration is given under the ``client`` key under the
+    main ``landscape`` config key. The config parameters are not interpreted by
+    cloud-init, but rather are converted into a ``ConfigObj``-formatted file
+    and written out to the ``[client]`` section in
+    ``/etc/landscape/client.conf``. The following default client config is
+    provided, but can be overridden:
+
+    .. code-block:: yaml
+
+      landscape:
+        client:
+          log_level: "info"
+          url: "https://landscape.canonical.com/message-system"
+          ping_url: "http://landscape.canonical.com/ping"
+          data_path: "/var/lib/landscape/client"
+
+    .. note::
+       See the Landscape documentation for client config keys.
+
+    .. note::
+       If ``tags`` is defined, its contents should be a string delimited with
+       a comma (",") rather than a list.
+  examples:
+  - comment: >
+      To discover additional supported client keys, run
+      ``man landscape-config``.
+
+      Example 1:
+    file: cc_landscape/example1.yaml
+  - comment: >
+      Example 2: Minimum viable config requires ``account_name`` and
+      ``computer_title``.
+    file: cc_landscape/example2.yaml
+  - comment: >
+      Example 3: To install ``landscape-client`` from a PPA, specify
+      ``apt.sources``.
+ file: cc_landscape/example3.yaml + name: Landscape + title: Install and configure Landscape client diff --git a/doc/module-docs/cc_landscape/example1.yaml b/doc/module-docs/cc_landscape/example1.yaml new file mode 100644 index 000000000..ae4312cf2 --- /dev/null +++ b/doc/module-docs/cc_landscape/example1.yaml @@ -0,0 +1,12 @@ +#cloud-config +landscape: + client: + url: https://landscape.canonical.com/message-system + ping_url: http://landscape.canonical.com/ping + data_path: /var/lib/landscape/client + http_proxy: http://my.proxy.com/foobar + https_proxy: https://my.proxy.com/foobar + tags: server,cloud + computer_title: footitle + registration_key: fookey + account_name: fooaccount diff --git a/doc/module-docs/cc_landscape/example2.yaml b/doc/module-docs/cc_landscape/example2.yaml new file mode 100644 index 000000000..771c41fc0 --- /dev/null +++ b/doc/module-docs/cc_landscape/example2.yaml @@ -0,0 +1,5 @@ +#cloud-config +landscape: + client: + computer_title: kiosk 1 + account_name: Joe's Biz diff --git a/doc/module-docs/cc_landscape/example3.yaml b/doc/module-docs/cc_landscape/example3.yaml new file mode 100644 index 000000000..1b5a38ebf --- /dev/null +++ b/doc/module-docs/cc_landscape/example3.yaml @@ -0,0 +1,9 @@ +#cloud-config +apt: + sources: + trunk-testing-ppa: + source: ppa:landscape/self-hosted-beta +landscape: + client: + account_name: myaccount + computer_title: himom diff --git a/doc/module-docs/cc_locale/data.yaml b/doc/module-docs/cc_locale/data.yaml new file mode 100644 index 000000000..d5c0961ff --- /dev/null +++ b/doc/module-docs/cc_locale/data.yaml @@ -0,0 +1,13 @@ +cc_locale: + description: | + Configure the system locale and apply it system-wide. By default, use the + locale specified by the datasource. + examples: + - comment: | + Example 1: Set the locale to ``"ar_AE"`` + file: cc_locale/example1.yaml + - comment: | + Example 2: Set the locale to ``"fr_CA"`` in ``/etc/alternate_path/locale`` + file: cc_locale/example2.yaml + name: Locale + title: Set system locale diff --git a/doc/module-docs/cc_locale/example1.yaml b/doc/module-docs/cc_locale/example1.yaml new file mode 100644 index 000000000..81cfeaaec --- /dev/null +++ b/doc/module-docs/cc_locale/example1.yaml @@ -0,0 +1,2 @@ +#cloud-config +locale: ar_AE diff --git a/doc/module-docs/cc_locale/example2.yaml b/doc/module-docs/cc_locale/example2.yaml new file mode 100644 index 000000000..5b70be781 --- /dev/null +++ b/doc/module-docs/cc_locale/example2.yaml @@ -0,0 +1,3 @@ +#cloud-config +locale: fr_CA +locale_configfile: /etc/alternate_path/locale diff --git a/doc/module-docs/cc_lxd/data.yaml b/doc/module-docs/cc_lxd/data.yaml new file mode 100644 index 000000000..5ca35c575 --- /dev/null +++ b/doc/module-docs/cc_lxd/data.yaml @@ -0,0 +1,34 @@ +cc_lxd: + description: | + This module configures LXD with user-specified options using ``lxd init``. + + - If ``lxd`` is not present on the system but LXD configuration is provided, + then ``lxd`` will be installed. + - If the selected storage backend userspace utility is not installed, it + will be installed. + - If network bridge configuration is provided, then ``lxd-bridge`` will be + configured accordingly. + examples: + - comment: | + Example 1: Simplest working directory-backed LXD configuration. + file: cc_lxd/example1.yaml + - comment: | + Example 2: ``lxd-init`` showcasing cloud-init's LXD config options. 
+ file: cc_lxd/example2.yaml + - comment: > + Example 3: For more complex non-interactive LXD configuration of + networks, storage pools, profiles, projects, clusters and core config, + ``lxd:preseed`` config will be passed as stdin to the command: + ``lxd init --preseed``. + + See the + `LXD non-interactive configuration `_ + or run ``lxd init --dump`` to see viable preseed YAML allowed. + + Preseed settings configuring the LXD daemon for HTTPS connections on + 192.168.1.1 port 9999, a nested profile which allows for LXD nesting on + containers and a limited project allowing for RBAC approach when defining + behavior for sub-projects. + file: cc_lxd/example3.yaml + name: LXD + title: Configure LXD with ``lxd init`` and (optionally) ``lxd-bridge`` diff --git a/doc/module-docs/cc_lxd/example1.yaml b/doc/module-docs/cc_lxd/example1.yaml new file mode 100644 index 000000000..3632555c0 --- /dev/null +++ b/doc/module-docs/cc_lxd/example1.yaml @@ -0,0 +1,4 @@ +#cloud-config +lxd: + init: + storage_backend: dir diff --git a/doc/module-docs/cc_lxd/example2.yaml b/doc/module-docs/cc_lxd/example2.yaml new file mode 100644 index 000000000..290705d45 --- /dev/null +++ b/doc/module-docs/cc_lxd/example2.yaml @@ -0,0 +1,22 @@ +#cloud-config +lxd: + init: + network_address: 0.0.0.0 + network_port: 8443 + storage_backend: zfs + storage_pool: datapool + storage_create_loop: 10 + bridge: + mode: new + mtu: 1500 + name: lxdbr0 + ipv4_address: 10.0.8.1 + ipv4_netmask: 24 + ipv4_dhcp_first: 10.0.8.2 + ipv4_dhcp_last: 10.0.8.3 + ipv4_dhcp_leases: 250 + ipv4_nat: true + ipv6_address: fd98:9e0:3744::1 + ipv6_netmask: 64 + ipv6_nat: true + domain: lxd diff --git a/doc/module-docs/cc_lxd/example3.yaml b/doc/module-docs/cc_lxd/example3.yaml new file mode 100644 index 000000000..ccbd73de7 --- /dev/null +++ b/doc/module-docs/cc_lxd/example3.yaml @@ -0,0 +1,62 @@ +#cloud-config +lxd: + preseed: | + config: + core.https_address: 192.168.1.1:9999 + networks: + - config: + ipv4.address: 10.42.42.1/24 + ipv4.nat: true + ipv6.address: fd42:4242:4242:4242::1/64 + ipv6.nat: true + description: "" + name: lxdbr0 + type: bridge + project: default + storage_pools: + - config: + size: 5GiB + source: /var/snap/lxd/common/lxd/disks/default.img + description: "" + name: default + driver: zfs + profiles: + - config: {} + description: Default LXD profile + devices: + eth0: + name: eth0 + network: lxdbr0 + type: nic + root: + path: / + pool: default + type: disk + name: default + - config: {} + security.nesting: true + devices: + eth0: + name: eth0 + network: lxdbr0 + type: nic + root: + path: / + pool: default + type: disk + name: nested + projects: + - config: + features.images: true + features.networks: true + features.profiles: true + features.storage.volumes: true + description: Default LXD project + name: default + - config: + features.images: false + features.networks: true + features.profiles: false + features.storage.volumes: false + description: Limited Access LXD project + name: limited diff --git a/doc/module-docs/cc_mcollective/data.yaml b/doc/module-docs/cc_mcollective/data.yaml new file mode 100644 index 000000000..cec844310 --- /dev/null +++ b/doc/module-docs/cc_mcollective/data.yaml @@ -0,0 +1,26 @@ +cc_mcollective: + description: | + This module installs, configures and starts MCollective. If the + ``mcollective`` key is present in config, then MCollective will be + installed and started. + + Configuration for ``mcollective`` can be specified in the ``conf`` key + under ``mcollective``. 
Each config value consists of a key-value pair and + will be written to ``/etc/mcollective/server.cfg``. The ``public-cert`` + and ``private-cert`` keys, if present in conf may be used to specify the + public and private certificates for MCollective. Their values will be + written to ``/etc/mcollective/ssl/server-public.pem`` and + ``/etc/mcollective/ssl/server-private.pem``. + + .. warning:: + The EC2 metadata service is a network service and thus is readable by + non-root users on the system (i.e., ``ec2metadata --user-data``). If + security is a concern, use ``include-once`` and SSL URLS. + examples: + - comment: > + Example 1: Provide server private and public key, and provide the + ``loglevel: debug`` and ``plugin.stomp.host: dbhost`` config settings in + ``/etc/mcollective/server.cfg:`` + file: cc_mcollective/example1.yaml + name: MCollective + title: Install, configure and start MCollective diff --git a/doc/module-docs/cc_mcollective/example1.yaml b/doc/module-docs/cc_mcollective/example1.yaml new file mode 100644 index 000000000..2557d7b9c --- /dev/null +++ b/doc/module-docs/cc_mcollective/example1.yaml @@ -0,0 +1,13 @@ +#cloud-config +mcollective: + conf: + loglevel: debug + plugin.stomp.host: dbhost + public-cert: | + -------BEGIN CERTIFICATE-------- + + -------END CERTIFICATE-------- + private-cert: | + -------BEGIN CERTIFICATE-------- + + -------END CERTIFICATE-------- diff --git a/doc/module-docs/cc_mounts/data.yaml b/doc/module-docs/cc_mounts/data.yaml new file mode 100644 index 000000000..751b301d5 --- /dev/null +++ b/doc/module-docs/cc_mounts/data.yaml @@ -0,0 +1,69 @@ +cc_mounts: + description: | + This module can add or remove mount points from ``/etc/fstab`` as well as + configure swap. The ``mounts`` config key takes a list of ``fstab`` entries + to add. Each entry is specified as a list of ``[ fs_spec, fs_file, + fs_vfstype, fs_mntops, fs-freq, fs_passno ]``. + + For more information on these options, consult the manual for + ``/etc/fstab``. When specifying the ``fs_spec``, if the device name starts + with one of ``xvd``, ``sd``, ``hd``, or ``vd``, the leading ``/dev`` may be + omitted. Any mounts that do not appear to either an attached block device + or network resource will be skipped with a log like "Ignoring nonexistent + mount ...". + + Cloud-init will attempt to add the following mount directives if available + and unconfigured in ``/etc/fstab``: + + .. code-block:: yaml + + mounts: + - ["ephemeral0", "/mnt", "auto", "defaults,nofail,x-systemd.after=cloud-init.service", "0", "2"] + - ["swap", "none", "swap", "sw", "0", "0"] + + In order to remove a previously-listed mount, an entry can be added to the + ``mounts`` list containing ``fs_spec`` for the device to be removed but no + mount point (i.e. ``[ swap ]`` or ``[ swap, null ]``). + + The ``mount_default_fields`` config key allows default values to be + specified for the fields in a ``mounts`` entry that are not specified, + aside from the ``fs_spec`` and the ``fs_file`` fields. If specified, this + must be a list containing 6 values. It defaults to: + + .. code-block:: yaml + + mount_default_fields: [none, none, "auto", "defaults,nofail,x-systemd.after=cloud-init.service", "0", "2"] + + Non-systemd init systems will vary in ``mount_default_fields``. + + Swap files can be configured by setting the path to the swap file to create + with ``filename``, the size of the swap file with ``size``, maximum size + of the swap file if using an ``size: auto`` with ``maxsize``. By default, + no swap file is created. + + .. 
note:: + If multiple mounts are specified, where a subsequent mount's mount point + is inside of a previously-declared mount's mount point, (i.e. the 1st + mount has a mount point of ``/abc`` and the 2nd mount has a mount point + of ``/abc/def``) then this will not work as expected -- ``cc_mounts`` + first creates the directories for all the mount points **before** it + starts to perform any mounts and so the sub-mount point directory will + not be created correctly inside the parent mount point. + + For systems using ``util-linux``'s ``mount`` program, this issue can be + worked around by specifying ``X-mount.mkdir`` as part of a ``fs_mntops`` + value for the subsequent mount entry. + examples: + - comment: > + Example 1: Mount ``ephemeral0`` with ``noexec`` flag, ``/dev/sdc`` with + ``mount_default_fields``, and ``/dev/xvdh`` with custom ``fs_passno "0"`` + to avoid ``fsck`` on the mount. + + Also provide an automatically-sized swap with a max size of 10485760 bytes. + file: cc_mounts/example1.yaml + - comment: > + Example 2: Create a 2 GB swap file at ``/swapfile`` using human-readable + values. + file: cc_mounts/example2.yaml + name: Mounts + title: Configure mount points and swap files diff --git a/doc/module-docs/cc_mounts/example1.yaml b/doc/module-docs/cc_mounts/example1.yaml new file mode 100644 index 000000000..31aa102a4 --- /dev/null +++ b/doc/module-docs/cc_mounts/example1.yaml @@ -0,0 +1,10 @@ +#cloud-config +mounts: +- [ /dev/ephemeral0, /mnt, auto, "defaults,noexec" ] +- [ sdc, /opt/data ] +- [ xvdh, /opt/data, auto, "defaults,nofail", "0", "0" ] +mount_default_fields: [None, None, auto, "defaults,nofail", "0", "2"] +swap: + filename: /my/swapfile + size: auto + maxsize: 10485760 diff --git a/doc/module-docs/cc_mounts/example2.yaml b/doc/module-docs/cc_mounts/example2.yaml new file mode 100644 index 000000000..b1eedfaf6 --- /dev/null +++ b/doc/module-docs/cc_mounts/example2.yaml @@ -0,0 +1,5 @@ +#cloud-config +swap: + filename: /swapfile + size: 2G + maxsize: 2G diff --git a/doc/module-docs/cc_ntp/data.yaml b/doc/module-docs/cc_ntp/data.yaml new file mode 100644 index 000000000..0ce461482 --- /dev/null +++ b/doc/module-docs/cc_ntp/data.yaml @@ -0,0 +1,26 @@ +cc_ntp: + description: | + Handle Network Time Protocol (NTP) configuration. If ``ntp`` is not + installed on the system and NTP configuration is specified, ``ntp`` will + be installed. + + If there is a default NTP config file in the image or one is present in the + distro's ``ntp`` package, it will be copied to a file with ``.dist`` + appended to the filename before any changes are made. + + A list of NTP pools and NTP servers can be provided under the ``ntp`` + config key. + + If no NTP ``servers`` or ``pools`` are provided, 4 pools will be used + in the format: + + ``{0-3}.{distro}.pool.ntp.org`` + examples: + - comment: | + Example 1: Override NTP with chrony configuration on Ubuntu. + file: cc_ntp/example1.yaml + - comment: | + Example 2: Provide a custom NTP client configuration. 
+ file: cc_ntp/example2.yaml + name: NTP + title: Enable and configure NTP diff --git a/doc/module-docs/cc_ntp/example1.yaml b/doc/module-docs/cc_ntp/example1.yaml new file mode 100644 index 000000000..0102ad337 --- /dev/null +++ b/doc/module-docs/cc_ntp/example1.yaml @@ -0,0 +1,4 @@ +#cloud-config +ntp: + enabled: true + ntp_client: chrony # Uses cloud-init default chrony configuration diff --git a/doc/module-docs/cc_ntp/example2.yaml b/doc/module-docs/cc_ntp/example2.yaml new file mode 100644 index 000000000..78a721dab --- /dev/null +++ b/doc/module-docs/cc_ntp/example2.yaml @@ -0,0 +1,40 @@ +#cloud-config +ntp: + enabled: true + ntp_client: myntpclient + config: + confpath: /etc/myntpclient/myntpclient.conf + check_exe: myntpclientd + packages: + - myntpclient + service_name: myntpclient + template: | + ## template:jinja + # My NTP Client config + {% if pools -%}# pools{% endif %} + {% for pool in pools -%} + pool {{pool}} iburst + {% endfor %} + {%- if servers %}# servers + {% endif %} + {% for server in servers -%} + server {{server}} iburst + {% endfor %} + {% if peers -%}# peers{% endif %} + {% for peer in peers -%} + peer {{peer}} + {% endfor %} + {% if allow -%}# allow{% endif %} + {% for cidr in allow -%} + allow {{cidr}} + {% endfor %} + pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org] + servers: + - ntp.server.local + - ntp.ubuntu.com + - 192.168.23.2 + allow: + - 192.168.23.0/32 + peers: + - km001 + - km002 diff --git a/doc/module-docs/cc_package_update_upgrade_install/data.yaml b/doc/module-docs/cc_package_update_upgrade_install/data.yaml new file mode 100644 index 000000000..121720ab1 --- /dev/null +++ b/doc/module-docs/cc_package_update_upgrade_install/data.yaml @@ -0,0 +1,13 @@ +cc_package_update_upgrade_install: + description: | + This module allows packages to be updated, upgraded or installed during + boot. If any packages are to be installed or an upgrade is to be performed + then the package cache will be updated first. If a package installation or + upgrade requires a reboot, then a reboot can be performed if + ``package_reboot_if_required`` is specified. + examples: + - comment: | + Example 1: + file: cc_package_update_upgrade_install/example1.yaml + name: Package Update Upgrade Install + title: Update, upgrade, and install packages diff --git a/doc/module-docs/cc_package_update_upgrade_install/example1.yaml b/doc/module-docs/cc_package_update_upgrade_install/example1.yaml new file mode 100644 index 000000000..9f6adba91 --- /dev/null +++ b/doc/module-docs/cc_package_update_upgrade_install/example1.yaml @@ -0,0 +1,13 @@ +#cloud-config +package_reboot_if_required: true +package_update: true +package_upgrade: true +packages: +- pwgen +- pastebinit +- [libpython3.8, 3.8.10-0ubuntu1~20.04.2] +- snap: + - certbot + - [juju, --edge] + - [lxd, --channel=5.15/stable] +- apt: [mg] diff --git a/doc/module-docs/cc_phone_home/data.yaml b/doc/module-docs/cc_phone_home/data.yaml new file mode 100644 index 000000000..f5af3f6bd --- /dev/null +++ b/doc/module-docs/cc_phone_home/data.yaml @@ -0,0 +1,40 @@ +cc_phone_home: + description: | + This module can be used to post data to a remote host after boot is + complete. If the post URL contains the string ``$INSTANCE_ID`` it will be + replaced with the ID of the current instance. + + Either all data can be posted, or a list of keys to post. 
+
+    Available keys are:
+
+    - ``pub_key_rsa``
+    - ``pub_key_ecdsa``
+    - ``pub_key_ed25519``
+    - ``instance_id``
+    - ``hostname``
+    - ``fqdn``
+
+    Data is sent as ``x-www-form-urlencoded`` arguments.
+
+    **Example HTTP POST**:
+
+    .. code-block:: http
+
+        POST / HTTP/1.1
+        Content-Length: 1337
+        User-Agent: Cloud-Init/21.4
+        Accept-Encoding: gzip, deflate
+        Accept: */*
+        Content-Type: application/x-www-form-urlencoded
+
+        pub_key_rsa=rsa_contents&pub_key_ecdsa=ecdsa_contents&pub_key_ed25519=ed25519_contents&instance_id=i-87018aed&hostname=myhost&fqdn=myhost.internal
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_phone_home/example1.yaml
+  - comment: |
+      Example 2:
+    file: cc_phone_home/example2.yaml
+  name: Phone Home
+  title: Post data to URL
diff --git a/doc/module-docs/cc_phone_home/example1.yaml b/doc/module-docs/cc_phone_home/example1.yaml
new file mode 100644
index 000000000..1278f497e
--- /dev/null
+++ b/doc/module-docs/cc_phone_home/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+phone_home: {post: all, url: 'http://example.com/$INSTANCE_ID/'}
diff --git a/doc/module-docs/cc_phone_home/example2.yaml b/doc/module-docs/cc_phone_home/example2.yaml
new file mode 100644
index 000000000..fe9ec638f
--- /dev/null
+++ b/doc/module-docs/cc_phone_home/example2.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+phone_home:
+  post: [pub_key_rsa, pub_key_ecdsa, pub_key_ed25519, instance_id, hostname, fqdn]
+  tries: 5
+  url: http://example.com/$INSTANCE_ID/
diff --git a/doc/module-docs/cc_power_state_change/data.yaml b/doc/module-docs/cc_power_state_change/data.yaml
new file mode 100644
index 000000000..2aed5081b
--- /dev/null
+++ b/doc/module-docs/cc_power_state_change/data.yaml
@@ -0,0 +1,28 @@
+cc_power_state_change:
+  description: |
+    This module handles shutdown/reboot after all config modules have been
+    run. By default it will take no action, and the system will keep running
+    unless a package installation/upgrade requires a system reboot (e.g.
+    installing a new kernel) and ``package_reboot_if_required`` is ``true``.
+
+    Using this module ensures that all modules that would be executed have
+    finished before the power state change is carried out. An example to
+    distinguish ``delay`` from ``timeout``:
+
+    If you set ``delay`` to 5 (5 minutes) and ``timeout`` to 120 (2
+    minutes), the maximum time until shutdown will be 7 minutes, though it
+    could be as soon as 5 minutes. Cloud-init will invoke ``shutdown +5``
+    after its process finishes, or when ``timeout`` seconds have elapsed.
+
+    .. note::
+       With Alpine Linux any message value specified is ignored as Alpine's
+       ``halt``, ``poweroff``, and ``reboot`` commands do not support
+       broadcasting a message.
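+
+    As a sketch of the ``delay``/``timeout`` interaction described above
+    (the values here are illustrative, not defaults):
+
+    .. code-block:: yaml
+
+        power_state:
+          mode: poweroff
+          delay: 5      # run "shutdown +5" (5 minutes) once cloud-init finishes
+          timeout: 120  # wait at most 120 seconds for cloud-init to finish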
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_power_state_change/example1.yaml
+  - comment: |
+      Example 2:
+    file: cc_power_state_change/example2.yaml
+  name: Power State Change
+  title: Change power state
diff --git a/doc/module-docs/cc_power_state_change/example1.yaml b/doc/module-docs/cc_power_state_change/example1.yaml
new file mode 100644
index 000000000..a45eab43b
--- /dev/null
+++ b/doc/module-docs/cc_power_state_change/example1.yaml
@@ -0,0 +1,7 @@
+#cloud-config
+power_state:
+  delay: now
+  mode: poweroff
+  message: Powering off
+  timeout: 2
+  condition: true
diff --git a/doc/module-docs/cc_power_state_change/example2.yaml b/doc/module-docs/cc_power_state_change/example2.yaml
new file mode 100644
index 000000000..af0ea04e5
--- /dev/null
+++ b/doc/module-docs/cc_power_state_change/example2.yaml
@@ -0,0 +1,6 @@
+#cloud-config
+power_state:
+  delay: 30
+  mode: reboot
+  message: Rebooting machine
+  condition: test -f /var/tmp/reboot_me
diff --git a/doc/module-docs/cc_puppet/data.yaml b/doc/module-docs/cc_puppet/data.yaml
new file mode 100644
index 000000000..47c5bb9eb
--- /dev/null
+++ b/doc/module-docs/cc_puppet/data.yaml
@@ -0,0 +1,28 @@
+cc_puppet:
+  description: |
+    This module handles Puppet installation and configuration. If the
+    ``puppet`` key does not exist in global configuration, no action will be
+    taken.
+
+    If a config entry for ``puppet`` is present, then by default the latest
+    version of Puppet will be installed. If the ``puppet`` config key exists
+    in the config archive, this module will attempt to start puppet even if
+    no installation was performed.
+
+    The module also provides keys for configuring the new Puppet 4 paths and
+    installing the ``puppet`` package from the
+    `puppetlabs repositories `_.
+
+    The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and
+    ``csr_attributes_path``. If unset, their values default to ones that
+    work with Puppet 3.x and with distributions that ship a modified
+    Puppet 4.x that uses the old paths.
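+
+    For instance, a minimal sketch that sets these keys explicitly (the
+    paths shown mirror the Puppet 3-era defaults used in this module's
+    examples):
+
+    .. code-block:: yaml
+
+        puppet:
+          package_name: puppet
+          conf_file: /etc/puppet/puppet.conf
+          ssl_dir: /var/lib/puppet/ssl
+          csr_attributes_path: /etc/puppet/csr_attributes.yaml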
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_puppet/example1.yaml
+  - comment: |
+      Example 2:
+    file: cc_puppet/example2.yaml
+  name: Puppet
+  title: Install, configure and start Puppet
diff --git a/doc/module-docs/cc_puppet/example1.yaml b/doc/module-docs/cc_puppet/example1.yaml
new file mode 100644
index 000000000..14bb40b42
--- /dev/null
+++ b/doc/module-docs/cc_puppet/example1.yaml
@@ -0,0 +1,38 @@
+#cloud-config
+puppet:
+  install: true
+  version: "7.7.0"
+  install_type: "aio"
+  collection: "puppet7"
+  aio_install_url: 'https://git.io/JBhoQ'
+  cleanup: true
+  conf_file: "/etc/puppet/puppet.conf"
+  ssl_dir: "/var/lib/puppet/ssl"
+  csr_attributes_path: "/etc/puppet/csr_attributes.yaml"
+  exec: true
+  exec_args: ['--test']
+  conf:
+    agent:
+      server: "puppetserver.example.org"
+      certname: "%i.%f"
+    ca_cert: |
+      -----BEGIN CERTIFICATE-----
+      MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+      Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+      MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+      b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+      1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+      qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+      T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+      BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+      SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+      +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+      hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+      -----END CERTIFICATE-----
+  csr_attributes:
+    custom_attributes:
+      1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
+    extension_requests:
+      pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
+      pp_image_name: my_ami_image
+      pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
diff --git a/doc/module-docs/cc_puppet/example2.yaml b/doc/module-docs/cc_puppet/example2.yaml
new file mode 100644
index 000000000..806c9177a
--- /dev/null
+++ b/doc/module-docs/cc_puppet/example2.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+puppet:
+  install_type: "packages"
+  package_name: "puppet"
+  exec: false
diff --git a/doc/module-docs/cc_resizefs/data.yaml b/doc/module-docs/cc_resizefs/data.yaml
new file mode 100644
index 000000000..909778c9a
--- /dev/null
+++ b/doc/module-docs/cc_resizefs/data.yaml
@@ -0,0 +1,25 @@
+cc_resizefs:
+  description: |
+    Resize a filesystem to use all available space on the partition. This
+    module is useful along with ``cc_growpart``, and will ensure that if the
+    root partition has been resized, the root filesystem will be resized
+    along with it.
+
+    By default, ``cc_resizefs`` will resize the root partition and will
+    block the boot process while the ``resize`` command is running.
+
+    Optionally, the resize operation can be performed in the background
+    while cloud-init continues running modules. This can be enabled by
+    setting ``resize_rootfs`` to ``noblock``.
+
+    This module can be disabled altogether by setting ``resize_rootfs`` to
+    ``false``.
+  examples:
+  - comment: |
+      Example 1: Disable the root filesystem resize operation.
+    file: cc_resizefs/example1.yaml
+  - comment: |
+      Example 2: Run the resize operation in the background.
+    file: cc_resizefs/example2.yaml
+  name: Resizefs
+  title: Resize filesystem
diff --git a/doc/module-docs/cc_resizefs/example1.yaml b/doc/module-docs/cc_resizefs/example1.yaml
new file mode 100644
index 000000000..92428b58e
--- /dev/null
+++ b/doc/module-docs/cc_resizefs/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+resize_rootfs: false
diff --git a/doc/module-docs/cc_resizefs/example2.yaml b/doc/module-docs/cc_resizefs/example2.yaml
new file mode 100644
index 000000000..dca563500
--- /dev/null
+++ b/doc/module-docs/cc_resizefs/example2.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+resize_rootfs: noblock
diff --git a/doc/module-docs/cc_resolv_conf/data.yaml b/doc/module-docs/cc_resolv_conf/data.yaml
new file mode 100644
index 000000000..f775c77ed
--- /dev/null
+++ b/doc/module-docs/cc_resolv_conf/data.yaml
@@ -0,0 +1,33 @@
+cc_resolv_conf:
+  description: |
+    You should not use this module unless manually editing
+    :file:`/etc/resolv.conf` is the correct way to manage nameserver
+    information on your operating system.
+
+    Many distros have moved away from manually editing ``resolv.conf``, so
+    please verify that this is the preferred nameserver management method
+    for your distro before using this module. Note that using
+    :ref:`network_config` is preferred, rather than using this module, when
+    possible.
+
+    This module is intended to manage ``resolv.conf`` in environments where
+    early configuration of ``resolv.conf`` is necessary for further
+    bootstrapping and/or where configuration management tools such as
+    Puppet or Chef own DNS configuration.
+
+    When using a :ref:`datasource_config_drive` and a RHEL-like system,
+    ``resolv.conf`` will also be managed automatically due to the available
+    information provided for DNS servers in the :ref:`network_config_v2`
+    format. For those who wish to have different settings, use this module.
+
+    For the ``resolv_conf`` section to be applied, ``manage_resolv_conf``
+    must be set to ``true``.
+
+    .. note::
+       For Red Hat with ``sysconfig``, be sure to set ``PEERDNS=no`` for all
+       DHCP-enabled NICs.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_resolv_conf/example1.yaml
+  name: Resolv Conf
+  title: Configure ``resolv.conf``
diff --git a/doc/module-docs/cc_resolv_conf/example1.yaml b/doc/module-docs/cc_resolv_conf/example1.yaml
new file mode 100644
index 000000000..3c990cd11
--- /dev/null
+++ b/doc/module-docs/cc_resolv_conf/example1.yaml
@@ -0,0 +1,8 @@
+#cloud-config
+manage_resolv_conf: true
+resolv_conf:
+  domain: example.com
+  nameservers: [8.8.8.8, 8.8.4.4]
+  options: {rotate: true, timeout: 1}
+  searchdomains: [foo.example.com, bar.example.com]
+  sortlist: [10.0.0.1/255, 10.0.0.2]
diff --git a/doc/module-docs/cc_rh_subscription/data.yaml b/doc/module-docs/cc_rh_subscription/data.yaml
new file mode 100644
index 000000000..e87a95afb
--- /dev/null
+++ b/doc/module-docs/cc_rh_subscription/data.yaml
@@ -0,0 +1,25 @@
+cc_rh_subscription:
+  description: |
+    Register a Red Hat system, either by username and password **or** by
+    activation key and org.
+
+    Following a successful registration, you can:
+
+    - auto-attach subscriptions
+    - set the service level
+    - add subscriptions based on pool ID
+    - enable/disable yum repositories based on repo ID
+    - alter the ``rhsm-baseurl`` and ``server-hostname`` in
+      ``/etc/rhsm/rhsm.conf``.
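+
+    For instance, a minimal sketch registering by username and password,
+    then auto-attaching with a service level (the credentials shown are
+    illustrative):
+
+    .. code-block:: yaml
+
+        rh_subscription:
+          username: joe@foo.bar
+          password: '1234abcd'   # illustrative; quote passwords with symbols
+          auto-attach: true
+          service-level: self-support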
+ examples: + - comment: | + Example 1: + file: cc_rh_subscription/example1.yaml + - comment: | + Example 2: + file: cc_rh_subscription/example2.yaml + - comment: | + Example 3: + file: cc_rh_subscription/example3.yaml + name: Red Hat Subscription + title: Register Red Hat Enterprise Linux-based system diff --git a/doc/module-docs/cc_rh_subscription/example1.yaml b/doc/module-docs/cc_rh_subscription/example1.yaml new file mode 100644 index 000000000..bdb44887c --- /dev/null +++ b/doc/module-docs/cc_rh_subscription/example1.yaml @@ -0,0 +1,5 @@ +#cloud-config +rh_subscription: + username: joe@foo.bar + ## Quote your password if it has symbols to be safe + password: '1234abcd' diff --git a/doc/module-docs/cc_rh_subscription/example2.yaml b/doc/module-docs/cc_rh_subscription/example2.yaml new file mode 100644 index 000000000..72328f938 --- /dev/null +++ b/doc/module-docs/cc_rh_subscription/example2.yaml @@ -0,0 +1,4 @@ +#cloud-config +rh_subscription: + activation-key: foobar + org: "ABC" diff --git a/doc/module-docs/cc_rh_subscription/example3.yaml b/doc/module-docs/cc_rh_subscription/example3.yaml new file mode 100644 index 000000000..a8b741ccd --- /dev/null +++ b/doc/module-docs/cc_rh_subscription/example3.yaml @@ -0,0 +1,19 @@ +#cloud-config +rh_subscription: + activation-key: foobar + org: 12345 + auto-attach: true + service-level: self-support + add-pool: + - 1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a + - 2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b + enable-repo: + - repo-id-to-enable + - other-repo-id-to-enable + disable-repo: + - repo-id-to-disable + - other-repo-id-to-disable + # Alter the baseurl in /etc/rhsm/rhsm.conf + rhsm-baseurl: http://url + # Alter the server hostname in /etc/rhsm/rhsm.conf + server-hostname: foo.bar.com diff --git a/doc/module-docs/cc_rsyslog/data.yaml b/doc/module-docs/cc_rsyslog/data.yaml new file mode 100644 index 000000000..2c045499d --- /dev/null +++ b/doc/module-docs/cc_rsyslog/data.yaml @@ -0,0 +1,28 @@ +cc_rsyslog: + description: | + This module configures remote system logging using rsyslog. + + Configuration for remote servers can be specified in ``configs``, but for + convenience it can be specified as key-value pairs in ``remotes``. + + This module can install rsyslog if not already present on the system using + the ``install_rsyslog``, ``packages``, and ``check_exe`` options. + Installation may not work on systems where this module runs before + networking is up. + + .. note:: + On BSD, cloud-init will attempt to disable and stop the base system + syslogd. This may fail on a first run. We recommend creating images + with ``service syslogd disable``. + examples: + - comment: | + Example 1: + file: cc_rsyslog/example1.yaml + - comment: | + Example 2: + file: cc_rsyslog/example2.yaml + - comment: | + Example 3: Default (no) configuration with package installation on FreeBSD. 
+    file: cc_rsyslog/example3.yaml
+  name: Rsyslog
+  title: Configure system logging via rsyslog
diff --git a/doc/module-docs/cc_rsyslog/example1.yaml b/doc/module-docs/cc_rsyslog/example1.yaml
new file mode 100644
index 000000000..8abbc1eeb
--- /dev/null
+++ b/doc/module-docs/cc_rsyslog/example1.yaml
@@ -0,0 +1,4 @@
+#cloud-config
+rsyslog:
+  remotes: {juju: 10.0.4.1, maas: 192.168.1.1}
+  service_reload_command: auto
diff --git a/doc/module-docs/cc_rsyslog/example2.yaml b/doc/module-docs/cc_rsyslog/example2.yaml
new file mode 100644
index 000000000..a33c85486
--- /dev/null
+++ b/doc/module-docs/cc_rsyslog/example2.yaml
@@ -0,0 +1,12 @@
+#cloud-config
+rsyslog:
+  config_dir: /opt/etc/rsyslog.d
+  config_filename: 99-late-cloud-config.conf
+  configs:
+  - '*.* @@192.158.1.1'
+  - {content: '*.* @@192.0.2.1:10514', filename: 01-example.conf}
+  - {content: '*.* @@syslogd.example.com
+
+    '}
+  remotes: {juju: 10.0.4.1, maas: 192.168.1.1}
+  service_reload_command: [your, syslog, restart, command]
diff --git a/doc/module-docs/cc_rsyslog/example3.yaml b/doc/module-docs/cc_rsyslog/example3.yaml
new file mode 100644
index 000000000..bf21f8bd0
--- /dev/null
+++ b/doc/module-docs/cc_rsyslog/example3.yaml
@@ -0,0 +1,6 @@
+#cloud-config
+rsyslog:
+  check_exe: rsyslogd
+  config_dir: /usr/local/etc/rsyslog.d
+  install_rsyslog: true
+  packages: [rsyslogd]
diff --git a/doc/module-docs/cc_runcmd/data.yaml b/doc/module-docs/cc_runcmd/data.yaml
new file mode 100644
index 000000000..54915fd73
--- /dev/null
+++ b/doc/module-docs/cc_runcmd/data.yaml
@@ -0,0 +1,27 @@
+cc_runcmd:
+  description: |
+    Run arbitrary commands at an ``rc.local``-like time frame with output to
+    the console. Each item can be either a list or a string. The item type
+    affects how it is executed:
+
+    - If the item is a string, it will be interpreted by ``sh``.
+    - If the item is a list, the items will be executed as if passed to
+      ``execve(3)`` (with the first argument as the command).
+
+    The ``runcmd`` module only writes the script to be run later. The module
+    that actually runs the script is ``scripts_user`` in the
+    :ref:`Final boot stage `.
+
+    .. note::
+       All commands must be proper YAML, so you must quote any characters
+       YAML would eat (":" can be problematic).
+
+    .. note::
+       When writing files, do not use the ``/tmp`` directory as it races
+       with ``systemd-tmpfiles-clean`` (LP: #1707222). Use ``/run/somedir``
+       instead.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_runcmd/example1.yaml
+  name: Runcmd
+  title: Run arbitrary commands
diff --git a/doc/module-docs/cc_runcmd/example1.yaml b/doc/module-docs/cc_runcmd/example1.yaml
new file mode 100644
index 000000000..03812f926
--- /dev/null
+++ b/doc/module-docs/cc_runcmd/example1.yaml
@@ -0,0 +1,7 @@
+#cloud-config
+runcmd:
+- [ls, -l, /]
+- [sh, -xc, 'echo $(date) '': hello world!''']
+- [sh, -c, echo "=========hello world'========="]
+- ls -l /root
+- [wget, 'http://example.org', -O, /tmp/index.html]
diff --git a/doc/module-docs/cc_salt_minion/data.yaml b/doc/module-docs/cc_salt_minion/data.yaml
new file mode 100644
index 000000000..d0003c2f7
--- /dev/null
+++ b/doc/module-docs/cc_salt_minion/data.yaml
@@ -0,0 +1,24 @@
+cc_salt_minion:
+  description: |
+    This module installs, configures and starts Salt Minion. If the
+    ``salt_minion`` key is present in the config parts, then Salt Minion
+    will be installed and started.
+
+    Configuration for Salt Minion can be specified in the ``conf`` key under
+    ``salt_minion``. Any config values present there will be written to
+    ``/etc/salt/minion``.
+    The public and private keys to use for Salt Minion can be specified
+    with ``public_key`` and ``private_key`` respectively.
+
+    If you have a custom package name, service name, or config directory,
+    you can specify them with ``pkg_name``, ``service_name``, and
+    ``config_dir`` respectively.
+
+    Salt keys can be manually generated by ``salt-key --gen-keys=GEN_KEYS``,
+    where ``GEN_KEYS`` is the name of the keypair, e.g. ``minion``. The
+    keypair will be copied to ``/etc/salt/pki`` on the Minion instance.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_salt_minion/example1.yaml
+  name: Salt Minion
+  title: Set up and run Salt Minion
diff --git a/doc/module-docs/cc_salt_minion/example1.yaml b/doc/module-docs/cc_salt_minion/example1.yaml
new file mode 100644
index 000000000..f4bf6c961
--- /dev/null
+++ b/doc/module-docs/cc_salt_minion/example1.yaml
@@ -0,0 +1,27 @@
+#cloud-config
+salt_minion:
+  conf:
+    file_client: local
+    fileserver_backend: [gitfs]
+    gitfs_remotes: ['https://github.com/_user_/_repo_.git']
+    master: salt.example.com
+  config_dir: /etc/salt
+  grains:
+    role: [web]
+  pkg_name: salt-minion
+  pki_dir: /etc/salt/pki/minion
+  private_key: '------BEGIN PRIVATE KEY------
+
+
+
+    ------END PRIVATE KEY-------
+
+    '
+  public_key: '------BEGIN PUBLIC KEY-------
+
+
+
+    ------END PUBLIC KEY-------
+
+    '
+  service_name: salt-minion
diff --git a/doc/module-docs/cc_scripts_per_boot/data.yaml b/doc/module-docs/cc_scripts_per_boot/data.yaml
new file mode 100644
index 000000000..16ed96fbc
--- /dev/null
+++ b/doc/module-docs/cc_scripts_per_boot/data.yaml
@@ -0,0 +1,8 @@
+cc_scripts_per_boot:
+  description: |
+    Any scripts in the ``scripts/per-boot`` directory on the datasource will
+    be run every time the system boots. Scripts will be run in alphabetical
+    order. This module does not accept any config keys.
+  examples: []
+  name: Scripts Per Boot
+  title: Run per-boot scripts
diff --git a/doc/module-docs/cc_scripts_per_instance/data.yaml b/doc/module-docs/cc_scripts_per_instance/data.yaml
new file mode 100644
index 000000000..8d3de0663
--- /dev/null
+++ b/doc/module-docs/cc_scripts_per_instance/data.yaml
@@ -0,0 +1,11 @@
+cc_scripts_per_instance:
+  description: |
+    Any scripts in the ``scripts/per-instance`` directory on the datasource
+    will be run when a new instance is first booted. Scripts will be run in
+    alphabetical order. This module does not accept any config keys.
+
+    Some cloud platforms change ``instance-id`` if a significant change was
+    made to the system. As a result, per-instance scripts will run again.
+  examples: []
+  name: Scripts Per Instance
+  title: Run per-instance scripts
diff --git a/doc/module-docs/cc_scripts_per_once/data.yaml b/doc/module-docs/cc_scripts_per_once/data.yaml
new file mode 100644
index 000000000..6f9f2ad57
--- /dev/null
+++ b/doc/module-docs/cc_scripts_per_once/data.yaml
@@ -0,0 +1,10 @@
+cc_scripts_per_once:
+  description: |
+    Any scripts in the ``scripts/per-once`` directory on the datasource will
+    be run only once. Changes to the instance will not force a re-run. The
+    only way to re-run these scripts is to run the ``clean`` subcommand and
+    reboot. Scripts will be run in alphabetical order. This module does not
+    accept any config keys.
+  examples: []
+  name: Scripts Per Once
+  title: Run one-time scripts
diff --git a/doc/module-docs/cc_scripts_user/data.yaml b/doc/module-docs/cc_scripts_user/data.yaml
new file mode 100644
index 000000000..120cc2e07
--- /dev/null
+++ b/doc/module-docs/cc_scripts_user/data.yaml
@@ -0,0 +1,10 @@
+cc_scripts_user:
+  description: |
+    This module runs all user scripts present in the ``scripts`` directory
+    in the instance configuration. Any cloud-config parts with a ``#!`` will
+    be treated as a script and run. Scripts specified as cloud-config parts
+    will be run in the order they are specified in the configuration. This
+    module does not accept any config keys.
+  examples: []
+  name: Scripts User
+  title: Run user scripts
diff --git a/doc/module-docs/cc_scripts_vendor/data.yaml b/doc/module-docs/cc_scripts_vendor/data.yaml
new file mode 100644
index 000000000..51ab8337c
--- /dev/null
+++ b/doc/module-docs/cc_scripts_vendor/data.yaml
@@ -0,0 +1,19 @@
+cc_scripts_vendor:
+  description: |
+    On select datasources, vendors have a channel for the consumption of
+    all supported user data types, called vendor data. Any scripts in the
+    ``scripts/vendor`` directory in the datasource will be run when a new
+    instance is first booted. Scripts will be run in alphabetical order.
+    This module allows control over the execution of vendor data.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_scripts_vendor/example1.yaml
+  - comment: |
+      Example 2:
+    file: cc_scripts_vendor/example2.yaml
+  - comment: |
+      Example 3: Vendor data will not be processed.
+    file: cc_scripts_vendor/example3.yaml
+  name: Scripts Vendor
+  title: Run vendor scripts
diff --git a/doc/module-docs/cc_scripts_vendor/example1.yaml b/doc/module-docs/cc_scripts_vendor/example1.yaml
new file mode 100644
index 000000000..a6e0d7265
--- /dev/null
+++ b/doc/module-docs/cc_scripts_vendor/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+vendor_data: {enabled: true, prefix: /usr/bin/ltrace}
diff --git a/doc/module-docs/cc_scripts_vendor/example2.yaml b/doc/module-docs/cc_scripts_vendor/example2.yaml
new file mode 100644
index 000000000..342739602
--- /dev/null
+++ b/doc/module-docs/cc_scripts_vendor/example2.yaml
@@ -0,0 +1,4 @@
+#cloud-config
+vendor_data:
+  enabled: true
+  prefix: [timeout, 30]
diff --git a/doc/module-docs/cc_scripts_vendor/example3.yaml b/doc/module-docs/cc_scripts_vendor/example3.yaml
new file mode 100644
index 000000000..97651375b
--- /dev/null
+++ b/doc/module-docs/cc_scripts_vendor/example3.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+vendor_data: {enabled: false}
diff --git a/doc/module-docs/cc_seed_random/data.yaml b/doc/module-docs/cc_seed_random/data.yaml
new file mode 100644
index 000000000..f16383ce9
--- /dev/null
+++ b/doc/module-docs/cc_seed_random/data.yaml
@@ -0,0 +1,27 @@
+cc_seed_random:
+  description: |
+    All cloud instances started from the same image will produce similar
+    data when they are first booted, as they are all starting with the same
+    seed for the kernel's entropy keyring. To avoid this, random seed data
+    can be provided to the instance, either as a string or by specifying a
+    command to run to generate the data.
+
+    Configuration for this module is under the ``random_seed`` config key.
+    If the cloud provides its own random seed data, it will be appended to
+    ``data`` before it is written to ``file``.
+
+    If the ``command`` key is specified, the given command will be executed.
+    This will happen after ``file`` has been populated.
+    That command's environment will contain the value of the ``file`` key
+    as ``RANDOM_SEED_FILE``. If a command is specified that cannot be run,
+    no error will be reported unless ``command_required`` is set to
+    ``true``.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_seed_random/example1.yaml
+  - comment: >
+      Example 2: Use ``pollinate`` to gather data from a remote entropy
+      server and write it to ``/dev/urandom``:
+    file: cc_seed_random/example2.yaml
+  name: Seed Random
+  title: Provide random seed data
diff --git a/doc/module-docs/cc_seed_random/example1.yaml b/doc/module-docs/cc_seed_random/example1.yaml
new file mode 100644
index 000000000..41c54e5a0
--- /dev/null
+++ b/doc/module-docs/cc_seed_random/example1.yaml
@@ -0,0 +1,7 @@
+#cloud-config
+random_seed:
+  command: [sh, -c, dd if=/dev/urandom of=$RANDOM_SEED_FILE]
+  command_required: true
+  data: my random string
+  encoding: raw
+  file: /dev/urandom
diff --git a/doc/module-docs/cc_seed_random/example2.yaml b/doc/module-docs/cc_seed_random/example2.yaml
new file mode 100644
index 000000000..9211c690d
--- /dev/null
+++ b/doc/module-docs/cc_seed_random/example2.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+random_seed:
+  command: [pollinate, '--server=http://local.pollinate.server']
+  command_required: true
+  file: /dev/urandom
diff --git a/doc/module-docs/cc_set_hostname/data.yaml b/doc/module-docs/cc_set_hostname/data.yaml
new file mode 100644
index 000000000..8e25af6e1
--- /dev/null
+++ b/doc/module-docs/cc_set_hostname/data.yaml
@@ -0,0 +1,41 @@
+cc_set_hostname:
+  description: |
+    This module handles setting the system hostname and fully qualified
+    domain name (FQDN). If ``preserve_hostname`` is set, then the hostname
+    will not be altered.
+
+    A hostname and FQDN can be provided by specifying a full domain name
+    under the ``fqdn`` key. Alternatively, a hostname can be specified using
+    the ``hostname`` key, and the FQDN of the cloud will be used. If an FQDN
+    is specified with the ``hostname`` key, it will be handled properly,
+    although it is better to use the ``fqdn`` config key. If both ``fqdn``
+    and ``hostname`` are set, then ``prefer_fqdn_over_hostname`` will force
+    use of the FQDN in all distros when true, and when false it will force
+    the short hostname. Otherwise, the hostname to use is distro-dependent.
+
+    .. note::
+       We strip a trailing ``.`` from the FQDN if it is present, as it
+       causes problems for a lot of tools if left in place.
+
+    This module will run in the init-local stage before networking is
+    configured if the hostname is set by metadata or user data on the local
+    system.
+
+    This will occur on datasources like NoCloud and OVF, where metadata and
+    user data are available locally. This ensures that the desired hostname
+    is applied before any DHCP requests are performed on these platforms,
+    where dynamic DNS is based on the initial hostname.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_set_hostname/example1.yaml
+  - comment: |
+      Example 2:
+    file: cc_set_hostname/example2.yaml
+  - comment: >
+      Example 3: On a machine without an ``/etc/hostname`` file, don't
+      create it. In most clouds, this will result in a DHCP-configured
+      hostname provided by the cloud.
+    file: cc_set_hostname/example3.yaml
+  name: Set Hostname
+  title: Set hostname and FQDN
diff --git a/doc/module-docs/cc_set_hostname/example1.yaml b/doc/module-docs/cc_set_hostname/example1.yaml
new file mode 100644
index 000000000..6edec76ad
--- /dev/null
+++ b/doc/module-docs/cc_set_hostname/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+preserve_hostname: true
diff --git a/doc/module-docs/cc_set_hostname/example2.yaml b/doc/module-docs/cc_set_hostname/example2.yaml
new file mode 100644
index 000000000..1bc1e2e7e
--- /dev/null
+++ b/doc/module-docs/cc_set_hostname/example2.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+hostname: myhost
+create_hostname_file: true
+fqdn: myhost.example.com
+prefer_fqdn_over_hostname: true
diff --git a/doc/module-docs/cc_set_hostname/example3.yaml b/doc/module-docs/cc_set_hostname/example3.yaml
new file mode 100644
index 000000000..785e6167d
--- /dev/null
+++ b/doc/module-docs/cc_set_hostname/example3.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+create_hostname_file: false
diff --git a/doc/module-docs/cc_set_passwords/data.yaml b/doc/module-docs/cc_set_passwords/data.yaml
new file mode 100644
index 000000000..dae5b5d59
--- /dev/null
+++ b/doc/module-docs/cc_set_passwords/data.yaml
@@ -0,0 +1,40 @@
+cc_set_passwords:
+  description: |
+    This module consumes three top-level config keys: ``ssh_pwauth``,
+    ``chpasswd`` and ``password``.
+
+    The ``ssh_pwauth`` config key determines whether or not sshd will be
+    configured to accept password authentication.
+
+    The ``chpasswd`` config key accepts a dictionary containing either (or
+    both) of ``users`` and ``expire``.
+
+    - The ``users`` key is used to assign a password to a corresponding
+      pre-existing user.
+    - The ``expire`` key is used to set whether to expire all user
+      passwords specified by this module, such that a password will need to
+      be reset on the user's next login.
+
+    .. note::
+       Prior to cloud-init 22.3, the ``expire`` key only applies to plain
+       text (including ``RANDOM``) passwords. Post-22.3, the ``expire`` key
+       applies to both plain text and hashed passwords.
+
+    The ``password`` config key is used to set the default user's password.
+    It is ignored if the ``chpasswd`` ``users`` key is used. Note that the
+    ``list`` keyword is deprecated in favor of ``users``.
+  examples:
+  - comment: |
+      Example 1: Set a default password, to be changed at first login.
+    file: cc_set_passwords/example1.yaml
+  - comment: |
+      Example 2:
+
+      - Disable SSH password authentication.
+      - Don't require users to change their passwords on next login.
+      - Set the password for user1 to be 'password1' (OS does hashing).
+      - Set the password for user2 to a pre-hashed password.
+      - Set the password for user3 to be a randomly generated password,
+        which will be written to the system console.
+    file: cc_set_passwords/example2.yaml
+  name: Set Passwords
+  title: Set user passwords and enable/disable SSH password auth
diff --git a/doc/module-docs/cc_set_passwords/example1.yaml b/doc/module-docs/cc_set_passwords/example1.yaml
new file mode 100644
index 000000000..487424191
--- /dev/null
+++ b/doc/module-docs/cc_set_passwords/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+{password: password1, ssh_pwauth: true}
diff --git a/doc/module-docs/cc_set_passwords/example2.yaml b/doc/module-docs/cc_set_passwords/example2.yaml
new file mode 100644
index 000000000..98ce8b5e8
--- /dev/null
+++ b/doc/module-docs/cc_set_passwords/example2.yaml
@@ -0,0 +1,8 @@
+#cloud-config
+chpasswd:
+  expire: false
+  users:
+  - {name: user1, password: password1, type: text}
+  - {name: user2, password: $6$rounds=4096$5DJ8a9WMTEzIo5J4$Yms6imfeBvf3Yfu84mQBerh18l7OR1Wm1BJXZqFSpJ6BVas0AYJqIjP7czkOaAZHZi1kxQ5Y1IhgWN8K9NgxR1}
+  - {name: user3, type: RANDOM}
+ssh_pwauth: false
diff --git a/doc/module-docs/cc_snap/data.yaml b/doc/module-docs/cc_snap/data.yaml
new file mode 100644
index 000000000..c3ae845c5
--- /dev/null
+++ b/doc/module-docs/cc_snap/data.yaml
@@ -0,0 +1,42 @@
+cc_snap:
+  description: |
+    This module provides a simple configuration namespace in cloud-init for
+    setting up snapd and installing snaps.
+
+    Both ``assertions`` and ``commands`` values can be either a dictionary
+    or a list. If these configs are provided as a dictionary, the keys are
+    only used to order the execution of the assertions or commands, and the
+    dictionary is merged with any snap configuration provided in vendor
+    data. If a list is provided by the user instead of a dict, any vendor
+    data snap configuration is ignored.
+
+    The ``assertions`` configuration option is a dictionary or list of
+    properly-signed snap assertions, which will run before any snap
+    commands. They will be added to snapd's ``assertion`` database by
+    invoking ``snap ack``.
+
+    Snap ``commands`` is a dictionary or list of individual snap commands
+    to run on the target system. These commands can be used to create snap
+    users, install snaps, and provide snap configuration.
+
+    .. note::
+       If 'side-loading' private/unpublished snaps on an instance, it is
+       best to create a snap seed directory and ``seed.yaml`` manifest in
+       ``/var/lib/snapd/seed/``, which snapd automatically installs on
+       startup.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_snap/example1.yaml
+  - comment: >
+      Example 2: For convenience, the ``snap`` command can be omitted when
+      specifying commands as a list - ``snap`` will be automatically
+      prepended. The following commands are all equivalent:
+    file: cc_snap/example2.yaml
+  - comment: |
+      Example 3: You can use a list of commands.
+    file: cc_snap/example3.yaml
+  - comment: |
+      Example 4: You can also use a list of assertions.
+    file: cc_snap/example4.yaml
+  name: Snap
+  title: Install, configure and manage snapd and snap packages
diff --git a/doc/module-docs/cc_snap/example1.yaml b/doc/module-docs/cc_snap/example1.yaml
new file mode 100644
index 000000000..a65072fdb
--- /dev/null
+++ b/doc/module-docs/cc_snap/example1.yaml
@@ -0,0 +1,11 @@
+#cloud-config
+snap:
+  assertions:
+    00: |
+      signed_assertion_blob_here
+    02: |
+      signed_assertion_blob_here
+  commands:
+    00: snap create-user --sudoer --known @mydomain.com
+    01: snap install canonical-livepatch
+    02: canonical-livepatch enable
diff --git a/doc/module-docs/cc_snap/example2.yaml b/doc/module-docs/cc_snap/example2.yaml
new file mode 100644
index 000000000..615c7c741
--- /dev/null
+++ b/doc/module-docs/cc_snap/example2.yaml
@@ -0,0 +1,7 @@
+#cloud-config
+snap:
+  commands:
+    0: [install, vlc]
+    1: [snap, install, vlc]
+    2: snap install vlc
+    3: snap install vlc
diff --git a/doc/module-docs/cc_snap/example3.yaml b/doc/module-docs/cc_snap/example3.yaml
new file mode 100644
index 000000000..0aeb0f6ae
--- /dev/null
+++ b/doc/module-docs/cc_snap/example3.yaml
@@ -0,0 +1,7 @@
+#cloud-config
+snap:
+  commands:
+  - [install, vlc]
+  - [snap, install, vlc]
+  - snap install vlc
+  - snap install vlc
diff --git a/doc/module-docs/cc_snap/example4.yaml b/doc/module-docs/cc_snap/example4.yaml
new file mode 100644
index 000000000..5f7ed5299
--- /dev/null
+++ b/doc/module-docs/cc_snap/example4.yaml
@@ -0,0 +1,6 @@
+#cloud-config
+snap:
+  assertions:
+  - signed_assertion_blob_here
+  - |
+    signed_assertion_blob_here
diff --git a/doc/module-docs/cc_spacewalk/data.yaml b/doc/module-docs/cc_spacewalk/data.yaml
new file mode 100644
index 000000000..b7e556f41
--- /dev/null
+++ b/doc/module-docs/cc_spacewalk/data.yaml
@@ -0,0 +1,16 @@
+cc_spacewalk:
+  description: |
+    This module installs Spacewalk and applies basic configuration. If the
+    Spacewalk config key is present, Spacewalk will be installed. The server
+    to connect to after installation must be provided in the ``server`` key
+    in the Spacewalk configuration. A proxy to connect through and an
+    activation key may optionally be specified.
+
+    For more details about Spacewalk, see the
+    `Fedora documentation `_.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_spacewalk/example1.yaml
+  name: Spacewalk
+  title: Install and configure Spacewalk
diff --git a/doc/module-docs/cc_spacewalk/example1.yaml b/doc/module-docs/cc_spacewalk/example1.yaml
new file mode 100644
index 000000000..810c9fdbf
--- /dev/null
+++ b/doc/module-docs/cc_spacewalk/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+spacewalk: {activation_key: , proxy: , server: }
diff --git a/doc/module-docs/cc_ssh/data.yaml b/doc/module-docs/cc_ssh/data.yaml
new file mode 100644
index 000000000..ccb21375b
--- /dev/null
+++ b/doc/module-docs/cc_ssh/data.yaml
@@ -0,0 +1,94 @@
+cc_ssh:
+  description: |
+    This module handles most configuration for SSH, covering both host and
+    authorized SSH keys.
+
+    **Authorized keys**
+
+    Authorized keys are a list of public SSH keys that are allowed to
+    connect to a user account on a system. They are stored in
+    ``.ssh/authorized_keys`` in that account's home directory. Authorized
+    keys for the default user defined in ``users`` can be specified using
+    ``ssh_authorized_keys``. Keys should be specified as a list of public
+    keys.
+
+    .. note::
+       See the ``cc_set_passwords`` module documentation to enable/disable
+       SSH password authentication.
+
+    Root login can be enabled/disabled using the ``disable_root`` config
+    key.
+    Root login options can be manually specified with
+    ``disable_root_opts``.
+
+    Supported public key types for the ``ssh_authorized_keys`` are:
+
+    - rsa
+    - ecdsa
+    - ed25519
+    - ecdsa-sha2-nistp256-cert-v01@openssh.com
+    - ecdsa-sha2-nistp256
+    - ecdsa-sha2-nistp384-cert-v01@openssh.com
+    - ecdsa-sha2-nistp384
+    - ecdsa-sha2-nistp521-cert-v01@openssh.com
+    - ecdsa-sha2-nistp521
+    - sk-ecdsa-sha2-nistp256-cert-v01@openssh.com
+    - sk-ecdsa-sha2-nistp256@openssh.com
+    - sk-ssh-ed25519-cert-v01@openssh.com
+    - sk-ssh-ed25519@openssh.com
+    - ssh-ed25519-cert-v01@openssh.com
+    - ssh-ed25519
+    - ssh-rsa-cert-v01@openssh.com
+    - ssh-rsa
+    - ssh-xmss-cert-v01@openssh.com
+    - ssh-xmss@openssh.com
+
+    .. note::
+       This list has been filtered from the supported key types of the
+       `OpenSSH `_
+       source, with the ``sigonly`` keys removed. See ``ssh_util`` for more
+       information.
+
+       ``rsa``, ``ecdsa`` and ``ed25519`` are added for legacy support, as
+       they are valid public keys in some older distros. They may be
+       removed in the future when support for the older distros is dropped.
+
+    **Host keys**
+
+    Host keys are for authenticating a specific instance. Many images have
+    default host SSH keys, which can be removed using ``ssh_deletekeys``.
+
+    Host keys can be added using the ``ssh_keys`` configuration key.
+
+    When host keys are generated, the output of the ``ssh-keygen``
+    command(s) can be suppressed on the console by setting the
+    ``ssh_quiet_keygen`` configuration key.
+
+    .. note::
+       When specifying private host keys in cloud-config, take care to
+       ensure that communication between the data source and the instance
+       is secure.
+
+    If no host keys are specified using ``ssh_keys``, then keys will be
+    generated using ``ssh-keygen``. By default, one public/private pair of
+    each supported host key type will be generated. The key types to
+    generate can be specified using the ``ssh_genkeytypes`` config flag,
+    which accepts a list of host key types to use. For each host key type
+    for which this module has been instructed to create a keypair, if a key
+    of the same type is already present on the system (i.e. if
+    ``ssh_deletekeys`` was set to false), no key will be generated.
+
+    Supported host key types for the ``ssh_keys`` and the
+    ``ssh_genkeytypes`` config flags are:
+
+    - ecdsa
+    - ed25519
+    - rsa
+
+    Unsupported host key types for the ``ssh_keys`` and the
+    ``ssh_genkeytypes`` config flags are:
+
+    - ecdsa-sk
+    - ed25519-sk
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_ssh/example1.yaml
+  name: SSH
+  title: Configure SSH and SSH keys
diff --git a/doc/module-docs/cc_ssh/example1.yaml b/doc/module-docs/cc_ssh/example1.yaml
new file mode 100644
index 000000000..cc54f6ede
--- /dev/null
+++ b/doc/module-docs/cc_ssh/example1.yaml
@@ -0,0 +1,24 @@
+#cloud-config
+allow_public_ssh_keys: true
+disable_root: true
+disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding
+ssh_authorized_keys: [ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ..., ssh-rsa
+  AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...]
+ssh_deletekeys: true
+ssh_genkeytypes: [rsa, ecdsa, ed25519]
+ssh_keys: {rsa_certificate: 'ssh-rsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt
+    ...
+
+    ', rsa_private: '-----BEGIN RSA PRIVATE KEY-----
+
+    MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
+
+    ...
+
+    -----END RSA PRIVATE KEY-----
+
+    ', rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...}
+ssh_publish_hostkeys:
+  blacklist: [rsa]
+  enabled: true
+ssh_quiet_keygen: true
diff --git a/doc/module-docs/cc_ssh_authkey_fingerprints/data.yaml b/doc/module-docs/cc_ssh_authkey_fingerprints/data.yaml
new file mode 100644
index 000000000..c034d5d2a
--- /dev/null
+++ b/doc/module-docs/cc_ssh_authkey_fingerprints/data.yaml
@@ -0,0 +1,14 @@
+cc_ssh_authkey_fingerprints:
+  description: |
+    Write fingerprints of authorized keys for each user to the log. This is
+    enabled by default, but can be disabled using ``no_ssh_fingerprints``.
+    The hash type for the keys can be specified, but defaults to ``sha256``.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_ssh_authkey_fingerprints/example1.yaml
+  - comment: |
+      Example 2:
+    file: cc_ssh_authkey_fingerprints/example2.yaml
+  name: SSH AuthKey Fingerprints
+  title: Log fingerprints of user SSH keys
diff --git a/doc/module-docs/cc_ssh_authkey_fingerprints/example1.yaml b/doc/module-docs/cc_ssh_authkey_fingerprints/example1.yaml
new file mode 100644
index 000000000..7bae7a9d7
--- /dev/null
+++ b/doc/module-docs/cc_ssh_authkey_fingerprints/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+no_ssh_fingerprints: true
diff --git a/doc/module-docs/cc_ssh_authkey_fingerprints/example2.yaml b/doc/module-docs/cc_ssh_authkey_fingerprints/example2.yaml
new file mode 100644
index 000000000..2212058fe
--- /dev/null
+++ b/doc/module-docs/cc_ssh_authkey_fingerprints/example2.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+authkey_hash: sha512
diff --git a/doc/module-docs/cc_ssh_import_id/data.yaml b/doc/module-docs/cc_ssh_import_id/data.yaml
new file mode 100644
index 000000000..242130983
--- /dev/null
+++ b/doc/module-docs/cc_ssh_import_id/data.yaml
@@ -0,0 +1,13 @@
+cc_ssh_import_id:
+  description: |
+    This module imports SSH keys from either a public keyserver (usually
+    Launchpad) or GitHub, using ``ssh-import-id``. Keys are referenced by
+    the username they are associated with on the keyserver. The keyserver
+    can be specified by prepending either ``lp:`` for Launchpad or ``gh:``
+    for GitHub to the username.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_ssh_import_id/example1.yaml
+  name: SSH Import ID
+  title: Import SSH ID
diff --git a/doc/module-docs/cc_ssh_import_id/example1.yaml b/doc/module-docs/cc_ssh_import_id/example1.yaml
new file mode 100644
index 000000000..c52bc9a51
--- /dev/null
+++ b/doc/module-docs/cc_ssh_import_id/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+ssh_import_id: [user, 'gh:user', 'lp:user']
diff --git a/doc/module-docs/cc_timezone/data.yaml b/doc/module-docs/cc_timezone/data.yaml
new file mode 100644
index 000000000..def136d73
--- /dev/null
+++ b/doc/module-docs/cc_timezone/data.yaml
@@ -0,0 +1,10 @@
+cc_timezone:
+  description: |
+    Sets the `system timezone `_ based on the
+    value provided.
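+
+    For instance (any timezone name from the system's ``zoneinfo``
+    database is a valid value; the one shown is illustrative):
+
+    .. code-block:: yaml
+
+        timezone: Europe/London  # illustrative zoneinfo name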
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_timezone/example1.yaml
+  name: Timezone
+  title: Set the system timezone
diff --git a/doc/module-docs/cc_timezone/example1.yaml b/doc/module-docs/cc_timezone/example1.yaml
new file mode 100644
index 000000000..c54cadce4
--- /dev/null
+++ b/doc/module-docs/cc_timezone/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+timezone: US/Eastern
diff --git a/doc/module-docs/cc_ubuntu_drivers/data.yaml b/doc/module-docs/cc_ubuntu_drivers/data.yaml
new file mode 100644
index 000000000..d704a620f
--- /dev/null
+++ b/doc/module-docs/cc_ubuntu_drivers/data.yaml
@@ -0,0 +1,10 @@
+cc_ubuntu_drivers:
+  description: |
+    This module interacts with the ``ubuntu-drivers`` command to install
+    third-party driver packages.
+  examples:
+  - comment: |
+      Example 1:
+    file: cc_ubuntu_drivers/example1.yaml
+  name: Ubuntu Drivers
+  title: Interact with third party drivers in Ubuntu
diff --git a/doc/module-docs/cc_ubuntu_drivers/example1.yaml b/doc/module-docs/cc_ubuntu_drivers/example1.yaml
new file mode 100644
index 000000000..659c904bd
--- /dev/null
+++ b/doc/module-docs/cc_ubuntu_drivers/example1.yaml
@@ -0,0 +1,3 @@
+#cloud-config
+drivers:
+  nvidia: {license-accepted: true}
diff --git a/doc/module-docs/cc_ubuntu_pro/data.yaml b/doc/module-docs/cc_ubuntu_pro/data.yaml
new file mode 100644
index 000000000..3706f6381
--- /dev/null
+++ b/doc/module-docs/cc_ubuntu_pro/data.yaml
@@ -0,0 +1,52 @@
+cc_ubuntu_pro:
+  description: |
+    Attach machine to an existing Ubuntu Pro support contract and enable or
+    disable support services such as Livepatch, ESM, FIPS and FIPS Updates.
+
+    When attaching a machine to Ubuntu Pro, one can also specify services to
+    enable. When the ``enable`` list is present, only named services will be
+    activated. If the ``enable`` list is not present, the contract's default
+    services will be enabled.
+
+    On Pro instances, when ``ubuntu_pro`` config is provided to cloud-init,
+    Pro's auto-attach feature will be disabled and cloud-init will perform
+    the Pro auto-attach, ignoring the ``token`` key. The ``enable`` and
+    ``enable_beta`` values will strictly determine what services will be
+    enabled, ignoring contract defaults.
+
+    Note that when enabling FIPS or FIPS updates you will need to schedule a
+    reboot to ensure the machine is running the FIPS-compliant kernel. See
+    the Power State Change module for information on how to configure
+    cloud-init to perform this reboot.
+  examples:
+  - comment: >
+      Example 1: Attach the machine to an Ubuntu Pro support contract with a
+      Pro contract token obtained from https://ubuntu.com/pro.
+    file: cc_ubuntu_pro/example1.yaml
+  - comment: >
+      Example 2: Attach the machine to an Ubuntu Pro support contract,
+      enabling only FIPS and ESM services. Services will only be enabled if
+      the environment supports that service. Otherwise, warnings will be
+      logged for incompatible services.
+    file: cc_ubuntu_pro/example2.yaml
+  - comment: >
+      Example 3: Attach the machine to an Ubuntu Pro support contract and
+      enable the FIPS service. Perform a reboot once cloud-init has
+      completed.
+    file: cc_ubuntu_pro/example3.yaml
+  - comment: >
+      Example 4: Set an HTTP(S) proxy before attaching the machine to an
+      Ubuntu Pro support contract and enabling the FIPS service.
+    file: cc_ubuntu_pro/example4.yaml
+  - comment: >
+      Example 5: On Ubuntu Pro instances, auto-attach but don't enable any
+      Pro services.
+ file: cc_ubuntu_pro/example5.yaml + - comment: > + Example 6: Enable ESM and beta Real-time Ubuntu services in Ubuntu Pro + instances. + file: cc_ubuntu_pro/example6.yaml + - comment: > + Example 7: Disable auto-attach in Ubuntu Pro instances. + file: cc_ubuntu_pro/example7.yaml + name: Ubuntu Pro + title: Configure Ubuntu Pro support services diff --git a/doc/module-docs/cc_ubuntu_pro/example1.yaml b/doc/module-docs/cc_ubuntu_pro/example1.yaml new file mode 100644 index 000000000..ae9cdf2f2 --- /dev/null +++ b/doc/module-docs/cc_ubuntu_pro/example1.yaml @@ -0,0 +1,2 @@ +#cloud-config +ubuntu_pro: {token: } diff --git a/doc/module-docs/cc_ubuntu_pro/example2.yaml b/doc/module-docs/cc_ubuntu_pro/example2.yaml new file mode 100644 index 000000000..c9cf5c3bf --- /dev/null +++ b/doc/module-docs/cc_ubuntu_pro/example2.yaml @@ -0,0 +1,4 @@ +#cloud-config +ubuntu_pro: + enable: [fips, esm] + token: diff --git a/doc/module-docs/cc_ubuntu_pro/example3.yaml b/doc/module-docs/cc_ubuntu_pro/example3.yaml new file mode 100644 index 000000000..e4aa01a01 --- /dev/null +++ b/doc/module-docs/cc_ubuntu_pro/example3.yaml @@ -0,0 +1,5 @@ +#cloud-config +power_state: {mode: reboot} +ubuntu_pro: + enable: [fips] + token: diff --git a/doc/module-docs/cc_ubuntu_pro/example4.yaml b/doc/module-docs/cc_ubuntu_pro/example4.yaml new file mode 100644 index 000000000..5fac53750 --- /dev/null +++ b/doc/module-docs/cc_ubuntu_pro/example4.yaml @@ -0,0 +1,12 @@ +#cloud-config +ubuntu_pro: + token: + config: + http_proxy: 'http://some-proxy:8088' + https_proxy: 'https://some-proxy:8088' + global_apt_https_proxy: 'https://some-global-apt-proxy:8088/' + global_apt_http_proxy: 'http://some-global-apt-proxy:8088/' + ua_apt_http_proxy: 'http://10.0.10.10:3128' + ua_apt_https_proxy: 'https://10.0.10.10:3128' + enable: + - fips diff --git a/doc/module-docs/cc_ubuntu_pro/example5.yaml b/doc/module-docs/cc_ubuntu_pro/example5.yaml new file mode 100644 index 000000000..915c0c23d --- /dev/null +++ b/doc/module-docs/cc_ubuntu_pro/example5.yaml @@ -0,0 +1,4 @@ +#cloud-config +ubuntu_pro: + enable: [] + enable_beta: [] diff --git a/doc/module-docs/cc_ubuntu_pro/example6.yaml b/doc/module-docs/cc_ubuntu_pro/example6.yaml new file mode 100644 index 000000000..dfb1f0832 --- /dev/null +++ b/doc/module-docs/cc_ubuntu_pro/example6.yaml @@ -0,0 +1,4 @@ +#cloud-config +ubuntu_pro: + enable: [esm] + enable_beta: [realtime-kernel] diff --git a/doc/module-docs/cc_ubuntu_pro/example7.yaml b/doc/module-docs/cc_ubuntu_pro/example7.yaml new file mode 100644 index 000000000..72dd926bb --- /dev/null +++ b/doc/module-docs/cc_ubuntu_pro/example7.yaml @@ -0,0 +1,3 @@ +#cloud-config +ubuntu_pro: + features: {disable_auto_attach: true} diff --git a/doc/module-docs/cc_update_etc_hosts/data.yaml b/doc/module-docs/cc_update_etc_hosts/data.yaml new file mode 100644 index 000000000..c723a8a0d --- /dev/null +++ b/doc/module-docs/cc_update_etc_hosts/data.yaml @@ -0,0 +1,55 @@ +cc_update_etc_hosts: + description: | + This module will update the contents of the local hosts database (hosts + file, usually ``/etc/hosts``) based on the hostname/FQDN specified in + config. Management of the hosts file is controlled using + ``manage_etc_hosts``. If this is set to ``false``, cloud-init will not + manage the hosts file at all. This is the default behavior. + + If set to ``true``, cloud-init will generate the hosts file using the + template located in ``/etc/cloud/templates/hosts.tmpl``. 
In the ``/etc/cloud/templates/hosts.tmpl`` template, the strings
+    ``$hostname`` and ``$fqdn`` will be replaced with the hostname and FQDN
+    respectively.
+
+    If ``manage_etc_hosts`` is set to ``localhost``, then cloud-init will
+    not rewrite the hosts file entirely, but rather will ensure that an
+    entry for the FQDN with a distribution-dependent IP is present (i.e.,
+    ``ping <hostname>`` will ping ``127.0.0.1`` or ``127.0.1.1`` or other
+    IP).
+
+    .. note::
+       If ``manage_etc_hosts`` is set to ``true``, the contents of the
+       ``hosts`` file will be updated every boot. To make any changes to
+       the ``hosts`` file persistent they must be made in
+       ``/etc/cloud/templates/hosts.tmpl``.
+
+    .. note::
+       For instructions on specifying hostname and FQDN, see documentation
+       for the ``cc_set_hostname`` module.
+  examples:
+  - comment: >
+      Example 1: Do not update or manage ``/etc/hosts`` at all. This is the
+      default behavior. Whatever is present at instance boot time will be
+      present after boot. User changes will not be overwritten.
+    file: cc_update_etc_hosts/example1.yaml
+  - comment: >
+      Example 2: Manage ``/etc/hosts`` with cloud-init. On every boot,
+      ``/etc/hosts`` will be re-written from
+      ``/etc/cloud/templates/hosts.tmpl``.
+
+      The strings ``$hostname`` and ``$fqdn`` are replaced in the template
+      with the appropriate values, either from the cloud-config ``fqdn`` or
+      ``hostname`` if provided. When absent, the cloud metadata will be
+      checked for ``local-hostname``, which can be split into
+      ``<hostname>.<fqdn>``.
+
+      To make modifications persistent across a reboot, you must modify
+      ``/etc/cloud/templates/hosts.tmpl``.
+    file: cc_update_etc_hosts/example2.yaml
+  - comment: >
+      Example 3: Update ``/etc/hosts`` every boot, providing a "localhost"
+      ``127.0.1.1`` entry with the latest hostname and FQDN as provided by
+      either IMDS or cloud-config. All other entries will be left alone.
+      ``ping hostname`` will ping ``127.0.1.1``.
+    file: cc_update_etc_hosts/example3.yaml
+  name: Update Etc Hosts
+  title: Update the hosts file (usually ``/etc/hosts``)
diff --git a/doc/module-docs/cc_update_etc_hosts/example1.yaml b/doc/module-docs/cc_update_etc_hosts/example1.yaml
new file mode 100644
index 000000000..d8f31267f
--- /dev/null
+++ b/doc/module-docs/cc_update_etc_hosts/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+manage_etc_hosts: false
diff --git a/doc/module-docs/cc_update_etc_hosts/example2.yaml b/doc/module-docs/cc_update_etc_hosts/example2.yaml
new file mode 100644
index 000000000..9bc3b33e3
--- /dev/null
+++ b/doc/module-docs/cc_update_etc_hosts/example2.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+manage_etc_hosts: true
diff --git a/doc/module-docs/cc_update_etc_hosts/example3.yaml b/doc/module-docs/cc_update_etc_hosts/example3.yaml
new file mode 100644
index 000000000..e1f85816c
--- /dev/null
+++ b/doc/module-docs/cc_update_etc_hosts/example3.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+manage_etc_hosts: localhost
diff --git a/doc/module-docs/cc_update_hostname/data.yaml b/doc/module-docs/cc_update_hostname/data.yaml
new file mode 100644
index 000000000..cfe5dc569
--- /dev/null
+++ b/doc/module-docs/cc_update_hostname/data.yaml
@@ -0,0 +1,38 @@
+cc_update_hostname:
+  description: |
+    This module will update the system hostname and FQDN. If
+    ``preserve_hostname`` is set to ``true``, then the hostname will not be
+    altered.
+
+    .. note::
+       For instructions on specifying hostname and FQDN, see documentation
+       for the ``cc_set_hostname`` module.
+  examples:
+  - comment: >
+      Example 1: By default, when ``preserve_hostname`` is not specified,
+      cloud-init updates ``/etc/hostname`` per boot, based on the
+      cloud-provided ``local-hostname`` setting. If you manually change
+      ``/etc/hostname`` after boot, cloud-init will no longer modify it.
+
+      This default cloud-init behavior is equivalent to this cloud-config:
+    file: cc_update_hostname/example1.yaml
+  - comment: |
+      Example 2: Prevent cloud-init from updating the system hostname.
+    file: cc_update_hostname/example2.yaml
+  - comment: |
+      Example 3: Prevent cloud-init from updating ``/etc/hostname``.
+    file: cc_update_hostname/example3.yaml
+  - comment: |
+      Example 4: Set hostname to ``external.fqdn.me`` instead of ``myhost``.
+    file: cc_update_hostname/example4.yaml
+  - comment: >
+      Example 5: Set hostname to ``external`` instead of
+      ``external.fqdn.me`` when cloud metadata provides the
+      ``local-hostname``: ``external.fqdn.me``.
+    file: cc_update_hostname/example5.yaml
+  - comment: >
+      Example 6: On a machine without an ``/etc/hostname`` file, don't
+      create it. In most clouds, this will result in a DHCP-configured
+      hostname provided by the cloud.
+    file: cc_update_hostname/example6.yaml
+  name: Update Hostname
+  title: Update hostname and FQDN
diff --git a/doc/module-docs/cc_update_hostname/example1.yaml b/doc/module-docs/cc_update_hostname/example1.yaml
new file mode 100644
index 000000000..2ca4689d1
--- /dev/null
+++ b/doc/module-docs/cc_update_hostname/example1.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+preserve_hostname: false
diff --git a/doc/module-docs/cc_update_hostname/example2.yaml b/doc/module-docs/cc_update_hostname/example2.yaml
new file mode 100644
index 000000000..6edec76ad
--- /dev/null
+++ b/doc/module-docs/cc_update_hostname/example2.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+preserve_hostname: true
diff --git a/doc/module-docs/cc_update_hostname/example3.yaml b/doc/module-docs/cc_update_hostname/example3.yaml
new file mode 100644
index 000000000..6edec76ad
--- /dev/null
+++ b/doc/module-docs/cc_update_hostname/example3.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+preserve_hostname: true
diff --git a/doc/module-docs/cc_update_hostname/example4.yaml b/doc/module-docs/cc_update_hostname/example4.yaml
new file mode 100644
index 000000000..8f7ecfbb4
--- /dev/null
+++ b/doc/module-docs/cc_update_hostname/example4.yaml
@@ -0,0 +1,5 @@
+#cloud-config
+fqdn: external.fqdn.me
+hostname: myhost
+prefer_fqdn_over_hostname: true
+create_hostname_file: true
diff --git a/doc/module-docs/cc_update_hostname/example5.yaml b/doc/module-docs/cc_update_hostname/example5.yaml
new file mode 100644
index 000000000..51717bd5e
--- /dev/null
+++ b/doc/module-docs/cc_update_hostname/example5.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+prefer_fqdn_over_hostname: false
diff --git a/doc/module-docs/cc_update_hostname/example6.yaml b/doc/module-docs/cc_update_hostname/example6.yaml
new file mode 100644
index 000000000..785e6167d
--- /dev/null
+++ b/doc/module-docs/cc_update_hostname/example6.yaml
@@ -0,0 +1,2 @@
+#cloud-config
+create_hostname_file: false
diff --git a/doc/module-docs/cc_users_groups/data.yaml b/doc/module-docs/cc_users_groups/data.yaml
new file mode 100644
index 000000000..03bb32594
--- /dev/null
+++ b/doc/module-docs/cc_users_groups/data.yaml
@@ -0,0 +1,107 @@
+cc_users_groups:
+  description: |
+    This module configures users and groups. For more detailed information
+    on user options, see the :ref:`Including users and groups ` config
+    example.
+ + Groups to add to the system can be specified under the ``groups`` key as a + string of comma-separated groups to create, or a list. Each item in the + list should either contain a string of a single group to create, or a + dictionary with the group name as the key and, as its value, either a + single user or a list of users who should be members of the group. + + .. note:: + Groups are added before users, so any users in a group list must already + exist on the system. + + Users to add can be specified as a string or list under the ``users`` key. + Each entry in the list should either be a string or a dictionary. If a + string is specified, that string can be comma-separated usernames to + create, or the reserved string ``default`` which represents the primary + admin user used to access the system. The ``default`` user varies per + distribution and is generally configured in ``/etc/cloud/cloud.cfg`` by the + ``default_user`` key. + + Each ``users`` dictionary item must contain either a ``name`` or + ``snapuser`` key, otherwise it will be ignored. Omission of ``default`` as + the first item in the ``users`` list skips creation of the default user. If + no ``users`` key is provided, the default behavior is to create the + default user via this config: + + .. code-block:: yaml + + users: + - default + + .. note:: + Specifying a hash of a user's password with ``passwd`` is a security + risk if the cloud-config can be intercepted. SSH authentication is + preferred. + + .. note:: + If specifying a ``doas`` rule for a user, ensure that the syntax for + the rule is valid, as the only checking performed by cloud-init is to + ensure that the user referenced in the rule is the correct user. + + .. note:: + If specifying a ``sudo`` rule for a user, ensure that the syntax for + the rule is valid, as it is not checked by cloud-init. + + .. note:: + Most of these configuration options will not be honored if the user + already exists. The following options are the exceptions, and are + applied to already-existing users: ``plain_text_passwd``, ``doas``, + ``hashed_passwd``, ``lock_passwd``, ``sudo``, ``ssh_authorized_keys``, + ``ssh_redirect_user``. + + The ``user`` key can be used to override the ``default_user`` configuration + defined in ``/etc/cloud/cloud.cfg``. The ``user`` value should be a + dictionary which supports the same config keys as the ``users`` dictionary + items. + examples: + - comment: > + Example 1: Add the ``default_user`` from ``/etc/cloud/cloud.cfg``. This + is also the default behavior of cloud-init when no ``users`` key is + provided. + file: cc_users_groups/example1.yaml + - comment: > + Example 2: Add the ``admingroup`` with members ``root`` and ``sys``, and + an empty group ``cloud-users``. + file: cc_users_groups/example2.yaml + - comment: > + Example 3: Skip creation of the ``default`` user and only create + ``newsuper``. Password-based login is rejected, but the GitHub user + ``TheRealFalcon`` and the Launchpad user ``falcojr`` can SSH as + ``newsuper``. The default shell for ``newsuper`` is bash instead of + the system default. + file: cc_users_groups/example3.yaml + - comment: > + Example 4: Skip creation of the ``default`` user and only create + ``newsuper``. Password-based login is rejected, but the GitHub user + ``TheRealFalcon`` and the Launchpad user ``falcojr`` can SSH as + ``newsuper``. ``doas``/``opendoas`` is configured to permit this user to + run commands as any other user (without being prompted for a password) + except root.
+ file: cc_users_groups/example4.yaml + - comment: > + Example 5: On a system with SELinux enabled, add ``youruser`` and set the + SELinux user to ``staff_u``. When ``selinux_user`` is omitted, the system + selects its configured default SELinux user. + file: cc_users_groups/example5.yaml + - comment: > + Example 6: To redirect a legacy username to the ``default`` user for a + distribution, ``ssh_redirect_user`` will accept an SSH connection and + emit a message telling the client to SSH as the ``default`` user. + SSH clients will get the message. + file: cc_users_groups/example6.yaml + - comment: > + Example 7: Override any ``default_user`` config in + ``/etc/cloud/cloud.cfg`` with supplemental config options. This config + will make the default user ``mynewdefault`` and change the user to not + have ``sudo`` rights. + file: cc_users_groups/example7.yaml + - comment: > + Example 8: Avoid creating any ``default_user``. + file: cc_users_groups/example8.yaml + name: Users and Groups + title: Configure users and groups diff --git a/doc/module-docs/cc_users_groups/example1.yaml b/doc/module-docs/cc_users_groups/example1.yaml new file mode 100644 index 000000000..b129d9e9c --- /dev/null +++ b/doc/module-docs/cc_users_groups/example1.yaml @@ -0,0 +1,2 @@ +#cloud-config +users: [default] diff --git a/doc/module-docs/cc_users_groups/example2.yaml b/doc/module-docs/cc_users_groups/example2.yaml new file mode 100644 index 000000000..595f4e454 --- /dev/null +++ b/doc/module-docs/cc_users_groups/example2.yaml @@ -0,0 +1,4 @@ +#cloud-config +groups: +- admingroup: [root, sys] +- cloud-users diff --git a/doc/module-docs/cc_users_groups/example3.yaml b/doc/module-docs/cc_users_groups/example3.yaml new file mode 100644 index 000000000..ed79daa9d --- /dev/null +++ b/doc/module-docs/cc_users_groups/example3.yaml @@ -0,0 +1,9 @@ +#cloud-config +users: +- gecos: Big Stuff + groups: users, admin + lock_passwd: true + name: newsuper + shell: /bin/bash + ssh_import_id: ['lp:falcojr', 'gh:TheRealFalcon'] + sudo: ALL=(ALL) NOPASSWD:ALL diff --git a/doc/module-docs/cc_users_groups/example4.yaml b/doc/module-docs/cc_users_groups/example4.yaml new file mode 100644 index 000000000..4c764b9c6 --- /dev/null +++ b/doc/module-docs/cc_users_groups/example4.yaml @@ -0,0 +1,8 @@ +#cloud-config +users: +- doas: [permit nopass newsuper, deny newsuper as root] + gecos: Big Stuff + groups: users, admin + lock_passwd: true + name: newsuper + ssh_import_id: ['lp:falcojr', 'gh:TheRealFalcon'] diff --git a/doc/module-docs/cc_users_groups/example5.yaml b/doc/module-docs/cc_users_groups/example5.yaml new file mode 100644 index 000000000..2de78aa97 --- /dev/null +++ b/doc/module-docs/cc_users_groups/example5.yaml @@ -0,0 +1,4 @@ +#cloud-config +users: +- default +- {name: youruser, selinux_user: staff_u} diff --git a/doc/module-docs/cc_users_groups/example6.yaml b/doc/module-docs/cc_users_groups/example6.yaml new file mode 100644 index 000000000..05a7013dc --- /dev/null +++ b/doc/module-docs/cc_users_groups/example6.yaml @@ -0,0 +1,4 @@ +#cloud-config +users: +- default +- {name: nosshlogins, ssh_redirect_user: true} diff --git a/doc/module-docs/cc_users_groups/example7.yaml b/doc/module-docs/cc_users_groups/example7.yaml new file mode 100644 index 000000000..98525e7eb --- /dev/null +++ b/doc/module-docs/cc_users_groups/example7.yaml @@ -0,0 +1,3 @@ +#cloud-config +ssh_import_id: [chad.smith] +user: {name: mynewdefault, sudo: null} diff --git a/doc/module-docs/cc_users_groups/example8.yaml b/doc/module-docs/cc_users_groups/example8.yaml new
file mode 100644 index 000000000..c6e6d4c39 --- /dev/null +++ b/doc/module-docs/cc_users_groups/example8.yaml @@ -0,0 +1,2 @@ +#cloud-config +users: [] diff --git a/doc/module-docs/cc_wireguard/data.yaml b/doc/module-docs/cc_wireguard/data.yaml new file mode 100644 index 000000000..d49af7ca8 --- /dev/null +++ b/doc/module-docs/cc_wireguard/data.yaml @@ -0,0 +1,43 @@ +cc_wireguard: + description: | + The WireGuard module provides a dynamic interface for configuring + WireGuard (as a peer or server) in a straightforward way. + + This module takes care of: + + - writing interface configuration files + - enabling and starting interfaces + - installing the wireguard-tools package + - loading the WireGuard kernel module + - executing readiness probes + + **What is a readiness probe?** + + The idea behind readiness probes is to ensure WireGuard connectivity before + continuing the cloud-init process. This could be useful if you need access + to specific services like an internal APT Repository Server (e.g., + Landscape) to install/update packages. + + **Example** + + An edge device can't access the internet but uses cloud-init modules which + will install packages (e.g. ``landscape``, ``packages``, + ``ubuntu_advantage``). Those modules will fail due to the missing internet + connection. The ``wireguard`` module fixes that problem, as it waits until + all readiness probes (which can be arbitrary commands, e.g. checking if a + proxy server is reachable over the WireGuard network) are finished before + continuing the cloud-init ``config`` stage. + + .. note:: + To use DNS with WireGuard, you have to install the + ``resolvconf`` package or symlink it to systemd's ``resolvectl``; + otherwise ``wg-quick`` commands will throw an error that the + ``resolvconf`` executable is missing, which causes the ``wireguard`` + module to fail. + examples: + - comment: > + Configure one or more WireGuard interfaces and provide optional readiness + probes. + file: cc_wireguard/example1.yaml + name: Wireguard + title: Module to configure WireGuard tunnel diff --git a/doc/module-docs/cc_wireguard/example1.yaml b/doc/module-docs/cc_wireguard/example1.yaml new file mode 100644 index 000000000..a7f003137 --- /dev/null +++ b/doc/module-docs/cc_wireguard/example1.yaml @@ -0,0 +1,27 @@ +#cloud-config +wireguard: + interfaces: + - name: wg0 + config_path: /etc/wireguard/wg0.conf + content: | + [Interface] + PrivateKey = <private key> + Address = <address>
+ [Peer] + PublicKey = <public key> + Endpoint = <endpoint IP>:<endpoint port> + AllowedIPs = <allowedip>, <allowedip>, ... + - name: wg1 + config_path: /etc/wireguard/wg1.conf + content: | + [Interface] + PrivateKey = <private key> + Address = <address>
+ [Peer] + PublicKey = <public key> + Endpoint = <endpoint IP>:<endpoint port> + AllowedIPs = <allowedip> + readinessprobe: + - 'systemctl restart service' + - 'curl https://webhook.endpoint/example' + - 'nc -zv some-service-fqdn 443' diff --git a/doc/module-docs/cc_write_files/data.yaml b/doc/module-docs/cc_write_files/data.yaml new file mode 100644 index 000000000..c59b8e2ea --- /dev/null +++ b/doc/module-docs/cc_write_files/data.yaml @@ -0,0 +1,40 @@ +cc_write_files: + description: | + Write out arbitrary content to files, optionally setting permissions. + Parent folders in the path are created if absent. Content can be specified + in plain text or binary. Data encoded as base64 or as gzipped binary can + also be specified, and will be decoded before being written. For empty + file creation, content can be omitted. + + .. note:: + If multi-line data is provided, care should be taken to ensure it + follows YAML formatting standards. To specify binary data, use the YAML + option ``!!binary``. + + .. note:: + Do not write files under ``/tmp`` during boot because of a race with + ``systemd-tmpfiles-clean`` that can cause temporary files to be cleaned + during the early boot process. Use ``/run/somedir`` instead to avoid + a race (LP: #1707222). + + .. warning:: + Existing files will be overwritten. + examples: + - comment: | + Example 1: Write out base64-encoded content to ``/etc/sysconfig/selinux``. + file: cc_write_files/example1.yaml + - comment: | + Example 2: Append content to an existing file. + file: cc_write_files/example2.yaml + - comment: | + Example 3: Provide gzipped binary content. + file: cc_write_files/example3.yaml + - comment: | + Example 4: Create an empty file on the system. + file: cc_write_files/example4.yaml + - comment: > + Example 5: Defer writing the file until after the package (Nginx) is + installed and its user is created. + file: cc_write_files/example5.yaml + name: Write Files + title: Write arbitrary files diff --git a/doc/module-docs/cc_write_files/example1.yaml b/doc/module-docs/cc_write_files/example1.yaml new file mode 100644 index 000000000..ab2eb6f27 --- /dev/null +++ b/doc/module-docs/cc_write_files/example1.yaml @@ -0,0 +1,7 @@ +#cloud-config +write_files: +- encoding: b64 + content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
+ owner: root:root + path: /etc/sysconfig/selinux + permissions: '0644' diff --git a/doc/module-docs/cc_write_files/example2.yaml b/doc/module-docs/cc_write_files/example2.yaml new file mode 100644 index 000000000..e7d7eda15 --- /dev/null +++ b/doc/module-docs/cc_write_files/example2.yaml @@ -0,0 +1,6 @@ +#cloud-config +write_files: +- content: | + 15 * * * * root ship_logs + path: /etc/crontab + append: true diff --git a/doc/module-docs/cc_write_files/example3.yaml b/doc/module-docs/cc_write_files/example3.yaml new file mode 100644 index 000000000..749b91436 --- /dev/null +++ b/doc/module-docs/cc_write_files/example3.yaml @@ -0,0 +1,7 @@ +#cloud-config +write_files: +- encoding: gzip + content: !!binary | + H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA= + path: /usr/bin/hello + permissions: '0755' diff --git a/doc/module-docs/cc_write_files/example4.yaml b/doc/module-docs/cc_write_files/example4.yaml new file mode 100644 index 000000000..7c299bd9c --- /dev/null +++ b/doc/module-docs/cc_write_files/example4.yaml @@ -0,0 +1,3 @@ +#cloud-config +write_files: +- path: /root/CLOUD_INIT_WAS_HERE diff --git a/doc/module-docs/cc_write_files/example5.yaml b/doc/module-docs/cc_write_files/example5.yaml new file mode 100644 index 000000000..a99bdf434 --- /dev/null +++ b/doc/module-docs/cc_write_files/example5.yaml @@ -0,0 +1,15 @@ +#cloud-config +write_files: +- path: /etc/nginx/conf.d/example.com.conf + content: | + server { + server_name example.com; + listen 80; + root /var/www; + location / { + try_files $uri $uri/ $uri.html =404; + } + } + owner: 'nginx:nginx' + permissions: '0640' + defer: true diff --git a/doc/module-docs/cc_yum_add_repo/data.yaml b/doc/module-docs/cc_yum_add_repo/data.yaml new file mode 100644 index 000000000..7875654fb --- /dev/null +++ b/doc/module-docs/cc_yum_add_repo/data.yaml @@ -0,0 +1,27 @@ +cc_yum_add_repo: + description: | + Add yum repository configuration to ``/etc/yum.repos.d``. Configuration + files are named based on the opaque dictionary key under ``yum_repos`` + that they are specified with. If a config file already exists with the same + name as a config entry, the config entry will be skipped. + examples: + - comment: | + Example 1: Add a ``my_repo`` repository, writing repo configuration to a + custom ``yum_repo_dir``. + file: cc_yum_add_repo/example1.yaml + - comment: > + Example 2: Enable cloud-init upstream's daily testing repo for EPEL 8 to + install the latest cloud-init from tip of ``main`` for testing. + file: cc_yum_add_repo/example2.yaml + - comment: > + Example 3: Add the file ``/etc/yum.repos.d/epel_testing.repo``, which can + then be used by yum for later operations. + file: cc_yum_add_repo/example3.yaml + - comment: > + Example 4: Any yum repo configuration can be passed directly into the + repository file created. See ``man yum.conf`` for supported config keys. + + Write ``/etc/yum.repos.d/my-package-stream.repo`` with ``gpgkey`` checks + enabled on the repository's repo data.
+ file: cc_yum_add_repo/example4.yaml + name: Yum Add Repo + title: Add yum repository configuration to the system diff --git a/doc/module-docs/cc_yum_add_repo/example1.yaml b/doc/module-docs/cc_yum_add_repo/example1.yaml new file mode 100644 index 000000000..ed9642bb9 --- /dev/null +++ b/doc/module-docs/cc_yum_add_repo/example1.yaml @@ -0,0 +1,5 @@ +#cloud-config +yum_repos: + my_repo: + baseurl: http://blah.org/pub/epel/testing/5/$basearch/ +yum_repo_dir: /store/custom/yum.repos.d diff --git a/doc/module-docs/cc_yum_add_repo/example2.yaml b/doc/module-docs/cc_yum_add_repo/example2.yaml new file mode 100644 index 000000000..f5ae4735c --- /dev/null +++ b/doc/module-docs/cc_yum_add_repo/example2.yaml @@ -0,0 +1,10 @@ +#cloud-config +yum_repos: + cloud-init-daily: + name: Copr repo for cloud-init-dev owned by @cloud-init + baseurl: https://download.copr.fedorainfracloud.org/results/@cloud-init/cloud-init-dev/epel-8-$basearch/ + type: rpm-md + skip_if_unavailable: true + gpgcheck: true + gpgkey: https://download.copr.fedorainfracloud.org/results/@cloud-init/cloud-init-dev/pubkey.gpg + enabled_metadata: 1 diff --git a/doc/module-docs/cc_yum_add_repo/example3.yaml b/doc/module-docs/cc_yum_add_repo/example3.yaml new file mode 100644 index 000000000..717ca8c4e --- /dev/null +++ b/doc/module-docs/cc_yum_add_repo/example3.yaml @@ -0,0 +1,10 @@ +#cloud-config +yum_repos: +# The name of the repository + epel-testing: + baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch + enabled: false + failovermethod: priority + gpgcheck: true + gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL + name: Extra Packages for Enterprise Linux 5 - Testing diff --git a/doc/module-docs/cc_yum_add_repo/example4.yaml b/doc/module-docs/cc_yum_add_repo/example4.yaml new file mode 100644 index 000000000..01188159d --- /dev/null +++ b/doc/module-docs/cc_yum_add_repo/example4.yaml @@ -0,0 +1,8 @@ +#cloud-config +yum_repos: + my package stream: + baseurl: http://blah.org/pub/epel/testing/5/$basearch/ + mirrorlist: http://some-url-to-list-of-baseurls + repo_gpgcheck: 1 + enable_gpgcheck: true + gpgkey: https://url.to.ascii-armored-gpg-key diff --git a/doc/module-docs/cc_zypper_add_repo/data.yaml b/doc/module-docs/cc_zypper_add_repo/data.yaml new file mode 100644 index 000000000..6ad5ad59c --- /dev/null +++ b/doc/module-docs/cc_zypper_add_repo/data.yaml @@ -0,0 +1,23 @@ +cc_zypper_add_repo: + description: | + Zypper behavior can be configured using the ``config`` key, which will + modify ``/etc/zypp/zypp.conf``. The configuration writer will only append + the provided configuration options to the configuration file. Any duplicate + options will be resolved by the way the ``zypp.conf`` INI file is parsed. + + .. note:: + Setting ``configdir`` is not supported and will be skipped. + + The ``repos`` key may be used to add repositories to the system. Beyond the + required ``id`` and ``baseurl`` attributes, no validation is performed on + the ``repos`` entries. + + It is assumed the user is familiar with the Zypper repository file format. + This configuration is also applicable to systems with + transactional-updates.
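As a minimal sketch of the ``repos`` requirements just described — only ``id`` and ``baseurl`` are required per entry; the values here are borrowed from the fuller example that follows:

.. code-block:: yaml

   #cloud-config
   zypper:
     repos:
       # id and baseurl are the only attributes cloud-init requires;
       # anything else is passed through to the .repo file unvalidated
       - id: opensuse-oss
         baseurl: http://dl.opensuse.org/dist/leap/v/repo/oss/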
+ examples: + - comment: | + Example 1: Set Zypper ``config`` options and add two openSUSE repositories. + file: cc_zypper_add_repo/example1.yaml + name: Zypper Add Repo + title: Configure Zypper behavior and add Zypper repositories diff --git a/doc/module-docs/cc_zypper_add_repo/example1.yaml b/doc/module-docs/cc_zypper_add_repo/example1.yaml new file mode 100644 index 000000000..720169db7 --- /dev/null +++ b/doc/module-docs/cc_zypper_add_repo/example1.yaml @@ -0,0 +1,8 @@ +#cloud-config +zypper: + config: {download.use_deltarpm: true, reposdir: /etc/zypp/repos.dir, servicesdir: /etc/zypp/services.d} + repos: + - {autorefresh: 1, baseurl: 'http://dl.opensuse.org/dist/leap/v/repo/oss/', enabled: 1, + id: opensuse-oss, name: os-oss} + - {baseurl: 'http://dl.opensuse.org/dist/leap/v/update', id: opensuse-oss-update, + name: os-oss-up} diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 08556f8a6..1ca6a85a2 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -1,8 +1,15 @@ import datetime +import glob import os import sys from cloudinit import version +from cloudinit.config.schema import ( + flatten_schema_all_of, + flatten_schema_refs, + get_schema, +) +from cloudinit.handlers.jinja_template import render_jinja_payload # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -36,6 +43,7 @@ "sphinx.ext.autodoc", "sphinx.ext.autosectionlabel", "sphinx.ext.viewcode", + "sphinxcontrib.datatemplates", "sphinxcontrib.spelling", ] @@ -44,6 +52,7 @@ # https://docs.ubuntu.com/styleguide/en/ spelling_warning = True +templates_path = ["templates"] # Uses case-independent spelling matches from doc/rtd/spelling_word_list.txt spelling_filters = ["spelling.WordListFilter"] @@ -73,7 +82,8 @@ # Sphinx-copybutton config options: 1) prompt to be stripped from copied code. # 2) Set to copy all lines (not just prompt lines) to ensure multiline snippets # can be copied even if they don't contain an EOF line. -copybutton_prompt_text = "$ " +copybutton_prompt_text = r"\$ |PS> " +copybutton_prompt_is_regexp = True copybutton_only_copy_prompt_lines = False # -- Options for HTML output -------------------------------------------------- @@ -146,15 +156,147 @@ r"https://github.com/canonical/ubuntu-pro-client.*", ) +new_doc_issue_link = ( + "https://github.com/canonical/cloud-init/issues/new?" + "labels=documentation%2C+new&projects=&template=documentation.md&" + "title=%5Bdocs%5D%3A+missing+redirect" +) +docs_url = "https://docs.cloud-init.io" + # Sphinx-notfound-page config options: notfound_body = ( "

<h1>Page not found</h1><p>Sorry we missed you! Our docs have had a" - " remodel and some deprecated links have changed.</p><br>" - "<p><a href='https://canonical-cloud-init.readthedocs-hosted.com'>Back to our" - " homepage now hosted at" - " https://canonical-cloud-init.readthedocs-hosted.com</a></p>" + " remodel and some deprecated links have changed. Please" + f" <a href='{new_doc_issue_link}'>file a documentation bug</a> and" + " we'll fix this redirect.</p><br>" + f"<p><a href='{docs_url}'>Back to our homepage hosted at {docs_url}</a></p>
" ) + notfound_context = { "title": "Page not found", "body": notfound_body, } + + +def get_types_str(prop_cfg): + """Return formatted string for all supported config types.""" + types = "" + + # When oneOf present, join each alternative with an '/' + types += "/".join( + get_types_str(oneof_cfg) for oneof_cfg in prop_cfg.get("oneOf", []) + ) + if "items" in prop_cfg: + types = f"{prop_cfg['type']} of " + types += get_types_str(prop_cfg["items"]) + elif "enum" in prop_cfg: + types += f"{'/'.join([f'``{enum}``' for enum in prop_cfg['enum']])}" + elif "type" in prop_cfg: + if isinstance(prop_cfg["type"], list): + types = "/".join(prop_cfg["type"]) + else: + types = prop_cfg["type"] + return types + + +def get_changed_str(prop_name, prop_cfg): + changed_cfg = {} + if prop_cfg.get("changed"): + changed_cfg = prop_cfg + for oneof_cfg in prop_cfg.get("oneOf", []): + if oneof_cfg.get("changed"): + changed_cfg = oneof_cfg + break + if changed_cfg: + with open("templates/property_changed.tmpl", "r") as stream: + content = "## template: jinja\n" + stream.read() + return render_jinja_payload( + content, f"changed_{prop_name}", changed_cfg + ) + return "" + + +def get_deprecated_str(prop_name, prop_cfg): + deprecated_cfg = {} + if prop_cfg.get("deprecated"): + deprecated_cfg = prop_cfg + for oneof_cfg in prop_cfg.get("oneOf", []): + if oneof_cfg.get("deprecated"): + deprecated_cfg = oneof_cfg + break + if deprecated_cfg: + with open("templates/property_deprecation.tmpl", "r") as stream: + content = "## template: jinja\n" + stream.read() + return render_jinja_payload( + content, f"deprecation_{prop_name}", deprecated_cfg + ) + return "" + + +def render_property_template(prop_name, prop_cfg, prefix=""): + if prop_cfg.get("description"): + description = f" {prop_cfg['description']}" + else: + description = "" + description += get_deprecated_str(prop_name, prop_cfg) + description += get_changed_str(prop_name, prop_cfg) + jinja_vars = { + "prefix": prefix, + "name": prop_name, + "description": description, + "types": get_types_str(prop_cfg), + "prop_cfg": prop_cfg, + } + with open("templates/module_property.tmpl", "r") as stream: + content = "## template: jinja\n" + stream.read() + return render_jinja_payload(content, f"doc_module_{prop_name}", jinja_vars) + + +def render_nested_properties(prop_cfg, defs, prefix): + prop_str = "" + flatten_schema_refs(prop_cfg, defs) + if "items" in prop_cfg: + prop_str += render_nested_properties(prop_cfg["items"], defs, prefix) + if not set(["properties", "patternProperties"]).intersection(prop_cfg): + return prop_str + for prop_name, nested_cfg in prop_cfg.get("properties", {}).items(): + flatten_schema_all_of(nested_cfg) + flatten_schema_refs(nested_cfg, defs) + prop_str += render_property_template(prop_name, nested_cfg, prefix) + prop_str += render_nested_properties(nested_cfg, defs, prefix + " ") + for prop_name, nested_cfg in prop_cfg.get("patternProperties", {}).items(): + flatten_schema_all_of(nested_cfg) + flatten_schema_refs(nested_cfg, defs) + if nested_cfg.get("label"): + prop_name = nested_cfg.get("label") + prop_str += render_property_template(prop_name, nested_cfg, prefix) + prop_str += render_nested_properties(nested_cfg, defs, prefix + " ") + return prop_str + + +def render_module_schemas(): + from cloudinit.importer import import_module + + mod_docs = {} + schema = get_schema() + defs = schema.get("$defs", {}) + + for mod_path in glob.glob("../../cloudinit/config/cc_*py"): + mod_name = os.path.basename(mod_path).replace(".py", "") + mod = 
import_module(f"cloudinit.config.{mod_name}") + cc_key = mod.meta["id"] + mod_docs[cc_key] = { + "meta": mod.meta, + } + if cc_key in defs: + mod_docs[cc_key]["schema_doc"] = render_nested_properties( + defs[cc_key], defs, "" + ) + else: + mod_docs[cc_key][ + "schema_doc" + ] = "No schema definitions for this module" + return mod_docs + + +html_context = render_module_schemas() diff --git a/doc/rtd/development/docs_layout.rst b/doc/rtd/development/docs_layout.rst index 0f981a2af..1e4e7c767 100644 --- a/doc/rtd/development/docs_layout.rst +++ b/doc/rtd/development/docs_layout.rst @@ -11,6 +11,7 @@ directory: /doc/ - examples/ - man/ + - module-docs/ - rtd/ - tutorial/ - howto/ @@ -29,17 +30,50 @@ directory: - sources/ -``examples/`` -============= +examples/ +========= -``man/`` -======== +man/ +==== This subdirectory contains the Linux man pages for the binaries provided by cloud-init. +module-docs/ +============ + +The documentation for modules is generated automatically using YAML files and +templates. Each module has its own sub-directory, containing: + +- ``data.yaml`` file: + Contains the text and descriptions rendered on the + :ref:`modules documentation ` page. +- ``example*.yaml`` files: + These examples stand alone as valid cloud-config. They always start with + ``#cloud-config``, and ideally, should also have some accompanying discussion + or context in the ``comment`` field in the ``data.yaml`` file to explain + what's happening. + +Edit existing module docs +------------------------- + +In the ``data.yaml`` file, the fields support reStructuredText markup in the +``description`` and ``comment`` fields. With the pipe character (``|``) +preceding these fields, the text will be preserved so that using rST directives +(such as notes or code blocks) will render correctly in the documentation. If +you don't need to use directives, you can use the greater-than character +(``>``), which will fold broken lines together into paragraphs (while +respecting empty lines). + +Create new module docs +---------------------- + +Creating documentation for a **new** module involves a little more work, and +the process for that is outlined in the :ref:`contributing modules ` +page. + ``rtd/`` ======== diff --git a/doc/rtd/development/logging.rst b/doc/rtd/development/logging.rst index c294ce493..fed9f35ba 100644 --- a/doc/rtd/development/logging.rst +++ b/doc/rtd/development/logging.rst @@ -271,7 +271,7 @@ With defaults used:: For more information on ``rsyslog`` configuration, see -:ref:`our module reference page`. +:ref:`our module reference page `. .. LINKS: .. _python logging config: https://docs.python.org/3/library/logging.config.html#configuration-file-format diff --git a/doc/rtd/development/module_creation.rst b/doc/rtd/development/module_creation.rst index 9c873ccaa..32240ab3e 100644 --- a/doc/rtd/development/module_creation.rst +++ b/doc/rtd/development/module_creation.rst @@ -7,8 +7,60 @@ Much of ``cloud-init``'s functionality is provided by :ref:`modules`. All modules follow a similar layout in order to provide consistent execution and documentation. Use the example provided here to create a new module. -Example -======= +.. _module_creation-Guidelines: + +Your Python module +================== + +Modules are located in the ``cloudinit/config/`` directory, where the naming +convention for modules is to use ``cc_`` (with underscores as the +separators). + +The handle function +------------------- + +Your module must include a ``handle`` function. 
The arguments are: + +- ``name``: The module name specified in the configuration. +- ``cfg``: A configuration object that is the result of the merging of + cloud-config configuration with any datasource-provided configuration. +- ``cloud``: A cloud object that can be used to access various datasource + and paths for the given distro and data provided by the various datasource + instance types. +- ``args``: An argument list. This is usually empty and is only populated + if the module is called independently from the command line or if the + module definition in :file:`/etc/cloud/cloud.cfg[.d]` has been modified + to pass arguments to this module. + +Schema definition +----------------- + +If your module introduces any new cloud-config keys, you must provide a schema +definition in `cloud-init-schema.json`_. + +- The ``meta`` variable must exist and be of type `MetaSchema`_. + + - ``id``: The module ID. In most cases this will be the filename without + the ``.py`` extension. + - ``distros``: Defines the list of supported distros. It can contain + any of the values (not keys) defined in the `OSFAMILIES`_ map or + ``[ALL_DISTROS]`` if there is no distro restriction. + - ``frequency``: Defines how often the module runs. It must be one of: + + - ``PER_ALWAYS``: Runs on every boot. + - ``ONCE``: Runs only on first boot. + - ``PER_INSTANCE``: Runs once per instance. When exactly this happens + is dependent on the datasource, but may be triggered any time there + would be a significant change to the instance metadata. An example + could be an instance being moved to a different subnet. + + - ``activate_by_schema_keys``: Optional list of cloud-config keys that will + activate this module. When this list is not empty, the config module will be + skipped unless one of the ``activate_by_schema_keys`` is present in merged + cloud-config instance-data. + +Example module.py file +====================== .. code-block:: python @@ -18,94 +70,87 @@ Example import logging from cloudinit.cloud import Cloud from cloudinit.config import Config - from cloudinit.config.schema import MetaSchema, get_meta_doc + from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE - MODULE_DESCRIPTION = """\ - Description that will be used in module documentation. - - This will likely take multiple lines. - """ - LOG = logging.getLogger(__name__) meta: MetaSchema = { "id": "cc_example", - "name": "Example Module", - "title": "Shows how to create a module", - "description": MODULE_DESCRIPTION, "distros": [ALL_DISTROS], "frequency": PER_INSTANCE, "activate_by_schema_keys": ["example_key", "example_other_key"], - "examples": [ - "example_key: example_value", - "example_other_key: ['value', 2]", - ], - } - - __doc__ = get_meta_doc(meta) - + } # type: ignore def handle( name: str, cfg: Config, cloud: Cloud, args: list ) -> None: LOG.debug(f"Hi from module {name}") -.. _module_creation-Guidelines: +Module documentation +==================== -Guidelines -========== - -* Create a new module in the :file:`cloudinit/config` directory with a ``cc_`` - prefix. -* Your module must include a ``handle`` function. The arguments are: - - * ``name``: The module name specified in the configuration. - * ``cfg``: A configuration object that is the result of the merging of - cloud-config configuration with any datasource-provided configuration.
- * ``cloud``: A cloud object that can be used to access various datasource - and paths for the given distro and data provided by the various datasource - instance types. - * ``args``: An argument list. This is usually empty and is only populated - if the module is called independently from the command line or if the - module definition in :file:`/etc/cloud/cloud.cfg[.d]` has been modified - to pass arguments to this module. - -* If your module introduces any new cloud-config keys, you must provide a - schema definition in `cloud-init-schema.json`_. -* The ``meta`` variable must exist and be of type `MetaSchema`_. - - * ``id``: The module ID. In most cases this will be the filename without - the ``.py`` extension. - * ``distros``: Defines the list of supported distros. It can contain - any of the values (not keys) defined in the `OSFAMILIES`_ map or - ``[ALL_DISTROS]`` if there is no distro restriction. - * ``frequency``: Defines how often module runs. It must be one of: +Every module has a folder in the ``doc/module-docs/`` directory, containing +a ``data.yaml`` file and one or more ``example*.yaml`` files. - * ``PER_ALWAYS``: Runs on every boot. - * ``ONCE``: Runs only on first boot. - * ``PER_INSTANCE``: Runs once per instance. When exactly this happens - is dependent on the datasource, but may triggered any time there - would be a significant change to the instance metadata. An example - could be an instance being moved to a different subnet. +- The ``data.yaml`` file contains most of the documentation fields. At a + minimum, your module should provide this file. Examples are not + strictly required, but are helpful to readers of the documentation, so it is + preferred to include at least one example. +- The ``example*.yaml`` files are illustrative demonstrations of using the + module, but should be self-contained and in correctly-formatted YAML. These + will be automatically tested against the defined schema. - * ``activate_by_schema_keys``: Optional list of cloud-config keys that will - activate this module. When this list not empty, the config module will be - skipped unless one of the ``activate_by_schema_keys`` are present in merged - cloud-config instance-data. - * ``examples``: Lists examples of any cloud-config keys this module reacts - to. These examples will be rendered in the module reference documentation - and will automatically be tested against the defined schema - during testing. +Example data.yaml file +---------------------- + +.. code-block:: yaml + + cc_module_name: + description: > + This module provides some functionality, which you can describe here. + + For straightforward text examples, use a greater-than (``>``) symbol + next to ``description: `` to ensure proper rendering in the + documentation. Empty lines will be respected, but line-breaks are + folded together to create proper paragraphs. + + If you need to use call-outs or code blocks, use a pipe (``|``) symbol + instead of ``>`` so that reStructuredText formatting (e.g. for + directives, which take varying levels of indentation) is respected. + examples: + - comment: | + Example 1: (optional) description of the expected behavior of the example + file: cc_module_name/example1.yaml + - comment: | + Example 2: (optional) description of a second example.
+ file: cc_module_name/example2.yaml + name: Module Name + title: Very brief (1 sentence) tag line describing what your module does + +Rendering the module docs +------------------------- -* ``__doc__ = get_meta_doc(meta)`` is necessary to provide proper module - documentation. +The module documentation is auto-generated via the +:file:`doc/rtd/reference/modules.rst` file. + +For your module documentation to be shown in the cloud-init docs, you will +need to add an entry to this page. Modules are listed in alphabetical order. +The entry should be in the following reStructuredText format: + +.. code-block:: text + + .. datatemplate:yaml:: ../../module-docs/cc_ansible/data.yaml + :template: modules.tmpl + +The template pulls information from both your ``module.py`` file and from its +corresponding entry in the ``module-docs`` directory. Module execution ================ -In order for a module to be run, it must be defined in a module run section in +For a module to be run, it must be defined in a module run section in :file:`/etc/cloud/cloud.cfg` or :file:`/etc/cloud/cloud.cfg.d` on the launched instance. The three module sections are `cloud_init_modules`_, `cloud_config_modules`_, and `cloud_final_modules`_, @@ -119,7 +164,6 @@ dependencies or is not necessary for a later boot stage, it should be placed in the ``cloud_final_modules`` section before the ``final-message`` module. - .. _MetaSchema: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/config/schema.py#L58 .. _OSFAMILIES: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/distros/__init__.py#L35 .. _settings.py: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/settings.py#L66 diff --git a/doc/rtd/explanation/configuration.rst b/doc/rtd/explanation/configuration.rst index 456ded972..4f4c43b69 100644 --- a/doc/rtd/explanation/configuration.rst +++ b/doc/rtd/explanation/configuration.rst @@ -11,17 +11,28 @@ higher priority source overwrites the lower priority source. Base configuration ================== +The base configuration format uses `YAML version 1.1`_, but may be +declared as jinja templates which cloud-init will render at runtime with +:ref:`instance data ` variables. + From lowest priority to highest, configuration sources are: -- **Hardcoded config**: Config_ that lives within the source of ``cloud-init`` +- **Hardcoded config**: Config_ that lives within the source of ``cloud-init`` and cannot be changed. - **Configuration directory**: Anything defined in :file:`/etc/cloud/cloud.cfg` - and :file:`/etc/cloud/cloud.cfg.d`. + and :file:`/etc/cloud/cloud.cfg.d/*.cfg`. - **Runtime config**: Anything defined in :file:`/run/cloud-init/cloud.cfg`. - **Kernel command line**: On the kernel command line, anything found between ``cc:`` and ``end_cc`` will be interpreted as cloud-config user data. -These four sources make up the base configuration. +These four sources make up the base configuration. The contents of this +configuration are defined in the +:ref:`base configuration reference page`. + +.. note:: + Base configuration may contain + :ref:`cloud-config` which may be + overridden by vendor data and user data. Vendor and user data ==================== @@ -31,11 +42,6 @@ Added to the base configuration are :ref:`vendor data` and These get fetched from the datasource and are defined at instance launch. -..
note:: - While much of what is defined in the base configuration can be overridden by - vendor data and user data, base configuration sources do not conform to - :ref:`#cloud-config`. - Network configuration ===================== @@ -79,3 +85,4 @@ images. .. _Config: https://github.com/canonical/cloud-init/blob/b861ea8a5e1fd0eb33096f60f54eeff42d80d3bd/cloudinit/settings.py#L22 .. _cloud.cfg template: https://github.com/canonical/cloud-init/blob/main/config/cloud.cfg.tmpl +.. _YAML version 1.1: https://yaml.org/spec/1.1/current.html diff --git a/doc/rtd/explanation/failure_states.rst b/doc/rtd/explanation/failure_states.rst index 0f1680c52..21a1c4422 100644 --- a/doc/rtd/explanation/failure_states.rst +++ b/doc/rtd/explanation/failure_states.rst @@ -46,6 +46,8 @@ module-level keys: ``init-local``, ``init``, ``modules-config``, See :ref:`this more detailed explanation` to learn how to use cloud-init's exported errors. +.. _error_codes: + Cloud-init error codes ---------------------- diff --git a/doc/rtd/explanation/format.rst b/doc/rtd/explanation/format.rst index 26bfe6657..c1eda9006 100644 --- a/doc/rtd/explanation/format.rst +++ b/doc/rtd/explanation/format.rst @@ -3,29 +3,27 @@ User data formats ***************** -User data that will be acted upon by ``cloud-init`` must be in one of the -following types. +User data is opaque configuration data provided by a platform to an instance at +launch to configure the instance. User data can be one of the following types. .. _user_data_formats-cloud_config: Cloud config data ================= -Cloud-config is the simplest way to accomplish some things via user data. -Using cloud-config syntax, the user can specify certain things in a -human-friendly format. +Cloud-config is the preferred user data format. The cloud config format is a +declarative syntax which uses `YAML version 1.1`_ with keys that describe +desired instance state. Cloud-config can be used to define how an instance +should be configured in a human-friendly format. -These things include: +These things may include: -- ``apt upgrade`` should be run on first boot -- a different ``apt`` mirror should be used -- additional ``apt`` sources should be added -- certain SSH keys should be imported +- performing package upgrades on first boot +- configuration of different package mirrors or sources +- initial user or group setup +- importing certain SSH keys or host keys + - *and many more...* -.. note:: - This file must be valid YAML syntax. - See the :ref:`yaml_examples` section for a commented set of examples of supported cloud config formats. @@ -34,7 +32,7 @@ using a MIME archive. .. note:: Cloud config data can also render cloud instance metadata variables using - jinja templating. See :ref:`instance_metadata` for more information. + :ref:`jinja templates `. .. _user_data_script: Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME archive. User data scripts can optionally render cloud instance metadata variables using -jinja templating. See :ref:`instance_metadata` for more information. +:ref:`jinja templates `. Example script -------------- @@ -70,7 +68,8 @@ Kernel command line When using the NoCloud datasource, users can pass user data via the kernel command line parameters. See the :ref:`NoCloud datasource` -and :ref:`kernel_cmdline` documentation for more details. +and :ref:`explanation/kernel-command-line:Kernel command line` documentation +for more details. Gzip compressed content ======================= @@ -221,5 +220,6 @@ appliances.
Setting ``allow_userdata: false`` in the configuration will prevent ``cloud-init`` from processing user data. .. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py +.. _YAML version 1.1: https://yaml.org/spec/1.1/current.html .. [#] See your cloud provider for applicable user-data size limitations... .. _this blog post: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html diff --git a/doc/rtd/explanation/index.rst b/doc/rtd/explanation/index.rst index 0dd248c6d..503c7098a 100644 --- a/doc/rtd/explanation/index.rst +++ b/doc/rtd/explanation/index.rst @@ -19,6 +19,7 @@ knowledge and become better at using and configuring ``cloud-init``. vendordata.rst security.rst analyze.rst - kernel-cmdline.rst + kernel-command-line.rst failure_states.rst exported_errors.rst + return_codes.rst diff --git a/doc/rtd/explanation/instancedata.rst b/doc/rtd/explanation/instancedata.rst index 373319582..650efa794 100644 --- a/doc/rtd/explanation/instancedata.rst +++ b/doc/rtd/explanation/instancedata.rst @@ -8,7 +8,7 @@ Instance metadata :maxdepth: 1 :hidden: - kernel-cmdline.rst + kernel-command-line.rst What is ``instance-data?`` ========================== diff --git a/doc/rtd/explanation/kernel-cmdline.rst b/doc/rtd/explanation/kernel-command-line.rst similarity index 99% rename from doc/rtd/explanation/kernel-cmdline.rst rename to doc/rtd/explanation/kernel-command-line.rst index a252aeebf..501812b8c 100644 --- a/doc/rtd/explanation/kernel-cmdline.rst +++ b/doc/rtd/explanation/kernel-command-line.rst @@ -1,5 +1,3 @@ -.. _kernel_cmdline: - Kernel command line ******************* diff --git a/doc/rtd/explanation/return_codes.rst b/doc/rtd/explanation/return_codes.rst new file mode 100644 index 000000000..31a0f40fb --- /dev/null +++ b/doc/rtd/explanation/return_codes.rst @@ -0,0 +1,150 @@ +.. _return_codes: + +Why did ``cloud-init status`` start returning exit code 2? +=========================================================== + +Cloud-init introduced :ref:`a new error code` +in 23.4. This page describes the purpose of this change and +gives some context for why this change was made. + +.. _return_codes_history: + +Background +---------- + +Since cloud-init provides access to cloud instances, the +paradigm for handling errors was "log errors, but proceed". +Exiting on failure conditions doesn't make sense when that +may prevent one from accessing the system to debug it. + +Since cloud-init's behavior is heavily tied to specific cloud +platforms, reproducing cloud-init bugs without exactly +reproducing a specific cloud environment is often impossible, +and often requires guesswork. To make debugging cloud-init +possible without an exact reproduction, cloud-init logs are +quite verbose. + +.. _return_codes_pain_points: + +Pain points +----------- + +1) Invalid configurations were historically ignored. +2) Log verbosity is unfriendly to end users who may not know + what to look for. Verbose logs mean users often ignore real + errors. +3) Cloud-init's reported status was only capable of telling the user + whether cloud-init crashed. Cloud-init would report a status of + "done" in the following cases: + + * a user's configuration was invalid + * the operating system or cloud environment experienced some error that + prevented cloud-init from configuring the instance + * cloud-init internally experienced an error + +..
_return_codes_improvements: + +Efforts to improve cloud-init +----------------------------- + +Several changes have been introduced to cloud-init to address the pain +points described above. + +JSON schema +^^^^^^^^^^^ + +Cloud-init has defined a JSON schema which fully documents the user-data +cloud-config. This JSON schema may be used in several different ways: + +Text editor integration +""""""""""""""""""""""" + +Thanks to `yaml-language-server`_, cloud-init's JSON schema may be +used for YAML syntax checking, warnings when invalid keys are used, and +autocompletion. Several different text editors are capable of this. +See this `blog post on configuring this for neovim`_, or for VS Code one +can install the `extension`_, and then a file named ``cloud-config.yaml`` +will automatically use cloud-init's JSON schema. + + +Cloud-init schema subcommand +"""""""""""""""""""""""""""" + +The cloud-init package includes a cloud-init subcommand, +:ref:`cloud-init schema`, which uses the schema +to validate either the configuration passed to the instance you are +running the command on, or an arbitrary text file containing a +configuration. + +Return codes +^^^^^^^^^^^^ + +Cloud-init historically used two return codes from the +:code:`cloud-init status` subcommand: 0 to indicate success and 1 to indicate +failure. These return codes lacked nuance. Return code 0 (success) also +covered the in-between case in which something went wrong, but cloud-init was +able to finish. + +Many users of cloud-init run :code:`cloud-init status --wait` and expect that +when complete, cloud-init has finished. Since cloud-init is not guaranteed to +succeed, users should also check the return code of this command. + +As of 23.4, errors that do not crash cloud-init will have an exit code of 2. +An exit code of 1 means that cloud-init crashed, and an exit code of 0 now more +accurately means that cloud-init succeeded. Anyone who previously checked for +exit code 0 should probably update their assumptions in one of the following +two ways: + +Users who wish to take advantage of cloud-init's error reporting +capabilities should check for an exit code of 2 from :code:`cloud-init status`. +An example of this: + +.. code-block:: python + + from logging import getLogger + from json import loads + from subprocess import run + from sys import exit + + logger = getLogger(__name__) + # capture stdout so that the JSON status output can be parsed + completed = run( + ["cloud-init", "status", "--format", "json"], + capture_output=True, + text=True, + ) + output = loads(completed.stdout) + + if 2 == completed.returncode: + # something bad might have happened - we should check it out + logger.warning("cloud-init experienced a recoverable error") + logger.warning("status: %s", output.get("extended_status")) + logger.warning("recoverable error: %s", output.get("recoverable_errors")) + + elif 1 == completed.returncode: + # cloud-init completely failed + logger.error("cloud-init crashed, all bets are off!") + exit(1) + +Users who wish to ignore cloud-init's errors and check the return code in +a backwards-compatible way should check that the return code is not equal to +1. This will provide the same behavior before and after the changed exit code. +See an example of this: + +..
code-block:: python + + from logging import getLogger + from subprocess import run + from sys import exit + + logger = getLogger(__name__) + completed = run(["cloud-init", "status", "--format", "json"]) + + if 1 == completed.returncode: + # cloud-init completely failed + logger.error("cloud-init crashed, all bets are off!") + exit(1) + + # cloud-init might have failed, but this code ignores that possibility + # in preference of backwards compatibility + +See :ref:`our explanation of failure states` for more +information. + +.. _yaml-language-server: https://github.com/redhat-developer/yaml-language-server +.. _extension: https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml +.. _blog post on configuring this for neovim: https://phoenix-labs.xyz/blog/setup-neovim-cloud-init-completion/ diff --git a/doc/rtd/howto/debugging.rst b/doc/rtd/howto/debugging.rst index ed510f3bc..c8b2a2634 100644 --- a/doc/rtd/howto/debugging.rst +++ b/doc/rtd/howto/debugging.rst @@ -108,7 +108,7 @@ system, or the result of a user configuration. External reasons ---------------- -- Failed dependent services in the boot. +- Other services failed or are stuck. - Bugs in the kernel or drivers. - Bugs in external userspace tools that are called by ``cloud-init``. @@ -142,16 +142,28 @@ To start debugging See :ref:`our guide on exported errors` for more information on these exported errors. -4. Identify which cloud-init :ref:`boot stage` is currently - running: +4. Inspect running services in the current :ref:`boot stage`: .. code-block:: - systemctl status cloud-init-local.service cloud-init.service\ - cloud-config.service cloud-final.service + $ systemctl list-jobs --after + JOB UNIT TYPE STATE + 150 cloud-final.service start waiting + └─ waiting for job 147 (cloud-init.target/start) - - + 155 blocking-daemon.service start running + └─ waiting for job 150 (cloud-final.service/start) - - + 147 cloud-init.target start waiting - Cloud-init may have started to run, but not completed. This shows how many, - and which, cloud-init stages completed. + 3 jobs listed. + + + In the above example we can see that ``cloud-final.service`` is + waiting and is ordered before ``cloud-init.target``, and that + ``blocking-daemon.service`` is currently running and is ordered + before ``cloud-final.service``. From this output, we deduce that cloud-init + is not complete because the service named ``blocking-daemon.service`` hasn't + yet completed, and that we should investigate ``blocking-daemon.service`` + to understand why it is still running. 5. Use the PID of the running service to find all running subprocesses. Any running process that was spawned by cloud-init may be blocking diff --git a/doc/rtd/howto/disable_cloud_init.rst b/doc/rtd/howto/disable_cloud_init.rst index 721e20c2c..686e48a88 100644 --- a/doc/rtd/howto/disable_cloud_init.rst +++ b/doc/rtd/howto/disable_cloud_init.rst @@ -22,10 +22,10 @@ Example: $ touch /etc/cloud/cloud-init.disabled -Method 2: kernel commandline -============================ +Method 2: kernel command line +============================= -To disable cloud-init, add ``cloud-init=disabled`` to the kernel commandline. +To disable cloud-init, add ``cloud-init=disabled`` to the kernel command line.
Example (using GRUB2 with Ubuntu): diff --git a/doc/rtd/howto/rerun_cloud_init.rst b/doc/rtd/howto/rerun_cloud_init.rst index b4e593f79..b7adb30ff 100644 --- a/doc/rtd/howto/rerun_cloud_init.rst +++ b/doc/rtd/howto/rerun_cloud_init.rst @@ -23,8 +23,8 @@ Remove the logs and cache, then reboot -------------------------------------- This method will reboot the system as if cloud-init never ran. This -command does not remove all cloud-init artefacts from previous runs of -cloud-init, but it will clean enough artefacts to allow cloud-init to +command does not remove all cloud-init artifacts from previous runs of +cloud-init, but it will clean enough artifacts to allow cloud-init to think that it hasn't run yet. It will then re-run after a reboot. .. code-block:: shell-session diff --git a/doc/rtd/howto/status.rst b/doc/rtd/howto/status.rst index d6cdbe092..b39a55d3b 100644 --- a/doc/rtd/howto/status.rst +++ b/doc/rtd/howto/status.rst @@ -107,11 +107,11 @@ contain any of the following states: which prevents cloud-init from ever running - ``'disabled-by-generator'``: ``ds-identify`` determined no applicable cloud-init datasources -- ``'disabled-by-kernel-cmdline'``: kernel command line contained +- ``'disabled-by-kernel-command-line'``: kernel command line contained cloud-init=disabled - ``'disabled-by-environment-variable'``: environment variable ``KERNEL_CMDLINE`` contained ``cloud-init=disabled`` -- ``'enabled-by-kernel-cmdline'``: kernel command line contained +- ``'enabled-by-kernel-command-line'``: kernel command line contained cloud-init=enabled - ``'enabled-by-generator'``: ``ds-identify`` detected possible cloud-init datasources diff --git a/doc/rtd/howto/ubuntu_test_prerelease.rst b/doc/rtd/howto/ubuntu_test_prerelease.rst index 572384f9d..cc685a843 100644 --- a/doc/rtd/howto/ubuntu_test_prerelease.rst +++ b/doc/rtd/howto/ubuntu_test_prerelease.rst @@ -49,7 +49,7 @@ Do this to avoid unintentionally installing other unreleased packages. rm -f /etc/apt/sources.list.d/proposed.list apt update -Remove artefacts and reboot +Remove artifacts and reboot --------------------------- This will cause cloud-init to rerun as if it is a first boot. diff --git a/doc/rtd/reference/base_config_reference.rst b/doc/rtd/reference/base_config_reference.rst index 5d291aac0..9686d456d 100644 --- a/doc/rtd/reference/base_config_reference.rst +++ b/doc/rtd/reference/base_config_reference.rst @@ -116,11 +116,11 @@ Both keys will be processed independently. - ``distro``: Name of distro being used. - ``default_user``: Defines the default user for the system using the same - user configuration as :ref:`Users and Groups`. Note that - this CAN be overridden if a ``users`` configuration + user configuration as :ref:`Users and Groups`. Note + that this CAN be overridden if a ``users`` configuration is specified without a ``- default`` entry. - ``ntp_client``: The default NTP client for the distro. Takes the same - form as ``ntp_client`` defined in :ref:`NTP`. + form as ``ntp_client`` defined in :ref:`NTP`. - ``package_mirrors``: Defines the package mirror info for apt. - ``ssh_svcname``: The SSH service name. For most distros this will be either ``ssh`` or ``sshd``. diff --git a/doc/rtd/reference/breaking_changes.rst b/doc/rtd/reference/breaking_changes.rst new file mode 100644 index 000000000..0df6fcfde --- /dev/null +++ b/doc/rtd/reference/breaking_changes.rst @@ -0,0 +1,98 @@ +.. 
_breaking_changes: + +Breaking changes +**************** + +This section provides guidance on breaking changes in specific cloud-init +releases. + +.. note:: + These changes may not be present in all distributions of cloud-init, as + many operating system vendors patch out breaking changes in + cloud-init to ensure consistent behavior on their platform. + +24.1 +==== + +Removal of ``--file`` top-level option +-------------------------------------- + +The ``--file`` top-level option has been removed from cloud-init. It only +applied to a handful of subcommands, so it did not make sense as a top-level +option. Instead, ``--file`` may be passed to a subcommand that supports it. +For example, the following command will no longer work: + +.. code-block:: bash + + cloud-init --file=userdata.yaml modules --mode config + +Instead, use: + +.. code-block:: bash + + cloud-init modules --file=userdata.yaml --mode config + + +Removed Ubuntu's ordering dependency on snapd.seeded +---------------------------------------------------- + +In Ubuntu releases, cloud-init will no longer wait on ``snapd`` pre-seeding to +run. If a user-provided script relies on a snap, it must now be prefixed with +``snap wait system seed.loaded`` to ensure the snaps are ready for use. For +example, a cloud config that previously included: + +.. code-block:: yaml + + runcmd: + - [ snap, install, mc-installer ] + + +will now need to be: + +.. code-block:: yaml + + runcmd: + - [ snap, wait, system, seed.loaded ] + - [ snap, install, mc-installer ] + + +23.2-24.1 - Datasource identification +===================================== + +**23.2** + If the detected ``datasource_list`` contains a single datasource or + that datasource plus ``None``, automatically use that datasource without + checking to see if it is available. This allows for using datasources that + don't have a way to be deterministically detected. +**23.4** + If the detected ``datasource_list`` contains a single datasource plus + ``None``, no longer automatically use that datasource because ``None`` is + a valid datasource that may be used if the primary datasource is + not available. +**24.1** + ds-identify no longer automatically appends ``None`` to a + datasource list with a single entry provided under ``/etc/cloud``. + If ``None`` is desired as a fallback, it must be explicitly added to the + customized datasource list. + +23.4 - Added status code for recoverable error +============================================== + +Cloud-init return codes have been extended with a new error code (2), +which will be returned when cloud-init experiences an error that it can +recover from. See :ref:`this page which documents the change `. + + +23.2 - kernel command line +========================== + +The ``ds=`` kernel command line value is used to forcibly select a specific +datasource in cloud-init. Prior to 23.2, this only optionally selected +the ``NoCloud`` datasource. + +Anyone who previously had a matching ``ds=nocloud*`` in their kernel command +line that did not want to use the ``NoCloud`` datasource may experience broken +behavior as a result of this change. + +Workarounds include updating the kernel command line and optionally configuring +a ``datasource_list`` in ``/etc/cloud/cloud.cfg.d/*.cfg``.
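As a hedged sketch of that workaround — the filename and the ``Ec2`` choice are illustrative assumptions, not recommendations:

.. code-block:: yaml

   # /etc/cloud/cloud.cfg.d/90-datasource.cfg (hypothetical drop-in)
   # Pin the datasource explicitly; per the 24.1 note above, None must
   # now be listed explicitly if it is wanted as a fallback.
   datasource_list: [ Ec2, None ]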
diff --git a/doc/rtd/reference/cli.rst b/doc/rtd/reference/cli.rst index 2488df520..0a6bc55ff 100644 --- a/doc/rtd/reference/cli.rst +++ b/doc/rtd/reference/cli.rst @@ -212,7 +212,7 @@ Example output: Generally run by OS init systems to execute ``cloud-init``'s stages: *init* and *init-local*. See :ref:`boot_stages` for more info. -Can be run on the commandline, but is generally gated to run only once +Can be run on the command line, but is generally gated to run only once due to semaphores in :file:`/var/lib/cloud/instance/sem/` and :file:`/var/lib/cloud/sem`. @@ -242,6 +242,10 @@ to semaphores in :file:`/var/lib/cloud/`. See :ref:`boot_stages` for more info. * :command:`--file` : Use additional yaml configuration files. +.. warning:: + `--mode init` is deprecated in 24.1 and scheduled to be removed in 29.1. + Use :command:`cloud-init init` instead. + .. _cli_query: :command:`query` @@ -400,12 +404,14 @@ module default frequency of ``instance``: :command:`status` ================= -Report whether ``cloud-init`` is running, done, disabled or errored. Exits -non-zero if an error is detected in ``cloud-init``. +Report cloud-init's current status. + +Exits 1 if ``cloud-init`` crashes, 2 if ``cloud-init`` finishes but experienced +recoverable errors, and 0 if ``cloud-init`` ran without error. * :command:`--long`: Detailed status information. * :command:`--wait`: Block until ``cloud-init`` completes. -* :command:`--format [yaml|json|tabular]`: Machine-readable JSON or YAML +* :command:`--format [yaml|json]`: Machine-readable JSON or YAML detailed output. The :command:`status` command can be used simply as follows: @@ -415,7 +421,8 @@ The :command:`status` command can be used simply as follows: $ cloud-init status Which shows whether ``cloud-init`` is currently running, done, disabled, or in -error, as in this example output: +error. Note that the ``extended_status`` key in ``--long`` or ``--format json`` +contains more accurate and complete status information. Example output: .. code-block:: @@ -432,19 +439,24 @@ Example output when ``cloud-init`` is running: .. code-block:: status: running - time: Fri, 26 Jan 2018 21:39:43 +0000 - detail: - Running in stage: init-local + extended_status: running + boot_status_code: enabled-by-generator + last_update: Wed, 13 Mar 2024 18:46:26 +0000 + detail: DataSourceLXD + errors: [] + recoverable_errors: {} Example output when ``cloud-init`` is done: .. code-block:: status: done + extended_status: done boot_status_code: enabled-by-generator - last_update: Tue, 16 Aug 2022 19:12:58 +0000 - detail: - DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net] + last_update: Wed, 13 Mar 2024 18:46:26 +0000 + detail: DataSourceLXD + errors: [] + recoverable_errors: {} The detailed output can be shown in machine-readable JSON or YAML with the :command:`format` option, for example: @@ -457,13 +469,40 @@ Which would produce the following example output: .. 
code-block::

-      {
-        "boot_status_code": "enabled-by-generator",
-        "datasource": "nocloud",
-        "detail": "DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]",
-        "errors": [],
-        "last_update": "Tue, 16 Aug 2022 19:12:58 +0000",
-        "status": "done"
-      }
+      {
+        "boot_status_code": "enabled-by-generator",
+        "datasource": "lxd",
+        "detail": "DataSourceLXD",
+        "errors": [],
+        "extended_status": "done",
+        "init": {
+          "errors": [],
+          "finished": 1710355584.3603137,
+          "recoverable_errors": {},
+          "start": 1710355584.2216876
+        },
+        "init-local": {
+          "errors": [],
+          "finished": 1710355582.279756,
+          "recoverable_errors": {},
+          "start": 1710355582.2255273
+        },
+        "last_update": "Wed, 13 Mar 2024 18:46:26 +0000",
+        "modules-config": {
+          "errors": [],
+          "finished": 1710355585.5042186,
+          "recoverable_errors": {},
+          "start": 1710355585.334438
+        },
+        "modules-final": {
+          "errors": [],
+          "finished": 1710355586.9038777,
+          "recoverable_errors": {},
+          "start": 1710355586.8076844
+        },
+        "recoverable_errors": {},
+        "stage": null,
+        "status": "done"
+      }
 
 .. _More details on machine-id: https://www.freedesktop.org/software/systemd/man/machine-id.html
diff --git a/doc/rtd/reference/datasource_dsname_map.rst b/doc/rtd/reference/datasource_dsname_map.rst
index 552a7f9de..3861ec349 100644
--- a/doc/rtd/reference/datasource_dsname_map.rst
+++ b/doc/rtd/reference/datasource_dsname_map.rst
@@ -4,7 +4,7 @@ Datasource dsname
 *****************
 
 Each datasource has an attribute called dsname. This may be used in the
-kernel commandline to
+kernel command line to
 :ref:`override datasource detection`.
 The ``dsname`` on the kernel command line may be a case-insensitive match. See
 the mapping between datasource module names and ``dsname`` in the table below.
diff --git a/doc/rtd/reference/datasources.rst b/doc/rtd/reference/datasources.rst
index 9c3996bb2..9826fa563 100644
--- a/doc/rtd/reference/datasources.rst
+++ b/doc/rtd/reference/datasources.rst
@@ -26,7 +26,7 @@ There are exceptions, however, when the
 :ref:`datasource does not identify` itself to ``cloud-init``. For these
 exceptions, one can override datasource detection either by configuring a
 single datasource in the
 :ref:`datasource_list`,
-or by using :ref:`kernel commandline arguments`.
+or by using :ref:`kernel command line arguments`.
 
 .. _datasources_supported:
 
@@ -54,6 +54,7 @@ The following is a list of documentation for each supported datasource:
    datasources/lxd.rst
    datasources/maas.rst
    datasources/nocloud.rst
+   datasources/none.rst
    datasources/nwcs.rst
    datasources/opennebula.rst
    datasources/openstack.rst
@@ -67,4 +68,3 @@ The following is a list of documentation for each supported datasource:
    datasources/vultr.rst
    datasources/wsl.rst
    datasources/zstack.rst
-
diff --git a/doc/rtd/reference/datasources/azure.rst b/doc/rtd/reference/datasources/azure.rst
index 1907e4b5d..8cab989f3 100644
--- a/doc/rtd/reference/datasources/azure.rst
+++ b/doc/rtd/reference/datasources/azure.rst
@@ -52,7 +52,7 @@ The settings that may be configured are:
 * :command:`disk_aliases`
 
   A dictionary defining which device paths should be interpreted as ephemeral
-  images. See :ref:`cc_disk_setup <mod-disk_setup>` module for more info.
+  images. See :ref:`cc_disk_setup <mod_cc_disk_setup>` module for more info.
 
 Configuration for the datasource can also be read from a ``dscfg`` entry
 in the ``LinuxProvisioningConfigurationSet``.
Content in ``dscfg`` node is
diff --git a/doc/rtd/reference/datasources/nocloud.rst b/doc/rtd/reference/datasources/nocloud.rst
index 498698670..3033869f6 100644
--- a/doc/rtd/reference/datasources/nocloud.rst
+++ b/doc/rtd/reference/datasources/nocloud.rst
@@ -4,93 +4,163 @@ NoCloud
 *******
 
 The data source ``NoCloud`` is a flexible datasource that can be used in
-multiple different ways. With NoCloud, the user can provide user data and
-metadata to the instance without running a network service (or even without
-having a network at all). Alternatively, one may use a custom webserver to
-provide configurations.
+multiple different ways. With NoCloud, one can provide configurations to
+the instance without running a network service (or even without having a
+network at all). Alternatively, one can use HTTP/HTTPS or FTP/FTPS to provide
+a configuration.
 
 Configuration Methods:
 ======================
 
-Method 1: Local filesystem, labeled filesystem
-----------------------------------------------
+.. warning::
+    User data placed under ``/etc/cloud/`` will **not** be recognized as a
+    source of configuration data by the NoCloud datasource. While it may
+    be acted upon by cloud-init, using
+    :ref:`DataSourceNone <datasource_none>` should be preferred.
 
-To provide cloud-init configurations from the local filesystem, a labeled
-`vfat`_ or `iso9660`_ filesystem containing user data and metadata may
-be used. For this method to work, the filesystem volume must be labelled
-``CIDATA``.
+Method 1: Labeled filesystem
+----------------------------
 
-Method 2: Local filesystem, kernel commandline or SMBIOS
---------------------------------------------------------
+A labeled `vfat`_ or `iso9660`_ filesystem may be used. The filesystem volume
+must be labelled ``CIDATA``.
 
-Configuration files can be provided on the local filesystem without a label
-using kernel commandline arguments or SMBIOS serial number to tell cloud-init
-where on the filesystem to look.
 
-Alternatively, one can provide metadata via the kernel command line or SMBIOS
-"serial number" option. This argument might look like: ::
+Method 2: Custom webserver
+--------------------------
 
-    ds=nocloud;s=file://path/to/directory/;h=node-42
+Configuration files can be provided to cloud-init over HTTP(S). To tell
+cloud-init the URI to use, arguments must be passed to the instance via the
+kernel command line or SMBIOS serial number. This argument might look like: ::
 
-Method 3: Custom webserver: kernel commandline or SMBIOS
---------------------------------------------------------
+    ds=nocloud;s=https://10.42.42.42/cloud-init/configs/
 
-In a similar fashion, configuration files can be provided to cloud-init using a
-custom webserver at a URL dictated by kernel commandline arguments or SMBIOS
+.. note::
+    If using kernel command line arguments with GRUB, note that an
+    unescaped semicolon is interpreted as the end of a statement.
+    Consider using single-quotes to avoid this pitfall. See: `GRUB quoting`_ ::
+
+        ds=nocloud;s=http://10.42.42.42/cloud-init/configs/
+
+Alternatively, this URI may be defined in a configuration in a file
+:file:`/etc/cloud/cloud.cfg.d/*.cfg` like this: ::
+
+    datasource:
+      NoCloud:
+        seedfrom: https://10.42.42.42/cloud-init/configs/
+
+Method 3: FTP Server
+--------------------
+
+Configuration files can be provided to cloud-init over unsecured FTP
+or alternatively with FTP over TLS. To tell cloud-init the URL to use,
+arguments must be passed to the instance via the kernel command line or SMBIOS
+serial number. This argument might look like: ::
-    ds=nocloud;s=http://10.42.42.42/cloud-init/configs/
+    ds=nocloud;s=ftps://10.42.42.42/cloud-init/configs/
+
+Alternatively, this URI may be defined in a configuration in a file
+:file:`/etc/cloud/cloud.cfg.d/*.cfg` like this: ::
+
+    datasource:
+      NoCloud:
+        seedfrom: ftps://10.42.42.42/cloud-init/configs/
+
+Method 4: Local filesystem
+--------------------------
+
+Configuration files can be provided on the local filesystem at specific
+filesystem paths using kernel command line arguments or SMBIOS serial number
+to tell cloud-init where on the filesystem to look.
 
 .. note::
-   When supplementing kernel parameters in GRUB's boot menu take care to
-   single-quote this full value to avoid GRUB interpreting the semi-colon as
-   a reserved word. See: `GRUB quoting`_
+    Unless arbitrary filesystem paths are required, one might prefer to use
+    :ref:`DataSourceNone <datasource_none>`, since it does not require
+    modifying the kernel command line or SMBIOS.
+
+This argument might look like: ::
+
+    ds=nocloud;s=file://path/to/directory/
+
+Alternatively, this URI may be defined in a configuration in a file
+:file:`/etc/cloud/cloud.cfg.d/*.cfg` like this: ::
+
+    datasource:
+      NoCloud:
+        seedfrom: file://10.42.42.42/cloud-init/configs/
+
 
 Permitted keys
 ==============
 
-The permitted keys are:
+Currently three keys (and their aliases) are permitted for configuring
+cloud-init.
+
+The only required key is:
+
+* ``seedfrom`` alias: ``s``
+
+A valid ``seedfrom`` value consists of a URI which must contain a trailing
+``/``.
+
+Some optional keys may be used, but their use is discouraged and may
+be removed in the future.
+
+* ``local-hostname`` alias: ``h`` (:ref:`cloud-config`
+  preferred)
+* ``instance-id`` alias: ``i`` (set instance id in :file:`meta-data` instead)
+
+.. note::
 
-* ``h`` or ``local-hostname``
-* ``i`` or ``instance-id``
-* ``s`` or ``seedfrom``
+    The aliases ``s``, ``h`` and ``i`` are only supported by kernel
+    command line or SMBIOS. When configured in a ``*.cfg`` file, the long key
+    name is required.
 
-A valid ``seedfrom`` value consists of:
+Seedfrom: HTTP and HTTPS
+------------------------
 
-Filesystem
-----------
+The URI elements supported by NoCloud's HTTP and HTTPS implementations
+include: ::
 
-A filesystem path starting with ``/`` or ``file://`` that points to a
-directory containing files: ``user-data``, ``meta-data``, and (optionally)
-``vendor-data`` (a trailing ``/`` is required)
+    <scheme>://<host>/<path>/
 
-HTTP server
------------
+Where ``scheme`` can be ``http`` or ``https`` and ``host`` can be an IP
+address or DNS name.
 
-An ``http`` or ``https`` URL (a trailing ``/`` is required)
+Seedfrom: FTP and FTP over TLS
+------------------------------
 
+The URI elements supported by NoCloud's FTP and FTPS implementation
+include: ::
 
-File formats
-============
+    <scheme>://<userinfo>@<host>:<port>/<path>/
 
-These user data and metadata files are required as separate files at the
-same base URL: ::
+Where ``scheme`` can be ``ftp`` or ``ftps``, ``userinfo`` will be
+``username:password`` (default is ``anonymous`` with an empty password),
+``host`` can be an IP address or DNS name, and ``port`` is the network
+port to use (default is ``21``).
 
-    /user-data
-    /meta-data
+Seedfrom: Files
+---------------
 
-Both files must be present for it to be considered a valid seed ISO.
+The path pointed to by the URI can contain the following
+files:
 
-The ``user-data`` file uses :ref:`user data format` and
-``meta-data`` is a YAML-formatted file representing what you'd find in the EC2
-metadata service.
+``user-data`` (required)
+``meta-data`` (required)
+``vendor-data`` (optional)
+``network-config`` (optional)
 
-You may also optionally provide a vendor data file adhering to
-:ref:`user data formats` at the same base URL: ::
+If the seedfrom URI doesn't contain the required files, this datasource
+will be skipped.
 
-    /vendor-data
+The ``user-data`` file uses :ref:`user data format`. The
+``meta-data`` file is a YAML-formatted file.
+The ``vendor-data`` file adheres to
+:ref:`user data formats`. The ``network-config`` file
+follows cloud-init's :ref:`Network Configuration Formats`.
 
-DMI-specific kernel commandline
-===============================
+DMI-specific kernel command line
+================================
 
 Cloud-init performs variable expansion of the ``seedfrom`` URL for any DMI
 kernel variables present in :file:`/sys/class/dmi/id` (kenv on FreeBSD).
diff --git a/doc/rtd/reference/datasources/none.rst b/doc/rtd/reference/datasources/none.rst
new file mode 100644
index 000000000..c46472d22
--- /dev/null
+++ b/doc/rtd/reference/datasources/none.rst
@@ -0,0 +1,49 @@
+.. _datasource_none:
+
+None
+****
+
+The data source ``None`` may be used when no other viable datasource is
+present on disk. This has two primary use cases:
+
+1. Providing user data to cloud-init from on-disk configuration when
+   no other datasource is present.
+2. As a fallback for when a datasource is otherwise intermittently
+   unavailable.
+
+When the datasource is ``None``, cloud-init is unable to obtain or
+render networking configuration. Additionally, when cloud-init
+completes, a warning is logged that DataSourceNone is being used.
+
+Configuration
+=============
+
+User data and metadata may be passed to cloud-init via system
+configuration in :file:`/etc/cloud/cloud.cfg` or
+:file:`/etc/cloud/cloud.cfg.d/*.cfg`.
+
+``userdata_raw``
+----------------
+
+A **string** containing the user data (including header) to be used by
+cloud-init.
+
+``metadata``
+------------
+
+The metadata to be used by cloud-init.
+
+.. _datasource_none_example:
+
+Example configuration
+---------------------
+
+.. code-block:: yaml
+
+    datasource:
+      None:
+        metadata:
+          local-hostname: "myhost.internal"
+        userdata_raw: |
+          #cloud-config
+          runcmd:
+            - echo 'mydata' > /var/tmp/mydata.txt
diff --git a/doc/rtd/reference/datasources/openstack.rst b/doc/rtd/reference/datasources/openstack.rst
index 7bb52e4aa..7072c4ea1 100644
--- a/doc/rtd/reference/datasources/openstack.rst
+++ b/doc/rtd/reference/datasources/openstack.rst
@@ -148,7 +148,7 @@ Explicitly set ``datasource_list`` to only ``openstack``, such as:
 Method 2: Kernel command line
 -----------------------------
 
-Set the kernel commandline to configure
+Set the kernel command line to configure
 :ref:`datasource override <kernel_datasource_override>`.
 
 Example using Ubuntu + GRUB2:
diff --git a/doc/rtd/reference/datasources/wsl.rst b/doc/rtd/reference/datasources/wsl.rst
index dbbfd500d..ab96f9490 100644
--- a/doc/rtd/reference/datasources/wsl.rst
+++ b/doc/rtd/reference/datasources/wsl.rst
@@ -37,6 +37,8 @@ the datasource from working.
 For more information about how to configure WSL,
 `check the official documentation <https://learn.microsoft.com/en-us/windows/wsl/wsl-config>`_.
 
+.. _wsl_user_data_configuration:
+
 User data configuration
 ========================
 
@@ -49,7 +51,25 @@ User data can be supplied in any
 :ref:`format supported by cloud-init`, such as YAML cloud-config
 files or shell scripts. At runtime, the WSL datasource looks for user data
 in the following locations inside the Windows host filesystem, in the
-order specified below:
+order specified below.
+
+First, configurations from Ubuntu Pro/Landscape are checked for in the
+following paths:
+
+1. ``%USERPROFILE%\.ubuntupro\.cloud-init\<InstanceName>.user-data`` holds data
+   provided by Landscape to configure a specific WSL instance. If this file
+   is present, normal user-provided configurations are not looked for. This
+   file is merged with (2) on a per-module basis. If this file is not present,
+   then the first user-provided configuration will be used in its place.
+
+2. ``%USERPROFILE%\.ubuntupro\.cloud-init\agent.yaml`` holds data provided by
+   the Ubuntu Pro for WSL agent. If this file is present, its modules will be
+   merged with (1), overriding any conflicting modules. If (1) is not provided,
+   then this file will be merged with any valid user-provided configuration
+   instead.
+
+Then, if a file from (1) is not found, a user-provided configuration will be
+looked for instead in the following order:
 
 1. ``%USERPROFILE%\.cloud-init\<InstanceName>.user-data`` holds user data for a
    specific instance configuration. The datasource resolves the name attributed
@@ -82,7 +102,8 @@ Only the first match is loaded, and no config merging is done, even in the
 presence of errors. That avoids unexpected behaviour due to surprising merge
 scenarios. Also, notice that the file name casing is irrelevant since both the
 Windows file names, as well as the WSL distro names, are case-insensitive by
-default. If none are found, cloud-init remains disabled.
+default. If none are found, and no configurations from the previous steps
+were found either, cloud-init remains disabled.
 
 .. note::
     Some users may have configured case sensitivity for file names on Windows.
@@ -167,8 +188,9 @@ include file.
     While creating users through cloud-init works as in any other platform,
     WSL has the concept of the *default user*, which is the user logged in by
     default. So, to create the default user with cloud-init, one must supply
-    user data to the :ref:`Users and Groups module <mod-users_groups>` and write the
-    entry in ``/etc/wsl.conf`` to make that user the default. See the example:
+    user data to the :ref:`Users and Groups module <mod_cc_users_groups>` and write
+    the entry in ``/etc/wsl.conf`` to make that user the default. See the
+    example:
 
 .. code-block:: yaml
 
@@ -205,4 +227,3 @@ include file.
     WSL automatically generates those files by default, unless configured to
     behave otherwise in ``/etc/wsl.conf``. Overwriting may work, but only until
     the next reboot.
-
diff --git a/doc/rtd/reference/index.rst b/doc/rtd/reference/index.rst
index db64d4f6e..14e754b29 100644
--- a/doc/rtd/reference/index.rst
+++ b/doc/rtd/reference/index.rst
@@ -23,4 +23,5 @@ matrices and so on.
    datasource_dsname_map.rst
    performance_analysis.rst
    ubuntu_stable_release_updates.rst
+   breaking_changes.rst
    user_files.rst
diff --git a/doc/rtd/reference/modules.rst b/doc/rtd/reference/modules.rst
index f3de6d2d8..2a7d26d30 100644
--- a/doc/rtd/reference/modules.rst
+++ b/doc/rtd/reference/modules.rst
@@ -4,90 +4,128 @@ Module reference
 ****************
 
 Deprecation schedule and versions
----------------------------------
-Keys may be documented as ``deprecated``, ``new``, or ``changed``.
+=================================
+
+Keys can be documented as ``deprecated``, ``new``, or ``changed``.
 This allows cloud-init to evolve as requirements change, and to adopt
 better practices without maintaining design decisions indefinitely.
 
-Keys that have been marked as deprecated or changed may be removed or
-changed 5 years from the date of deprecation.
For example, a key that is -deprecated in version ``22.1`` (which is the first release in 2022) is -scheduled to be removed in ``27.1`` (first release in 2027). Use of -deprecated keys may cause warnings in the logs. In the case that a -key's expected value changes, the key will be marked ``changed`` with a -date. A 5 year timeline may also be expected for changed keys. - -.. automodule:: cloudinit.config.cc_ansible -.. automodule:: cloudinit.config.cc_apk_configure -.. automodule:: cloudinit.config.cc_apt_configure -.. automodule:: cloudinit.config.cc_apt_pipelining -.. automodule:: cloudinit.config.cc_bootcmd -.. automodule:: cloudinit.config.cc_byobu -.. automodule:: cloudinit.config.cc_ca_certs -.. automodule:: cloudinit.config.cc_chef -.. automodule:: cloudinit.config.cc_disable_ec2_metadata - -.. _mod-disk_setup: - -.. automodule:: cloudinit.config.cc_disk_setup -.. automodule:: cloudinit.config.cc_fan -.. automodule:: cloudinit.config.cc_final_message -.. automodule:: cloudinit.config.cc_growpart -.. automodule:: cloudinit.config.cc_grub_dpkg -.. automodule:: cloudinit.config.cc_install_hotplug -.. automodule:: cloudinit.config.cc_keyboard -.. automodule:: cloudinit.config.cc_keys_to_console -.. automodule:: cloudinit.config.cc_landscape -.. automodule:: cloudinit.config.cc_locale -.. automodule:: cloudinit.config.cc_lxd -.. automodule:: cloudinit.config.cc_mcollective -.. automodule:: cloudinit.config.cc_mounts - -.. _mod-ntp: - -.. automodule:: cloudinit.config.cc_ntp -.. automodule:: cloudinit.config.cc_package_update_upgrade_install -.. automodule:: cloudinit.config.cc_phone_home -.. automodule:: cloudinit.config.cc_power_state_change -.. automodule:: cloudinit.config.cc_puppet -.. automodule:: cloudinit.config.cc_resizefs -.. automodule:: cloudinit.config.cc_resolv_conf -.. automodule:: cloudinit.config.cc_rh_subscription - -.. _mod-rsyslog: - -.. automodule:: cloudinit.config.cc_rsyslog - -.. _mod-runcmd: - -.. automodule:: cloudinit.config.cc_runcmd -.. automodule:: cloudinit.config.cc_salt_minion -.. automodule:: cloudinit.config.cc_scripts_per_boot -.. automodule:: cloudinit.config.cc_scripts_per_instance -.. automodule:: cloudinit.config.cc_scripts_per_once -.. automodule:: cloudinit.config.cc_scripts_user -.. automodule:: cloudinit.config.cc_scripts_vendor -.. automodule:: cloudinit.config.cc_seed_random -.. automodule:: cloudinit.config.cc_set_hostname - -.. _mod-set_passwords: - -.. automodule:: cloudinit.config.cc_set_passwords -.. automodule:: cloudinit.config.cc_snap -.. automodule:: cloudinit.config.cc_spacewalk -.. automodule:: cloudinit.config.cc_ssh -.. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints -.. automodule:: cloudinit.config.cc_ssh_import_id -.. automodule:: cloudinit.config.cc_timezone -.. automodule:: cloudinit.config.cc_ubuntu_drivers -.. automodule:: cloudinit.config.cc_ubuntu_pro -.. automodule:: cloudinit.config.cc_update_etc_hosts -.. automodule:: cloudinit.config.cc_update_hostname - -.. _mod-users_groups: - -.. automodule:: cloudinit.config.cc_users_groups -.. automodule:: cloudinit.config.cc_wireguard -.. automodule:: cloudinit.config.cc_write_files -.. automodule:: cloudinit.config.cc_yum_add_repo -.. automodule:: cloudinit.config.cc_zypper_add_repo +Keys marked as ``deprecated`` or ``changed`` may be removed or changed 5 +years from the deprecation date. For example, if a key is deprecated in +version ``22.1`` (the first release in 2022) it is scheduled to be removed in +``27.1`` (first release in 2027). 
Use of deprecated keys may cause warnings in +the logs. If a key's expected value changes, the key will be marked +``changed`` with a date. A 5 year timeline also applies to changed keys. + +.. datatemplate:yaml:: ../../module-docs/cc_ansible/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_apk_configure/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_apt_configure/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_apt_pipelining/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_bootcmd/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_byobu/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_ca_certs/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_chef/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_disable_ec2_metadata/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_disk_setup/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_fan/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_final_message/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_growpart/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_grub_dpkg/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_install_hotplug/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_keyboard/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_keys_to_console/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_landscape/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_locale/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_lxd/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_mcollective/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_mounts/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_ntp/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_package_update_upgrade_install/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_phone_home/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_power_state_change/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_puppet/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_resizefs/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_resolv_conf/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_rh_subscription/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_rsyslog/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_runcmd/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_salt_minion/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_scripts_per_boot/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_scripts_per_instance/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_scripts_per_once/data.yaml + :template: modules.tmpl +.. 
datatemplate:yaml:: ../../module-docs/cc_scripts_user/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_scripts_vendor/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_seed_random/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_set_hostname/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_set_passwords/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_snap/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_spacewalk/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_ssh/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_ssh_authkey_fingerprints/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_ssh_import_id/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_timezone/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_ubuntu_drivers/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_ubuntu_pro/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_update_etc_hosts/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_update_hostname/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_users_groups/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_wireguard/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_write_files/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_yum_add_repo/data.yaml + :template: modules.tmpl +.. datatemplate:yaml:: ../../module-docs/cc_zypper_add_repo/data.yaml + :template: modules.tmpl diff --git a/doc/rtd/reference/network-config-format-v1.rst b/doc/rtd/reference/network-config-format-v1.rst index 78e725fd2..236c813c4 100644 --- a/doc/rtd/reference/network-config-format-v1.rst +++ b/doc/rtd/reference/network-config-format-v1.rst @@ -259,8 +259,7 @@ Users can specify a ``nameserver`` type. Nameserver dictionaries include the following keys: - ``address``: List of IPv4 or IPv6 address of nameservers. -- ``search``: Optional. List of hostnames to include in the :file:`resolv.conf` - search path. +- ``search``: Optional. List of hostnames to include in the search path. - ``interface``: Optional. Ties the nameserver definition to the specified interface. The value specified here must match the ``name`` of an interface defined in this config. If unspecified, this nameserver will be considered @@ -306,10 +305,8 @@ Valid keys for ``subnets`` include the following: - ``broadcast`` : IPv4 broadcast address in dotted format. This is only rendered if :file:`/etc/network/interfaces` is used. - ``gateway``: IPv4 address of the default gateway for this subnet. -- ``dns_nameservers``: Specify a list of IPv4 dns server IPs to end up in - :file:`resolv.conf`. -- ``dns_search``: Specify a list of search paths to be included in - :file:`resolv.conf`. +- ``dns_nameservers``: Specify a list of IPv4 DNS server IPs. +- ``dns_search``: Specify a list of DNS search paths. - ``routes``: Specify a list of routes for a given interface. 
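+
+For example, a static subnet combining these keys might look like this (a
+sketch; the subnet types themselves are listed below, and the addresses are
+illustrative):
+
+.. code-block:: yaml
+
+   subnets:
+     - type: static
+       address: 192.168.1.10/24
+       gateway: 192.168.1.1
+       dns_nameservers:
+         - 192.168.1.1
+       dns_search:
+         - example.com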
Subnet types are one of the following: diff --git a/doc/rtd/reference/network-config-format-v2.rst b/doc/rtd/reference/network-config-format-v2.rst index 78c75ba41..90a3ba739 100644 --- a/doc/rtd/reference/network-config-format-v2.rst +++ b/doc/rtd/reference/network-config-format-v2.rst @@ -37,7 +37,7 @@ For example the following could be present in network: version: 2 - ethernets: [] + ethernets: {} It may also be provided in other locations including the :ref:`datasource_nocloud`. See :ref:`network_config` for other places. diff --git a/doc/rtd/reference/network-config.rst b/doc/rtd/reference/network-config.rst index d9e67cf7e..2e95550e6 100644 --- a/doc/rtd/reference/network-config.rst +++ b/doc/rtd/reference/network-config.rst @@ -223,6 +223,11 @@ for BSD flavors at the moment. Network output policy ===================== +.. note:: + + These are **upstream** defaults and are known to be overridden by + downstream distributions. + The default policy for selecting a network ``renderer`` (in order of preference) is as follows: @@ -273,7 +278,7 @@ Example output: .. code-block:: usage: /usr/bin/cloud-init devel net-convert [-h] -p PATH -k {eni,network_data.json,yaml,azure-imds,vmware-imc} -d PATH -D - {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler} + {alpine,arch,azurelinux,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler} [-m name,mac] [--debug] -O {eni,netplan,networkd,sysconfig,network-manager} options: @@ -284,7 +289,7 @@ Example output: The format of the given network config -d PATH, --directory PATH directory to place output in - -D {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openeuler}, --distro {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler} + -D {alpine,arch,azurelinux,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openeuler}, --distro {alpine,arch,azurelinux,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler} -m name,mac, --mac name,mac interface name to mac mapping --debug enable debug logging to stderr. 
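+
+For example, based on the options above, converting a YAML network config to
+netplan output might be invoked like this (paths are illustrative):
+
+.. code-block:: shell-session
+
+   $ cloud-init devel net-convert -p network.yaml -k yaml -d out -D ubuntu -O netplan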
@@ -308,7 +313,6 @@ Example output: BOOTPROTO=static DEVICE=eth7 IPADDR=192.168.1.5/255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -316,7 +320,6 @@ Example output: # BOOTPROTO=dhcp DEVICE=eth9 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no diff --git a/doc/rtd/spelling_word_list.txt b/doc/rtd/spelling_word_list.txt index 83c824b83..239b3b494 100644 --- a/doc/rtd/spelling_word_list.txt +++ b/doc/rtd/spelling_word_list.txt @@ -61,6 +61,7 @@ dotnet downstreams dpkg dropbearssh +ds-identify dsa dsmode dsname @@ -209,6 +210,7 @@ runcmd scaleway seedurl serverurl +setup-keymap shortid sigonly sk diff --git a/doc/rtd/static/css/custom.css b/doc/rtd/static/css/custom.css index 186a8457a..d81eaec4d 100644 --- a/doc/rtd/static/css/custom.css +++ b/doc/rtd/static/css/custom.css @@ -2,7 +2,7 @@ /** Should be 100 for all headers, 400 for normal text **/ h1, h2, h3, h4, h5, h6, .sidebar-tree .current-page>.reference, button, input, optgroup, select, textarea, th.head { - font-weight: 300; + font-weight: 500; } .toc-title { @@ -10,7 +10,7 @@ h1, h2, h3, h4, h5, h6, .sidebar-tree .current-page>.reference, button, input, o } div.page, li.scroll-current>.reference, dl.glossary dt, dl.simple dt, dl:not([class]) dt { - font-weight: 300; + font-weight: 400; line-height: 1.5; font-size: var(--font-size--normal); } @@ -25,13 +25,13 @@ strong.command { /** Side bars (side-bar tree = left, toc-tree = right) **/ div.sidebar-tree { - font-weight: 200; + font-weight: 400; line-height: 1.5; font-size: var(--font-size--normal); } div.toc-tree { - font-weight: 200; + font-weight: 400; font-size: var(--font-size--medium); line-height: 1.5; } @@ -140,12 +140,12 @@ a.headerlink { border: 0; border-bottom: 2px solid var(--color-brand-primary); background-color: var(--color-sidebar-item-background--current); - font-weight:300; + font-weight:400; } .sphinx-tabs-tab{ color: var(--color-brand-primary); - font-weight:300; + font-weight:400; } .sphinx-tabs-panel { diff --git a/doc/rtd/templates/module_property.tmpl b/doc/rtd/templates/module_property.tmpl new file mode 100644 index 000000000..f429203c8 --- /dev/null +++ b/doc/rtd/templates/module_property.tmpl @@ -0,0 +1,12 @@ +{% macro print_prop(name, types, description, prefix) -%} +{% set descr_suffix = description.splitlines()[0]|d('') -%} +{% set descr_lines = description.splitlines()[1:]|d([]) -%} +{{prefix}}* **{{name}}:** ({{types}}){{ descr_suffix }} +{% for line in descr_lines -%} +{{prefix ~ ' '}}{{ line }} +{% endfor -%} +{%- endmacro -%} +{% if prop_cfg.get('items', {}).get('type') == 'object' %} +{% set description = description ~ " Each object in **" ~ name ~ "** list supports the following keys:" %} +{% endif %} +{{ print_prop(name, types, description, prefix ) }} diff --git a/doc/rtd/templates/modules.tmpl b/doc/rtd/templates/modules.tmpl new file mode 100644 index 000000000..6f7750a74 --- /dev/null +++ b/doc/rtd/templates/modules.tmpl @@ -0,0 +1,52 @@ +.. -*- mode: rst -*- +{% for mod_id, mod_cfg in data.items() %} +{% set mod_meta = config.html_context[mod_id]['meta'] -%} + +.. _mod_{{ mod_id }}: + +{{ mod_cfg['name'] }}{% set name_len = mod_cfg['name']|length %} +{{ '~' * name_len }} + +{{ mod_cfg['title'] }} + +.. tab-set:: + + .. 
tab-item:: Summary
+
+{% for line in mod_cfg['description'].splitlines() %}
+      {{ line -}}
+{% endfor %}
+
+      **Internal name:** ``{{ mod_id }}``
+
+      **Module frequency:** {{ mod_meta['frequency'] }}
+
+      **Supported distros:** {{ mod_meta['distros']|join(', ') }}
+
+{% if mod_meta['activate_by_schema_keys'] %}
+      **Activate only on keys:** ``{{ mod_meta['activate_by_schema_keys']|join('``, ``') }}``
+
+{% endif %}
+   .. tab-item:: Config schema
+
+{% for line2 in config.html_context[mod_id]['schema_doc'].splitlines() %}
+      {{ line2 }}
+{% endfor %}
+
+   .. tab-item:: Examples
+
+      {% if mod_cfg['examples'] %}
+      {% for example in mod_cfg['examples'] %}
+      {% for line in example["comment"].splitlines() %}
+      {{ line }}
+      {% endfor %}
+
+      .. literalinclude:: ../../module-docs/{{ example["file"] }}
+         :language: yaml
+
+      {% endfor %}
+      {% else %}
+      No examples for this module
+      {% endif %}
+
+{% endfor %}
diff --git a/doc/rtd/templates/property_changed.tmpl b/doc/rtd/templates/property_changed.tmpl
new file mode 100644
index 000000000..cc09fcdf2
--- /dev/null
+++ b/doc/rtd/templates/property_changed.tmpl
@@ -0,0 +1,2 @@
+
+{{ '*Changed in version ' ~ changed_version ~ '.' ~ changed_description ~ '*' -}}
diff --git a/doc/rtd/templates/property_deprecation.tmpl b/doc/rtd/templates/property_deprecation.tmpl
new file mode 100644
index 000000000..f7934ef13
--- /dev/null
+++ b/doc/rtd/templates/property_deprecation.tmpl
@@ -0,0 +1,2 @@
+
+{{ '*Deprecated in version ' ~ deprecated_version ~ '.' ~ deprecated_description ~ '*' -}}
diff --git a/doc/rtd/tutorial/index.rst b/doc/rtd/tutorial/index.rst
index 392d2465a..58c2cce23 100644
--- a/doc/rtd/tutorial/index.rst
+++ b/doc/rtd/tutorial/index.rst
@@ -14,7 +14,7 @@ capable of.
 Core tutorial
 =============
 
-This tutorial, which we recommend if you are completely new to ``cloudinit``,
+This tutorial, which we recommend if you are completely new to ``cloud-init``,
 uses the QEMU emulator to introduce you to all of the key concepts, tools,
 processes and operations that you will need to get started.
 
@@ -35,3 +35,15 @@ user data script.
    :maxdepth: 1
 
    lxd.rst
+
+WSL tutorial
+============
+
+This tutorial is for learning to use ``cloud-init`` within a ``WSL``
+environment. You will use a ``cloud-init`` user data script to customize a
+``WSL`` instance.
+
+.. toctree::
+   :maxdepth: 1
+
+   wsl.rst
diff --git a/doc/rtd/tutorial/lxd.rst b/doc/rtd/tutorial/lxd.rst
index 1eec2e6b4..8bde79c85 100644
--- a/doc/rtd/tutorial/lxd.rst
+++ b/doc/rtd/tutorial/lxd.rst
@@ -60,9 +60,9 @@ following file on your local filesystem at :file:`/tmp/my-user-data`:
 
 Here, we are defining our ``cloud-init`` user data in the
 :ref:`#cloud-config` format, using the
-:ref:`runcmd module <mod-runcmd>` to define a command to run. When applied, it
-will write ``Hello, World!`` to :file:`/var/tmp/hello-world.txt` (as we shall
-see later!).
+:ref:`runcmd module <mod_cc_runcmd>` to define a command to run. When applied,
+it will write ``Hello, World!`` to :file:`/var/tmp/hello-world.txt` (as we
+shall see later!).
 
 Launch a LXD container with our user data
 =========================================
@@ -127,7 +127,7 @@ Which should print the following:
 
 .. code-block::
 
-    Valid cloud-config: system userdata
+    Valid schema user-data
 
 Finally, let us verify that our user data was applied successfully:
 
@@ -163,7 +163,7 @@ We can then remove the container completely using:
 
 What's next?
 ============
 
-In this tutorial, we used the :ref:`runcmd module <mod-runcmd>` to execute a
+In this tutorial, we used the :ref:`runcmd module <mod_cc_runcmd>` to execute a
The full list of modules available can be found in our :ref:`modules documentation`. Each module contains examples of how to use it. diff --git a/doc/rtd/tutorial/qemu.rst b/doc/rtd/tutorial/qemu.rst index 27f0b980d..4c1afedd8 100644 --- a/doc/rtd/tutorial/qemu.rst +++ b/doc/rtd/tutorial/qemu.rst @@ -86,7 +86,7 @@ Define our user data Now we need to create our :file:`user-data` file. This user data cloud-config sets the password of the default user, and sets that password to never expire. For more details you can refer to the -:ref:`Set Passwords module page`. +:ref:`Set Passwords module page`. Run the following command, which creates a file named :file:`user-data` containing our configuration data. @@ -127,7 +127,7 @@ machine instance. Multiple different format types are supported by :ref:`documentation describing different formats`. The second line, ``password: password``, as per -:ref:`the Users and Groups module docs`, sets the default +:ref:`the Users and Groups module docs`, sets the default user's password to ``password``. The third and fourth lines direct ``cloud-init`` to not require a password diff --git a/doc/rtd/tutorial/wsl.rst b/doc/rtd/tutorial/wsl.rst new file mode 100644 index 000000000..ed486809e --- /dev/null +++ b/doc/rtd/tutorial/wsl.rst @@ -0,0 +1,334 @@ +.. _tutorial_wsl: + +WSL Tutorial +************ + +In this tutorial, we will customize a Windows Subsystem for Linux (WSL) +instance using cloud-init on Ubuntu. + +How to use this tutorial +======================== + +In this tutorial, the commands in each code block can be copied and pasted +directly into a ``PowerShell`` Window . Omit the prompt before each +command, or use the "copy code" button on the right-hand side of the block, +which will copy the command for you without the prompt. + +Prerequisites +============= + +This tutorial assumes you are running within a ``Windows 11`` or ``Windows +Server 2022`` environment. If ``wsl`` is already installed, you must be +running version 2. You can check your version of ``wsl`` by running the +following command: + +.. code-block:: doscon + + PS> wsl --version + +Example output: + +.. code-block:: text + + WSL version: 2.1.5.0 + Kernel version: 5.15.146.1 + WSLg version: 1.0.60 + MSRDC version: 1.2.5105 + Direct3D version: 1.611.1-81528511 + DXCore version: 10.0.25131.1002-220531-1700.rs-onecore-base2-hyp + Windows version: 10.0.20348.2402 + +If running this tutorial within a virtualized +environment (`including in the cloud`_), ensure that +`nested virtualization`_ is enabled. + +Install WSL +=========== + +.. note:: + If you have already installed WSL, you can skip this section. + +.. code-block:: doscon + + PS> wsl --install + +Example output: + +.. code-block:: text + + Installing: Virtual Machine Platform + Virtual Machine Platform has been installed. + Installing: Windows Subsystem for Linux + Windows Subsystem for Linux has been installed. + Installing: Ubuntu + Ubuntu has been installed. + The requested operation is successful. Changes will not be effective until the system is rebooted. + +Reboot the system when prompted. + +Obtain the Ubuntu WSL image +=========================== + +Ubuntu 24.04 is the first Ubuntu version to support cloud-init in WSL, +so that is the image that we'll use. + +We have two options to obtain the Ubuntu 24.04 WSL image: the Microsoft +Store and the Ubuntu image server. 
+
+Option #1: The Microsoft Store
+------------------------------
+
+If you have access to the Microsoft Store, you can download the
+`Ubuntu 24.04`_ WSL image from within the app.
+
+Click on the "Get" button to download the image.
+
+Once the image has downloaded, do **NOT** click "Open", as that
+would start the instance before we have defined the cloud-init user data
+used to customize it.
+
+After the download completes, you can verify that the image is available by
+running the following command:
+
+.. code-block:: doscon
+
+    PS> wsl --list
+
+Example output:
+
+.. code-block:: text
+
+    Windows Subsystem for Linux Distributions:
+    Ubuntu (Default)
+    Ubuntu-24.04
+
+It should show ``Ubuntu-24.04`` in the list of available WSL instances.
+
+Option #2: The Ubuntu image server
+----------------------------------
+
+If the Microsoft Store is not an option, we can instead download the
+Ubuntu 24.04 WSL image from the `Ubuntu image server`_.
+
+Create a directory under the user's home directory to store the
+WSL image and install data.
+
+.. code-block:: doscon
+
+    PS> mkdir ~\wsl-images
+
+Download the Ubuntu 24.04 WSL image.
+
+.. code-block:: doscon
+
+    PS> Invoke-WebRequest -Uri https://cloud-images.ubuntu.com/wsl/noble/current/ubuntu-noble-wsl-amd64-wsl.rootfs.tar.gz -OutFile wsl-images\ubuntu-noble-wsl-amd64-wsl.rootfs.tar.gz
+
+Import the image into WSL, storing it in the ``wsl-images`` directory.
+
+.. code-block:: doscon
+
+    PS> wsl --import Ubuntu-24.04 wsl-images .\wsl-images\ubuntu-noble-wsl-amd64-wsl.rootfs.tar.gz
+
+Example output:
+
+.. code-block::
+
+    Import in progress, this may take a few minutes.
+    The operation completed successfully.
+
+Create our user data
+====================
+
+User data is the primary way for a user to customize a cloud-init instance.
+Open Notepad and paste the following:
+
+.. code-block:: yaml
+
+    #cloud-config
+    write_files:
+    - content: |
+        Hello from cloud-init
+      path: /var/tmp/hello-world.txt
+      permissions: '0770'
+
+Save the file to ``%USERPROFILE%\.cloud-init\Ubuntu-24.04.user-data``.
+
+For example, if your username is ``me``, the path would be
+``C:\Users\me\.cloud-init\Ubuntu-24.04.user-data``.
+Ensure that the file is saved with the ``.user-data`` extension and
+not as a ``.txt`` file.
+
+.. note::
+    We are creating user data that is tied to the instance we just created,
+    but by changing the filename, we can create user data that applies to
+    multiple or all WSL instances. See
+    :ref:`WSL Datasource reference page` for
+    more information.
+
+What is user data?
+==================
+
+Before moving forward, let's inspect our :file:`user-data` file.
+
+We created the following contents:
+
+.. code-block:: yaml
+
+    #cloud-config
+    write_files:
+    - content: |
+        Hello from cloud-init
+      path: /var/tmp/hello-world.txt
+      permissions: '0770'
+
+The first line starts with ``#cloud-config``, which tells cloud-init
+what type of user data is in the config. Cloud-config is a YAML-based
+configuration type that tells cloud-init how to configure the instance
+being created. Multiple different format types are supported by
+cloud-init. For more information, see the
+:ref:`documentation describing different formats`.
+
+The remaining lines, as per
+:ref:`the Write Files module docs <mod_cc_write_files>`, create a file
+``/var/tmp/hello-world.txt`` with the content ``Hello from cloud-init`` and
+permissions that allow the file's owner and group to read, write, and
+execute it.
+
+Start the Ubuntu WSL instance
+=============================
+
+.. code-block:: doscon
+
+    PS> wsl --distribution Ubuntu-24.04
+
+The Ubuntu WSL instance will start, and you may be prompted for a username
+and password.
+
+.. code-block:: text
+
+    Installing, this may take a few minutes...
+    Please create a default UNIX user account. The username does not need to match your Windows username.
+    For more information visit: https://aka.ms/wslusers
+    Enter new UNIX username:
+    New password:
+    Retype new password:
+
+Once the credentials have been entered, you should see a welcome
+screen similar to the following:
+
+.. code-block:: text
+
+    Welcome to Ubuntu Noble Numbat (GNU/Linux 5.15.146.1-microsoft-standard-WSL2 x86_64)
+
+     * Documentation:  https://help.ubuntu.com
+     * Management:     https://landscape.canonical.com
+     * Support:        https://ubuntu.com/pro
+
+     System information as of Mon Apr 22 21:06:49 UTC 2024
+
+     System load:  0.08                Processes:             51
+     Usage of /:   0.1% of 1006.85GB   Users logged in:       0
+     Memory usage: 4%                  IPv4 address for eth0: 172.29.240.255
+     Swap usage:   0%
+
+
+    This message is shown once a day. To disable it please create the
+    /root/.hushlogin file.
+    root@machine:/mnt/c/Users/me#
+
+You should now be in a shell inside the WSL instance.
+
+Verify that ``cloud-init`` ran successfully
+-------------------------------------------
+
+Before validating the user data, let's wait for ``cloud-init`` to complete
+successfully:
+
+.. code-block:: shell-session
+
+    $ cloud-init status --wait
+
+Which provides the following output:
+
+.. code-block:: text
+
+    status: done
+
+Now we can see that cloud-init has detected that we are running in WSL:
+
+.. code-block:: shell-session
+
+    $ cloud-id
+
+Which provides the following output:
+
+.. code-block:: text
+
+    wsl
+
+Verify our user data
+--------------------
+
+Now that we know ``cloud-init`` ran successfully, we can verify that it
+received the expected user data we provided earlier:
+
+.. code-block:: shell-session
+
+    $ cloud-init query userdata
+
+Which should print the following to the terminal window:
+
+.. code-block::
+
+    #cloud-config
+    write_files:
+    - content: |
+        Hello from cloud-init
+      path: /var/tmp/hello-world.txt
+      permissions: '0770'
+
+We can also assert that the user data we provided is valid cloud-config:
+
+.. code-block:: shell-session
+
+    $ cloud-init schema --system --annotate
+
+Which should print the following:
+
+.. code-block::
+
+    Valid schema user-data
+
+Finally, let us verify that our user data was applied successfully:
+
+.. code-block:: shell-session
+
+    $ cat /var/tmp/hello-world.txt
+
+Which should then print:
+
+.. code-block::
+
+    Hello from cloud-init
+
+We can see that ``cloud-init`` has received and consumed our user data
+successfully!
+
+What's next?
+============
+
+In this tutorial, we used the :ref:`Write Files module <mod_cc_write_files>` to
+write a file to our WSL instance. The full list of modules available can be
+found in our :ref:`modules documentation`.
+Each module contains examples of how to use it.
+
+You can also head over to the :ref:`examples page` for
+examples of more common use cases.
+
+Cloud-init's WSL reference documentation can be found on the
+:ref:`WSL Datasource reference page`.
+
+
+.. _including in the cloud: https://techcommunity.microsoft.com/t5/itops-talk-blog/how-to-setup-nested-virtualization-for-azure-vm-vhd/ba-p/1115338
+.. _nested virtualization: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/nested-virtualization
+.. _Ubuntu 24.04: https://apps.microsoft.com/detail/9nz3klhxdjp5
+.. 
_Ubuntu image server: https://cloud-images.ubuntu.com/wsl/ diff --git a/integration-requirements.txt b/integration-requirements.txt index dc17759a3..a34b63e70 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,7 +1,8 @@ +-r requirements.txt # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib>=5.10.0,<1!6 +pycloudlib>=1!6.7.0,<1!8 # Avoid breaking change in `testpaths` treatment forced # test/unittests/conftest.py to be loaded by our integration-tests tox env diff --git a/packages/debian/cloud-init.logrotate b/packages/debian/cloud-init.logrotate index 24e131053..8f224e694 100644 --- a/packages/debian/cloud-init.logrotate +++ b/packages/debian/cloud-init.logrotate @@ -6,6 +6,6 @@ notifempty rotate 6 compress - delayacompress + delaycompress size 1M } diff --git a/packages/debian/control.in b/packages/debian/control.in index 30cf406b7..fb1cffc71 100644 --- a/packages/debian/control.in +++ b/packages/debian/control.in @@ -12,7 +12,6 @@ Architecture: all Depends: ${misc:Depends}, ${python3:Depends}, iproute2, - isc-dhcp-client, python3-debconf Recommends: eatmydata, sudo, software-properties-common, gdisk Suggests: ssh-import-id, openssh-server diff --git a/pyproject.toml b/pyproject.toml index 99854f397..7408488f9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,39 +15,240 @@ skip = ["cloudinit/cmd/main.py", ".tox", "packages", "tools"] [tool.mypy] follow_imports = "silent" -warn_unused_ignores = "true" -warn_redundant_casts = "true" +check_untyped_defs = true +warn_redundant_casts = true +warn_unused_ignores = true +warn_unreachable = true exclude = [] [[tool.mypy.overrides]] module = [ "apport.*", "BaseHTTPServer", - "cloudinit.feature_overrides", "configobj", "debconf", "httplib", "jsonpatch", - "netifaces", "paramiko.*", "pip.*", "pycloudlib.*", - "responses", "serial", "tests.integration_tests.user_settings", "uaclient.*", ] ignore_missing_imports = true +no_implicit_optional = true + +[[tool.mypy.overrides]] +module = [ + "cloudinit.analyze", + "cloudinit.analyze.dump", + "cloudinit.analyze.show", + "cloudinit.cmd.devel.hotplug_hook", + "cloudinit.cmd.devel.make_mime", + "cloudinit.cmd.devel.net_convert", + "cloudinit.cmd.main", + "cloudinit.config.cc_apt_configure", + "cloudinit.config.cc_ca_certs", + "cloudinit.config.cc_growpart", + "cloudinit.config.cc_ntp", + "cloudinit.config.cc_power_state_change", + "cloudinit.config.cc_rsyslog", + "cloudinit.config.cc_ubuntu_pro", + "cloudinit.config.modules", + "cloudinit.config.schema", + "cloudinit.distros", + "cloudinit.distros.alpine", + "cloudinit.distros.azurelinux", + "cloudinit.distros.bsd", + "cloudinit.distros.opensuse", + "cloudinit.distros.parsers.hostname", + "cloudinit.distros.parsers.hosts", + "cloudinit.distros.parsers.resolv_conf", + "cloudinit.distros.ubuntu", + "cloudinit.distros.ug_util", + "cloudinit.helpers", + "cloudinit.log", + "cloudinit.mergers", + "cloudinit.net", + "cloudinit.net.cmdline", + "cloudinit.net.dhcp", + "cloudinit.net.eni", + "cloudinit.net.ephemeral", + "cloudinit.net.freebsd", + "cloudinit.net.netbsd", + "cloudinit.net.netplan", + "cloudinit.net.network_manager", + "cloudinit.net.network_state", + "cloudinit.net.networkd", + "cloudinit.net.sysconfig", + "cloudinit.netinfo", + "cloudinit.reporting.handlers", + "cloudinit.sources.DataSourceAzure", + "cloudinit.sources.DataSourceBigstep", + "cloudinit.sources.DataSourceCloudSigma", + "cloudinit.sources.DataSourceCloudStack", + 
"cloudinit.sources.DataSourceConfigDrive", + "cloudinit.sources.DataSourceDigitalOcean", + "cloudinit.sources.DataSourceEc2", + "cloudinit.sources.DataSourceExoscale", + "cloudinit.sources.DataSourceGCE", + "cloudinit.sources.DataSourceHetzner", + "cloudinit.sources.DataSourceIBMCloud", + "cloudinit.sources.DataSourceMAAS", + "cloudinit.sources.DataSourceNoCloud", + "cloudinit.sources.DataSourceOVF", + "cloudinit.sources.DataSourceOpenNebula", + "cloudinit.sources.DataSourceOpenStack", + "cloudinit.sources.DataSourceOracle", + "cloudinit.sources.DataSourceRbxCloud", + "cloudinit.sources.DataSourceScaleway", + "cloudinit.sources.DataSourceSmartOS", + "cloudinit.sources.DataSourceVMware", + "cloudinit.sources", + "cloudinit.sources.helpers.azure", + "cloudinit.sources.helpers.ec2", + "cloudinit.sources.helpers.netlink", + "cloudinit.sources.helpers.openstack", + "cloudinit.sources.helpers.vmware.imc.config_file", + "cloudinit.sources.helpers.vmware.imc.config_nic", + "cloudinit.sources.helpers.vultr", + "cloudinit.ssh_util", + "cloudinit.stages", + "cloudinit.temp_utils", + "cloudinit.templater", + "cloudinit.user_data", + "cloudinit.util", + "tests.integration_tests.instances", + "tests.unittests.analyze.test_show", + "tests.unittests.cmd.devel.test_hotplug_hook", + "tests.unittests.cmd.devel.test_render", + "tests.unittests.cmd.test_clean", + "tests.unittests.cmd.test_query", + "tests.unittests.cmd.test_status", + "tests.unittests.config.test_apt_configure_sources_list_v1", + "tests.unittests.config.test_apt_configure_sources_list_v3", + "tests.unittests.config.test_apt_source_v1", + "tests.unittests.config.test_apt_source_v3", + "tests.unittests.config.test_cc_ansible", + "tests.unittests.config.test_cc_apk_configure", + "tests.unittests.config.test_cc_apt_pipelining", + "tests.unittests.config.test_cc_bootcmd", + "tests.unittests.config.test_cc_ca_certs", + "tests.unittests.config.test_cc_chef", + "tests.unittests.config.test_cc_disable_ec2_metadata", + "tests.unittests.config.test_cc_final_message", + "tests.unittests.config.test_cc_growpart", + "tests.unittests.config.test_cc_grub_dpkg", + "tests.unittests.config.test_cc_install_hotplug", + "tests.unittests.config.test_cc_keys_to_console", + "tests.unittests.config.test_cc_landscape", + "tests.unittests.config.test_cc_locale", + "tests.unittests.config.test_cc_mcollective", + "tests.unittests.config.test_cc_mounts", + "tests.unittests.config.test_cc_ntp", + "tests.unittests.config.test_cc_phone_home", + "tests.unittests.config.test_cc_power_state_change", + "tests.unittests.config.test_cc_puppet", + "tests.unittests.config.test_cc_resizefs", + "tests.unittests.config.test_cc_resolv_conf", + "tests.unittests.config.test_cc_rh_subscription", + "tests.unittests.config.test_cc_rsyslog", + "tests.unittests.config.test_cc_runcmd", + "tests.unittests.config.test_cc_set_hostname", + "tests.unittests.config.test_cc_snap", + "tests.unittests.config.test_cc_ssh", + "tests.unittests.config.test_cc_timezone", + "tests.unittests.config.test_cc_ubuntu_autoinstall", + "tests.unittests.config.test_cc_ubuntu_drivers", + "tests.unittests.config.test_cc_ubuntu_pro", + "tests.unittests.config.test_cc_update_etc_hosts", + "tests.unittests.config.test_cc_users_groups", + "tests.unittests.config.test_cc_wireguard", + "tests.unittests.config.test_cc_yum_add_repo", + "tests.unittests.config.test_cc_zypper_add_repo", + "tests.unittests.config.test_modules", + "tests.unittests.config.test_schema", + "tests.unittests.conftest", + 
"tests.unittests.distros.test_alpine", + "tests.unittests.distros.test_hosts", + "tests.unittests.distros.test_ifconfig", + "tests.unittests.distros.test_netbsd", + "tests.unittests.distros.test_netconfig", + "tests.unittests.distros.test_opensuse", + "tests.unittests.distros.test_user_data_normalize", + "tests.unittests.helpers", + "tests.unittests.net.test_dhcp", + "tests.unittests.net.test_init", + "tests.unittests.net.test_network_state", + "tests.unittests.net.test_networkd", + "tests.unittests.runs.test_merge_run", + "tests.unittests.runs.test_simple_run", + "tests.unittests.sources.azure.test_errors", + "tests.unittests.sources.azure.test_imds", + "tests.unittests.sources.helpers.test_openstack", + "tests.unittests.sources.test_aliyun", + "tests.unittests.sources.test_altcloud", + "tests.unittests.sources.test_azure", + "tests.unittests.sources.test_azure_helper", + "tests.unittests.sources.test_cloudsigma", + "tests.unittests.sources.test_common", + "tests.unittests.sources.test_configdrive", + "tests.unittests.sources.test_digitalocean", + "tests.unittests.sources.test_ec2", + "tests.unittests.sources.test_exoscale", + "tests.unittests.sources.test_gce", + "tests.unittests.sources.test_init", + "tests.unittests.sources.test_lxd", + "tests.unittests.sources.test_maas", + "tests.unittests.sources.test_nocloud", + "tests.unittests.sources.test_opennebula", + "tests.unittests.sources.test_openstack", + "tests.unittests.sources.test_oracle", + "tests.unittests.sources.test_ovf", + "tests.unittests.sources.test_rbx", + "tests.unittests.sources.test_scaleway", + "tests.unittests.sources.test_smartos", + "tests.unittests.sources.test_upcloud", + "tests.unittests.sources.test_vultr", + "tests.unittests.sources.test_wsl", + "tests.unittests.sources.vmware.test_vmware_config_file", + "tests.unittests.test__init__", + "tests.unittests.test_apport", + "tests.unittests.test_builtin_handlers", + "tests.unittests.test_cli", + "tests.unittests.test_conftest", + "tests.unittests.test_data", + "tests.unittests.test_ds_identify", + "tests.unittests.test_helpers", + "tests.unittests.test_log", + "tests.unittests.test_merging", + "tests.unittests.test_net", + "tests.unittests.test_net_activators", + "tests.unittests.test_ssh_util", + "tests.unittests.test_stages", + "tests.unittests.test_subp", + "tests.unittests.test_templating", + "tests.unittests.test_upgrade", + "tests.unittests.test_url_helper", + "tests.unittests.test_util", + "tests.unittests.util", + + # tools/* + "netplan_schema_check", + "mock-meta", +] +check_untyped_defs = false [tool.ruff] target-version = "py37" line-length = 79 # E, W, and F make up the entirety of default flake8 -select = [ +lint.select = [ + "D", # pydocstyle "E", # pycodestyle errors "W", # pycodestyle warnings "F", # pyflakes - "CPY", # flake8-copyright "T10", # flake8-debugger "ISC", # flake8-implicit-str-concat "ICN", # flake8-import-conventions @@ -55,8 +256,24 @@ select = [ "PIE", # flake8-pie "Q", # flake8-quotes ] -ignore = [ +lint.ignore = [ + "D100", # docstring: public module + "D101", # docstring: public class required + "D102", # docstring: public method required + "D103", # docstring: public function required + "D107", # docstring: __init__ required + "D104", # docstring: public package required + "D105", # docstring: magic method required + "D200", # docstring: one line docstring shouldn't wrap + "D202", # docstring: blank line + "D205", # docstring: 1 blank line between initial and summary + "D209", # docstring: closing quotes -> separate line + "D400", 
# docstring: end with a period + "D401", # docstring: imperative mood + "D402", # docstring: docstring shouldn't start with func signature + "D403", # docstring: capitalized first line "E731", # Do not assign a `lambda` expression, use a `def` ] -[tool.ruff.per-file-ignores] -"cloudinit/cmd/main.py" = ["E402"] + +[tool.ruff.lint.pydocstyle] +convention = "pep257" diff --git a/requirements.txt b/requirements.txt index edec46a7f..eabd7a22c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,12 +29,3 @@ jsonpatch # For validating cloud-config sections per schema definitions jsonschema - -# Used by DataSourceVMware to inspect the host's network configuration during -# the "setup()" function. -# -# This allows a host that uses DHCP to bring up the network during BootLocal -# and still participate in instance-data by gathering the network in detail at -# runtime and merge that information into the metadata and repersist that to -# disk. -netifaces>=0.10.4 diff --git a/setup.py b/setup.py index 084249548..3e33d0062 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,6 @@ # isort: off from setup_utils import ( # noqa: E402 get_version, - in_virtualenv, is_f, is_generator, pkg_config_read, @@ -82,7 +81,7 @@ def render_tmpl(template, mode=None, is_yaml=False): cmd_variant = ["--variant", VARIANT] if PREFIX: cmd_prefix = ["--prefix", PREFIX] - subprocess.run( + subprocess.run( # nosec B603 [ sys.executable, "./tools/render-template", @@ -266,13 +265,12 @@ def finalize_options(self): self.distribution.reinitialize_command("install_data", True) -if not in_virtualenv(): - USR = "/" + USR - ETC = "/" + ETC - USR_LIB_EXEC = "/" + USR_LIB_EXEC - LIB = "/" + LIB - for k in INITSYS_ROOTS.keys(): - INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k] +USR = "/" + USR +ETC = "/" + ETC +USR_LIB_EXEC = "/" + USR_LIB_EXEC +LIB = "/" + LIB +for k in INITSYS_ROOTS.keys(): + INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k] data_files = [ (ETC + "/cloud", [render_tmpl("config/cloud.cfg.tmpl", is_yaml=True)]), @@ -301,11 +299,14 @@ def finalize_options(self): USR + "/share/doc/cloud-init/examples/seed", [f for f in glob("doc/examples/seed/*") if is_f(f)], ), + ( + USR + "/share/doc/cloud-init/module-docs", + [f for f in glob("doc/module-docs/*", recursive=True) if is_f(f)], + ), ] if not platform.system().endswith("BSD"): RULES_PATH = pkg_config_read("udev", "udevdir") - if not in_virtualenv(): - RULES_PATH = "/" + RULES_PATH + RULES_PATH = "/" + RULES_PATH data_files.extend( [ diff --git a/setup_utils.py b/setup_utils.py index f33250a6e..0ff758107 100644 --- a/setup_utils.py +++ b/setup_utils.py @@ -25,7 +25,7 @@ def pkg_config_read(library: str, var: str) -> str: } cmd = ["pkg-config", f"--variable={var}", library] try: - path = subprocess.check_output(cmd).decode("utf-8") + path = subprocess.check_output(cmd).decode("utf-8") # nosec B603 path = path.strip() except Exception: path = fallbacks[library][var] @@ -35,15 +35,6 @@ def pkg_config_read(library: str, var: str) -> str: return path -def in_virtualenv() -> bool: - # TODO: sys.real_prefix doesn't exist on any currently supported - # version of python. This function can never return True - try: - return sys.real_prefix != sys.prefix - except AttributeError: - return False - - def version_to_pep440(version: str) -> str: # read-version can spit out something like 22.4-15-g7f97aee24 # which is invalid under PEP 440. 
If we replace the first - with a + @@ -53,12 +44,12 @@ def version_to_pep440(version: str) -> str: def get_version() -> str: cmd = [sys.executable, "tools/read-version"] - ver = subprocess.check_output(cmd) + ver = subprocess.check_output(cmd)  # nosec B603 version = ver.decode("utf-8").strip() return version_to_pep440(version) def read_requires() -> List[str]: cmd = [sys.executable, "tools/read-dependencies"] - deps = subprocess.check_output(cmd) + deps = subprocess.check_output(cmd)  # nosec B603 return deps.decode("utf-8").splitlines() diff --git a/systemd/cloud-config.service.tmpl b/systemd/cloud-config.service.tmpl index 709864341..62fdefa27 100644 --- a/systemd/cloud-config.service.tmpl +++ b/systemd/cloud-config.service.tmpl @@ -1,12 +1,11 @@ ## template:jinja [Unit] -Description=Apply the settings specified in cloud-config -After=network-online.target cloud-config.target -After=snapd.seeded.service +# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html +Description=Cloud-init: Config Stage +After=network-online.target cloud-config.target snapd.seeded.service Wants=network-online.target cloud-config.target ConditionPathExists=!/etc/cloud/cloud-init.disabled ConditionKernelCommandLine=!cloud-init=disabled -ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled [Service] Type=oneshot diff --git a/systemd/cloud-final.service.tmpl b/systemd/cloud-final.service.tmpl index ab3daed01..69fd2b1de 100644 --- a/systemd/cloud-final.service.tmpl +++ b/systemd/cloud-final.service.tmpl @@ -1,6 +1,7 @@ ## template:jinja [Unit] -Description=Execute cloud user/final scripts +# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html +Description=Cloud-init: Final Stage After=network-online.target time-sync.target cloud-config.service rc-local.service {% if variant in ["ubuntu", "unknown", "debian"] %} After=multi-user.target @@ -9,7 +10,6 @@ Before=apt-daily.service Wants=network-online.target cloud-config.service ConditionPathExists=!/etc/cloud/cloud-init.disabled ConditionKernelCommandLine=!cloud-init=disabled -ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled [Service] @@ -18,7 +18,7 @@ ExecStart=/usr/bin/cloud-init modules --mode=final RemainAfterExit=yes TimeoutSec=0 KillMode=process -{% if variant == "rhel" %} +{% if variant in ["almalinux", "cloudlinux", "rhel"] %} # Restart NetworkManager if it is present and running.
ExecStartPost=/bin/sh -c 'u=NetworkManager.service; \ out=$(systemctl show --property=SubState $u) || exit; \ diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index 0418cb033..791c74109 100644 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -70,7 +70,7 @@ main() { if [ "$ds" = "1" ]; then debug 1 "cloud-init is enabled but no datasource found, disabling" else - debug 1 "cloud-init is disabled by kernel commandline or etc_file" + debug 1 "cloud-init is disabled by kernel command line or etc_file" fi if [ -f "$link_path" ]; then if rm -f "$link_path"; then diff --git a/systemd/cloud-init-hotplugd.service b/systemd/cloud-init-hotplugd.service index 0aeeeaff5..174ad657e 100644 --- a/systemd/cloud-init-hotplugd.service +++ b/systemd/cloud-init-hotplugd.service @@ -10,14 +10,15 @@ # cloud-init-hotplud.service will read args from file descriptor 3 [Unit] -Description=cloud-init hotplug hook daemon +Description=Cloud-init: Hotplug Hook After=cloud-init-hotplugd.socket +After=cloud-init.target Requires=cloud-init-hotplugd.socket ConditionPathExists=!/etc/cloud/cloud-init.disabled ConditionKernelCommandLine=!cloud-init=disabled [Service] -Type=simple +Type=oneshot ExecStart=/bin/bash -c 'read args <&3; echo "args=$args"; \ exec /usr/bin/cloud-init devel hotplug-hook $args; \ exit 0' diff --git a/systemd/cloud-init-hotplugd.socket b/systemd/cloud-init-hotplugd.socket index acf53f12c..5c3966b4a 100644 --- a/systemd/cloud-init-hotplugd.socket +++ b/systemd/cloud-init-hotplugd.socket @@ -5,9 +5,12 @@ # Known bug with an enforcing SELinux policy: LP: #1936229 [Unit] Description=cloud-init hotplug hook socket +After=cloud-config.target +ConditionPathExists=!/etc/cloud/cloud-init.disabled +ConditionKernelCommandLine=!cloud-init=disabled [Socket] ListenFIFO=/run/cloud-init/hook-hotplug-cmd [Install] -WantedBy=cloud-init.target +WantedBy=cloud-config.target diff --git a/systemd/cloud-init-local.service.tmpl b/systemd/cloud-init-local.service.tmpl index 3a1ca7fa2..3000513c0 100644 --- a/systemd/cloud-init-local.service.tmpl +++ b/systemd/cloud-init-local.service.tmpl @@ -1,23 +1,24 @@ ## template:jinja [Unit] -Description=Initial cloud-init job (pre-networking) -{% if variant in ["ubuntu", "unknown", "debian", "rhel" ] %} +# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html +Description=Cloud-init: Local Stage (pre-network) +{% if variant in ["almalinux", "cloudlinux", "ubuntu", "unknown", "debian", "rhel"] %} DefaultDependencies=no {% endif %} Wants=network-pre.target After=hv_kvp_daemon.service After=systemd-remount-fs.service -{% if variant == "rhel" %} +{% if variant in ["almalinux", "cloudlinux", "rhel"] %} Requires=dbus.socket After=dbus.socket {% endif %} Before=NetworkManager.service -{% if variant == "rhel" %} +{% if variant in ["almalinux", "cloudlinux", "rhel"] %} Before=network.service {% endif %} Before=network-pre.target Before=shutdown.target -{% if variant == "rhel" %} +{% if variant in ["almalinux", "cloudlinux", "rhel"] %} Before=firewalld.target Conflicts=shutdown.target {% endif %} @@ -28,11 +29,10 @@ Conflicts=shutdown.target RequiresMountsFor=/var/lib/cloud ConditionPathExists=!/etc/cloud/cloud-init.disabled ConditionKernelCommandLine=!cloud-init=disabled -ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled [Service] Type=oneshot -{% if variant == "rhel" %} +{% if variant in ["almalinux", "cloudlinux", "rhel"] %} ExecStartPre=/bin/mkdir -p /run/cloud-init ExecStartPre=/sbin/restorecon /run/cloud-init 
ExecStartPre=/usr/bin/touch /run/cloud-init/enabled diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index 4223348ab..1efaebd0a 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -1,7 +1,8 @@ ## template:jinja [Unit] -Description=Initial cloud-init job (metadata service crawler) -{% if variant not in ["photon", "rhel"] %} +# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html +Description=Cloud-init: Network Stage +{% if variant not in ["almalinux", "cloudlinux", "photon", "rhel"] %} DefaultDependencies=no {% endif %} Wants=cloud-init-local.service @@ -25,6 +26,7 @@ After=wicked.service After=dbus.service {% endif %} Before=network-online.target +Before=systemd-user-sessions.service {% if variant in ["ubuntu", "unknown", "debian"] %} Before=sysinit.target Before=shutdown.target @@ -34,10 +36,8 @@ Conflicts=shutdown.target Before=shutdown.target Conflicts=shutdown.target {% endif %} -Before=systemd-user-sessions.service ConditionPathExists=!/etc/cloud/cloud-init.disabled ConditionKernelCommandLine=!cloud-init=disabled -ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled [Service] Type=oneshot diff --git a/systemd/cloud-init.target b/systemd/cloud-init.target index 30450f7ff..c8e7b2153 100644 --- a/systemd/cloud-init.target +++ b/systemd/cloud-init.target @@ -12,4 +12,3 @@ Description=Cloud-init target After=multi-user.target ConditionPathExists=!/etc/cloud/cloud-init.disabled ConditionKernelCommandLine=!cloud-init=disabled -ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled diff --git a/sysvinit/freebsd/cloudinitlocal.tmpl b/sysvinit/freebsd/cloudinitlocal.tmpl index acf8c20a8..c6a65194e 100755 --- a/sysvinit/freebsd/cloudinitlocal.tmpl +++ b/sysvinit/freebsd/cloudinitlocal.tmpl @@ -6,7 +6,7 @@ ``cloudinitlocal`` purposefully does not depend on ``dsidentify``. That makes it easy for image builders to disable ``dsidentify``. #} -# REQUIRE: ldconfig mountcritlocal +# REQUIRE: ldconfig cleanvar # BEFORE: NETWORKING cloudinit cloudconfig cloudfinal . /etc/rc.subr diff --git a/sysvinit/freebsd/dsidentify.tmpl b/sysvinit/freebsd/dsidentify.tmpl index d18e0042d..96bc88aae 100755 --- a/sysvinit/freebsd/dsidentify.tmpl +++ b/sysvinit/freebsd/dsidentify.tmpl @@ -2,13 +2,7 @@ #!/bin/sh # PROVIDE: dsidentify -{# -once we are correctly using ``paths.run_dir`` / ``paths.get_runpath()`` in the -python code-base, we can start thinking about how to bring that into -``ds-identify`` itself, and then!, then we can depend on (``REQUIRE``) -``var_run`` instead of ``mountcritlocal`` here. -#} -# REQUIRE: mountcritlocal +# REQUIRE: cleanvar # BEFORE: cloudinitlocal . /etc/rc.subr diff --git a/templates/chrony.conf.almalinux.tmpl b/templates/chrony.conf.almalinux.tmpl new file mode 100644 index 000000000..43b1f5d7c --- /dev/null +++ b/templates/chrony.conf.almalinux.tmpl @@ -0,0 +1,51 @@ +## template:jinja +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} +{% for peer in peers -%} +peer {{peer}} +{% endfor %} +{% for a in allow -%} +allow {{a}} +{% endfor %} + +# Record the rate at which the system clock gains/losses time. 
+driftfile /var/lib/chrony/drift + +# Allow the system clock to be stepped in the first three updates +# if its offset is larger than 1 second. +makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Enable hardware timestamping on all interfaces that support it. +#hwtimestamp * + +# Increase the minimum number of selectable sources required to adjust +# the system clock. +#minsources 2 + +# Allow NTP client access from local network. +#allow 192.168.0.0/16 + +# Serve time even if not synchronized to a time source. +#local stratum 10 + +# Specify file containing keys for NTP authentication. +#keyfile /etc/chrony.keys + +# Specify directory for log files. +logdir /var/log/chrony + +# Select which information is logged. +#log measurements statistics tracking diff --git a/templates/chrony.conf.cloudlinux.tmpl b/templates/chrony.conf.cloudlinux.tmpl new file mode 100644 index 000000000..43b1f5d7c --- /dev/null +++ b/templates/chrony.conf.cloudlinux.tmpl @@ -0,0 +1,51 @@ +## template:jinja +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} +{% for peer in peers -%} +peer {{peer}} +{% endfor %} +{% for a in allow -%} +allow {{a}} +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# Allow the system clock to be stepped in the first three updates +# if its offset is larger than 1 second. +makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Enable hardware timestamping on all interfaces that support it. +#hwtimestamp * + +# Increase the minimum number of selectable sources required to adjust +# the system clock. +#minsources 2 + +# Allow NTP client access from local network. +#allow 192.168.0.0/16 + +# Serve time even if not synchronized to a time source. +#local stratum 10 + +# Specify file containing keys for NTP authentication. +#keyfile /etc/chrony.keys + +# Specify directory for log files. +logdir /var/log/chrony + +# Select which information is logged. +#log measurements statistics tracking diff --git a/templates/hosts.azurelinux.tmpl b/templates/hosts.azurelinux.tmpl new file mode 100644 index 000000000..8e3c23f6f --- /dev/null +++ b/templates/hosts.azurelinux.tmpl @@ -0,0 +1,22 @@ +## template:jinja +{# +This file /etc/cloud/templates/hosts.azurelinux.tmpl is only utilized +if enabled in cloud-config. Specifically, in order to enable it +you need to add the following to config: + manage_etc_hosts: True +-#} +# Your system has configured 'manage_etc_hosts' as True. +# As a result, if you wish for changes to this file to persist +# then you will need to either +# a.) make changes to the master file in /etc/cloud/templates/hosts.azurelinux.tmpl +# b.) 
change or remove the value of 'manage_etc_hosts' in +# /etc/cloud/cloud.cfg or cloud-config from user-data +# +# The following lines are desirable for IPv4 capable hosts +127.0.0.1 {{fqdn}} {{hostname}} +127.0.0.1 localhost.localdomain localhost +127.0.0.1 localhost4.localdomain4 localhost4 + +# The following lines are desirable for IPv6 capable hosts +::1 {{fqdn}} {{hostname}} +::1 localhost6.localdomain6 localhost6 diff --git a/templates/ntp.conf.almalinux.tmpl b/templates/ntp.conf.almalinux.tmpl new file mode 100644 index 000000000..6d166aa38 --- /dev/null +++ b/templates/ntp.conf.almalinux.tmpl @@ -0,0 +1,64 @@ +## template:jinja + +# For more information about this file, see the man pages +# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). + +driftfile /var/lib/ntp/drift + +# Permit time synchronization with our time source, but do not +# permit the source to query or modify the service on this system. +restrict default kod nomodify notrap nopeer noquery +restrict -6 default kod nomodify notrap nopeer noquery + +# Permit all access over the loopback interface. This could +# be tightened as well, but to do so would effect some of +# the administrative functions. +restrict 127.0.0.1 +restrict -6 ::1 + +# Hosts on local network are less restricted. +#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap + +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} +{% for peer in peers -%} +peer {{peer}} +{% endfor %} + +#broadcast 192.168.1.255 autokey # broadcast server +#broadcastclient # broadcast client +#broadcast 224.0.1.1 autokey # multicast server +#multicastclient 224.0.1.1 # multicast client +#manycastserver 239.255.254.254 # manycast server +#manycastclient 239.255.254.254 autokey # manycast client + +# Enable public key cryptography. +#crypto + +includefile /etc/ntp/crypto/pw + +# Key file containing the keys and key identifiers used when operating +# with symmetric key cryptography. +keys /etc/ntp/keys + +# Specify the key identifiers which are trusted. +#trustedkey 4 8 42 + +# Specify the key identifier to use with the ntpdc utility. +#requestkey 8 + +# Specify the key identifier to use with the ntpq utility. +#controlkey 8 + +# Enable writing of statistics records. +#statistics clockstats cryptostats loopstats peerstats diff --git a/templates/ntp.conf.cloudlinux.tmpl b/templates/ntp.conf.cloudlinux.tmpl new file mode 100644 index 000000000..6d166aa38 --- /dev/null +++ b/templates/ntp.conf.cloudlinux.tmpl @@ -0,0 +1,64 @@ +## template:jinja + +# For more information about this file, see the man pages +# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). + +driftfile /var/lib/ntp/drift + +# Permit time synchronization with our time source, but do not +# permit the source to query or modify the service on this system. +restrict default kod nomodify notrap nopeer noquery +restrict -6 default kod nomodify notrap nopeer noquery + +# Permit all access over the loopback interface. This could +# be tightened as well, but to do so would effect some of +# the administrative functions. +restrict 127.0.0.1 +restrict -6 ::1 + +# Hosts on local network are less restricted. 
+#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap + +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} +{% for peer in peers -%} +peer {{peer}} +{% endfor %} + +#broadcast 192.168.1.255 autokey # broadcast server +#broadcastclient # broadcast client +#broadcast 224.0.1.1 autokey # multicast server +#multicastclient 224.0.1.1 # multicast client +#manycastserver 239.255.254.254 # manycast server +#manycastclient 239.255.254.254 autokey # manycast client + +# Enable public key cryptography. +#crypto + +includefile /etc/ntp/crypto/pw + +# Key file containing the keys and key identifiers used when operating +# with symmetric key cryptography. +keys /etc/ntp/keys + +# Specify the key identifiers which are trusted. +#trustedkey 4 8 42 + +# Specify the key identifier to use with the ntpdc utility. +#requestkey 8 + +# Specify the key identifier to use with the ntpq utility. +#controlkey 8 + +# Enable writing of statistics records. +#statistics clockstats cryptostats loopstats peerstats diff --git a/templates/sources.list.ubuntu.deb822.tmpl b/templates/sources.list.ubuntu.deb822.tmpl index 2f2cc1449..8202dcbe5 100644 --- a/templates/sources.list.ubuntu.deb822.tmpl +++ b/templates/sources.list.ubuntu.deb822.tmpl @@ -31,10 +31,9 @@ ## or updates from the Ubuntu security team. ## Components: Aside from main, the following components can be added to the list ## restricted - Software that may not be under a free license, or protected by patents. -## universe - Community maintained packages. -## Software from this repository is only maintained and supported by Canonical -## for machines with Ubuntu Pro subscriptions. Without Ubuntu Pro, the Ubuntu -## community provides best-effort security maintenance. +## universe - Community maintained packages. Software in this repository receives maintenance +## from volunteers in the Ubuntu community, or a 10 year security maintenance +## commitment from Canonical when an Ubuntu Pro subscription is attached. ## multiverse - Community maintained of restricted. Software from this repository is ## ENTIRELY UNSUPPORTED by the Ubuntu team, and may not be under a free ## licence. Please satisfy yourself as to your rights to use the software. 
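The almalinux and cloudlinux chrony/ntp templates added above are identical Jinja templates driven by four optional lists: `pools`, `servers`, `peers`, and `allow`. For readers unfamiliar with the whitespace-control markers (`-%}` / `{%-`) used there, here is a minimal sketch that renders the pool/server portion with the `jinja2` library directly; the host names are invented for illustration, and cloud-init's own templater dispatches on the `## template:jinja` header rather than being invoked like this:

```python
# Minimal sketch (assumes jinja2 is installed) of how the pool/server
# loops in the new chrony/ntp templates expand. Values are hypothetical.
from jinja2 import Template

SNIPPET = """\
{% if pools %}# pools
{% endif %}
{% for pool in pools -%}
pool {{pool}} iburst
{% endfor %}
{%- if servers %}# servers
{% endif %}
{% for server in servers -%}
server {{server}} iburst
{% endfor %}"""

print(
    Template(SNIPPET).render(
        pools=["0.pool.ntp.org", "1.pool.ntp.org"],
        servers=["time.example.com"],
    )
)
# Output (note the blank lines left by the unstripped newlines):
# # pools
#
# pool 0.pool.ntp.org iburst
# pool 1.pool.ntp.org iburst
# # servers
#
# server time.example.com iburst
```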
diff --git a/test-requirements.txt b/test-requirements.txt index 19488b943..c6c32cae6 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,5 +1,7 @@ # Needed generally in tests +-r requirements.txt + # Avoid breaking change in `testpaths` treatment forced # test/unittests/conftest.py to be loaded by our integration-tests tox env # resulting in an unmet dependency issue: diff --git a/tests/data/netinfo/freebsd-ifconfig-cidr-output b/tests/data/netinfo/freebsd-ifconfig-cidr-output new file mode 100644 index 000000000..755a5d5bf --- /dev/null +++ b/tests/data/netinfo/freebsd-ifconfig-cidr-output @@ -0,0 +1,15 @@ +vtnet0: flags=1008843 metric 0 mtu 1500 + options=c00b8 + ether 96:00:02:b1:49:68 + inet 198.51.100.13/32 broadcast 198.51.100.13 + inet6 fe80::9400:2ff:feb1:4968%vtnet0/64 scopeid 0x1 + media: Ethernet autoselect (10Gbase-T ) + status: active + nd6 options=21 +lo0: flags=1008049 metric 0 mtu 16384 + options=680003 + inet 127.0.0.1/8 + inet6 ::1/128 + inet6 fe80::1%lo0/64 scopeid 0x2 + groups: lo + nd6 options=21 \ No newline at end of file diff --git a/tests/data/user_data.1.txt b/tests/data/user_data.1.txt index 4c4543de8..a1b5aa60e 100644 --- a/tests/data/user_data.1.txt +++ b/tests/data/user_data.1.txt @@ -3,13 +3,3 @@ write_files: - content: blah path: /etc/blah.ini permissions: 493 - -system_info: - package_mirrors: - - arches: [i386, amd64, blah] - failsafe: - primary: http://my.archive.mydomain.com/ubuntu - security: http://my.security.mydomain.com/ubuntu - search: - primary: [] - security: [] diff --git a/tests/data/vmware/cust-dhcp-2nic-instance-id.cfg b/tests/data/vmware/cust-dhcp-2nic-instance-id.cfg index 70a9d3139..53776c9ba 100644 --- a/tests/data/vmware/cust-dhcp-2nic-instance-id.cfg +++ b/tests/data/vmware/cust-dhcp-2nic-instance-id.cfg @@ -33,5 +33,5 @@ SUFFIX|1 = eng.vmware.com TIMEZONE = Africa/Abidjan UTC = yes -[CLOUDINIT] -INSTANCE-ID = guest-os-customization-uuid +[MISC] +INSTANCE-ID = 50078779-0bc7-1a57-7c52-1179f03f5650 diff --git a/tests/integration_tests/assets/enable_profile.py b/tests/integration_tests/assets/enable_profile.py new file mode 100644 index 000000000..a6a0070c3 --- /dev/null +++ b/tests/integration_tests/assets/enable_profile.py @@ -0,0 +1,25 @@ +from pathlib import Path + +services = [ + "cloud-init-local.service", + "cloud-init.service", + "cloud-config.service", + "cloud-final.service", +] +service_dir = Path("/lib/systemd/system/") + +# Check for the existence of the service files +for service in services: + if not (service_dir / service).is_file(): + print(f"Error: {service} does not exist in {service_dir}") + exit(1) + +# Rewrite each ExecStart= line to run under 'python3 -m cProfile' +for service in services: + file_path = service_dir / service + content = file_path.read_text() + content = content.replace( + "ExecStart=/usr", + (f"ExecStart=python3 -m cProfile -o /var/log/{service}.stats /usr"), + ) + file_path.write_text(content) diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py index d726e518b..99ac92b28 100644 --- a/tests/integration_tests/bugs/test_lp1813396.py +++ b/tests/integration_tests/bugs/test_lp1813396.py @@ -33,9 +33,11 @@ def test_gpg_no_tty(client: IntegrationInstance): ] verify_ordered_items_in_text(to_verify, log) verify_clean_log(log) - processes_in_cgroup = int( - client.execute( - "systemd-cgls -u cloud-config.service 2>/dev/null | wc -l" - ).stdout + control_groups = client.execute( + "systemd-cgls -u cloud-config.service 2>/dev/null | wc -l" +
).stdout + processes_in_cgroup = len(control_groups.split("\n")) + assert processes_in_cgroup < 2, ( + "Cloud-init didn't clean up after itself, " + f"cloud-config has remaining daemons:\n{control_groups}" ) - assert processes_in_cgroup < 2 diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py index 25d271b75..b2d1b1bd8 100644 --- a/tests/integration_tests/bugs/test_lp1835584.py +++ b/tests/integration_tests/bugs/test_lp1835584.py @@ -1,4 +1,4 @@ -""" Integration test for LP #1835584 +"""Integration test for LP #1835584 Upstream linux kernels prior to 4.15 provide DMI product_uuid in uppercase. More recent kernels switched to lowercase for DMI product_uuid. Azure diff --git a/tests/integration_tests/bugs/test_lp1897099.py b/tests/integration_tests/bugs/test_lp1897099.py index b09817902..8546bbda7 100644 --- a/tests/integration_tests/bugs/test_lp1897099.py +++ b/tests/integration_tests/bugs/test_lp1897099.py @@ -1,4 +1,4 @@ -""" Integration test for LP #187099 +"""Integration test for LP #1897099 Ensure that if fallocate fails during mkswap that we fall back to using dd diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index c201cfa9f..82acde409 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -21,7 +21,7 @@ Openstack, Qemu, ) -from pycloudlib.cloud import BaseCloud, ImageType +from pycloudlib.cloud import ImageType from pycloudlib.ec2.instance import EC2Instance from pycloudlib.lxd.cloud import _BaseLXD from pycloudlib.lxd.instance import BaseInstance, LXDInstance @@ -55,7 +55,6 @@ def _get_ubuntu_series() -> list: class IntegrationCloud(ABC): datasource: str - cloud_instance: BaseCloud def __init__( self, @@ -64,7 +63,7 @@ def __init__( ): self._image_type = image_type self.settings = settings - self.cloud_instance: BaseCloud = self._get_cloud_instance() + self.cloud_instance = self._get_cloud_instance() self.initial_image_id = self._get_initial_image() self.snapshot_id = None @@ -183,7 +182,7 @@ def snapshot(self, instance): def delete_snapshot(self): if self.snapshot_id: - if self.settings.KEEP_IMAGE: + if self.settings.KEEP_IMAGE: # type: ignore log.info( "NOT deleting snapshot image created for this testrun " "because KEEP_IMAGE is True: %s", @@ -200,7 +199,7 @@ def delete_snapshot(self): class Ec2Cloud(IntegrationCloud): datasource = "ec2" - def _get_cloud_instance(self): + def _get_cloud_instance(self) -> EC2: return EC2(tag="ec2-integration-test") def _get_initial_image(self, **kwargs) -> str: @@ -218,17 +217,9 @@ def _perform_launch( name="ec2-cloud-init-integration" ) - # Enable IPv6 metadata at http://[fd00:ec2::254] - if "Ipv6AddressCount" not in launch_kwargs: - launch_kwargs["Ipv6AddressCount"] = 1 - if "MetadataOptions" not in launch_kwargs: - launch_kwargs["MetadataOptions"] = {} - if "HttpProtocolIpv6" not in launch_kwargs["MetadataOptions"]: - launch_kwargs["MetadataOptions"] = { - "HttpProtocolIpv6": "enabled" - } - - pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs) + pycloudlib_instance = self.cloud_instance.launch( + enable_ipv6=enable_ipv6, **launch_kwargs + ) self._maybe_wait(pycloudlib_instance, wait) return pycloudlib_instance @@ -236,7 +227,7 @@ def _perform_launch( class GceCloud(IntegrationCloud): datasource = "gce" - def _get_cloud_instance(self): + def _get_cloud_instance(self) -> GCE: return GCE( tag="gce-integration-test", ) @@ -251,7 +242,7 @@ class AzureCloud(IntegrationCloud): datasource = "azure" cloud_instance:
Azure - def _get_cloud_instance(self): + def _get_cloud_instance(self) -> Azure: return Azure(tag="azure-integration-test") def _get_initial_image(self, **kwargs) -> str: @@ -273,7 +264,7 @@ def destroy(self): class OciCloud(IntegrationCloud): datasource = "oci" - def _get_cloud_instance(self): + def _get_cloud_instance(self) -> OCI: return OCI( tag="oci-integration-test", ) @@ -284,11 +275,7 @@ class _LxdIntegrationCloud(IntegrationCloud): instance_tag: str cloud_instance: _BaseLXD - def _get_cloud_instance(self): - return self.pycloudlib_instance_cls(tag=self.instance_tag) - - @staticmethod - def _get_or_set_profile_list(release): + def _get_or_set_profile_list(self, release): return None @staticmethod @@ -300,6 +287,10 @@ def _mount_source(instance: LXDInstance): os.path.join(cloudinit_path, "..", "templates"), "/etc/cloud/templates", ), + ( + os.path.join(cloudinit_path, "..", "doc", "module-docs"), + "/usr/share/doc/cloud-init/module-docs", + ), ] for n, (source_path, target_path) in enumerate(mounts): format_variables = { @@ -359,15 +350,21 @@ class LxdContainerCloud(_LxdIntegrationCloud): pycloudlib_instance_cls = LXDContainer instance_tag = "lxd-container-integration-test" + def _get_cloud_instance(self) -> LXDContainer: + return self.pycloudlib_instance_cls(tag=self.instance_tag) + class LxdVmCloud(_LxdIntegrationCloud): datasource = "lxd_vm" cloud_instance: LXDVirtualMachine pycloudlib_instance_cls = LXDVirtualMachine instance_tag = "lxd-vm-integration-test" - _profile_list = None + _profile_list: list = [] - def _get_or_set_profile_list(self, release): + def _get_cloud_instance(self) -> LXDVirtualMachine: + return self.pycloudlib_instance_cls(tag=self.instance_tag) + + def _get_or_set_profile_list(self, release) -> list: if self._profile_list: return self._profile_list self._profile_list = self.cloud_instance.build_necessary_profiles( diff --git a/tests/integration_tests/cmd/test_schema.py b/tests/integration_tests/cmd/test_schema.py index 356a793be..3155a0791 100644 --- a/tests/integration_tests/cmd/test_schema.py +++ b/tests/integration_tests/cmd/test_schema.py @@ -3,9 +3,13 @@ import pytest +from cloudinit.util import should_log_deprecation from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.releases import CURRENT_RELEASE, MANTIC -from tests.integration_tests.util import verify_clean_log +from tests.integration_tests.util import ( + get_feature_flag_value, + verify_clean_log, +) USER_DATA = """\ #cloud-config @@ -62,10 +66,19 @@ class TestSchemaDeprecations: def test_clean_log(self, class_client: IntegrationInstance): log = class_client.read_from_file("/var/log/cloud-init.log") verify_clean_log(log, ignore_deprecations=True) - assert "DEPRECATED]: Deprecated cloud-config provided:" in log - assert "apt_reboot_if_required: Default: ``false``. Deprecated " in log - assert "apt_update: Default: ``false``. Deprecated in version" in log - assert "apt_upgrade: Default: ``false``. Deprecated in version" in log + version_boundary = get_feature_flag_value( + class_client, "DEPRECATION_INFO_BOUNDARY" + ) + # the deprecation_version is 22.2 in schema for apt_* keys in + # user-data. Pass 22.2 in against the client's version_boundary. 
+ if should_log_deprecation("22.2", version_boundary): + log_level = "DEPRECATED" + else: + log_level = "INFO" + assert f"{log_level}]: Deprecated cloud-config provided:" in log + assert "apt_reboot_if_required: Deprecated " in log + assert "apt_update: Deprecated in version" in log + assert "apt_upgrade: Deprecated in version" in log def test_network_config_schema_validation( self, class_client: IntegrationInstance @@ -99,14 +112,14 @@ def test_network_config_schema_validation( # No netplan API available skips validation content_responses[NET_CFG_V2] = { "out": ( - "Skipping network-config schema validation." - " No network schema for version: 2" + "Skipping network-config schema validation for version: 2." + " No netplan API available." ) } content_responses[NET_CFG_V2_INVALID] = { "out": ( - "Skipping network-config schema validation." - " No network schema for version: 2" + "Skipping network-config schema validation for version: 2." + " No netplan API available." ) } @@ -139,17 +152,10 @@ def test_schema_deprecations(self, class_client: IntegrationInstance): ), "`schema` cmd must return 0 even with deprecated configs" assert not result.stderr assert "Cloud config schema deprecations:" in result.stdout + assert "apt_update: Deprecated in version" in result.stdout + assert "apt_upgrade: Deprecated in version" in result.stdout assert ( - "apt_update: Default: ``false``. Deprecated in version" - in result.stdout - ) - assert ( - "apt_upgrade: Default: ``false``. Deprecated in version" - in result.stdout - ) - assert ( - "apt_reboot_if_required: Default: ``false``. Deprecated in version" - in result.stdout + "apt_reboot_if_required: Deprecated in version" in result.stdout ) annotated_result = class_client.execute( @@ -167,9 +173,9 @@ def test_schema_deprecations(self, class_client: IntegrationInstance): apt_reboot_if_required: false\t\t# D3 # Deprecations: ------------- - # D1: Default: ``false``. Deprecated in version 22.2. Use ``package_update`` instead. - # D2: Default: ``false``. Deprecated in version 22.2. Use ``package_upgrade`` instead. - # D3: Default: ``false``. Deprecated in version 22.2. Use ``package_reboot_if_required`` instead. + # D1: Deprecated in version 22.2. Use ``package_update`` instead. + # D2: Deprecated in version 22.2. Use ``package_upgrade`` instead. + # D3: Deprecated in version 22.2. Use ``package_reboot_if_required`` instead. 
Valid schema /root/user-data""" # noqa: E501 diff --git a/tests/integration_tests/cmd/test_status.py b/tests/integration_tests/cmd/test_status.py index 6296072e3..23509c57c 100644 --- a/tests/integration_tests/cmd/test_status.py +++ b/tests/integration_tests/cmd/test_status.py @@ -8,7 +8,10 @@ from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM from tests.integration_tests.releases import CURRENT_RELEASE, IS_UBUNTU, JAMMY -from tests.integration_tests.util import wait_for_cloud_init +from tests.integration_tests.util import ( + push_and_enable_systemd_unit, + wait_for_cloud_init, +) def _remove_nocloud_dir_and_reboot(client: IntegrationInstance): @@ -60,8 +63,11 @@ def test_wait_when_no_datasource(session_cloud: IntegrationCloud, setup_image): USER_DATA = """\ #cloud-config -ca-certs: - remove_defaults: false +users: + - name: something + ssh-authorized-keys: ["something"] + - default +ca_certs: invalid_key: true """ @@ -77,23 +83,28 @@ def test_status_json_errors(client): ) status_json = client.execute("cloud-init status --format json").stdout - assert "Deprecated cloud-config provided:\nca-certs:" in json.loads( - status_json - )["init"]["recoverable_errors"].get("DEPRECATED").pop(0) - assert "Deprecated cloud-config provided:\nca-certs:" in json.loads( - status_json - )["recoverable_errors"].get("DEPRECATED").pop(0) - assert "Invalid cloud-config provided" in json.loads(status_json)["init"][ - "recoverable_errors" - ].get("WARNING").pop(0) - assert "Invalid cloud-config provided" in json.loads(status_json)[ + assert ( + "Deprecated cloud-config provided: users.0.ssh-authorized-keys" + in json.loads(status_json)["init"]["recoverable_errors"] + .get("DEPRECATED") + .pop(0) + ) + assert ( + "Deprecated cloud-config provided: users.0.ssh-authorized-keys:" + in json.loads(status_json)["recoverable_errors"] + .get("DEPRECATED") + .pop(0) + ) + assert "cloud-config failed schema validation" in json.loads(status_json)[ + "init" + ]["recoverable_errors"].get("WARNING").pop(0) + assert "cloud-config failed schema validation" in json.loads(status_json)[ "recoverable_errors" ].get("WARNING").pop(0) EARLY_BOOT_WAIT_USER_DATA = """\ #cloud-config -runcmd: [systemctl enable before-cloud-init-local.service] write_files: - path: /waitoncloudinit.sh permissions: '0755' @@ -106,33 +117,34 @@ def test_status_json_errors(client): fi cloud-init status --wait --long > $1 date +%s.%N > $MARKER_FILE -- path: /lib/systemd/system/before-cloud-init-local.service - permissions: '0644' - content: | - [Unit] - Description=BEFORE cloud-init local - DefaultDependencies=no - After=systemd-remount-fs.service - Before=cloud-init-local.service - Before=shutdown.target - Before=sysinit.target - Conflicts=shutdown.target - RequiresMountsFor=/var/lib/cloud - - [Service] - Type=simple - ExecStart=/waitoncloudinit.sh /before-local - RemainAfterExit=yes - TimeoutSec=0 - - # Output needs to appear in instance console output - StandardOutput=journal+console - - [Install] - WantedBy=cloud-init.target """ # noqa: E501 +BEFORE_CLOUD_INIT_LOCAL = """\ +[Unit] +Description=BEFORE cloud-init local +DefaultDependencies=no +After=systemd-remount-fs.service +Before=cloud-init-local.service +Before=shutdown.target +Before=sysinit.target +Conflicts=shutdown.target +RequiresMountsFor=/var/lib/cloud + +[Service] +Type=simple +ExecStart=/waitoncloudinit.sh /before-local +RemainAfterExit=yes +TimeoutSec=0 + +# Output needs to appear in instance console output 
+StandardOutput=journal+console + +[Install] +WantedBy=cloud-init.target +""" + + @pytest.mark.user_data(EARLY_BOOT_WAIT_USER_DATA) @pytest.mark.lxd_use_exec @pytest.mark.skipif( @@ -141,6 +153,9 @@ def test_status_json_errors(client): ) def test_status_block_through_all_boot_status(client): """Assert early boot cloud-init status --wait does not exit early.""" + push_and_enable_systemd_unit( + client, "before-cloud-init-local.service", BEFORE_CLOUD_INIT_LOCAL + ) client.execute("cloud-init clean --logs --reboot") wait_for_cloud_init(client).stdout.strip() client.execute("cloud-init status --wait") diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index ccbf6a716..c3b8531ae 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -118,7 +118,9 @@ def setup_image(session_cloud: IntegrationCloud, request): """ source = get_validated_source(session_cloud) if not ( - source.installs_new_version() or integration_settings.INCLUDE_COVERAGE + source.installs_new_version() + or integration_settings.INCLUDE_COVERAGE + or integration_settings.INCLUDE_PROFILE ): return log.info("Setting up source image") @@ -126,9 +128,20 @@ def setup_image(session_cloud: IntegrationCloud, request): if source.installs_new_version(): log.info("Installing cloud-init from %s", source.name) client.install_new_cloud_init(source) + if ( + integration_settings.INCLUDE_PROFILE + and integration_settings.INCLUDE_COVERAGE + ): + log.error( + "Invalid configuration, cannot enable both profile and coverage." + ) + raise ValueError() if integration_settings.INCLUDE_COVERAGE: log.info("Installing coverage") client.install_coverage() + elif integration_settings.INCLUDE_PROFILE: + log.info("Installing profiler") + client.install_profile() # All done customizing the image, so snapshot it and make it global snapshot_id = client.snapshot() client.cloud.snapshot_id = snapshot_id @@ -169,6 +182,30 @@ def _collect_coverage(instance: IntegrationInstance, log_dir: Path): log.error("Failed to pull coverage for: %s", e) +def _collect_profile(instance: IntegrationInstance, log_dir: Path): + log.info("Writing profile to %s", log_dir) + try: + (log_dir / "profile").mkdir(parents=True) + instance.pull_file( + "/var/log/cloud-init-local.service.stats", + log_dir / "profile" / "local.stats", + ) + instance.pull_file( + "/var/log/cloud-init.service.stats", + log_dir / "profile" / "network.stats", + ) + instance.pull_file( + "/var/log/cloud-config.service.stats", + log_dir / "profile" / "config.stats", + ) + instance.pull_file( + "/var/log/cloud-final.service.stats", + log_dir / "profile" / "final.stats", + ) + except Exception as e: + log.error("Failed to pull profile for: %s", e) + + def _setup_artifact_paths(node_id: str): parent_dir = Path(integration_settings.LOCAL_LOG_PATH, session_start_time) @@ -209,7 +246,12 @@ def _collect_artifacts( integration_settings.COLLECT_LOGS == "ON_ERROR" and test_failed ) should_collect_coverage = integration_settings.INCLUDE_COVERAGE - if not (should_collect_logs or should_collect_coverage): + should_collect_profile = integration_settings.INCLUDE_PROFILE + if not ( + should_collect_logs + or should_collect_coverage + or should_collect_profile + ): return log_dir = _setup_artifact_paths(node_id) @@ -220,6 +262,9 @@ def _collect_artifacts( if should_collect_coverage: _collect_coverage(instance, log_dir) + elif should_collect_profile: + _collect_profile(instance, log_dir) + @contextmanager def _client( @@ -273,7 +318,7 @@ def _client( 
@pytest.fixture -def client( +def client( # pylint: disable=W0135 request, fixture_utils, session_cloud, setup_image ) -> Iterator[IntegrationInstance]: """Provide a client that runs for every test.""" @@ -282,7 +327,7 @@ def client( @pytest.fixture(scope="module") -def module_client( +def module_client( # pylint: disable=W0135 request, fixture_utils, session_cloud, setup_image ) -> Iterator[IntegrationInstance]: """Provide a client that runs once per module.""" @@ -291,7 +336,7 @@ def module_client( @pytest.fixture(scope="class") -def class_client( +def class_client( # pylint: disable=W0135 request, fixture_utils, session_cloud, setup_image ) -> Iterator[IntegrationInstance]: """Provide a client that runs once per class.""" @@ -385,7 +430,20 @@ def _generate_coverage_report() -> None: log.info("Coverage report generated") +def _generate_profile_report() -> None: + log.info("Profile reports generated, run the following to view:") + command = ( + "python3 -m snakeviz /tmp/cloud_init_test_logs/" + "last/tests/integration_tests/*/*/*/profile/%s" + ) + log.info(command, "local.stats") + log.info(command, "network.stats") + log.info(command, "config.stats") + log.info(command, "final.stats") + + def pytest_sessionfinish(session, exitstatus) -> None: - if not integration_settings.INCLUDE_COVERAGE: - return - _generate_coverage_report() + if integration_settings.INCLUDE_COVERAGE: + _generate_coverage_report() + elif integration_settings.INCLUDE_PROFILE: + _generate_profile_report() diff --git a/tests/integration_tests/datasources/test_azure.py b/tests/integration_tests/datasources/test_azure.py index 8e663ac21..c1d36abe4 100644 --- a/tests/integration_tests/datasources/test_azure.py +++ b/tests/integration_tests/datasources/test_azure.py @@ -1,11 +1,14 @@ +import datetime + import pytest +from pycloudlib.azure.util import AzureCreateParams, AzureParams from pycloudlib.cloud import ImageType from tests.integration_tests.clouds import IntegrationCloud from tests.integration_tests.conftest import get_validated_source from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM -from tests.integration_tests.releases import CURRENT_RELEASE +from tests.integration_tests.releases import BIONIC, CURRENT_RELEASE def _check_for_eject_errors( @@ -45,3 +48,70 @@ def test_azure_eject(session_cloud: IntegrationCloud): session_cloud.cloud_instance.delete_image(snapshot_id) else: _check_for_eject_errors(instance) + + +def parse_resolvectl_dns(output: str) -> dict: + """Parses the output of 'resolvectl dns'. + + >>> parse_resolvectl_dns( + ...     "Global:\n" + ...     "Link 2 (eth0): 168.63.129.16\n" + ...     "Link 3 (eth1): 168.63.129.16\n" + ... ) + {'Global': '', + 'Link 2 (eth0)': '168.63.129.16', + 'Link 3 (eth1)': '168.63.129.16'} + """ + + parsed = dict() + for line in output.splitlines(): + if line.isspace(): + continue + splitted = line.split(":") + k = splitted.pop(0).strip() + v = splitted.pop(0).strip() if splitted else "" + parsed[k] = v + return parsed + + +@pytest.mark.skipif(PLATFORM != "azure", reason="Test is Azure specific") +@pytest.mark.skipif( + CURRENT_RELEASE < BIONIC, reason="Easier to test on Bionic+" +) +def test_azure_multi_nic_setup( + setup_image, session_cloud: IntegrationCloud +) -> None: + """Integration test for https://warthogs.atlassian.net/browse/CPC-3999. + + On Azure, only the primary NIC should have a route to DNS. + Ensure other NICs do not have a route to DNS.
+ """ + us = datetime.datetime.now().strftime("%f") + rg_params = AzureParams(f"ci-test-multi-nic-setup-{us}", None) + nic_one = AzureCreateParams(f"ci-nic1-test-{us}", rg_params.name, None) + nic_two = AzureCreateParams(f"ci-nic2-test-{us}", rg_params.name, None) + with session_cloud.launch( + launch_kwargs={ + "resource_group_params": rg_params, + "network_interfaces_params": [nic_one, nic_two], + } + ) as client: + _check_for_eject_errors(client) + if CURRENT_RELEASE == BIONIC: + ret = client.execute("systemd-resolve --status") + assert ret.ok, ret.stderr + assert ret.stdout.count("Current Scopes: DNS") == 1 + else: + ret = client.execute("resolvectl dns") + assert ret.ok, ret.stderr + routes = parse_resolvectl_dns(ret.stdout) + routes_devices = list(routes.keys()) + eth1_dev = [dev for dev in routes_devices if "(eth1)" in dev][0] + assert not routes[eth1_dev], ( + f"Expected eth1 to not have routes to dns." + f" Found: {routes[eth1_dev]}" + ) + + # check the instance can resolve something + res = client.execute("resolvectl query google.com") + assert res.ok, res.stderr diff --git a/tests/integration_tests/datasources/test_lxd_hotplug.py b/tests/integration_tests/datasources/test_lxd_hotplug.py index 536f4e367..c2744c460 100644 --- a/tests/integration_tests/datasources/test_lxd_hotplug.py +++ b/tests/integration_tests/datasources/test_lxd_hotplug.py @@ -1,8 +1,8 @@ import json import pytest +import yaml -from cloudinit import safeyaml from cloudinit.subp import subp from cloudinit.util import is_true from tests.integration_tests.decorators import retry @@ -142,10 +142,10 @@ def test_network_config_applied(self, class_client: IntegrationInstance): f"nictype=bridged parent=ci-test-br-eth2".split() ) ensure_hotplug_exited(client) - post_netplan = safeyaml.load( + post_netplan = yaml.safe_load( client.read_from_file("/etc/netplan/50-cloud-init.yaml") ) - expected_netplan = safeyaml.load(UPDATED_NETWORK_CONFIG) + expected_netplan = yaml.safe_load(UPDATED_NETWORK_CONFIG) expected_netplan = {"network": expected_netplan} assert post_netplan == expected_netplan, client.read_from_file( "/var/log/cloud-init.log" diff --git a/tests/integration_tests/datasources/test_nocloud.py b/tests/integration_tests/datasources/test_nocloud.py index b7886b502..c6c440840 100644 --- a/tests/integration_tests/datasources/test_nocloud.py +++ b/tests/integration_tests/datasources/test_nocloud.py @@ -1,12 +1,21 @@ """NoCloud datasource integration tests.""" + from textwrap import dedent import pytest from pycloudlib.lxd.instance import LXDInstance from cloudinit.subp import subp +from cloudinit.util import should_log_deprecation from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM +from tests.integration_tests.releases import CURRENT_RELEASE, FOCAL +from tests.integration_tests.util import ( + get_feature_flag_value, + override_kernel_command_line, + verify_clean_boot, + verify_clean_log, +) VENDOR_DATA = """\ #cloud-config @@ -186,6 +195,257 @@ def test_smbios_seed_network(self, client: IntegrationInstance): assert client.execute("cloud-init clean --logs").ok client.restart() assert client.execute("test -f /var/tmp/smbios_test_file").ok - assert "'nocloud-net' datasource name is deprecated" in client.execute( - "cloud-init status --format json" + version_boundary = get_feature_flag_value( + client, "DEPRECATION_INFO_BOUNDARY" + ) + # nocloud-net deprecated in version 24.1 + if should_log_deprecation("24.1", version_boundary): + log_level = 
"DEPRECATED" + else: + log_level = "INFO" + client.execute( + rf"grep \"{log_level}]: The 'nocloud-net' datasource name is" + ' deprecated" /var/log/cloud-init.log' + ).ok + + +@pytest.mark.skipif(PLATFORM != "lxd_vm", reason="Modifies grub config") +@pytest.mark.lxd_use_exec +class TestFTP: + """Test nocloud's support for unencrypted FTP and FTP over TLS (ftps). + + These tests work by setting up a local ftp server on the test instance + and then rebooting the instance clean (cloud-init clean --logs --reboot). + + Check for the existence (or non-existence) of specific log messages to + verify functionality. + """ + + # should we really be surfacing this netplan stderr as a warning? + # i.e. how does it affect the users? + expected_warnings = [ + "Falling back to a hard restart of systemd-networkd.service" + ] + + @staticmethod + def _boot_with_cmdline( + cmdline: str, client: IntegrationInstance, encrypted: bool = False + ) -> None: + """configure an ftp server to start prior to network timeframe + optionally install certs and make the server support only FTP over TLS + + cmdline: a string containing the kernel command line set on reboot + client: an instance to configure + encrypted: a boolean which modifies the configured ftp server + """ + + # install the essential bits + assert client.execute( + "apt update && apt install -yq python3-pyftpdlib " + "python3-openssl ca-certificates libnss3-tools" + ).ok + + # How do you reliably run a ftp server for your instance to + # read files from during early boot? In typical production + # environments, the ftp server would be separate from the instance. + # + # For a reliable server that fits with the framework of running tests + # on a single instance, it is easier to just install an ftp server + # that runs on the second boot prior to the cloud-init unit which + # reaches out to the ftp server. This achieves reaching out to an + # ftp(s) server for testing - cloud-init just doesn't have to reach + # very far to get what it needs. + # + # DO NOT use these concepts in a production. + # + # This configuration is neither secure nor production-grade - intended + # only for testing purposes. 
+ client.write_to_file( + "/server.py", + dedent( + """\ + #!/usr/bin/python3 + import logging + + from pyftpdlib.authorizers import DummyAuthorizer + from pyftpdlib.handlers import FTPHandler, TLS_FTPHandler + from pyftpdlib.servers import FTPServer + from pyftpdlib.filesystems import UnixFilesystem + + encrypted = """ + + str(encrypted) + + """ + + logging.basicConfig(level=logging.DEBUG) + + # yeah, it's not secure but that's not the point + authorizer = DummyAuthorizer() + + # Define a read-only anonymous user + authorizer.add_anonymous("/home/anonymous") + + # Instantiate FTP handler class + if not encrypted: + handler = FTPHandler + logging.info("Running unencrypted ftp server") + else: + handler = TLS_FTPHandler + handler.certfile = "/cert.pem" + handler.keyfile = "/key.pem" + logging.info("Running encrypted ftp server") + + handler.authorizer = authorizer + handler.abstracted_fs = UnixFilesystem + server = FTPServer(("localhost", 2121), handler) + + # start the ftp server + server.serve_forever() + """ + ), ) + assert client.execute("chmod +x /server.py").ok + + if encrypted: + if CURRENT_RELEASE > FOCAL: + assert client.execute("apt install -yq mkcert").ok + else: + + # install golang + assert client.execute("apt install -yq golang").ok + + # build mkcert from source + # + # we could check out a tag, but the project hasn't + # been updated in 2 years + # + # instructions from https://github.com/FiloSottile/mkcert + assert client.execute( + "git clone https://github.com/FiloSottile/mkcert && " + "cd mkcert && " + "export latest_ver=$(git describe --tags --abbrev=0) && " + 'wget "https://github.com/FiloSottile/mkcert/releases/' + "download/${latest_ver}/mkcert-" + '${latest_ver}-linux-amd64"' + " -O mkcert" + ).ok + + # giddyup + assert client.execute( + "ln -s $HOME/mkcert/mkcert /usr/local/bin/mkcert" + ).ok + + # more palatable than openssl commands + assert client.execute( + "mkcert -install -cert-file /cert.pem -key-file /key.pem " + "localhost 127.0.0.1 0.0.0.0 ::1" + ).ok + + client.write_to_file( + "/lib/systemd/system/local-ftp.service", + dedent( + """\ + [Unit] + Description=TESTING USE ONLY ftp server + Wants=cloud-init-local.service + DefaultDependencies=no + + # we want the network up for network operations + # and NoCloud operates in network timeframe + After=systemd-networkd-wait-online.service + After=networking.service + Before=cloud-init.service + + [Service] + Type=exec + ExecStart=/server.py + + [Install] + WantedBy=cloud-init.target + """ + ), + ) + assert client.execute( + "chmod 644 /lib/systemd/system/local-ftp.service" + ).ok + assert client.execute("systemctl enable local-ftp.service").ok + assert client.execute("mkdir /home/anonymous").ok + + client.write_to_file( + "/user-data", + dedent( + """\ + #cloud-config + + hostname: ftp-bootstrapper + """ + ), + ) + client.write_to_file( + "/meta-data", + dedent( + """\ + instance-id: ftp-instance + """ + ), + ) + client.write_to_file("/vendor-data", "") + + # set the kernel command line, reboot with it + override_kernel_command_line(cmdline, client) + + def test_nocloud_ftp_unencrypted_server_succeeds( + self, client: IntegrationInstance + ): + """check that ftp:// succeeds to unencrypted ftp server + + this mode allows administrators to choose unencrypted ftp, + at their own risk + """ + cmdline = "ds=nocloud;seedfrom=ftp://0.0.0.0:2121" + self._boot_with_cmdline(cmdline, client) + verify_clean_boot(client, ignore_warnings=self.expected_warnings) + assert "ftp-bootstrapper" == 
client.execute("hostname").rstrip() + verify_clean_log(client.execute("cat /var/log/cloud-init.log").stdout) + + def test_nocloud_ftps_unencrypted_server_fails( + self, client: IntegrationInstance + ): + """check that ftps:// fails to unencrypted ftp server + + this mode allows administrators to enforce TLS encryption + """ + cmdline = "ds=nocloud;seedfrom=ftps://localhost:2121" + self._boot_with_cmdline(cmdline, client) + verify_clean_boot( + client, + ignore_warnings=self.expected_warnings, + require_warnings=[ + "Getting data from failed", + "Used fallback datasource", + "Attempted to connect to an insecure ftp server but used" + " a scheme of ftps://, which is not allowed. Use ftp:// " + "to allow connecting to insecure ftp servers.", + ], + ) + + def test_nocloud_ftps_encrypted_server_succeeds( + self, client: IntegrationInstance + ): + """check that ftps:// encrypted ftp server succeeds + + this mode allows administrators to enforce TLS encryption + """ + cmdline = "ds=nocloud;seedfrom=ftps://localhost:2121" + self._boot_with_cmdline(cmdline, client, encrypted=True) + verify_clean_boot(client, ignore_warnings=self.expected_warnings) + assert "ftp-bootstrapper" == client.execute("hostname").rstrip() + verify_clean_log(client.execute("cat /var/log/cloud-init.log").stdout) + + def test_nocloud_ftp_encrypted_server_fails( + self, client: IntegrationInstance + ): + """check that using ftp:// to encrypted ftp server fails""" + cmdline = "ds=nocloud;seedfrom=ftp://0.0.0.0:2121" + self._boot_with_cmdline(cmdline, client, encrypted=True) + verify_clean_boot(client, ignore_warnings=self.expected_warnings) diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 1b09cba12..32281756c 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -1,13 +1,16 @@ # This file is part of cloud-init. See LICENSE file for license information. import logging import os +import re import uuid from enum import Enum from pathlib import Path from tempfile import NamedTemporaryFile from typing import Union +from pycloudlib.gce.instance import GceInstance from pycloudlib.instance import BaseInstance +from pycloudlib.lxd.instance import LXDInstance from pycloudlib.result import Result from tests.helpers import cloud_init_project_dir @@ -67,7 +70,10 @@ def __init__( self._ip = "" def destroy(self): - self.instance.delete() + if isinstance(self.instance, GceInstance): + self.instance.delete(wait=False) + else: + self.instance.delete() def restart(self): """Restart this instance (via cloud mechanism) and wait for boot. 
@@ -162,6 +168,13 @@ def install_coverage(self): ) assert self.execute("python3 /var/tmp/enable_coverage.py").ok + def install_profile(self): + self.push_file( + local_path=ASSETS_DIR / "enable_profile.py", + remote_path="/var/tmp/enable_profile.py", + ) + assert self.execute("python3 /var/tmp/enable_profile.py").ok + def install_new_cloud_init( self, source: CloudInitSource, @@ -200,11 +213,26 @@ def install_proposed_image(self): def install_ppa(self): log.info("Installing PPA") + if self.execute("which add-apt-repository").failed: + log.info("Installing missing software-properties-common package") + self._apt_update() + assert self.execute( + "apt install -qy software-properties-common" + ).ok + pin_origin = self.settings.CLOUD_INIT_SOURCE[4:] # Drop leading ppa: + pin_origin = re.sub("[^a-z0-9-]", "-", pin_origin) + self.write_to_file( + "/etc/apt/preferences.d/cloud-init-integration-testing", + f"package: cloud-init\nPin: release o=LP-PPA-{pin_origin}\n" + "Pin-Priority: 1001\n", + ) assert self.execute( "add-apt-repository {} -y".format(self.settings.CLOUD_INIT_SOURCE) ).ok - self._apt_update() - assert self.execute("apt-get install -qy cloud-init").ok + # PIN this PPA as priority for cloud-init installs regardless of ver + assert self.execute( + "apt-get install -qy cloud-init --allow-downgrades" + ).ok @retry(tries=30, delay=1) def install_deb(self): @@ -262,7 +290,7 @@ def ip(self) -> str: try: # in some cases that ssh is not used, an address is not assigned if ( - hasattr(self.instance, "execute_via_ssh") + isinstance(self.instance, LXDInstance) and self.instance.execute_via_ssh ): self._ip = self.instance.ip diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index 5dc5a729c..368943c0d 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -88,6 +88,12 @@ # `html` directory with the coverage report. INCLUDE_COVERAGE = False +# We default our profile to False because it involves modifying the +# cloud-init systemd services, which is too intrusive of a change to +# enable by default. If changed to true, the test directory corresponding +# to the test run under LOCAL_LOG_PATH defined above will contain a report +INCLUDE_PROFILE = False + ################################################################## # USER SETTINGS OVERRIDES ################################################################## diff --git a/tests/integration_tests/modules/test_ansible.py b/tests/integration_tests/modules/test_ansible.py index ab7139c34..ba71bc47a 100644 --- a/tests/integration_tests/modules/test_ansible.py +++ b/tests/integration_tests/modules/test_ansible.py @@ -1,8 +1,12 @@ import pytest +from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM from tests.integration_tests.releases import CURRENT_RELEASE, FOCAL -from tests.integration_tests.util import verify_clean_log +from tests.integration_tests.util import ( + push_and_enable_systemd_unit, + verify_clean_log, +) # This works by setting up a local repository and web server # daemon on the first boot. 
Second boot should succeed @@ -20,40 +24,6 @@ - git - python3-pip write_files: - - path: /etc/systemd/system/repo_server.service - content: | - [Unit] - Description=Serve a local git repo - Wants=repo_waiter.service - After=cloud-init-local.service - Before=cloud-config.service - Before=cloud-final.service - - [Install] - WantedBy=cloud-init-local.service - - [Service] - WorkingDirectory=/root/playbooks/.git - ExecStart=/usr/bin/env python3 -m http.server --bind 0.0.0.0 8000 - - - path: /etc/systemd/system/repo_waiter.service - content: | - [Unit] - Description=Block boot until repo is available - After=repo_server.service - Before=cloud-final.service - - [Install] - WantedBy=cloud-init-local.service - - # clone into temp directory to test that server is running - # sdnotify would be an alternative way to verify that the server is - # running and continue once it is up, but this is simple and works - [Service] - Type=oneshot - ExecStart=/bin/sh -c "while \ - ! git clone http://0.0.0.0:8000/ $(mktemp -d); do sleep 0.1; done" - - path: /root/playbooks/ubuntu.yml content: | --- @@ -80,10 +50,40 @@ - "{{ item }}" state: latest loop: "{{ packages }}" +""" -runcmd: - - [systemctl, enable, repo_server.service] - - [systemctl, enable, repo_waiter.service] +REPO_SERVER = """\ +[Unit] +Description=Serve a local git repo +Wants=repo_waiter.service +After=cloud-init-local.service +Before=cloud-config.service +Before=cloud-final.service + +[Install] +WantedBy=cloud-init-local.service + +[Service] +WorkingDirectory=/root/playbooks/.git +ExecStart=/usr/bin/env python3 -m http.server --bind 0.0.0.0 8000 +""" + +REPO_WAITER = """\ +[Unit] +Description=Block boot until repo is available +After=repo_server.service +Before=cloud-final.service + +[Install] +WantedBy=cloud-init-local.service + +# clone into temp directory to test that server is running +# sdnotify would be an alternative way to verify that the server is +# running and continue once it is up, but this is simple and works +[Service] +Type=oneshot +ExecStart=/bin/sh -c "while \ + ! 
git clone http://0.0.0.0:8000/ $(mktemp -d); do sleep 0.1; done" """ INSTALL_METHOD = """ @@ -284,7 +284,9 @@ def _test_ansible_pull_from_local_server(my_client): @pytest.mark.user_data( USER_DATA + INSTALL_METHOD.format(package="ansible-core", method="pip") ) -def test_ansible_pull_pip(client): +def test_ansible_pull_pip(client: IntegrationInstance): + push_and_enable_systemd_unit(client, "repo_server.service", REPO_SERVER) + push_and_enable_systemd_unit(client, "repo_waiter.service", REPO_WAITER) _test_ansible_pull_from_local_server(client) @@ -300,6 +302,8 @@ def test_ansible_pull_pip(client): USER_DATA + INSTALL_METHOD.format(package="ansible", method="distro") ) def test_ansible_pull_distro(client): + push_and_enable_systemd_unit(client, "repo_server.service", REPO_SERVER) + push_and_enable_systemd_unit(client, "repo_waiter.service", REPO_WAITER) _test_ansible_pull_from_local_server(client) diff --git a/tests/integration_tests/modules/test_apt_functionality.py b/tests/integration_tests/modules/test_apt_functionality.py index b69c3445a..2af9e590c 100644 --- a/tests/integration_tests/modules/test_apt_functionality.py +++ b/tests/integration_tests/modules/test_apt_functionality.py @@ -5,7 +5,6 @@ import pytest -from cloudinit import gpg from cloudinit.config import cc_apt_configure from cloudinit.util import is_true from tests.integration_tests.clouds import IntegrationCloud @@ -19,6 +18,7 @@ DEB822_SOURCES_FILE = "/etc/apt/sources.list.d/ubuntu.sources" ORIG_SOURCES_FILE = "/etc/apt/sources.list" +GET_TEMPDIR = "python3 -c 'import tempfile;print(tempfile.mkdtemp());'" USER_DATA = """\ #cloud-config @@ -48,9 +48,10 @@ deb-src $SECURITY $RELEASE-security multiverse sources: test_keyserver: - keyid: 110E21D8B0E2A1F0243AF6820856F197B892ACEA + keyid: 1BC30F715A3B861247A81A5E55FE7C8C0165013E keyserver: keyserver.ubuntu.com - source: "deb http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu $RELEASE main" + # Hard-code noble as devel releases may not see new packages for some time + source: "deb http://ppa.launchpad.net/curtin-dev/daily/ubuntu noble main" test_ppa: keyid: 441614D8 keyserver: keyserver.ubuntu.com @@ -123,7 +124,7 @@ r"deb-src http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse", ] -TEST_KEYSERVER_KEY = "110E 21D8 B0E2 A1F0 243A F682 0856 F197 B892 ACEA" +TEST_KEYSERVER_KEY = "1BC3 0F71 5A3B 8612 47A8 1A5E 55FE 7C8C 0165 013E" TEST_PPA_KEY = "3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8" TEST_KEY = "1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF" TEST_SIGNED_BY_KEY = "A2EB 2DEC 0BD7 519B 7B38 BE38 376A 290E C806 8B11" @@ -136,15 +137,26 @@ def get_keys(self, class_client: IntegrationInstance): """Return all keys in /etc/apt/trusted.gpg.d/ and /etc/apt/trusted.gpg in human readable format. 
Mimics the output of apt-key finger
         """
-        list_cmd = " ".join(gpg.GPG_LIST) + " "
+        class_client.execute("mkdir /root/tmpdir && chmod 700 /root/tmpdir")
+        GPG_LIST = [
+            "gpg",
+            "--no-options",
+            "--with-fingerprint",
+            "--homedir /root/tmpdir",
+            "--no-default-keyring",
+            "--list-keys",
+            "--keyring",
+        ]
+
+        list_cmd = " ".join(GPG_LIST) + " "
         keys = class_client.execute(list_cmd + cc_apt_configure.APT_LOCAL_KEYS)
-        print(keys)
         files = class_client.execute(
             "ls " + cc_apt_configure.APT_TRUSTED_GPG_DIR
-        )
+        ).stdout
         for file in files.split():
             path = cc_apt_configure.APT_TRUSTED_GPG_DIR + file
-            keys += class_client.execute(list_cmd + path) or ""
+            keys += class_client.execute(list_cmd + path).stdout
+        class_client.execute("gpgconf --homedir /root/tmpdir --kill all")
         return keys

     def test_sources_list(self, class_client: IntegrationInstance):
@@ -203,8 +215,10 @@ def test_signed_by(self, class_client: IntegrationInstance):
         )
         assert path_contents == source

+        temp = class_client.execute(GET_TEMPDIR)
         key = class_client.execute(
-            "gpg --no-default-keyring --with-fingerprint --list-keys "
+            f"gpg --no-options --homedir {temp} --no-default-keyring "
+            "--with-fingerprint --list-keys "
             "--keyring /etc/apt/cloud-init.gpg.d/test_signed_by.gpg"
         )

@@ -236,7 +250,7 @@ def test_keyserver(self, class_client: IntegrationInstance):
         )

         assert (
-            "http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu"
+            "http://ppa.launchpad.net/curtin-dev/daily/ubuntu"
             in test_keyserver_contents
         )

@@ -439,9 +453,10 @@ def test_apt_proxy(client: IntegrationInstance):
 apt:
   sources:
     test_keyserver:
-      keyid: 110E21D8B0E2A1F0243AF6820856F197B892ACEA
+      keyid: 1BC30F715A3B861247A81A5E55FE7C8C0165013E
       keyserver: keyserver.ubuntu.com
-      source: "deb http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu $RELEASE main"
+      # Hard-code noble as devel releases may not see new packages for some time
+      source: "deb http://ppa.launchpad.net/curtin-dev/daily/ubuntu noble main"
     test_ppa:
       keyid: 441614D8
       keyserver: keyserver.ubuntu.com
diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py
index 81fbf1248..5106907a4 100644
--- a/tests/integration_tests/modules/test_cli.py
+++ b/tests/integration_tests/modules/test_cli.py
@@ -20,6 +20,14 @@
   - echo 'hi' > /var/tmp/test
 """

+FAILING_USER_DATA = """\
+#cloud-config
+bootcmd:
+  - exit 1
+runcmd:
+  - exit 1
+"""
+
 # The '-' in 'hashed-password' fails schema validation
 INVALID_USER_DATA_SCHEMA = """\
@@ -36,19 +44,26 @@

 @pytest.mark.user_data(VALID_USER_DATA)
-def test_valid_userdata(client: IntegrationInstance):
-    """Test `cloud-init schema` with valid userdata.
+class TestValidUserData:
+    def test_schema_status(self, class_client: IntegrationInstance):
+        """Test `cloud-init schema` with valid userdata.
-    PR #575
-    """
-    result = client.execute("cloud-init schema --system")
-    assert result.ok
-    assert "Valid schema user-data" in result.stdout.strip()
-    result = client.execute("cloud-init status --long")
-    assert 0 == result.return_code, (
-        f"Unexpected exit {result.return_code} from cloud-init status:"
-        f" {result}"
-    )
+        PR #575
+        """
+        result = class_client.execute("cloud-init schema --system")
+        assert result.ok
+        assert "Valid schema user-data" in result.stdout.strip()
+        result = class_client.execute("cloud-init status --long")
+        assert 0 == result.return_code, (
+            f"Unexpected exit {result.return_code} from cloud-init status:"
+            f" {result}"
+        )
+
+    def test_modules_init(self, class_client: IntegrationInstance):
+        for mode in ("init", "config", "final"):
+            result = class_client.execute(f"cloud-init modules --mode {mode}")
+            assert result.ok
+            assert f"'modules:{mode}'" in result.stdout.strip()


 @pytest.mark.skipif(
@@ -93,8 +108,27 @@ def test_invalid_userdata_schema(client: IntegrationInstance):
     ), f"Unexpected exit code {result.return_code}"
     log = client.read_from_file("/var/log/cloud-init.log")
     warning = (
-        "[WARNING]: Invalid cloud-config provided: Please run "
-        "'sudo cloud-init schema --system' to see the schema errors."
+        "[WARNING]: cloud-config failed schema validation! "
+        "You may run 'sudo cloud-init schema --system' to check the details."
     )
     assert warning in log
     assert "asdfasdf" not in log
+
+
+@pytest.mark.user_data(FAILING_USER_DATA)
+def test_failing_userdata_modules_exit_codes(client: IntegrationInstance):
+    """Test that module failures are represented in the exit status.
+
+    To ensure we don't miss any errors or warnings if a service happens
+    to be restarted, any further module invocations will exit with error
+    on the same boot if a previous invocation exited with error.
+
+    In this test, both bootcmd and runcmd will exit with error the first
+    time. The second time, runcmd will run cleanly, but still exit with
+    error. Since bootcmd runs in the init timeframe, and runcmd runs in
+    the final timeframe, expect errors from those two modes.
+ """ + for mode in ("init", "config", "final"): + result = client.execute(f"cloud-init modules --mode {mode}") + assert result.ok if mode == "config" else result.failed + assert f"'modules:{mode}'" in result.stdout.strip() diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index 29219155e..0bf1b3d49 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -13,13 +13,15 @@ from pathlib import Path import pytest +from pycloudlib.ec2.instance import EC2Instance +from pycloudlib.gce.instance import GceInstance import cloudinit.config -from cloudinit.util import is_true +from cloudinit.util import is_true, should_log_deprecation from tests.integration_tests.decorators import retry from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM -from tests.integration_tests.releases import CURRENT_RELEASE, IS_UBUNTU +from tests.integration_tests.releases import CURRENT_RELEASE, IS_UBUNTU, MANTIC from tests.integration_tests.util import ( get_feature_flag_value, get_inactive_modules, @@ -68,6 +70,7 @@ me: "127.0.0.1" runcmd: - echo 'hello world' > /var/tmp/runcmd_output + - echo '💩' > /var/tmp/unicode_data - # - logger "My test log" @@ -89,6 +92,14 @@ def test_netplan_permissions(self, class_client: IntegrationInstance): """ Test that netplan config file is generated with proper permissions """ + log = class_client.read_from_file("/var/log/cloud-init.log") + if CURRENT_RELEASE < MANTIC: + assert ( + "No netplan python module. Fallback to write" + " /etc/netplan/50-cloud-init.yaml" in log + ) + else: + assert "Rendered netplan config using netplan python API" in log file_perms = class_client.execute( "stat -c %a /etc/netplan/50-cloud-init.yaml" ) @@ -122,9 +133,26 @@ def test_deprecated_message(self, class_client: IntegrationInstance): """Check that deprecated key produces a log warning""" client = class_client log = client.read_from_file("/var/log/cloud-init.log") - assert "Deprecated cloud-config provided" in log - assert "The value of 'false' in user craig's 'sudo' config is " in log - assert 2 == log.count("DEPRECATE") + version_boundary = get_feature_flag_value( + class_client, "DEPRECATION_INFO_BOUNDARY" + ) + # the changed_version is 22.2 in schema for user.sudo key in + # user-data. Pass 22.2 in against the client's version_boundary. + if should_log_deprecation("22.2", version_boundary): + log_level = "DEPRECATED" + deprecation_count = 2 + else: + # Expect the distros deprecated call to be redacted. + # jsonschema still emits deprecation log due to changed_version + # instead of deprecated_version + log_level = "INFO" + deprecation_count = 1 + + assert ( + f"[{log_level}]: The value of 'false' in user craig's 'sudo'" + " config is deprecated" in log + ) + assert deprecation_count == log.count("DEPRECATE") def test_ntp_with_apt(self, class_client: IntegrationInstance): """LP #1628337. 
@@ -452,6 +480,9 @@ def test_instance_json_ec2(self, class_client: IntegrationInstance): "/run/cloud-init/cloud-id-aws" ) assert v1_data["subplatform"].startswith("metadata") + + # type narrow since availability_zone is not a BaseInstance attribute + assert isinstance(client.instance, EC2Instance) assert ( v1_data["availability_zone"] == client.instance.availability_zone ) @@ -474,6 +505,9 @@ def test_instance_json_gce(self, class_client: IntegrationInstance): "/run/cloud-init/cloud-id-gce" ) assert v1_data["subplatform"].startswith("metadata") + # type narrow since zone and instance_id are not BaseInstance + # attributes + assert isinstance(client.instance, GceInstance) assert v1_data["availability_zone"] == client.instance.zone assert v1_data["instance_id"] == client.instance.instance_id assert v1_data["local_hostname"] == client.instance.name @@ -498,6 +532,10 @@ def test_instance_cloud_id_across_reboot( assert client.execute(f"test -f /run/cloud-init/{cloud_file}").ok assert client.execute("test -f /run/cloud-init/cloud-id").ok + def test_unicode(self, class_client: IntegrationInstance): + client = class_client + assert "💩" == client.read_from_file("/var/tmp/unicode_data") + @pytest.mark.user_data(USER_DATA) class TestCombinedNoCI: diff --git a/tests/integration_tests/modules/test_growpart.py b/tests/integration_tests/modules/test_growpart.py index b42c016cb..ebd2d8d15 100644 --- a/tests/integration_tests/modules/test_growpart.py +++ b/tests/integration_tests/modules/test_growpart.py @@ -62,7 +62,7 @@ def test_grow_part(self, client: IntegrationInstance): log = client.read_from_file("/var/log/cloud-init.log") assert ( "cc_growpart.py[INFO]: '/dev/sdb1' resized:" - " changed (/dev/sdb, 1) from" in log + " changed (/dev/sdb1) from" in log ) lsblk = json.loads(client.execute("lsblk --json")) diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py index 81a75dec2..8c7bc7839 100644 --- a/tests/integration_tests/modules/test_hotplug.py +++ b/tests/integration_tests/modules/test_hotplug.py @@ -1,7 +1,7 @@ -import contextlib import time from collections import namedtuple +import paramiko import pytest import yaml @@ -14,7 +14,11 @@ FOCAL, UBUNTU_STABLE, ) -from tests.integration_tests.util import verify_clean_log +from tests.integration_tests.util import ( + push_and_enable_systemd_unit, + verify_clean_log, + wait_for_cloud_init, +) USER_DATA = """\ #cloud-config @@ -54,14 +58,28 @@ def _wait_till_hotplug_complete(client, expected_runs=1): raise Exception("Waiting for hotplug handler failed") -def _get_ip_addr(client): +def _get_ip_addr(client, *, _retries: int = 0): ips = [] lines = client.execute("ip --brief addr").split("\n") for line in lines: attributes = line.split() interface, state = attributes[0], attributes[1] ip4_cidr = attributes[2] if len(attributes) > 2 else None - ip6_cidr = attributes[3] if len(attributes) > 3 else None + + # Retry to wait for ipv6_cidr: + # ens6 UP metric 200 + if len(attributes) == 6 and _retries < 3: + time.sleep(1) + return _get_ip_addr(client, _retries=_retries + 1) + + # The output of `ip --brief addr` can contain metric info: + # ens5 UP metric 100 ... 
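+            # For example, a full line could look like (addresses are
+            # hypothetical):
+            #   ens5  UP  10.0.4.16/24 metric 100 fe80::42/64
+            # in which case attributes[3] == "metric" and the ipv6 cidr,
+            # when present, is shifted to attributes[5].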
+ ip6_cidr = None + if len(attributes) > 3: + if attributes[3] != "metric": + ip6_cidr = attributes[3] + elif len(attributes) > 5: + ip6_cidr = attributes[5] ip4 = ip4_cidr.split("/")[0] if ip4_cidr else None ip6 = ip6_cidr.split("/")[0] if ip6_cidr else None ip = ip_addr(interface, state, ip4, ip6) @@ -72,8 +90,7 @@ def _get_ip_addr(client): @pytest.mark.skipif( PLATFORM != "openstack", reason=( - f"Test was written for {PLATFORM} but can likely run on " - "other platforms." + "Test was written for openstack but can likely run on other platforms." ), ) @pytest.mark.skipif( @@ -196,8 +213,7 @@ def test_hotplug_enable_cmd_ec2(client: IntegrationInstance): @pytest.mark.skipif( PLATFORM != "openstack", reason=( - f"Test was written for {PLATFORM} but can likely run on " - "other platforms." + "Test was written for openstack but can likely run on other platforms." ), ) def test_no_hotplug_in_userdata(client: IntegrationInstance): @@ -231,90 +247,117 @@ def test_multi_nic_hotplug(setup_image, session_cloud: IntegrationCloud): """Tests that additional secondary NICs are routable from non-local networks after the hotplug hook is executed when network updates are configured on the HOTPLUG event.""" - ec2 = session_cloud.cloud_instance.client with session_cloud.launch(launch_kwargs={}, user_data=USER_DATA) as client: ips_before = _get_ip_addr(client) - instance_pub_ip = client.instance.ip - secondary_priv_ip = client.instance.add_network_interface() - response = ec2.describe_network_interfaces( - Filters=[ - { - "Name": "private-ip-address", - "Values": [secondary_priv_ip], - }, - ], + secondary_priv_ip = client.instance.add_network_interface( + ipv4_public_ip_count=1, ) - nic_id = response["NetworkInterfaces"][0]["NetworkInterfaceId"] + _wait_till_hotplug_complete(client, expected_runs=1) - # Create Elastic IP - # Refactor after https://github.com/canonical/pycloudlib/issues/337 is - # completed - allocation = ec2.allocate_address(Domain="vpc") - try: - secondary_pub_ip = allocation["PublicIp"] - association = ec2.associate_address( - AllocationId=allocation["AllocationId"], - NetworkInterfaceId=nic_id, - ) - assert association["ResponseMetadata"]["HTTPStatusCode"] == 200 - - _wait_till_hotplug_complete(client) - - log_content = client.read_from_file("/var/log/cloud-init.log") - verify_clean_log(log_content) - - ips_after_add = _get_ip_addr(client) - - netplan_cfg = client.read_from_file( - "/etc/netplan/50-cloud-init.yaml" - ) - config = yaml.safe_load(netplan_cfg) - new_addition = [ - ip for ip in ips_after_add if ip.ip4 == secondary_priv_ip - ][0] - assert new_addition.interface in config["network"]["ethernets"] - new_nic_cfg = config["network"]["ethernets"][ - new_addition.interface - ] - assert "routing-policy" in new_nic_cfg - assert [{"from": secondary_priv_ip, "table": 101}] == new_nic_cfg[ - "routing-policy" - ] - - assert len(ips_after_add) == len(ips_before) + 1 - - # SSH over primary NIC works - subp("nc -w 5 -zv " + instance_pub_ip + " 22", shell=True) - - # THE TEST: SSH over secondary NIC works - subp("nc -w 5 -zv " + secondary_pub_ip + " 22", shell=True) - - # Remove new NIC - client.instance.remove_network_interface(secondary_priv_ip) - _wait_till_hotplug_complete(client, expected_runs=2) - - # SSH over primary NIC works - subp("nc -w 1 -zv " + instance_pub_ip + " 22", shell=True) - - ips_after_remove = _get_ip_addr(client) - assert len(ips_after_remove) == len(ips_before) - assert secondary_priv_ip not in [ip.ip4 for ip in ips_after_remove] - - netplan_cfg = 
client.read_from_file( - "/etc/netplan/50-cloud-init.yaml" - ) - config = yaml.safe_load(netplan_cfg) - assert new_addition.interface not in config["network"]["ethernets"] - - log_content = client.read_from_file("/var/log/cloud-init.log") - verify_clean_log(log_content) - finally: - with contextlib.suppress(Exception): - ec2.disassociate_address( - AssociationId=association["AssociationId"] - ) - with contextlib.suppress(Exception): - ec2.release_address(AllocationId=allocation["AllocationId"]) + log_content = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log_content) + + ips_after_add = _get_ip_addr(client) + + netplan_cfg = client.read_from_file("/etc/netplan/50-cloud-init.yaml") + config = yaml.safe_load(netplan_cfg) + new_addition = [ + ip for ip in ips_after_add if ip.ip4 == secondary_priv_ip + ][0] + assert new_addition.interface in config["network"]["ethernets"] + new_nic_cfg = config["network"]["ethernets"][new_addition.interface] + assert [{"from": secondary_priv_ip, "table": 101}] == new_nic_cfg[ + "routing-policy" + ] + + assert len(ips_after_add) == len(ips_before) + 1 + public_ips = client.instance.public_ips + assert len(public_ips) == 2 + + # SSH over all public ips works + for pub_ip in public_ips: + subp("nc -w 5 -zv " + pub_ip + " 22", shell=True) + + # Remove new NIC + client.instance.remove_network_interface(secondary_priv_ip) + _wait_till_hotplug_complete(client, expected_runs=2) + + public_ips = client.instance.public_ips + assert len(public_ips) == 1 + # SSH over primary NIC works + subp("nc -w 1 -zv " + public_ips[0] + " 22", shell=True) + + ips_after_remove = _get_ip_addr(client) + assert len(ips_after_remove) == len(ips_before) + assert secondary_priv_ip not in [ip.ip4 for ip in ips_after_remove] + + netplan_cfg = client.read_from_file("/etc/netplan/50-cloud-init.yaml") + config = yaml.safe_load(netplan_cfg) + assert new_addition.interface not in config["network"]["ethernets"] + + log_content = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log_content) + + +@pytest.mark.skipif(CURRENT_RELEASE <= FOCAL, reason="See LP: #2055397") +@pytest.mark.skipif(PLATFORM != "ec2", reason="test is ec2 specific") +def test_multi_nic_hotplug_vpc(setup_image, session_cloud: IntegrationCloud): + """Tests that additional secondary NICs are routable from local + networks after the hotplug hook is executed when network updates + are configured on the HOTPLUG event.""" + with session_cloud.launch( + user_data=USER_DATA + ) as client, session_cloud.launch() as bastion: + ips_before = _get_ip_addr(client) + primary_priv_ip4 = ips_before[1].ip4 + primary_priv_ip6 = ips_before[1].ip6 + client.instance.add_network_interface(ipv6_address_count=1) + + _wait_till_hotplug_complete(client) + log_content = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log_content) + + netplan_cfg = client.read_from_file("/etc/netplan/50-cloud-init.yaml") + config = yaml.safe_load(netplan_cfg) + + ips_after_add = _get_ip_addr(client) + secondary_priv_ip4 = ips_after_add[2].ip4 + secondary_priv_ip6 = ips_after_add[2].ip6 + assert primary_priv_ip4 != secondary_priv_ip4 + + new_addition = [ + ip for ip in ips_after_add if ip.ip4 == secondary_priv_ip4 + ][0] + assert new_addition.interface in config["network"]["ethernets"] + new_nic_cfg = config["network"]["ethernets"][new_addition.interface] + assert "routing-policy" in new_nic_cfg + assert [ + {"from": secondary_priv_ip4, "table": 101}, + {"from": secondary_priv_ip6, "table": 101}, + ] == 
new_nic_cfg["routing-policy"] + + assert len(ips_after_add) == len(ips_before) + 1 + + # pings to primary and secondary NICs work + r = bastion.execute(f"ping -c1 {primary_priv_ip4}") + assert r.ok, r.stdout + r = bastion.execute(f"ping -c1 {secondary_priv_ip4}") + assert r.ok, r.stdout + r = bastion.execute(f"ping -c1 {primary_priv_ip6}") + assert r.ok, r.stdout + r = bastion.execute(f"ping -c1 {secondary_priv_ip6}") + assert r.ok, r.stdout + + # Remove new NIC + client.instance.remove_network_interface(secondary_priv_ip4) + _wait_till_hotplug_complete(client, expected_runs=2) + + # ping to primary NIC works + assert bastion.execute(f"ping -c1 {primary_priv_ip4}").ok + assert bastion.execute(f"ping -c1 {primary_priv_ip6}").ok + + log_content = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log_content) @pytest.mark.skipif(PLATFORM != "ec2", reason="test is ec2 specific") @@ -343,3 +386,156 @@ def test_no_hotplug_triggered_by_docker(client: IntegrationInstance): assert "enabled" == client.execute( "cloud-init devel hotplug-hook -s net query" ) + + +def wait_for_cmd( + client: IntegrationInstance, cmd: str, return_code: int +) -> None: + for _ in range(60): + try: + res = client.execute(cmd) + except paramiko.ssh_exception.SSHException: + pass + else: + if res.return_code == return_code: + return + time.sleep(1) + assert False, f"`{cmd}` never exited with {return_code}" + + +def assert_systemctl_status_code( + client: IntegrationInstance, service: str, return_code: int +): + result = client.execute(f"systemctl status {service}") + assert result.return_code == return_code, ( + f"status of {service} expected to be {return_code} but was" + f" {result.return_code}\nstdout: {result.stdout}\n" + f"stderr {result.stderr}" + ) + + +BLOCK_CLOUD_CONFIG = """\ +[Unit] +Description=Block cloud-config.service +After=cloud-config.target +Before=cloud-config.service + +DefaultDependencies=no +Before=shutdown.target +Conflicts=shutdown.target + +[Service] +Type=oneshot +ExecStart=/usr/bin/sleep 360 +TimeoutSec=0 + +# Output needs to appear in instance console output +StandardOutput=journal+console + +[Install] +WantedBy=cloud-config.service +""" # noqa: E501 + + +BLOCK_CLOUD_FINAL = """\ +[Unit] +Description=Block cloud-final.service +After=cloud-config.target +Before=cloud-final.service + +DefaultDependencies=no +Before=shutdown.target +Conflicts=shutdown.target + +[Service] +Type=oneshot +ExecStart=/usr/bin/sleep 360 +TimeoutSec=0 + +# Output needs to appear in instance console output +StandardOutput=journal+console + +[Install] +WantedBy=cloud-final.service +""" # noqa: E501 + + +def _customize_environment(client: IntegrationInstance): + push_and_enable_systemd_unit( + client, "block-cloud-config.service", BLOCK_CLOUD_CONFIG + ) + push_and_enable_systemd_unit( + client, "block-cloud-final.service", BLOCK_CLOUD_FINAL + ) + + # Disable pam_nologin for 1000(ubuntu) user to allow ssh access early + # during boot. Without this we get: + # + # System is booting up. Unprivileged users are not permitted to log in yet. + # Please come back later. For technical details, see pam_nologin(8). 
+ # + # sshd[xxx]: fatal: Access denied for user ubuntu by PAM account + # configuration [preauth] + # + # See: pam(7), pam_nologin(8), pam_succeed_id(8) + contents = client.read_from_file("/etc/pam.d/sshd") + contents = ( + "account [success=1 default=ignore] pam_succeed_if.so quiet uid eq" + " 1000\n\n" + contents + ) + client.write_to_file("/etc/pam.d/sshd", contents) + + client.instance.shutdown(wait=True) + client.instance.start(wait=False) + + +@pytest.mark.skipif( + PLATFORM != "ec2", + reason="test is ec2 specific but should work on other platforms with the" + " ability to add_network_interface", +) +@pytest.mark.user_data(USER_DATA) +def test_nics_before_config_trigger_hotplug(client: IntegrationInstance): + """ + Test that NICs added/removed after the Network boot stage but before + the rest boot stages do trigger cloud-init-hotplugd. + + Note: Do not test first boot, as cc_install_hotplug runs at + config-final.service time. + """ + _customize_environment(client) + + # wait until we are between cloud-config.target done and + # cloud-config.service + wait_for_cmd(client, "systemctl status cloud-config.target", 0) + wait_for_cmd(client, "systemctl status block-cloud-config.service", 3) + + # socket active but service not + assert_systemctl_status_code(client, "cloud-init-hotplugd.socket", 0) + assert_systemctl_status_code(client, "cloud-init-hotplugd.service", 3) + + assert_systemctl_status_code(client, "cloud-config.service", 3) + assert_systemctl_status_code(client, "cloud-final.service", 3) + + added_ip_0 = client.instance.add_network_interface() + + assert_systemctl_status_code(client, "cloud-config.service", 3) + assert_systemctl_status_code(client, "cloud-final.service", 3) + + # unblock cloud-config.service + assert client.execute("systemctl stop block-cloud-config.service").ok + wait_for_cmd(client, "systemctl status cloud-config.service", 0) + wait_for_cmd(client, "systemctl status block-cloud-final.service", 3) + assert_systemctl_status_code(client, "cloud-final.service", 3) + + # hotplug didn't run before cloud-final.service + _wait_till_hotplug_complete(client, expected_runs=0) + + # unblock cloud-final.service + assert client.execute("systemctl stop block-cloud-final.service").ok + + wait_for_cloud_init(client) + _wait_till_hotplug_complete(client, expected_runs=1) + + client.instance.remove_network_interface(added_ip_0) + _wait_till_hotplug_complete(client, expected_runs=2) diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py b/tests/integration_tests/modules/test_package_update_upgrade_install.py index 98e6ef781..b4c2d3dd1 100644 --- a/tests/integration_tests/modules/test_package_update_upgrade_install.py +++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py @@ -88,6 +88,8 @@ def test_snap_packages_are_installed(self, class_client): HELLO_VERSIONS_BY_RELEASE = { + "oracular": "2.10-3build2", + "noble": "2.10-3build1", "mantic": "2.10-3", "lunar": "2.10-3", "jammy": "2.10-2ubuntu4", @@ -104,7 +106,7 @@ def test_snap_packages_are_installed(self, class_client): @pytest.mark.skipif(not IS_UBUNTU, reason="Uses Apt") def test_versioned_packages_are_installed(session_cloud: IntegrationCloud): pkg_version = HELLO_VERSIONS_BY_RELEASE.get( - CURRENT_RELEASE.series, "2.10-3" + CURRENT_RELEASE.series, "2.10-3build1" ) with session_cloud.launch( user_data=VERSIONED_USER_DATA.format(pkg_version=pkg_version) diff --git a/tests/integration_tests/modules/test_ubuntu_pro.py b/tests/integration_tests/modules/test_ubuntu_pro.py 
index c26ea699c..f44381634 100644
--- a/tests/integration_tests/modules/test_ubuntu_pro.py
+++ b/tests/integration_tests/modules/test_ubuntu_pro.py
@@ -5,6 +5,7 @@
 import pytest
 from pycloudlib.cloud import ImageType

+from cloudinit.util import should_log_deprecation
 from tests.integration_tests.clouds import IntegrationCloud
 from tests.integration_tests.conftest import get_validated_source
 from tests.integration_tests.instances import (
@@ -19,7 +20,10 @@
     IS_UBUNTU,
     JAMMY,
 )
-from tests.integration_tests.util import verify_clean_log
+from tests.integration_tests.util import (
+    get_feature_flag_value,
+    verify_clean_log,
+)

 LOG = logging.getLogger("integration_testing.test_ubuntu_pro")

@@ -43,14 +47,6 @@

 PRO_AUTO_ATTACH_DISABLED = """\
 #cloud-config
-# ubuntu_advantage config kept as duplication until the release of this
-# commit in proclient (ubuntu-advantage-tools v. 32):
-# https://github.com/canonical/ubuntu-pro-client/commit/7bb69e3ad
-# Without a top-level ubuntu_advantage key Pro will automatically attach
-# instead of defer to cloud-init for all attach operations.
-ubuntu_advantage:
-  features:
-    disable_auto_attach: true
 ubuntu_pro:
   features:
     disable_auto_attach: true
@@ -59,10 +55,6 @@

 PRO_DAEMON_DISABLED = """\
 #cloud-config
 # Disable Pro daemon (only needed in GCE)
-# Drop ubuntu_advantage key once ubuntu-advantage-tools v. 32 is SRU'd
-ubuntu_advantage:
-  features:
-    disable_auto_attach: true
 ubuntu_pro:
   features:
     disable_auto_attach: true
@@ -72,21 +64,18 @@

 AUTO_ATTACH_CUSTOM_SERVICES = """\
 #cloud-config
-# Drop ubuntu_advantage key once ubuntu-advantage-tools v. 32 is SRU'd
-ubuntu_advantage:
-  enable:
-  - esm-infra
 ubuntu_pro:
   enable:
   - esm-infra
 """


-def did_ua_service_noop(client: IntegrationInstance) -> bool:
-    ua_log = client.read_from_file("/var/log/ubuntu-advantage.log")
-    return (
+def assert_ua_service_noop(client: IntegrationInstance):
+    status_resp = client.execute("systemctl status ua-auto-attach.service")
+    assert status_resp.return_code == 3  # Due to being skipped
+    assert (
         "Skipping auto-attach and deferring to cloud-init to setup and"
-        " configure auto-attach" in ua_log
+        " configure auto-attach" in status_resp.stdout
     )


@@ -150,12 +139,18 @@ def test_valid_token(self, client: IntegrationInstance):
             "sed -i 's/ubuntu_pro$/ubuntu_advantage/' /etc/cloud/cloud.cfg"
         )
         client.restart()
-        status_resp = client.execute("cloud-init status --format json")
-        status = json.loads(status_resp.stdout)
-        assert (
-            "Module has been renamed from cc_ubuntu_advantage to cc_ubuntu_pro"
-            in "\n".join(status["recoverable_errors"]["DEPRECATED"])
+        version_boundary = get_feature_flag_value(
+            client, "DEPRECATION_INFO_BOUNDARY"
         )
+        # ubuntu_advantage key is deprecated in version 24.1
+        if should_log_deprecation("24.1", version_boundary):
+            log_level = "DEPRECATED"
+        else:
+            log_level = "INFO"
+        assert client.execute(
+            f'grep "{log_level}]: Module has been renamed from'
+            ' cc_ubuntu_advantage to cc_ubuntu_pro" /var/log/cloud-init.log'
+        ).ok
         assert is_attached(client)

     @pytest.mark.user_data(ATTACH.format(token=CLOUD_INIT_UA_TOKEN))
@@ -212,16 +207,8 @@ def maybe_install_cloud_init(session_cloud: IntegrationCloud):
             user_data=user_data,
             launch_kwargs=launch_kwargs,
         ) as client:
-            # TODO: Re-enable this check after cloud images contain
-            # cloud-init 23.4.
-            # Explanation: We have to include something under
-            # user-data.ubuntu_pro to skip the automatic auto-attach
-            # (driven by ua-auto-attach.service and/or ubuntu-advantage.service)
-            # while customizing the instance but in cloud-init < 23.4,
-            # user-data.ubuntu_pro requires a token key.
-
-            # log = client.read_from_file("/var/log/cloud-init.log")
-            # verify_clean_log(log)
+            log = client.read_from_file("/var/log/cloud-init.log")
+            verify_clean_log(log)

             assert not is_attached(
                 client
@@ -260,7 +247,7 @@ def test_custom_services(self, session_cloud: IntegrationCloud):
         ) as client:
             log = client.read_from_file("/var/log/cloud-init.log")
             verify_clean_log(log)
-            assert did_ua_service_noop(client)
+            assert_ua_service_noop(client)
             assert is_attached(client)
             services_status = get_services_status(client)
             assert services_status.pop(
diff --git a/tests/integration_tests/releases.py b/tests/integration_tests/releases.py
index 43724bb5e..9ff9e1e2d 100644
--- a/tests/integration_tests/releases.py
+++ b/tests/integration_tests/releases.py
@@ -97,8 +97,9 @@ def from_os_image(
 LUNAR = Release("ubuntu", "lunar", "23.04")
 MANTIC = Release("ubuntu", "mantic", "23.10")
 NOBLE = Release("ubuntu", "noble", "24.04")
+ORACULAR = Release("ubuntu", "oracular", "24.10")

-UBUNTU_STABLE = (FOCAL, JAMMY, MANTIC)
+UBUNTU_STABLE = (FOCAL, JAMMY, MANTIC, NOBLE)

 CURRENT_RELEASE = Release.from_os_image()
 IS_UBUNTU = CURRENT_RELEASE.os == "ubuntu"
diff --git a/tests/integration_tests/test_ds_identify.py b/tests/integration_tests/test_ds_identify.py
new file mode 100644
index 000000000..59d3edd77
--- /dev/null
+++ b/tests/integration_tests/test_ds_identify.py
@@ -0,0 +1,34 @@
+"""test that ds-identify works as expected"""
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.integration_settings import PLATFORM
+from tests.integration_tests.util import verify_clean_log, wait_for_cloud_init
+
+DATASOURCE_LIST_FILE = "/etc/cloud/cloud.cfg.d/90_dpkg.cfg"
+MAP_PLATFORM_TO_DATASOURCE = {
+    "lxd_container": "lxd",
+    "lxd_vm": "lxd",
+    "qemu": "nocloud",
+    "ec2": "aws",
+    "oci": "oracle",
+}
+
+
+def test_ds_identify(client: IntegrationInstance):
+    """Verify that ds-identify works correctly
+
+    Deb packaging often defines a datasource_list with a single datasource,
+    which bypasses ds-identify logic. This test works by removing this file
+    and verifying that cloud-init doesn't experience issues.
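+
+    For reference, a typical 90_dpkg.cfg written by deb packaging looks
+    roughly like this (illustrative; the datasource list is abbreviated):
+
+        # to update this file, run dpkg-reconfigure cloud-init
+        datasource_list: [ NoCloud, ConfigDrive, ..., Ec2, LXD, None ]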
+ """ + assert client.execute(f"rm {DATASOURCE_LIST_FILE}") + assert client.execute("cloud-init clean --logs") + client.restart() + wait_for_cloud_init(client) + verify_clean_log(client.execute("cat /var/log/cloud-init.log")) + assert client.execute("cloud-init status --wait") + + datasource = MAP_PLATFORM_TO_DATASOURCE.get(PLATFORM, PLATFORM) + cloud_id = client.execute("cloud-id") + assert cloud_id.ok + assert datasource == cloud_id.stdout.rstrip() diff --git a/tests/integration_tests/test_kernel_commandline_match.py b/tests/integration_tests/test_kernel_command_line_match.py similarity index 64% rename from tests/integration_tests/test_kernel_commandline_match.py rename to tests/integration_tests/test_kernel_command_line_match.py index 0d90ab130..60bda9072 100644 --- a/tests/integration_tests/test_kernel_commandline_match.py +++ b/tests/integration_tests/test_kernel_command_line_match.py @@ -6,64 +6,15 @@ from tests.integration_tests.conftest import get_validated_source from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM -from tests.integration_tests.util import lxd_has_nocloud, wait_for_cloud_init +from tests.integration_tests.util import ( + lxd_has_nocloud, + override_kernel_command_line, + wait_for_cloud_init, +) log = logging.getLogger("integration_testing") -def restart_cloud_init(c): - client = c - client.instance.shutdown(wait=False) - try: - client.instance.wait_for_state("STOPPED", num_retries=20) - except RuntimeError as e: - log.warning( - "Retrying shutdown due to timeout on initial shutdown request %s", - str(e), - ) - client.instance.shutdown() - - client.instance.execute_via_ssh = False - client.instance.start() - client.execute("cloud-init status --wait") - - -def override_kernel_cmdline(ds_str: str, c: IntegrationInstance): - """ - Configure grub's kernel command line to tell cloud-init to use OpenStack - - even though LXD should naturally be detected. - - This runs on LXD, but forces cloud-init to attempt to run OpenStack. - This will inevitably fail on LXD, but we only care that it tried - on - Ironic, for example, it will succeed. - """ - client = c - - # The final output in /etc/default/grub should be: - # - # GRUB_CMDLINE_LINUX="'ds=nocloud;s=http://my-url/'" - # - # That ensures that the kernel commandline passed into - # /boot/efi/EFI/ubuntu/grub.cfg will be properly single-quoted - # - # Example: - # - # linux /boot/vmlinuz-5.15.0-1030-kvm ro 'ds=nocloud;s=http://my-url/' - # - # Not doing this will result in a semicolon-delimited ds argument - # terminating the kernel arguments prematurely. - client.execute('printf "GRUB_CMDLINE_LINUX=\\"" >> /etc/default/grub') - client.execute('printf "\'" >> /etc/default/grub') - client.execute(f"printf '{ds_str}' >> /etc/default/grub") - client.execute('printf "\'\\"" >> /etc/default/grub') - - # We should probably include non-systemd distros at some point. 
This should - # most likely be as simple as updating the output path for grub-mkconfig - client.execute("grub-mkconfig -o /boot/efi/EFI/ubuntu/grub.cfg") - client.execute("cloud-init clean --logs") - restart_cloud_init(client) - - @pytest.mark.skipif(PLATFORM != "lxd_vm", reason="Modifies grub config") @pytest.mark.lxd_use_exec @pytest.mark.parametrize( @@ -81,25 +32,25 @@ def override_kernel_cmdline(ds_str: str, c: IntegrationInstance): def test_lxd_datasource_kernel_override( ds_str, configured, cmdline_configured, client: IntegrationInstance ): - """This test is twofold: it tests kernel commandline override, which also + """This test is twofold: it tests kernel command line override, which also validates OpenStack Ironic requirements. OpenStack Ironic does not advertise itself to cloud-init via any of the conventional methods: DMI, etc. - On systemd, ds-identify is able to grok kernel commandline, however to + On systemd, ds-identify is able to grok kernel command line, however to support cloud-init kernel command line parsing on non-systemd, parsing - kernel commandline in Python code is required. + kernel command line in Python code is required. """ if configured == "lxd_or_nocloud": configured = ( "DataSourceNoCloud" if lxd_has_nocloud(client) else "DataSourceLXD" ) - override_kernel_cmdline(ds_str, client) + override_kernel_command_line(ds_str, client) if cmdline_configured: assert ( - "Machine is configured by the kernel commandline to run on single " - f"datasource {configured}" + "Machine is configured by the kernel command line to run on single" + f" datasource {configured}" ) in client.execute("cat /var/log/cloud-init.log") else: # verify that no plat @@ -107,7 +58,7 @@ def test_lxd_datasource_kernel_override( assert (f"Detected platform: {configured}") in log assert ( "Machine is configured by the kernel " - "commandline to run on single " + "command line to run on single " ) not in log @@ -147,7 +98,7 @@ def test_lxd_datasource_kernel_override_nocloud_net( assert wait_for_cloud_init(client, num_retries=60).ok if source.installs_new_version(): client.install_new_cloud_init(source, clean=False) - override_kernel_cmdline(ds_str, client) + override_kernel_command_line(ds_str, client) logs = client.execute("cat /var/log/cloud-init.log") assert ( @@ -164,9 +115,9 @@ def test_lxd_datasource_kernel_override_nocloud_net( @pytest.mark.skipif(PLATFORM != "lxd_vm", reason="Modifies grub config") @pytest.mark.lxd_use_exec def test_lxd_disable_cloud_init_cmdline(client: IntegrationInstance): - """Verify cloud-init disablement via kernel commandline works.""" + """Verify cloud-init disablement via kernel command line works.""" - override_kernel_cmdline("cloud-init=disabled", client) + override_kernel_command_line("cloud-init=disabled", client) assert "Active: inactive (dead)" in client.execute( "systemctl status cloud-init" ) @@ -178,7 +129,7 @@ def test_lxd_disable_cloud_init_file(client: IntegrationInstance): client.execute("touch /etc/cloud/cloud-init.disabled") client.execute("cloud-init --clean") - restart_cloud_init(client) + client.restart() assert "Active: inactive (dead)" in client.execute( "systemctl status cloud-init" ) @@ -192,7 +143,7 @@ def test_lxd_disable_cloud_init_env(client: IntegrationInstance): client.execute(f'echo "{env}" >> /etc/systemd/system.conf') client.execute("cloud-init --clean") - restart_cloud_init(client) + client.restart() assert "Active: inactive (dead)" in client.execute( "systemctl status cloud-init" ) diff --git 
a/tests/integration_tests/test_networking.py b/tests/integration_tests/test_networking.py index 6c133c331..bf2389572 100644 --- a/tests/integration_tests/test_networking.py +++ b/tests/integration_tests/test_networking.py @@ -1,4 +1,5 @@ """Networking-related tests.""" + import contextlib import json @@ -66,6 +67,13 @@ def test_skip(self, client: IntegrationInstance): client.execute( "mv /var/log/cloud-init.log /var/log/cloud-init.log.bak" ) + if CURRENT_RELEASE < MANTIC: + assert ( + "No netplan python module. Fallback to write" + " /etc/netplan/50-cloud-init.yaml" in log + ) + else: + assert "Rendered netplan config using netplan python API" in log netplan = yaml.safe_load( client.execute("cat /etc/netplan/50-cloud-init.yaml") ) @@ -191,7 +199,10 @@ def test_netplan_rendering( } with session_cloud.launch(launch_kwargs=launch_kwargs) as client: result = client.execute("cat /etc/netplan/50-cloud-init.yaml") - assert result.stdout.startswith(EXPECTED_NETPLAN_HEADER) + if CURRENT_RELEASE < MANTIC: + assert result.stdout.startswith(EXPECTED_NETPLAN_HEADER) + else: + assert EXPECTED_NETPLAN_HEADER not in result.stdout assert expected == yaml.safe_load(result.stdout) @@ -284,16 +295,16 @@ def test_invalid_network_v2_netplan( # Netplan python API only available on MANTIC and later if CURRENT_RELEASE < MANTIC: assert ( - "Skipping netplan schema validation. No netplan available" + "Skipping netplan schema validation. No netplan API available" ) in client.read_from_file("/var/log/cloud-init.log") assert ( - "Skipping network-config schema validation. No network schema" - " for version: 2" + "Skipping network-config schema validation for version: 2." + " No netplan API available." ) in client.execute("cloud-init schema --system") else: assert ( - "Invalid network-config provided: Please run " - "'sudo cloud-init schema --system' to see the schema errors." + "network-config failed schema validation! You may run " + "'sudo cloud-init schema --system' to check the details." 
) in client.execute("cloud-init status --format=json") assert ( "Invalid network-config /var/lib/cloud/instances/" @@ -310,77 +321,28 @@ def test_ec2_multi_nic_reboot(setup_image, session_cloud: IntegrationCloud): """Tests that additional secondary NICs and secondary IPs on them are routable from non-local networks after a reboot event when network updates are configured on every boot.""" - ec2 = session_cloud.cloud_instance.client with session_cloud.launch(launch_kwargs={}, user_data=USER_DATA) as client: - # Add secondary NIC - secondary_priv_ip_0 = client.instance.add_network_interface() - response = ec2.describe_network_interfaces( - Filters=[ - { - "Name": "private-ip-address", - "Values": [secondary_priv_ip_0], - }, - ], - ) - nic_id = response["NetworkInterfaces"][0]["NetworkInterfaceId"] - # Add secondary IP to secondary NIC - association_0 = ec2.assign_private_ip_addresses( - NetworkInterfaceId=nic_id, SecondaryPrivateIpAddressCount=1 + # Add secondary NIC with two private and public ips + client.instance.add_network_interface( + ipv4_address_count=2, ipv4_public_ip_count=2 ) - assert association_0["ResponseMetadata"]["HTTPStatusCode"] == 200 - secondary_priv_ip_1 = association_0["AssignedPrivateIpAddresses"][0][ - "PrivateIpAddress" - ] - - # Assing elastic IPs - # Refactor after https://github.com/canonical/pycloudlib/issues/337 is - # completed - allocation_0 = ec2.allocate_address(Domain="vpc") - allocation_1 = ec2.allocate_address(Domain="vpc") - try: - secondary_pub_ip_0 = allocation_0["PublicIp"] - secondary_pub_ip_1 = allocation_1["PublicIp"] - - association_0 = ec2.associate_address( - AllocationId=allocation_0["AllocationId"], - NetworkInterfaceId=nic_id, - PrivateIpAddress=secondary_priv_ip_0, - ) - assert association_0["ResponseMetadata"]["HTTPStatusCode"] == 200 - association_1 = ec2.associate_address( - AllocationId=allocation_1["AllocationId"], - NetworkInterfaceId=nic_id, - PrivateIpAddress=secondary_priv_ip_1, - ) - assert association_1["ResponseMetadata"]["HTTPStatusCode"] == 200 - # Reboot to update network config - client.execute("cloud-init clean --logs") - client.restart() + public_ips = client.instance.public_ips + assert len(public_ips) == 3, ( + "Expected 3 public ips, one from the primary nic and 2 from the" + " secondary one" + ) - log_content = client.read_from_file("/var/log/cloud-init.log") - verify_clean_log(log_content) + # Reboot to update network config + client.execute("cloud-init clean --logs") + client.restart() - # SSH over primary NIC works - instance_pub_ip = client.instance.ip - subp("nc -w 5 -zv " + instance_pub_ip + " 22", shell=True) + log_content = client.read_from_file("/var/log/cloud-init.log") + verify_clean_log(log_content) - # SSH over secondary NIC works - subp("nc -w 5 -zv " + secondary_pub_ip_0 + " 22", shell=True) - subp("nc -w 5 -zv " + secondary_pub_ip_1 + " 22", shell=True) - finally: - with contextlib.suppress(Exception): - ec2.disassociate_address( - AssociationId=association_0["AssociationId"] - ) - with contextlib.suppress(Exception): - ec2.release_address(AllocationId=allocation_0["AllocationId"]) - with contextlib.suppress(Exception): - ec2.disassociate_address( - AssociationId=association_1["AssociationId"] - ) - with contextlib.suppress(Exception): - ec2.release_address(AllocationId=allocation_1["AllocationId"]) + # SSH over primary and secondary NIC works + for ip in public_ips: + subp("nc -w 5 -zv " + ip + " 22", shell=True) @pytest.mark.adhoc # costly instance not available in all regions / azs diff --git 
a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index e0d5b3219..970a2406d 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -12,7 +12,7 @@ CURRENT_RELEASE, FOCAL, IS_UBUNTU, - NOBLE, + MANTIC, ) from tests.integration_tests.util import verify_clean_log @@ -62,7 +62,6 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): source = get_validated_source(session_cloud) if not source.installs_new_version(): pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source)) - return # type checking doesn't understand that skip raises launch_kwargs = { "image_id": session_cloud.initial_image_id, } @@ -143,18 +142,15 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): assert post_json["v1"]["datasource"].startswith( "DataSourceAzure" ) - if PLATFORM in ["gce", "qemu"] and CURRENT_RELEASE < NOBLE: - # GCE regenerates network config per boot AND - # GCE uses fallback config AND - # #4474 changed fallback configuration. - # Once the baseline includes #4474, this can be removed - pre_network = yaml.load(pre_network, Loader=yaml.Loader) - post_network = yaml.load(post_network, Loader=yaml.Loader) - for values in post_network["network"]["ethernets"].values(): - values.pop("dhcp6") - assert yaml.dump(pre_network) == yaml.dump(post_network) - else: + if CURRENT_RELEASE < MANTIC: + # Assert the full content is preserved including header comment + # since cloud-init writes the file directly and does not use + # netplan API to write 50-cloud-init.yaml. assert pre_network == post_network + else: + # Mantic and later Netplan API is used and doesn't allow + # cloud-init to write header comments in network config + assert yaml.safe_load(pre_network) == yaml.safe_load(post_network) # Calculate and log all the boot numbers pre_analyze_totals = [ @@ -197,7 +193,6 @@ def test_subsequent_boot_of_upgraded_package(session_cloud: IntegrationCloud): pytest.fail(UNSUPPORTED_INSTALL_METHOD_MSG.format(source)) else: pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source)) - return # type checking doesn't understand that skip raises launch_kwargs = {"image_id": session_cloud.initial_image_id} diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index 8ee3631d0..d218861f5 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -1,3 +1,4 @@ +import json import logging import multiprocessing import os @@ -8,12 +9,14 @@ from functools import lru_cache from itertools import chain from pathlib import Path -from typing import TYPE_CHECKING, Set +from typing import TYPE_CHECKING, List, Optional, Set, Union import pytest from cloudinit.subp import subp +LOG = logging.getLogger("integration_testing.util") + if TYPE_CHECKING: # instances.py has imports util.py, so avoid circular import from tests.integration_tests.instances import IntegrationInstance @@ -42,6 +45,130 @@ def verify_ordered_items_in_text(to_verify: list, text: str): index = matched.start() +def _format_found(header: str, items: list) -> str: + """Helper function to format assertion message""" + + # do nothing, allows this formatter to be "stackable" + if not items: + return "" + + # if only one error put the header and the error message on a single line + if 1 == len(items): + return f"\n{header}: {items.pop(0)}" + + # otherwise make a list after header + else: + return f"\n{header}:\n\t- " + "\n\t- ".join(items) + + +def verify_clean_boot( + instance: "IntegrationInstance", + 
ignore_warnings: Optional[Union[List[str], bool]] = None,
+    ignore_errors: Optional[Union[List[str], bool]] = None,
+    require_warnings: Optional[list] = None,
+    require_errors: Optional[list] = None,
+):
+    """Raise assertions if the client experienced unexpected warnings
+    or errors.
+
+    Fail when a required error or warning isn't found.
+
+    This function is similar to verify_clean_log, hence the similar name.
+
+    Differences from verify_clean_log:
+
+    - more expressive syntax
+    - extensible (can be easily extended for other log levels)
+    - less resource intensive (no log copying required)
+    - nice error formatting
+
+    instance: test instance
+    ignore_warnings: list of expected warnings to ignore,
+        or True to ignore all warnings
+    ignore_errors: list of expected errors to ignore,
+        or True to ignore all errors
+    require_warnings: optional list of warnings that must be present
+    require_errors: optional list of errors that must be present
+    """
+    ignore_errors = ignore_errors or []
+    ignore_warnings = ignore_warnings or []
+    require_errors = require_errors or []
+    require_warnings = require_warnings or []
+    status = json.loads(instance.execute("cloud-init status --format=json"))
+
+    unexpected_errors = set()
+    unexpected_warnings = set()
+
+    required_warnings_found = set()
+    required_errors_found = set()
+
+    for current_error in status["errors"]:
+
+        # check for required errors
+        for expected in require_errors:
+            if expected in current_error:
+                required_errors_found.add(expected)
+
+        # check for unexpected errors
+        if ignore_errors is True:
+            continue
+        for expected in [*ignore_errors, *require_errors]:
+            if expected in current_error:
+                break
+        else:
+            unexpected_errors.add(current_error)
+
+    # check for unexpected warnings
+    for current_warning in status["recoverable_errors"].get("WARNING", []):
+
+        # check for required warnings
+        for expected in require_warnings:
+            if expected in current_warning:
+                required_warnings_found.add(expected)
+
+        # check for unexpected warnings
+        if ignore_warnings is True:
+            continue
+        for expected in [*ignore_warnings, *require_warnings]:
+            if expected in current_warning:
+                break
+        else:
+            unexpected_warnings.add(current_warning)
+
+    required_errors_not_found = set(require_errors) - required_errors_found
+    required_warnings_not_found = (
+        set(require_warnings) - required_warnings_found
+    )
+
+    errors = [
+        *unexpected_errors,
+        *required_errors_not_found,
+        *unexpected_warnings,
+        *required_warnings_not_found,
+    ]
+    if errors:
+        message = ""
+        # if there is only one message, don't include the generic header
+        # so that the user can read the exact message in the pytest summary
+        if len(errors) > 1:
+            # more than one error, so include a generic message
+            message += "Unexpected warnings or errors found"
+
+        # errors are probably more important, order them first
+        message += _format_found(
+            "Found unexpected errors", list(unexpected_errors)
+        )
+        message += _format_found(
+            "Required errors not found", list(required_errors_not_found)
+        )
+        message += _format_found(
+            "Found unexpected warnings", list(unexpected_warnings)
+        )
+        message += _format_found(
+            "Required warnings not found", list(required_warnings_not_found)
+        )
+        assert not errors, message
+
+
 def verify_clean_log(log: str, ignore_deprecations: bool = True):
     """Assert no unexpected tracebacks or warnings in logs"""
     if ignore_deprecations:
@@ -57,6 +184,10 @@ def verify_clean_log(log: str, ignore_deprecations: bool = True):
         raise AssertionError(
             "Found unexpected errors: %s" % "\n".join(error_logs)
         )
+    if
re.findall("Cloud-init.*received SIG", log): + raise AssertionError( + "Found unexpected signal termination: %s" % "\n".join(error_logs) + ) warning_count = log.count("[WARNING]") expected_warnings = 0 @@ -70,8 +201,6 @@ def verify_clean_log(log: str, ignore_deprecations: bool = True): # Ubuntu lxd storage "thinpool by default on Ubuntu due to LP #1982780", "WARNING]: Could not match supplied host pattern, ignoring:", - # https://bugs.launchpad.net/ubuntu/+source/netplan.io/+bug/2041727 - "Cannot call Open vSwitch: ovsdb-server.service is not running.", ] traceback_texts = [] if "install canonical-livepatch" in log: @@ -82,9 +211,6 @@ def verify_clean_log(log: str, ignore_deprecations: bool = True): ) if "found network data from DataSourceNone" in log: warning_texts.append("Used fallback datasource") - warning_texts.append( - "Falling back to a hard restart of systemd-networkd.service" - ) if "oracle" in log: # LP: #1842752 lease_exists_text = "Stderr: RTNETLINK answers: File exists" @@ -178,16 +304,14 @@ def wait_for_cloud_init(client: "IntegrationInstance", num_retries: int = 30): for _ in range(num_retries): try: result = client.execute("cloud-init status") - if ( - result - and result.ok - and ("running" not in result or "not started" not in result) + if result.return_code in (0, 2) and ( + "running" not in result or "not started" not in result ): return result except Exception as e: last_exception = e time.sleep(1) - raise Exception( + raise Exception( # pylint: disable=W0719 "cloud-init status did not return successfully." ) from last_exception @@ -197,6 +321,8 @@ def get_console_log(client: "IntegrationInstance"): console_log = client.instance.console_log() except NotImplementedError: pytest.skip("NotImplementedError when requesting console log") + if console_log is None: + pytest.skip("Console log has not been setup") if console_log.lower().startswith("no console output"): pytest.fail("no console output") return console_log @@ -219,3 +345,51 @@ def get_feature_flag_value(client: "IntegrationInstance", key): if "NameError" in value: raise NameError(f"name '{key}' is not defined") return value + + +def override_kernel_command_line(ds_str: str, instance: "IntegrationInstance"): + """set the kernel command line and reboot, return after boot done + + This will not work with containers. This is only tested with lxd vms + but in theory should work on any virtual machine using grub. + + ds_str: the string that will be inserted into /proc/cmdline + instance: instance to set kernel command line for + """ + + # The final output in /etc/default/grub should be: + # + # GRUB_CMDLINE_LINUX="'ds=nocloud;s=http://my-url/'" + # + # That ensures that the kernel command line passed into + # /boot/efi/EFI/ubuntu/grub.cfg will be properly single-quoted + # + # Example: + # + # linux /boot/vmlinuz-5.15.0-1030-kvm ro 'ds=nocloud;s=http://my-url/' + # + # Not doing this will result in a semicolon-delimited ds argument + # terminating the kernel arguments prematurely. + assert instance.execute( + 'printf "GRUB_CMDLINE_LINUX=\\"" >> /etc/default/grub' + ).ok + assert instance.execute('printf "\'" >> /etc/default/grub').ok + assert instance.execute(f"printf '{ds_str}' >> /etc/default/grub").ok + assert instance.execute('printf "\'\\"" >> /etc/default/grub').ok + + # We should probably include non-systemd distros at some point. 
This should + # most likely be as simple as updating the output path for grub-mkconfig + assert instance.execute( + "grub-mkconfig -o /boot/efi/EFI/ubuntu/grub.cfg" + ).ok + assert instance.execute("cloud-init clean --logs").ok + instance.restart() + + +def push_and_enable_systemd_unit( + client: "IntegrationInstance", unit_name: str, content: str +) -> None: + service_filename = f"/etc/systemd/system/{unit_name}" + client.write_to_file(service_filename, content) + client.execute(f"chmod 0644 {service_filename}", use_sudo=True) + client.execute(f"systemctl enable {unit_name}", use_sudo=True) diff --git a/tests/unittests/analyze/__init__.py b/tests/unittests/analyze/__init__.py new file mode 100644 index 000000000..da6365a59 --- /dev/null +++ b/tests/unittests/analyze/__init__.py @@ -0,0 +1 @@ +# This file is part of cloud-init. See LICENSE file for license information. diff --git a/tests/unittests/analyze/test_dump.py b/tests/unittests/analyze/test_dump.py index 3fb0046aa..18088c47a 100644 --- a/tests/unittests/analyze/test_dump.py +++ b/tests/unittests/analyze/test_dump.py @@ -1,18 +1,19 @@ # This file is part of cloud-init. See LICENSE file for license information. -from datetime import datetime +from contextlib import suppress +from datetime import datetime, timezone from textwrap import dedent import pytest from cloudinit.analyze.dump import ( dump_events, + has_gnu_date, parse_ci_logline, parse_timestamp, ) -from cloudinit.subp import which -from cloudinit.util import is_Linux, write_file -from tests.unittests.helpers import mock, skipIf +from cloudinit.util import write_file +from tests.unittests.helpers import mock class TestParseTimestamp: @@ -20,8 +21,10 @@ def test_parse_timestamp_handles_cloud_init_default_format(self): """Logs with cloud-init detailed formats will be properly parsed.""" trusty_fmt = "%Y-%m-%d %H:%M:%S,%f" trusty_stamp = "2016-09-12 14:39:20,839" - dt = datetime.strptime(trusty_stamp, trusty_fmt) - assert float(dt.strftime("%s.%f")) == parse_timestamp(trusty_stamp) + dt = datetime.strptime(trusty_stamp, trusty_fmt).replace( + tzinfo=timezone.utc + ) + assert dt.timestamp() == parse_timestamp(trusty_stamp) def test_parse_timestamp_handles_syslog_adding_year(self): """Syslog timestamps lack a year. Add year and properly parse.""" @@ -30,8 +33,10 @@ def test_parse_timestamp_handles_syslog_adding_year(self): # convert stamp ourselves by adding the missing year value year = datetime.now().year - dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt) - assert float(dt.strftime("%s.%f")) == parse_timestamp(syslog_stamp) + dt = datetime.strptime( + syslog_stamp + " " + str(year), syslog_fmt + ).replace(tzinfo=timezone.utc) + assert dt.timestamp() == parse_timestamp(syslog_stamp) def test_parse_timestamp_handles_journalctl_format_adding_year(self): """Journalctl precise timestamps lack a year. 
Add year and parse.""" @@ -40,14 +45,11 @@ def test_parse_timestamp_handles_journalctl_format_adding_year(self): # convert stamp ourselves by adding the missing year value year = datetime.now().year - dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt) - assert float(dt.strftime("%s.%f")) == parse_timestamp(journal_stamp) - - @skipIf(not which("date"), "'date' command not available.") - @skipIf( - not is_Linux() and not which("gdate"), - "'GNU date' command not available.", - ) + dt = datetime.strptime( + journal_stamp + " " + str(year), journal_fmt + ).replace(tzinfo=timezone.utc) + assert dt.timestamp() == parse_timestamp(journal_stamp) + @pytest.mark.allow_subp_for("date", "gdate") def test_parse_unexpected_timestamp_format_with_date_command(self): """Dump sends unexpected timestamp formats to date for processing.""" @@ -55,9 +57,59 @@ def test_parse_unexpected_timestamp_format_with_date_command(self): new_stamp = "17:15 08/08" # convert stamp ourselves by adding the missing year value year = datetime.now().year - dt = datetime.strptime(new_stamp + " " + str(year), new_fmt) + dt = datetime.strptime(new_stamp + " " + str(year), new_fmt).replace( + tzinfo=timezone.utc + ) - assert float(dt.strftime("%s.%f")) == parse_timestamp(new_stamp) + if has_gnu_date(): + assert dt.timestamp() == parse_timestamp(new_stamp) + else: + with pytest.raises(ValueError): + parse_timestamp(new_stamp) + + @pytest.mark.allow_subp_for("date", "gdate") + def test_parse_timestamp_round_trip(self): + """Ensure that timezone doesn't affect the returned timestamp. + + Depending on the format of the timestamp, we use different methods + to parse it. In all cases, the timestamp should be returned the + same, regardless of timezone. + """ + times = [ + "Sep 12 14:39:00", + "Sep 12 14:39:00.839452", + "14:39 09/12", + "2020-09-12 14:39:00,839", + "2020-09-12 14:39:00.839452+00:00", + ] + + timestamps = [] + for f in times: + with suppress(ValueError): + timestamps.append(parse_timestamp(f)) + + new_times = [ + datetime.fromtimestamp(ts, tz=timezone.utc).strftime( + "%Y-%m-%d %H:%M:%S" + ) + for ts in timestamps + ] + assert all(t.endswith("-09-12 14:39:00") for t in new_times) + + @pytest.mark.allow_subp_for("date", "gdate") + def test_parse_timestamp_handles_explicit_timezone(self): + """Explicitly provided timezones are parsed and properly offset.""" + if not has_gnu_date(): + pytest.skip("GNU date is required for this test") + + original_ts = "2020-09-12 14:39:20.839452+02:00" + parsed_ts = parse_timestamp(original_ts) + assert ( + datetime.fromtimestamp(parsed_ts, tz=timezone.utc).strftime( + "%Y-%m-%d %H:%M:%S" + ) + == "2020-09-12 12:39:20" + ) class TestParseCILogLine: @@ -82,8 +134,8 @@ def test_parse_logline_returns_event_for_cloud_init_logs(self): ) dt = datetime.strptime( "2017-08-08 20:05:07,147", "%Y-%m-%d %H:%M:%S,%f" - ) - timestamp = float(dt.strftime("%s.%f")) + ).replace(tzinfo=timezone.utc) + timestamp = dt.timestamp() expected = { "description": "starting search for local datasources", "event_type": "start", @@ -103,8 +155,8 @@ def test_parse_logline_returns_event_for_journalctl_logs(self): year = datetime.now().year dt = datetime.strptime( "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y" - ) - timestamp = float(dt.strftime("%s.%f")) + ).replace(tzinfo=timezone.utc) + timestamp = dt.timestamp() expected = { "description": "starting search for local datasources", "event_type": "start", @@ -145,9 +197,11 @@ def test_parse_logline_returns_event_for_amazon_linux_2_line(self): ) 
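For context on the timestamp changes in this file: `strftime("%s")` is a non-portable glibc extension that silently interprets a naive datetime in the host's local timezone, whereas `.timestamp()` on an explicitly UTC-aware datetime is deterministic. A minimal standalone sketch (not part of this patch) of the pattern the tests migrate to:

```python
# Sketch: parse a cloud-init log timestamp deterministically, independent
# of the host timezone, by marking the naive datetime as UTC first.
from datetime import datetime, timezone

stamp = "2016-09-12 14:39:20,839"
naive = datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S,%f")

# Old approach (avoided here): float(naive.strftime("%s.%f")). "%s" is a
# glibc extension and applies the local timezone to the naive datetime.
utc_aware = naive.replace(tzinfo=timezone.utc)
print(utc_aware.timestamp())  # same float on every host: 1473691160.839
```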
# Generate the expected value using `datetime`, so that TZ # determination is consistent with the code under test. - timestamp_dt = datetime.strptime( - "Apr 30 19:39:11", "%b %d %H:%M:%S" - ).replace(year=datetime.now().year) + timestamp_dt = ( + datetime.strptime("Apr 30 19:39:11", "%b %d %H:%M:%S") + .replace(year=datetime.now().year) + .replace(tzinfo=timezone.utc) + ) expected = { "description": "attempting to read from cache [check]", "event_type": "start", @@ -185,8 +239,8 @@ def test_dump_events_with_rawdata(self, m_parse_from_date): year = datetime.now().year dt1 = datetime.strptime( "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y" - ) - timestamp1 = float(dt1.strftime("%s.%f")) + ).replace(tzinfo=timezone.utc) + timestamp1 = dt1.timestamp() expected_events = [ { "description": "starting search for local datasources", @@ -217,8 +271,8 @@ def test_dump_events_with_cisource(self, m_parse_from_date, tmpdir): year = datetime.now().year dt1 = datetime.strptime( "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y" - ) - timestamp1 = float(dt1.strftime("%s.%f")) + ).replace(tzinfo=timezone.utc) + timestamp1 = dt1.timestamp() expected_events = [ { "description": "starting search for local datasources", diff --git a/tests/unittests/cmd/devel/test_logs.py b/tests/unittests/cmd/devel/test_logs.py index 4c38e034a..7dfdfac6e 100644 --- a/tests/unittests/cmd/devel/test_logs.py +++ b/tests/unittests/cmd/devel/test_logs.py @@ -35,7 +35,9 @@ def test_collect_logs_with_userdata_requires_root_user( " Try sudo cloud-init collect-logs\n" == m_stderr.getvalue() ) - def test_collect_logs_creates_tarfile(self, m_getuid, mocker, tmpdir): + def test_collect_logs_creates_tarfile( + self, m_getuid, m_log_paths, mocker, tmpdir + ): """collect-logs creates a tarfile with all related cloud-init info.""" m_getuid.return_value = 100 log1 = tmpdir.join("cloud-init.log") @@ -46,12 +48,10 @@ def test_collect_logs_creates_tarfile(self, m_getuid, mocker, tmpdir): write_file(log2, "cloud-init-output-log") log2_rotated = tmpdir.join("cloud-init-output.log.1.gz") write_file(log2_rotated, "cloud-init-output-log-rotated") - run_dir = tmpdir.join("run") - write_file(run_dir.join("results.json"), "results") + run_dir = m_log_paths.run_dir + write_file(str(run_dir / "results.json"), "results") write_file( - run_dir.join( - INSTANCE_JSON_SENSITIVE_FILE, - ), + str(m_log_paths.instance_data_sensitive), "sensitive", ) output_tarfile = str(tmpdir.join("logs.tgz")) @@ -108,7 +108,6 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): M_PATH + "subprocess.call", side_effect=fake_subprocess_call ) mocker.patch(M_PATH + "sys.stderr", fake_stderr) - mocker.patch(M_PATH + "CLOUDINIT_RUN_DIR", run_dir) mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", []) mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", []) logs.collect_logs(output_tarfile, include_userdata=False) @@ -155,7 +154,7 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile) def test_collect_logs_includes_optional_userdata( - self, m_getuid, mocker, tmpdir + self, m_getuid, mocker, tmpdir, m_log_paths ): """collect-logs include userdata when --include-userdata is set.""" m_getuid.return_value = 0 @@ -163,12 +162,12 @@ def test_collect_logs_includes_optional_userdata( write_file(log1, "cloud-init-log") log2 = tmpdir.join("cloud-init-output.log") write_file(log2, "cloud-init-output-log") - userdata = tmpdir.join("user-data.txt") - write_file(userdata, "user-data") - run_dir = 
tmpdir.join("run") - write_file(run_dir.join("results.json"), "results") + userdata = m_log_paths.userdata_raw + write_file(str(userdata), "user-data") + run_dir = m_log_paths.run_dir + write_file(str(run_dir / "results.json"), "results") write_file( - run_dir.join(INSTANCE_JSON_SENSITIVE_FILE), + str(m_log_paths.instance_data_sensitive), "sensitive", ) output_tarfile = str(tmpdir.join("logs.tgz")) @@ -223,23 +222,21 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): M_PATH + "subprocess.call", side_effect=fake_subprocess_call ) mocker.patch(M_PATH + "sys.stderr", fake_stderr) - mocker.patch(M_PATH + "CLOUDINIT_RUN_DIR", run_dir) mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", []) mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", []) - mocker.patch(M_PATH + "_get_user_data_file", return_value=userdata) logs.collect_logs(output_tarfile, include_userdata=True) # unpack the tarfile and check file contents subp(["tar", "zxvf", output_tarfile, "-C", str(tmpdir)]) out_logdir = tmpdir.join(date_logdir) assert "user-data" == load_text_file( - os.path.join(out_logdir, "user-data.txt") + os.path.join(out_logdir, userdata.name) ) assert "sensitive" == load_text_file( os.path.join( out_logdir, "run", "cloud-init", - INSTANCE_JSON_SENSITIVE_FILE, + m_log_paths.instance_data_sensitive.name, ) ) fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile) @@ -382,7 +379,7 @@ def test_include_installer_logs_when_present( mocker.patch( M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", apport_sensitive_files ) - logs.collect_installer_logs( + logs._collect_installer_logs( log_dir=tmpdir.strpath, include_userdata=include_userdata, verbosity=0, @@ -400,7 +397,9 @@ def test_include_installer_logs_when_present( class TestParser: - def test_parser_help_has_userdata_file(self, mocker, tmpdir): - userdata = str(tmpdir.join("user-data.txt")) - mocker.patch(M_PATH + "_get_user_data_file", return_value=userdata) - assert userdata in re.sub(r"\s+", "", logs.get_parser().format_help()) + def test_parser_help_has_userdata_file(self, m_log_paths, mocker, tmpdir): + # userdata = str(tmpdir.join("user-data.txt")) + userdata = m_log_paths.userdata_raw + assert str(userdata) in re.sub( + r"\s+", "", logs.get_parser().format_help() + ) diff --git a/tests/unittests/cmd/devel/test_net_convert.py b/tests/unittests/cmd/devel/test_net_convert.py index fb72963f8..b59caed16 100644 --- a/tests/unittests/cmd/devel/test_net_convert.py +++ b/tests/unittests/cmd/devel/test_net_convert.py @@ -3,8 +3,8 @@ import itertools import pytest +import yaml -from cloudinit import safeyaml as yaml from cloudinit.cmd.devel import net_convert from cloudinit.distros.debian import NETWORK_FILE_HEADER from tests.unittests.helpers import mock @@ -62,7 +62,6 @@ # BOOTPROTO=dhcp DEVICE=eth0 -NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -90,6 +89,18 @@ """ +@pytest.fixture +def mock_setup_logging(): + """Mock setup_basic_logging to avoid changing log level. + + net_convert.handle_args() can call setup_basic_logging() with a + WARNING level, which would be a side-effect for future tests. + It's behavior isn't checked in these tests, so mock it out. 
+ """ + with mock.patch(f"{M_PATH}log.setup_basic_logging"): + yield + + class TestNetConvert: missing_required_args = itertools.combinations( @@ -155,7 +166,13 @@ def test_argparse_error_on_missing_args(self, cmdargs, capsys, tmpdir): ), ) def test_convert_output_kind_artifacts( - self, output_kind, outfile_content, debug, capsys, tmpdir + self, + output_kind, + outfile_content, + debug, + capsys, + tmpdir, + mock_setup_logging, ): """Assert proper output-kind artifacts are written.""" network_data = tmpdir.join("network_data") @@ -186,7 +203,9 @@ def test_convert_output_kind_artifacts( ] == chown.call_args_list @pytest.mark.parametrize("debug", (False, True)) - def test_convert_netplan_passthrough(self, debug, tmpdir): + def test_convert_netplan_passthrough( + self, debug, tmpdir, mock_setup_logging + ): """Assert that if the network config's version is 2 and the renderer is Netplan, then the config is passed through as-is. """ @@ -224,4 +243,4 @@ def test_convert_netplan_passthrough(self, debug, tmpdir): with mock.patch("cloudinit.util.chownbyname"): net_convert.handle_args("somename", args) outfile = tmpdir.join("etc/netplan/50-cloud-init.yaml") - assert yaml.load(content) == yaml.load(outfile.read()) + assert yaml.safe_load(content) == yaml.safe_load(outfile.read()) diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py index 6569214ba..5e1f9d461 100644 --- a/tests/unittests/cmd/test_clean.py +++ b/tests/unittests/cmd/test_clean.py @@ -7,6 +7,8 @@ import cloudinit.settings from cloudinit.cmd import clean +from cloudinit.distros import Distro +from cloudinit.stages import Init from cloudinit.util import ensure_dir, sym_link, write_file from tests.unittests.helpers import mock, wrap_and_call @@ -30,21 +32,14 @@ def clean_paths(tmpdir): @pytest.fixture(scope="function") def init_class(clean_paths): - class FakeInit: - cfg = { - "def_log_file": clean_paths.log, - "output": {"all": f"|tee -a {clean_paths.output_log}"}, - } - # Ensure cloud_dir has a trailing slash, to match real behaviour - paths = MyPaths(cloud_dir=f"{clean_paths.cloud_dir}/") - - def __init__(self, ds_deps): - pass - - def read_cfg(self): - pass - - return FakeInit + init = mock.Mock(spec=Init) + init.paths = MyPaths(cloud_dir=f"{clean_paths.cloud_dir}/") + init.distro.shutdown_command = Distro.shutdown_command + init.cfg = { + "def_log_file": clean_paths.log, + "output": {"all": f"|tee -a {clean_paths.output_log}"}, + } + return init class TestClean: @@ -56,10 +51,8 @@ def test_remove_artifacts_removes_logs(self, clean_paths, init_class): assert ( os.path.exists(clean_paths.cloud_dir) is False ), "Unexpected cloud_dir" - retcode = wrap_and_call( - "cloudinit.cmd.clean", - {"Init": {"side_effect": init_class}}, - clean.remove_artifacts, + retcode = clean.remove_artifacts( + init=init_class, remove_logs=True, ) assert ( @@ -93,10 +86,8 @@ def test_remove_net_conf(self, clean_paths, init_class): "cloudinit.cmd.clean.GEN_NET_CONFIG_FILES", [f.strpath for f in TEST_GEN_NET_CONFIG_FILES], ): - retcode = wrap_and_call( - "cloudinit.cmd.clean", - {"Init": {"side_effect": init_class}}, - clean.remove_artifacts, + retcode = clean.remove_artifacts( + init_class, remove_logs=False, remove_config=["network"], ) @@ -130,10 +121,8 @@ def test_remove_ssh_conf(self, clean_paths, init_class): "cloudinit.cmd.clean.GEN_SSH_CONFIG_FILES", TEST_GEN_SSH_CONFIG_FILES, ): - retcode = wrap_and_call( - "cloudinit.cmd.clean", - {"Init": {"side_effect": init_class}}, - clean.remove_artifacts, + retcode = 
clean.remove_artifacts( + init_class, remove_logs=False, remove_config=["ssh_config"], ) @@ -170,10 +159,8 @@ def test_remove_all_conf(self, clean_paths, init_class): "cloudinit.cmd.clean.GEN_SSH_CONFIG_FILES", TEST_GEN_SSH_CONFIG_FILES, ): - retcode = wrap_and_call( - "cloudinit.cmd.clean", - {"Init": {"side_effect": init_class}}, - clean.remove_artifacts, + retcode = clean.remove_artifacts( + init_class, remove_logs=False, remove_config=["all"], ) @@ -200,10 +187,8 @@ def test_keep_net_conf(self, clean_paths, init_class): "cloudinit.cmd.clean.GEN_NET_CONFIG_FILES", TEST_GEN_NET_CONFIG_FILES, ): - retcode = wrap_and_call( - "cloudinit.cmd.clean", - {"Init": {"side_effect": init_class}}, - clean.remove_artifacts, + retcode = clean.remove_artifacts( + init_class, remove_logs=False, remove_config=[], ) @@ -225,12 +210,8 @@ def test_remove_artifacts_runparts_clean_d(self, clean_paths, init_class): with mock.patch.object( cloudinit.settings, "CLEAN_RUNPARTS_DIR", clean_paths.clean_dir ): - retcode = wrap_and_call( - "cloudinit.cmd.clean", - { - "Init": {"side_effect": init_class}, - }, - clean.remove_artifacts, + retcode = clean.remove_artifacts( + init_class, remove_logs=False, ) assert ( @@ -243,10 +224,8 @@ def test_remove_artifacts_preserves_logs(self, clean_paths, init_class): clean_paths.log.write("cloud-init-log") clean_paths.output_log.write("cloud-init-output-log") - retcode = wrap_and_call( - "cloudinit.cmd.clean", - {"Init": {"side_effect": init_class}}, - clean.remove_artifacts, + retcode = clean.remove_artifacts( + init_class, remove_logs=False, ) assert 0 == retcode @@ -269,10 +248,8 @@ def test_remove_artifacts_removes_unlinks_symlinks( with mock.patch.object( cloudinit.settings, "CLEAN_RUNPARTS_DIR", clean_paths.clean_dir ): - retcode = wrap_and_call( - "cloudinit.cmd.clean", - {"Init": {"side_effect": init_class}}, - clean.remove_artifacts, + retcode = clean.remove_artifacts( + init_class, remove_logs=False, ) assert 0 == retcode @@ -295,10 +272,8 @@ def test_remove_artifacts_removes_artifacts_skipping_seed( with mock.patch.object( cloudinit.settings, "CLEAN_RUNPARTS_DIR", clean_paths.clean_dir ): - retcode = wrap_and_call( - "cloudinit.cmd.clean", - {"Init": {"side_effect": init_class}}, - clean.remove_artifacts, + retcode = clean.remove_artifacts( + init_class, remove_logs=False, ) assert 0 == retcode @@ -323,10 +298,8 @@ def test_remove_artifacts_removes_artifacts_removes_seed( with mock.patch.object( cloudinit.settings, "CLEAN_RUNPARTS_DIR", clean_paths.clean_dir ): - retcode = wrap_and_call( - "cloudinit.cmd.clean", - {"Init": {"side_effect": init_class}}, - clean.remove_artifacts, + retcode = clean.remove_artifacts( + init_class, remove_logs=False, remove_seed=True, ) @@ -346,15 +319,13 @@ def test_remove_artifacts_returns_one_on_errors( ensure_dir(clean_paths.cloud_dir) ensure_dir(clean_paths.cloud_dir.join("dir1")) - retcode = wrap_and_call( - "cloudinit.cmd.clean", - { - "del_dir": {"side_effect": OSError("oops")}, - "Init": {"side_effect": init_class}, - }, - clean.remove_artifacts, - remove_logs=False, - ) + with mock.patch( + "cloudinit.cmd.clean.del_dir", side_effect=OSError("oops") + ): + retcode = clean.remove_artifacts( + init_class, + remove_logs=False, + ) assert 1 == retcode _out, err = capsys.readouterr() assert ( @@ -368,7 +339,7 @@ def test_handle_clean_args_reboots(self, init_class): called_cmds = [] def fake_subp(cmd, capture): - called_cmds.append((cmd, capture)) + called_cmds.append(cmd) return "", "" myargs = namedtuple( @@ -385,14 +356,14 @@ def 
fake_subp(cmd, capture): "cloudinit.cmd.clean", { "subp": {"side_effect": fake_subp}, - "Init": {"side_effect": init_class}, + "Init": {"return_value": init_class}, }, clean.handle_clean_args, name="does not matter", args=cmdargs, ) assert 0 == retcode - assert [(["shutdown", "-r", "now"], False)] == called_cmds + assert [["shutdown", "-r", "now"]] == called_cmds @pytest.mark.parametrize( "machine_id,systemd_val", @@ -428,16 +399,13 @@ def test_handle_clean_args_removed_machine_id( with mock.patch.object( cloudinit.cmd.clean, "ETC_MACHINE_ID", machine_id_path.strpath ): - retcode = wrap_and_call( - "cloudinit.cmd.clean", - { - "Init": {"side_effect": init_class}, - }, - clean.handle_clean_args, - name="does not matter", - args=cmdargs, - ) - assert 0 == retcode + with mock.patch( + "cloudinit.cmd.clean.Init", return_value=init_class + ): + assert 0 == clean.handle_clean_args( + name="does not matter", + args=cmdargs, + ) if systemd_val: if machine_id: assert "uninitialized\n" == machine_id_path.read() @@ -453,7 +421,7 @@ def test_status_main(self, clean_paths, init_class): wrap_and_call( "cloudinit.cmd.clean", { - "Init": {"side_effect": init_class}, + "Init": {"return_value": init_class}, "sys.argv": {"new": ["clean", "--logs"]}, }, clean.main, diff --git a/tests/unittests/cmd/test_main.py b/tests/unittests/cmd/test_main.py index ea258c4ab..25bec3561 100644 --- a/tests/unittests/cmd/test_main.py +++ b/tests/unittests/cmd/test_main.py @@ -56,6 +56,18 @@ def setUp(self): self.patchUtils(self.new_root) self.stderr = StringIO() self.patchStdoutAndStderr(stderr=self.stderr) + # Every cc_ module calls get_meta_doc on import. + # This call will fail if filesystem redirection mocks are in place + # and the module hasn't already been imported which can depend + # on test ordering. + self.m_doc = mock.patch( + "cloudinit.config.schema.get_meta_doc", return_value={} + ) + self.m_doc.start() + + def tearDown(self): + self.m_doc.stop() + super().tearDown() def test_main_init_run_net_runs_modules(self): """Modules like write_files are run in 'net' mode.""" @@ -71,7 +83,7 @@ def test_main_init_run_net_runs_modules(self): (_item1, item2) = wrap_and_call( "cloudinit.cmd.main", { - "util.close_stdin": True, + "close_stdin": True, "netinfo.debug_info": "my net debug info", "util.fixup_output": ("outfmt", "errfmt"), }, @@ -141,7 +153,7 @@ def set_hostname(name, cfg, cloud, args): (_item1, item2) = wrap_and_call( "cloudinit.cmd.main", { - "util.close_stdin": True, + "close_stdin": True, "netinfo.debug_info": "my net debug info", "cc_set_hostname.handle": {"side_effect": set_hostname}, "util.fixup_output": ("outfmt", "errfmt"), diff --git a/tests/unittests/cmd/test_status.py b/tests/unittests/cmd/test_status.py index 542d884b4..81615e72c 100644 --- a/tests/unittests/cmd/test_status.py +++ b/tests/unittests/cmd/test_status.py @@ -186,7 +186,7 @@ def test_get_status_systemd_failure( lambda config: f"Cloud-init disabled by {config.disable_file}", id="true_on_disable_file", ), - # Not disabled when using systemd and enabled via commandline. + # Not disabled when using systemd and enabled via command line. 
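The test_clean.py hunks above replace the hand-rolled `FakeInit` class with `mock.Mock(spec=Init)`. A short illustrative sketch (not from the patch) of what a spec'd mock buys: attribute access is validated against the real class, so a typo fails loudly instead of silently returning another auto-created mock:

```python
# Sketch: spec'd mocks reject attributes the real class does not define.
from unittest import mock


class Init:  # local stand-in for cloudinit.stages.Init
    def read_cfg(self):
        pass


fake = mock.Mock(spec=Init)
fake.read_cfg()  # allowed: the real Init has read_cfg

try:
    fake.read_cfgs()  # typo: a plain Mock would happily accept this
except AttributeError as err:
    print(err)  # Mock object has no attribute 'read_cfgs'
```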
pytest.param( lambda config: config.disable_file, True, @@ -309,7 +309,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( """\ status: disabled extended_status: disabled - boot_status_code: disabled-by-kernel-cmdline + boot_status_code: disabled-by-kernel-command-line detail: disabled for some reason errors: [] recoverable_errors: {} @@ -452,7 +452,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( """\ status: error extended_status: error - running - boot_status_code: enabled-by-kernel-cmdline + boot_status_code: enabled-by-kernel-command-line last_update: Thu, 01 Jan 1970 00:02:05 +0000 detail: DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net] errors: @@ -482,7 +482,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( """\ status: running extended_status: running - boot_status_code: enabled-by-kernel-cmdline + boot_status_code: enabled-by-kernel-command-line last_update: Thu, 01 Jan 1970 00:02:04 +0000 detail: Running in stage: init errors: [] @@ -508,7 +508,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( """\ --- _schema_version: '1' - boot_status_code: enabled-by-kernel-cmdline + boot_status_code: enabled-by-kernel-command-line datasource: '' detail: 'Running in stage: init' errors: [] @@ -523,7 +523,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( recoverable_errors: {} schemas: '1': - boot_status_code: enabled-by-kernel-cmdline + boot_status_code: enabled-by-kernel-command-line datasource: '' detail: 'Running in stage: init' errors: [] @@ -560,7 +560,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( MyArgs(long=False, wait=False, format="json"), 0, { - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", "datasource": "", "detail": "Running in stage: init", "errors": [], @@ -573,7 +573,9 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( "_schema_version": "1", "schemas": { "1": { - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": ( + "enabled-by-kernel-command-line" + ), "datasource": "", "detail": "Running in stage: init", "errors": [], @@ -621,7 +623,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( 1, { "_schema_version": "1", - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", "datasource": "nocloud", "detail": ( "DataSourceNoCloud [seed=/var/.../seed/" @@ -644,7 +646,9 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( "recoverable_errors": {}, "schemas": { "1": { - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": ( + "enabled-by-kernel-command-line" + ), "datasource": "nocloud", "detail": "DataSourceNoCloud " "[seed=/var/.../seed/nocloud-net][dsmode=net]", @@ -729,7 +733,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( 0, { "_schema_version": "1", - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", "datasource": "nocloud", "detail": ( "DataSourceNoCloud [seed=/var/.../" @@ -790,7 +794,7 @@ def test_status_returns_disabled_long_on_presence_of_disable_file( }, "schemas": { "1": { - "boot_status_code": "enabled-by-kernel-cmdline", + "boot_status_code": "enabled-by-kernel-command-line", "datasource": "nocloud", "detail": "DataSourceNoCloud " "[seed=/var/.../seed/nocloud-net][dsmode=net]", @@ -892,7 +896,7 @@ def test_status_output( assert_file, cmdargs: 
MyArgs, expected_retcode: int, - expected_status: str, + expected_status: Union[str, dict], config: Config, capsys, ): diff --git a/tests/unittests/config/test_apt_configure_sources_list_v1.py b/tests/unittests/config/test_apt_configure_sources_list_v1.py index 96c0a0dfa..83e0d6bb6 100644 --- a/tests/unittests/config/test_apt_configure_sources_list_v1.py +++ b/tests/unittests/config/test_apt_configure_sources_list_v1.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init -""" test_handler_apt_configure_sources_list +"""test_handler_apt_configure_sources_list Test templating of sources list """ import stat @@ -9,6 +10,7 @@ from cloudinit import subp, util from cloudinit.config import cc_apt_configure +from cloudinit.subp import SubpResult from tests.unittests.util import get_cloud EXAMPLE_TMPL = """\ @@ -86,7 +88,7 @@ class TestAptSourceConfigSourceList: @pytest.fixture(autouse=True) def common_mocks(self, mocker): self.subp = mocker.patch.object( - subp, "subp", return_value=("PPID PID", "") + subp, "subp", return_value=SubpResult("PPID PID", "") ) mocker.patch("cloudinit.config.cc_apt_configure._ensure_dependencies") lsb = mocker.patch("cloudinit.util.lsb_release") @@ -160,22 +162,6 @@ def test_apt_v1_source_list_by_distro(self, distro, mirror, tmpdir): ) assert 0o644 == stat.S_IMODE(sources_file.stat().mode) - self.subp.assert_called_once_with( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ) - @staticmethod def myresolve(name): """Fake util.is_resolvable for mirrorfail tests""" @@ -229,21 +215,6 @@ def test_apt_v1_srcl_distro_mirrorfail( mockresolve.assert_any_call("http://does.not.exist") mockresolve.assert_any_call(mirrorcheck) - self.subp.assert_called_once_with( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ) @pytest.mark.parametrize( "deb822,cfg,apt_file,expected", @@ -301,18 +272,3 @@ def test_apt_v1_srcl_custom( sources_file = tmpdir.join(apt_file) assert expected == sources_file.read() assert 0o644 == stat.S_IMODE(sources_file.stat().mode) - self.subp.assert_called_once_with( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ) diff --git a/tests/unittests/config/test_apt_configure_sources_list_v3.py b/tests/unittests/config/test_apt_configure_sources_list_v3.py index 3770b26c7..5ba6dac86 100644 --- a/tests/unittests/config/test_apt_configure_sources_list_v3.py +++ b/tests/unittests/config/test_apt_configure_sources_list_v3.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
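The apt-configure hunks around here switch mocked `subp.subp` return values from bare tuples like `("PPID PID", "")` to `SubpResult("PPID PID", "")`. A minimal sketch of why that matters (the real class lives in `cloudinit.subp`; this local stand-in only models the two fields used in these tests): callers may access `.stdout`/`.stderr` by name, which a plain 2-tuple cannot satisfy:

```python
# Sketch: a named-tuple result supports both tuple unpacking and the
# attribute access that production code performs on subp() results.
from typing import NamedTuple


class SubpResult(NamedTuple):  # stand-in for cloudinit.subp.SubpResult
    stdout: str
    stderr: str


res = SubpResult("PPID PID", "")
out, err = res             # old tuple-style unpacking still works
assert out == res.stdout   # new attribute-style access also works
```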
+# pylint: disable=attribute-defined-outside-init -""" test_apt_custom_sources_list +"""test_apt_custom_sources_list Test templating of custom sources list """ import stat @@ -10,6 +11,7 @@ from cloudinit import subp, util from cloudinit.config import cc_apt_configure from cloudinit.distros.debian import Distro +from cloudinit.subp import SubpResult from tests.unittests.util import get_cloud TARGET = "/" @@ -158,7 +160,7 @@ def common_mocks(self, mocker): self.subp = mocker.patch.object( subp, "subp", - return_value=("PPID PID", ""), + return_value=SubpResult("PPID PID", ""), ) lsb = mocker.patch("cloudinit.util.lsb_release") lsb.return_value = {"codename": "fakerel"} @@ -193,7 +195,7 @@ def test_apt_v3_empty_cfg_source_list_by_distro( mock_shouldcfg = mocker.patch.object( cc_apt_configure, "_should_configure_on_empty_apt", - return_value=(True, "test"), + return_value=SubpResult(True, "test"), ) cc_apt_configure.handle("test", {"apt": {}}, mycloud, None) @@ -330,18 +332,3 @@ def test_apt_v3_srcl_custom_deb822_feature_aware( sources_file = tmpdir.join(apt_file) assert expected == sources_file.read() assert 0o644 == stat.S_IMODE(sources_file.stat().mode) - self.subp.assert_called_once_with( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ) diff --git a/tests/unittests/config/test_apt_key.py b/tests/unittests/config/test_apt_key.py index bbffe7d22..6325bca51 100644 --- a/tests/unittests/config/test_apt_key.py +++ b/tests/unittests/config/test_apt_key.py @@ -41,85 +41,106 @@ class TestAptKey: @mock.patch.object(subp, "subp", return_value=SubpResult("fakekey", "")) @mock.patch.object(util, "write_file") - def _apt_key_add_success_helper(self, directory, *args, hardened=False): + def _apt_key_add_success_helper( + self, directory, gpg, *args, hardened=False + ): file = cc_apt_configure.apt_key( - "add", output_file="my-key", data="fakekey", hardened=hardened + "add", + gpg=gpg, + output_file="my-key", + data="fakekey", + hardened=hardened, ) assert file == directory + "/my-key.gpg" - def test_apt_key_add_success(self): + def test_apt_key_add_success(self, m_gpg): """Verify the right directory path gets returned for unhardened case""" - self._apt_key_add_success_helper("/etc/apt/trusted.gpg.d") + self._apt_key_add_success_helper("/etc/apt/trusted.gpg.d", m_gpg) - def test_apt_key_add_success_hardened(self): + def test_apt_key_add_success_hardened(self, m_gpg): """Verify the right directory path gets returned for hardened case""" self._apt_key_add_success_helper( - "/etc/apt/cloud-init.gpg.d", hardened=True + "/etc/apt/cloud-init.gpg.d", m_gpg, hardened=True ) - def test_apt_key_add_fail_no_file_name(self): + def test_apt_key_add_fail_no_file_name(self, m_gpg): """Verify that null filename gets handled correctly""" - file = cc_apt_configure.apt_key("add", output_file=None, data="") + file = cc_apt_configure.apt_key( + "add", gpg=m_gpg, output_file=None, data="" + ) assert "/dev/null" == file - def _apt_key_fail_helper(self): + def _apt_key_fail_helper(self, m_gpg): file = cc_apt_configure.apt_key( - "add", output_file="my-key", data="fakekey" + "add", gpg=m_gpg, output_file="my-key", data="fakekey" ) assert file == "/dev/null" - @mock.patch.object(subp, "subp", side_effect=subp.ProcessExecutionError) - def test_apt_key_add_fail_no_file_name_subproc(self, *args): + def test_apt_key_add_fail_no_file_name_subproc(self, m_gpg): """Verify that bad key value gets handled correctly""" - self._apt_key_fail_helper() + 
m_gpg.dearmor = mock.Mock(side_effect=subp.ProcessExecutionError) + self._apt_key_fail_helper(m_gpg) - @mock.patch.object( - subp, "subp", side_effect=UnicodeDecodeError("test", b"", 1, 1, "") - ) - def test_apt_key_add_fail_no_file_name_unicode(self, *args): + def test_apt_key_add_fail_no_file_name_unicode(self, m_gpg): """Verify that bad key encoding gets handled correctly""" - self._apt_key_fail_helper() + m_gpg.dearmor = mock.Mock( + side_effect=UnicodeDecodeError("test", b"", 1, 1, "") + ) + self._apt_key_fail_helper(m_gpg) - def _apt_key_list_success_helper(self, finger, key, human_output=True): + def _apt_key_list_success_helper( + self, finger, key, gpg, human_output=True + ): @mock.patch.object(os, "listdir", return_value=("/fake/dir/key.gpg",)) @mock.patch.object(subp, "subp", return_value=(key, "")) def mocked_list(*a): - keys = cc_apt_configure.apt_key("list", human_output) + keys = cc_apt_configure.apt_key("list", gpg, human_output) assert finger in keys mocked_list() - def test_apt_key_list_success_human(self): + def test_apt_key_list_success_human(self, m_gpg): """Verify expected key output, human""" + m_gpg.list_keys = mock.Mock( + return_value="3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85" + ) self._apt_key_list_success_helper( - TEST_KEY_FINGERPRINT_HUMAN, TEST_KEY_HUMAN + TEST_KEY_FINGERPRINT_HUMAN, TEST_KEY_HUMAN, m_gpg ) - def test_apt_key_list_success_machine(self): + def test_apt_key_list_success_machine(self, m_gpg): """Verify expected key output, machine""" + m_gpg.list_keys = mock.Mock( + return_value="3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85" + ) self._apt_key_list_success_helper( - TEST_KEY_FINGERPRINT_MACHINE, TEST_KEY_MACHINE, human_output=False + TEST_KEY_FINGERPRINT_MACHINE, + TEST_KEY_MACHINE, + m_gpg, + human_output=False, ) - @mock.patch.object(os, "listdir", return_value=()) - @mock.patch.object(subp, "subp", return_value=("", "")) - def test_apt_key_list_fail_no_keys(self, *args): + @mock.patch.object(cc_apt_configure.os, "listdir", return_value=()) + @mock.patch.object(cc_apt_configure.os.path, "isfile", return_value=False) + def test_apt_key_list_fail_no_keys(self, m_isfile, m_listdir, m_gpg): """Ensure falsy output for no keys""" - keys = cc_apt_configure.apt_key("list") + keys = cc_apt_configure.apt_key("list", m_gpg) assert not keys - @mock.patch.object(os, "listdir", return_value="file_not_gpg_key.txt") - @mock.patch.object(subp, "subp", return_value=("", "")) - def test_apt_key_list_fail_no_keys_file(self, *args): + @mock.patch.object(os, "listdir", return_value=["file_not_gpg_key.txt"]) + def test_apt_key_list_fail_no_keys_file(self, m_listdir, m_gpg, *args): """Ensure non-gpg file is not returned. 
apt-key used file extensions for this, so we do too """ - assert not cc_apt_configure.apt_key("list") + assert "file_not_gpg_key.txt" not in cc_apt_configure.apt_key( + "list", m_gpg + ) - @mock.patch.object(subp, "subp", side_effect=subp.ProcessExecutionError) - @mock.patch.object(os, "listdir", return_value="bad_gpg_key.gpg") - def test_apt_key_list_fail_bad_key_file(self, *args): + @mock.patch.object(cc_apt_configure.os, "listdir", return_value=()) + @mock.patch.object(cc_apt_configure.os.path, "isfile", return_value=False) + def test_apt_key_list_fail_bad_key_file(self, m_isfile, m_listdir, m_gpg): """Ensure bad gpg key doesn't throw exeption.""" - assert not cc_apt_configure.apt_key("list") + m_gpg.list_keys = mock.Mock(side_effect=subp.ProcessExecutionError) + assert not cc_apt_configure.apt_key("list", m_gpg) diff --git a/tests/unittests/config/test_apt_source_v1.py b/tests/unittests/config/test_apt_source_v1.py index aa00e2452..93d0cc083 100644 --- a/tests/unittests/config/test_apt_source_v1.py +++ b/tests/unittests/config/test_apt_source_v1.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -""" test_handler_apt_source_v1 +"""test_handler_apt_source_v1 Testing various config variations of the apt_source config This calls all things with v1 format to stress the conversion code on top of the actually tested code. @@ -8,15 +8,16 @@ import os import pathlib import re -import signal from functools import partial +from textwrap import dedent from unittest import mock from unittest.mock import call import pytest -from cloudinit import gpg, subp, util +from cloudinit import subp, util from cloudinit.config import cc_apt_configure +from cloudinit.subp import SubpResult from tests.unittests.util import get_cloud original_join = os.path.join @@ -41,7 +42,7 @@ class FakeDistro: """Fake Distro helper object""" - def update_package_sources(self): + def update_package_sources(self, *, force=False): """Fake update_package_sources helper method""" return @@ -76,7 +77,9 @@ def common_mocks(self, mocker): mocker.patch( "cloudinit.util.get_dpkg_architecture", return_value="amd64" ) - mocker.patch.object(subp, "subp", return_value=("PPID PID", "")) + mocker.patch.object( + subp, "subp", return_value=SubpResult("PPID PID", "") + ) mocker.patch("cloudinit.config.cc_apt_configure._ensure_dependencies") def _get_default_params(self): @@ -109,13 +112,15 @@ def myjoin(self, tmpfile, *args, **kwargs): else: return original_join(*args, **kwargs) - def apt_src_basic(self, filename, cfg): + def apt_src_basic(self, filename, cfg, gpg): """apt_src_basic Test Fix deb source string, has to overwrite mirror conf in params """ cfg = self.wrapv1conf(cfg) - cc_apt_configure.handle("test", cfg, get_cloud(), []) + with mock.patch.object(cc_apt_configure, "GPG") as my_gpg: + my_gpg.return_value = gpg + cc_apt_configure.handle("test", cfg, get_cloud(), []) assert os.path.isfile(filename) @@ -132,7 +137,7 @@ def apt_src_basic(self, filename, cfg): flags=re.IGNORECASE, ) - def test_apt_src_basic(self, apt_lists): + def test_apt_src_basic(self, apt_lists, m_gpg): """Test deb source string, overwrite mirror and filename""" cfg = { "source": ( @@ -142,9 +147,9 @@ def test_apt_src_basic(self, apt_lists): ), "filename": apt_lists[0], } - self.apt_src_basic(apt_lists[0], [cfg]) + self.apt_src_basic(apt_lists[0], [cfg], m_gpg) - def test_apt_src_basic_dict(self, apt_lists): + def test_apt_src_basic_dict(self, apt_lists, m_gpg): """Test deb source string, overwrite mirror and filename 
(dict)""" cfg = { apt_lists[0]: { @@ -155,15 +160,15 @@ def test_apt_src_basic_dict(self, apt_lists): ) } } - self.apt_src_basic(apt_lists[0], cfg) + self.apt_src_basic(apt_lists[0], cfg, m_gpg) - def apt_src_basic_tri(self, cfg, apt_lists): + def apt_src_basic_tri(self, cfg, apt_lists, m_gpg): """apt_src_basic_tri Test Fix three deb source string, has to overwrite mirror conf in params. Test with filenames provided in config. generic part to check three files with different content """ - self.apt_src_basic(apt_lists[0], cfg) + self.apt_src_basic(apt_lists[0], cfg, m_gpg) # extra verify on two extra files of this test contents = util.load_text_file(apt_lists[1]) @@ -191,7 +196,7 @@ def apt_src_basic_tri(self, cfg, apt_lists): flags=re.IGNORECASE, ) - def test_apt_src_basic_tri(self, apt_lists): + def test_apt_src_basic_tri(self, apt_lists, m_gpg): """Test Fix three deb source string with filenames""" cfg1 = { "source": ( @@ -217,9 +222,9 @@ def test_apt_src_basic_tri(self, apt_lists): ), "filename": apt_lists[2], } - self.apt_src_basic_tri([cfg1, cfg2, cfg3], apt_lists) + self.apt_src_basic_tri([cfg1, cfg2, cfg3], apt_lists, m_gpg) - def test_apt_src_basic_dict_tri(self, apt_lists): + def test_apt_src_basic_dict_tri(self, apt_lists, m_gpg): """Test Fix three deb source string with filenames (dict)""" cfg = { apt_lists[0]: { @@ -244,9 +249,9 @@ def test_apt_src_basic_dict_tri(self, apt_lists): ) }, } - self.apt_src_basic_tri(cfg, apt_lists) + self.apt_src_basic_tri(cfg, apt_lists, m_gpg) - def test_apt_src_basic_nofn(self, fallback_path, tmpdir): + def test_apt_src_basic_nofn(self, fallback_path, tmpdir, m_gpg): """Test Fix three deb source string without filenames (dict)""" cfg = { "source": ( @@ -258,7 +263,7 @@ def test_apt_src_basic_nofn(self, fallback_path, tmpdir): with mock.patch.object( os.path, "join", side_effect=partial(self.myjoin, tmpdir) ): - self.apt_src_basic(fallback_path, [cfg]) + self.apt_src_basic(fallback_path, [cfg], m_gpg) def apt_src_replacement(self, filename, cfg): """apt_src_replace @@ -347,14 +352,19 @@ def test_apt_src_replace_nofn(self, fallback_path, tmpdir): ): self.apt_src_replacement(fallback_path, [cfg]) - def apt_src_keyid(self, filename, cfg, keynum): + def apt_src_keyid(self, filename, cfg, keynum, gpg): """apt_src_keyid Test specification of a source + keyid """ cfg = self.wrapv1conf(cfg) cloud = get_cloud() - with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj: + with mock.patch.object( + cc_apt_configure, "GPG" + ) as this_gpg, mock.patch.object( + cc_apt_configure, "add_apt_key" + ) as mockobj: + this_gpg.return_value = gpg cc_apt_configure.handle("test", cfg, cloud, []) # check if it added the right number of keys @@ -362,7 +372,7 @@ def apt_src_keyid(self, filename, cfg, keynum): sources = cfg["apt"]["sources"] for src in sources: print(sources[src]) - calls.append(call(sources[src], cloud)) + calls.append(call(sources[src], cloud, gpg)) mockobj.assert_has_calls(calls, any_order=True) @@ -381,7 +391,7 @@ def apt_src_keyid(self, filename, cfg, keynum): flags=re.IGNORECASE, ) - def test_apt_src_keyid(self, apt_lists): + def test_apt_src_keyid(self, apt_lists, m_gpg): """Test specification of a source + keyid with filename being set""" cfg = { "source": ( @@ -393,9 +403,9 @@ def test_apt_src_keyid(self, apt_lists): "keyid": "03683F77", "filename": apt_lists[0], } - self.apt_src_keyid(apt_lists[0], [cfg], 1) + self.apt_src_keyid(apt_lists[0], [cfg], 1, m_gpg) - def test_apt_src_keyid_tri(self, apt_lists): + def 
test_apt_src_keyid_tri(self, apt_lists, m_gpg): """Test 3x specification of a source + keyid with filename being set""" cfg1 = { "source": ( @@ -428,7 +438,7 @@ def test_apt_src_keyid_tri(self, apt_lists): "filename": apt_lists[2], } - self.apt_src_keyid(apt_lists[0], [cfg1, cfg2, cfg3], 3) + self.apt_src_keyid(apt_lists[0], [cfg1, cfg2, cfg3], 3, m_gpg) contents = util.load_text_file(apt_lists[1]) assert re.search( r"%s %s %s %s\n" @@ -454,7 +464,7 @@ def test_apt_src_keyid_tri(self, apt_lists): flags=re.IGNORECASE, ) - def test_apt_src_keyid_nofn(self, fallback_path, tmpdir): + def test_apt_src_keyid_nofn(self, fallback_path, tmpdir, m_gpg): """Test specification of a source + keyid without filename being set""" cfg = { "source": ( @@ -468,24 +478,28 @@ def test_apt_src_keyid_nofn(self, fallback_path, tmpdir): with mock.patch.object( os.path, "join", side_effect=partial(self.myjoin, tmpdir) ): - self.apt_src_keyid(fallback_path, [cfg], 1) + self.apt_src_keyid(fallback_path, [cfg], 1, m_gpg) - def apt_src_key(self, filename, cfg): + def apt_src_key(self, filename, cfg, gpg): """apt_src_key Test specification of a source + key """ cfg = self.wrapv1conf([cfg]) cloud = get_cloud() - with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj: + with mock.patch.object( + cc_apt_configure, "GPG" + ) as this_gpg, mock.patch.object( + cc_apt_configure, "add_apt_key" + ) as mockobj: + this_gpg.return_value = gpg cc_apt_configure.handle("test", cfg, cloud, []) # check if it added the right amount of keys sources = cfg["apt"]["sources"] calls = [] for src in sources: - print(sources[src]) - calls.append(call(sources[src], cloud)) + calls.append(call(sources[src], cloud, gpg)) mockobj.assert_has_calls(calls, any_order=True) @@ -504,7 +518,7 @@ def apt_src_key(self, filename, cfg): flags=re.IGNORECASE, ) - def test_apt_src_key(self, apt_lists): + def test_apt_src_key(self, apt_lists, m_gpg): """Test specification of a source + key with filename being set""" cfg = { "source": ( @@ -516,9 +530,9 @@ def test_apt_src_key(self, apt_lists): "key": "fakekey 4321", "filename": apt_lists[0], } - self.apt_src_key(apt_lists[0], cfg) + self.apt_src_key(apt_lists[0], cfg, m_gpg) - def test_apt_src_key_nofn(self, fallback_path, tmpdir): + def test_apt_src_key_nofn(self, fallback_path, tmpdir, m_gpg): """Test specification of a source + key without filename being set""" cfg = { "source": ( @@ -532,18 +546,22 @@ def test_apt_src_key_nofn(self, fallback_path, tmpdir): with mock.patch.object( os.path, "join", side_effect=partial(self.myjoin, tmpdir) ): - self.apt_src_key(fallback_path, cfg) + self.apt_src_key(fallback_path, cfg, m_gpg) - def test_apt_src_keyonly(self, apt_lists): + def test_apt_src_keyonly(self, apt_lists, m_gpg): """Test specifying key without source""" cfg = {"key": "fakekey 4242", "filename": apt_lists[0]} cfg = self.wrapv1conf([cfg]) - with mock.patch.object(cc_apt_configure, "apt_key") as mockobj: + with mock.patch.object( + cc_apt_configure, "GPG" + ) as gpg, mock.patch.object(cc_apt_configure, "apt_key") as mockobj: + gpg.return_value = m_gpg cc_apt_configure.handle("test", cfg, get_cloud(), []) calls = ( call( "add", + m_gpg, output_file=pathlib.Path(apt_lists[0]).stem, data="fakekey 4242", hardened=False, @@ -554,88 +572,93 @@ def test_apt_src_keyonly(self, apt_lists): # filename should be ignored on key only assert not os.path.isfile(apt_lists[0]) - def test_apt_src_keyidonly(self, apt_lists): + def test_apt_src_keyidonly(self, apt_lists, m_gpg): """Test specification of a keyid 
without source""" cfg = {"keyid": "03683F77", "filename": apt_lists[0]} cfg = self.wrapv1conf([cfg]) - SAMPLE_GPG_AGENT_DIRMNGR_PIDS = """\ - PPID PID - 1 1057 - 1 1095 - 1511 2493 - 1511 2509 -""" + m_gpg.getkeybyid = mock.Mock(return_value="fakekey 1212") + SAMPLE_GPG_AGENT_DIRMNGR_PIDS = dedent( + """\ + PPID PID + 1 1057 + 1 1095 + 1511 2493 + 1511 2509 + """ + ) with mock.patch.object( subp, "subp", side_effect=[ - ("fakekey 1212", ""), - (SAMPLE_GPG_AGENT_DIRMNGR_PIDS, ""), + SubpResult("fakekey 1212", ""), + SubpResult(SAMPLE_GPG_AGENT_DIRMNGR_PIDS, ""), ], - ): - with mock.patch.object(cc_apt_configure, "apt_key") as mockobj: - with mock.patch.object(cc_apt_configure.os, "kill") as m_kill: - cc_apt_configure.handle("test", cfg, get_cloud(), []) + ), mock.patch.object( + cc_apt_configure, "GPG" + ) as gpg, mock.patch.object( + cc_apt_configure, "apt_key" + ) as mockobj: + gpg.return_value = m_gpg + cc_apt_configure.handle("test", cfg, get_cloud(), []) calls = ( call( "add", + m_gpg, output_file=pathlib.Path(apt_lists[0]).stem, data="fakekey 1212", hardened=False, ), ) mockobj.assert_has_calls(calls, any_order=True) - assert ( - [call(1057, signal.SIGKILL), call(1095, signal.SIGKILL)] - ) == m_kill.call_args_list # filename should be ignored on key only assert not os.path.isfile(apt_lists[0]) def apt_src_keyid_real( - self, apt_lists, cfg, expectedkey, is_hardened=None + self, apt_lists, cfg, expectedkey, gpg, is_hardened=None ): """apt_src_keyid_real Test specification of a keyid without source including up to addition of the key (add_apt_key_raw mocked to keep the environment as is) """ - key = cfg["keyid"] - keyserver = cfg.get("keyserver", "keyserver.ubuntu.com") cfg = self.wrapv1conf([cfg]) + gpg.getkeybyid = mock.Mock(return_value=expectedkey) with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockkey: - with mock.patch.object( - gpg, "getkeybyid", return_value=expectedkey - ) as mockgetkey: + with mock.patch.object(cc_apt_configure, "GPG") as my_gpg: + my_gpg.return_value = gpg cc_apt_configure.handle("test", cfg, get_cloud(), []) if is_hardened is not None: mockkey.assert_called_with( - expectedkey, apt_lists[0], hardened=is_hardened + expectedkey, apt_lists[0], gpg, hardened=is_hardened ) else: - mockkey.assert_called_with(expectedkey, apt_lists[0]) - mockgetkey.assert_called_with(key, keyserver) + mockkey.assert_called_with(expectedkey, apt_lists[0], gpg) # filename should be ignored on key only assert not os.path.isfile(apt_lists[0]) - def test_apt_src_keyid_real(self, apt_lists): + def test_apt_src_keyid_real(self, apt_lists, m_gpg): """test_apt_src_keyid_real - Test keyid including key add""" keyid = "03683F77" cfg = {"keyid": keyid, "filename": apt_lists[0]} - self.apt_src_keyid_real(apt_lists, cfg, EXPECTEDKEY, is_hardened=False) + self.apt_src_keyid_real( + apt_lists, cfg, EXPECTEDKEY, m_gpg, is_hardened=False + ) - def test_apt_src_longkeyid_real(self, apt_lists): + def test_apt_src_longkeyid_real(self, apt_lists, m_gpg): """test_apt_src_longkeyid_real - Test long keyid including key add""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = {"keyid": keyid, "filename": apt_lists[0]} - self.apt_src_keyid_real(apt_lists, cfg, EXPECTEDKEY, is_hardened=False) + self.apt_src_keyid_real( + apt_lists, cfg, EXPECTEDKEY, m_gpg, is_hardened=False + ) - def test_apt_src_longkeyid_ks_real(self, apt_lists): + def test_apt_src_longkeyid_ks_real(self, apt_lists, m_gpg): """test_apt_src_longkeyid_ks_real - Test long keyid from other ks""" keyid = "B59D 
5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = { @@ -644,20 +667,25 @@ def test_apt_src_longkeyid_ks_real(self, apt_lists): "filename": apt_lists[0], } - self.apt_src_keyid_real(apt_lists, cfg, EXPECTEDKEY, is_hardened=False) + self.apt_src_keyid_real( + apt_lists, cfg, EXPECTEDKEY, m_gpg, is_hardened=False + ) - def test_apt_src_ppa(self, apt_lists, mocker): + def test_apt_src_ppa(self, apt_lists, mocker, m_gpg): """Test adding a ppa""" m_subp = mocker.patch.object( - subp, "subp", return_value=("PPID PID", "") + subp, "subp", return_value=SubpResult("PPID PID", "") ) + mocker.patch("cloudinit.gpg.subp.which", return_value=False) cfg = { "source": "ppa:smoser/cloud-init-test", "filename": apt_lists[0], } cfg = self.wrapv1conf([cfg]) - cc_apt_configure.handle("test", cfg, get_cloud(), []) + with mock.patch.object(cc_apt_configure, "GPG") as my_gpg: + my_gpg.return_value = m_gpg + cc_apt_configure.handle("test", cfg, get_cloud(), []) assert m_subp.call_args_list == [ mock.call( [ @@ -666,26 +694,11 @@ def test_apt_src_ppa(self, apt_lists, mocker): "ppa:smoser/cloud-init-test", ], ), - mock.call( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ), ] # adding ppa should ignore filename (uses add-apt-repository) assert not os.path.isfile(apt_lists[0]) - def test_apt_src_ppa_tri(self, apt_lists): + def test_apt_src_ppa_tri(self, apt_lists, m_gpg): """Test adding three ppa's""" cfg1 = { "source": "ppa:smoser/cloud-init-test", @@ -702,9 +715,11 @@ def test_apt_src_ppa_tri(self, apt_lists): cfg = self.wrapv1conf([cfg1, cfg2, cfg3]) with mock.patch.object( - subp, "subp", return_value=("PPID PID", "") + subp, "subp", return_value=SubpResult("PPID PID", "") ) as mockobj: - cc_apt_configure.handle("test", cfg, get_cloud(), []) + with mock.patch.object(cc_apt_configure, "GPG") as my_gpg: + my_gpg.return_value = m_gpg + cc_apt_configure.handle("test", cfg, get_cloud(), []) calls = [ call( [ @@ -802,7 +817,7 @@ def test_convert_to_new_format_collision(self): with pytest.raises(ValueError, match=match): cc_apt_configure.convert_to_v3_apt_format(cfgconflict) - def test_convert_to_new_format_dict_collision(self, apt_lists): + def test_convert_to_new_format_dict_collision(self, apt_lists, m_gpg): cfg1 = { "source": "deb $MIRROR $RELEASE multiverse", "filename": apt_lists[0], diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py index 5971e2f4a..9bdfa9548 100644 --- a/tests/unittests/config/test_apt_source_v3.py +++ b/tests/unittests/config/test_apt_source_v3.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
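The apt-source hunks thread a `gpg` object through `add_apt_key`/`apt_key` instead of calling module-level `gpg` helpers, so the tests patch `cc_apt_configure.GPG` once and inject the `m_gpg` fixture. A hypothetical sketch of that dependency-injection pattern (the function and argument names below are illustrative, not cloud-init's API):

```python
# Sketch: pass the GPG collaborator in explicitly so tests can substitute
# a mock without patching module globals at every call site.
from unittest import mock


def fetch_key(keyid, gpg, keyserver="keyserver.ubuntu.com"):
    # hypothetical helper: delegates key retrieval to the injected object
    return gpg.getkeybyid(keyid, keyserver)


m_gpg = mock.Mock()
m_gpg.getkeybyid.return_value = "fakekey 4321"

assert fetch_key("03683F77", m_gpg) == "fakekey 4321"
m_gpg.getkeybyid.assert_called_once_with("03683F77", "keyserver.ubuntu.com")
```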
+# pylint: disable=attribute-defined-outside-init """test_handler_apt_source_v3 Testing various config variations of the apt_source custom config @@ -63,9 +64,9 @@ def setup(self, mocker, tmpdir): self.matcher = re.compile(ADD_APT_REPO_MATCH).search @staticmethod - def _add_apt_sources(*args, **kwargs): - with mock.patch.object(cc_apt_configure, "update_packages"): - cc_apt_configure.add_apt_sources(*args, **kwargs) + def _add_apt_sources(cfg, cloud, gpg, **kwargs): + # with mock.patch.object(cloud.distro, "update_package_sources"): + cc_apt_configure.add_apt_sources(cfg, cloud, gpg, **kwargs) @staticmethod def _get_default_params(): @@ -80,7 +81,7 @@ def _get_default_params(): ] return params - def _apt_src_basic(self, filename, cfg, tmpdir): + def _apt_src_basic(self, filename, cfg, tmpdir, gpg): """_apt_src_basic Test Fix deb source string, has to overwrite mirror conf in params """ @@ -88,7 +89,8 @@ def _apt_src_basic(self, filename, cfg, tmpdir): self._add_apt_sources( cfg, - cloud=None, + cloud=mock.Mock(), + gpg=gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -110,7 +112,7 @@ def _apt_src_basic(self, filename, cfg, tmpdir): flags=re.IGNORECASE, ), f"Unexpected APT config in {filename}: {contents}" - def test_apt_v3_src_basic(self, tmpdir): + def test_apt_v3_src_basic(self, tmpdir, m_gpg): """test_apt_v3_src_basic - Test fix deb source string""" cfg = { self.aptlistfile: { @@ -121,9 +123,9 @@ def test_apt_v3_src_basic(self, tmpdir): ) } } - self._apt_src_basic(self.aptlistfile, cfg, tmpdir) + self._apt_src_basic(self.aptlistfile, cfg, tmpdir, m_gpg) - def test_apt_v3_src_basic_tri(self, tmpdir): + def test_apt_v3_src_basic_tri(self, tmpdir, m_gpg): """test_apt_v3_src_basic_tri - Test multiple fix deb source strings""" cfg = { self.aptlistfile: { @@ -148,7 +150,7 @@ def test_apt_v3_src_basic_tri(self, tmpdir): ) }, } - self._apt_src_basic(self.aptlistfile, cfg, tmpdir) + self._apt_src_basic(self.aptlistfile, cfg, tmpdir, m_gpg) # extra verify on two extra files of this test contents = util.load_text_file(self.aptlistfile2) @@ -176,14 +178,15 @@ def test_apt_v3_src_basic_tri(self, tmpdir): flags=re.IGNORECASE, ), f"Unexpected APT format of {self.aptlistfile3}: contents" - def _apt_src_replacement(self, filename, cfg, tmpdir): + def _apt_src_replacement(self, filename, cfg, tmpdir, gpg): """apt_src_replace Test Autoreplacement of MIRROR and RELEASE in source specs """ params = self._get_default_params() self._add_apt_sources( cfg, - cloud=None, + cloud=mock.Mock(), + gpg=gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -198,12 +201,12 @@ def _apt_src_replacement(self, filename, cfg, tmpdir): flags=re.IGNORECASE, ) - def test_apt_v3_src_replace(self, tmpdir): + def test_apt_v3_src_replace(self, tmpdir, m_gpg): """test_apt_v3_src_replace - Test replacement of MIRROR & RELEASE""" cfg = {self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"}} - self._apt_src_replacement(self.aptlistfile, cfg, tmpdir) + self._apt_src_replacement(self.aptlistfile, cfg, tmpdir, m_gpg) - def test_apt_v3_src_replace_fn(self, tmpdir): + def test_apt_v3_src_replace_fn(self, tmpdir, m_gpg): """test_apt_v3_src_replace_fn - Test filename overwritten in dict""" cfg = { "ignored": { @@ -212,14 +215,14 @@ def test_apt_v3_src_replace_fn(self, tmpdir): } } # second file should overwrite the dict key - self._apt_src_replacement(self.aptlistfile, cfg, tmpdir) + self._apt_src_replacement(self.aptlistfile, cfg, tmpdir, m_gpg) - def _apt_src_replace_tri(self, cfg, tmpdir): + def 
_apt_src_replace_tri(self, cfg, tmpdir, gpg): """_apt_src_replace_tri Test three autoreplacements of MIRROR and RELEASE in source specs with generic part """ - self._apt_src_replacement(self.aptlistfile, cfg, tmpdir) + self._apt_src_replacement(self.aptlistfile, cfg, tmpdir, gpg) # extra verify on two extra files of this test params = self._get_default_params() @@ -238,7 +241,7 @@ def _apt_src_replace_tri(self, cfg, tmpdir): flags=re.IGNORECASE, ), f"Unexpected APT format {self.aptlistfile3}: {contents}" - def test_apt_v3_src_replace_tri(self, tmpdir): + def test_apt_v3_src_replace_tri(self, tmpdir, m_gpg): """test_apt_v3_src_replace_tri - Test multiple replace/overwrites""" cfg = { self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"}, @@ -248,18 +251,22 @@ def test_apt_v3_src_replace_tri(self, tmpdir): }, self.aptlistfile3: {"source": "deb $MIRROR $RELEASE universe"}, } - self._apt_src_replace_tri(cfg, tmpdir) + self._apt_src_replace_tri(cfg, tmpdir, m_gpg) - def _apt_src_keyid(self, filename, cfg, keynum, tmpdir, is_hardened=None): + def _apt_src_keyid( + self, filename, cfg, keynum, tmpdir, gpg, is_hardened=None + ): """_apt_src_keyid Test specification of a source + keyid """ params = self._get_default_params() + cloud = get_cloud() with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj: self._add_apt_sources( cfg, - cloud=None, + cloud=cloud, + gpg=gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -268,9 +275,9 @@ def _apt_src_keyid(self, filename, cfg, keynum, tmpdir, is_hardened=None): calls = [] for key in cfg: if is_hardened is not None: - calls.append(call(cfg[key], None, hardened=is_hardened)) + calls.append(call(cfg[key], cloud, gpg, hardened=is_hardened)) else: - calls.append(call(cfg[key], None)) + calls.append(call(cfg[key], cloud, gpg)) mockobj.assert_has_calls(calls, any_order=True) @@ -289,7 +296,7 @@ def _apt_src_keyid(self, filename, cfg, keynum, tmpdir, is_hardened=None): flags=re.IGNORECASE, ) - def test_apt_v3_src_keyid(self, tmpdir): + def test_apt_v3_src_keyid(self, tmpdir, m_gpg): """test_apt_v3_src_keyid - Test source + keyid with filename""" cfg = { self.aptlistfile: { @@ -303,9 +310,9 @@ def test_apt_v3_src_keyid(self, tmpdir): "keyid": "03683F77", } } - self._apt_src_keyid(self.aptlistfile, cfg, 1, tmpdir) + self._apt_src_keyid(self.aptlistfile, cfg, 1, tmpdir, m_gpg) - def test_apt_v3_src_keyid_tri(self, tmpdir): + def test_apt_v3_src_keyid_tri(self, tmpdir, m_gpg): """test_apt_v3_src_keyid_tri - Test multiple src+key+file writes""" cfg = { self.aptlistfile: { @@ -339,7 +346,7 @@ def test_apt_v3_src_keyid_tri(self, tmpdir): }, } - self._apt_src_keyid(self.aptlistfile, cfg, 3, tmpdir) + self._apt_src_keyid(self.aptlistfile, cfg, 3, tmpdir, m_gpg) contents = util.load_text_file(self.aptlistfile2) assert re.search( r"%s %s %s %s\n" @@ -365,7 +372,7 @@ def test_apt_v3_src_keyid_tri(self, tmpdir): flags=re.IGNORECASE, ) - def test_apt_v3_src_key(self, mocker): + def test_apt_v3_src_key(self, mocker, m_gpg): """test_apt_v3_src_key - Test source + key""" params = self._get_default_params() cfg = { @@ -383,7 +390,8 @@ def test_apt_v3_src_key(self, mocker): mockobj = mocker.patch.object(cc_apt_configure, "apt_key") self._add_apt_sources( cfg, - cloud=None, + cloud=mock.Mock(), + gpg=m_gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -391,6 +399,7 @@ def test_apt_v3_src_key(self, mocker): calls = ( call( "add", + m_gpg, output_file=pathlib.Path(self.aptlistfile).stem, data="fakekey 4321", hardened=False, @@ -410,15 
+419,17 @@ def test_apt_v3_src_key(self, mocker): flags=re.IGNORECASE, ) - def test_apt_v3_src_keyonly(self, mocker): + def test_apt_v3_src_keyonly(self, mocker, m_gpg): """test_apt_v3_src_keyonly - Test key without source""" + m_gpg.getkeybyid = mock.Mock(return_value="fakekey 4242") params = self._get_default_params() cfg = {self.aptlistfile: {"key": "fakekey 4242"}} mockobj = mocker.patch.object(cc_apt_configure, "apt_key") self._add_apt_sources( cfg, - cloud=None, + cloud=mock.Mock(), + gpg=m_gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -426,6 +437,7 @@ def test_apt_v3_src_keyonly(self, mocker): calls = ( call( "add", + m_gpg, output_file=pathlib.Path(self.aptlistfile).stem, data="fakekey 4242", hardened=False, @@ -436,8 +448,9 @@ def test_apt_v3_src_keyonly(self, mocker): # filename should be ignored on key only assert os.path.isfile(self.aptlistfile) is False - def test_apt_v3_src_keyidonly(self): + def test_apt_v3_src_keyidonly(self, m_gpg): """test_apt_v3_src_keyidonly - Test keyid without source""" + m_gpg.getkeybyid = mock.Mock(return_value="fakekey 1212") params = self._get_default_params() cfg = {self.aptlistfile: {"keyid": "03683F77"}} with mock.patch.object( @@ -446,7 +459,8 @@ def test_apt_v3_src_keyidonly(self): with mock.patch.object(cc_apt_configure, "apt_key") as mockobj: self._add_apt_sources( cfg, - cloud=None, + cloud=mock.Mock(), + gpg=m_gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -454,6 +468,7 @@ def test_apt_v3_src_keyidonly(self): calls = ( call( "add", + m_gpg, output_file=pathlib.Path(self.aptlistfile).stem, data="fakekey 1212", hardened=False, @@ -466,7 +481,7 @@ def test_apt_v3_src_keyidonly(self): os.path.isfile(self.aptlistfile) is False ), f"Unexpected file {self.aptlistfile} found" - def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): + def apt_src_keyid_real(self, cfg, expectedkey, gpg, is_hardened=None): """apt_src_keyid_real Test specification of a keyid without source including up to addition of the key (add_apt_key_raw mocked to keep the @@ -480,7 +495,8 @@ def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): ) as mockgetkey: self._add_apt_sources( cfg, - cloud=None, + cloud=mock.Mock(), + gpg=gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -493,27 +509,28 @@ def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): mockkey.assert_called_with( expectedkey, keycfg["keyfile"], + gpg, hardened=is_hardened, ) # filename should be ignored on key only assert os.path.isfile(self.aptlistfile) is False - def test_apt_v3_src_keyid_real(self): + def test_apt_v3_src_keyid_real(self, m_gpg): """test_apt_v3_src_keyid_real - Test keyid including key add""" keyid = "03683F77" cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}} - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, m_gpg, is_hardened=False) - def test_apt_v3_src_longkeyid_real(self): + def test_apt_v3_src_longkeyid_real(self, m_gpg): """test_apt_v3_src_longkeyid_real Test long keyid including key add""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}} - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, m_gpg, is_hardened=False) - def test_apt_v3_src_longkeyid_ks_real(self): + def test_apt_v3_src_longkeyid_ks_real(self, m_gpg): """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks""" keyid = "B59D 
5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = { @@ -524,9 +541,9 @@ def test_apt_v3_src_longkeyid_ks_real(self): } } - self.apt_src_keyid_real(cfg, EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, m_gpg) - def test_apt_v3_src_keyid_keyserver(self): + def test_apt_v3_src_keyid_keyserver(self, m_gpg): """test_apt_v3_src_keyid_keyserver - Test custom keyserver""" keyid = "03683F77" params = self._get_default_params() @@ -540,30 +557,27 @@ def test_apt_v3_src_keyid_keyserver(self): # in some test environments only *.ubuntu.com is reachable # so mock the call and check if the config got there - with mock.patch.object( - gpg, "getkeybyid", return_value="fakekey" - ) as mockgetkey: - with mock.patch.object( - cc_apt_configure, "add_apt_key_raw" - ) as mockadd: - self._add_apt_sources( - cfg, - cloud=None, - template_params=params, - aa_repo_match=self.matcher, - ) + with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockadd: + self._add_apt_sources( + cfg, + cloud=mock.Mock(), + gpg=m_gpg, + template_params=params, + aa_repo_match=self.matcher, + ) - mockgetkey.assert_called_with("03683F77", "test.random.com") + m_gpg.getkeybyid.assert_called_with("03683F77", "test.random.com") mockadd.assert_called_with( - "fakekey", + "", self.aptlistfile, + m_gpg, hardened=False, ) # filename should be ignored on key only assert os.path.isfile(self.aptlistfile) is False - def test_apt_v3_src_ppa(self): + def test_apt_v3_src_ppa(self, m_gpg): """test_apt_v3_src_ppa - Test specification of a ppa""" params = self._get_default_params() cfg = {self.aptlistfile: {"source": "ppa:smoser/cloud-init-test"}} @@ -571,7 +585,8 @@ def test_apt_v3_src_ppa(self): with mock.patch("cloudinit.subp.subp") as mockobj: self._add_apt_sources( cfg, - cloud=None, + cloud=mock.Mock(), + gpg=m_gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -588,7 +603,7 @@ def test_apt_v3_src_ppa(self): os.path.isfile(self.aptlistfile) is False ), f"Unexpected file found {self.aptlistfile}" - def test_apt_v3_src_ppa_tri(self): + def test_apt_v3_src_ppa_tri(self, m_gpg): """test_apt_v3_src_ppa_tri - Test specification of multiple ppa's""" params = self._get_default_params() cfg = { @@ -600,7 +615,8 @@ def test_apt_v3_src_ppa_tri(self): with mock.patch("cloudinit.subp.subp") as mockobj: self._add_apt_sources( cfg, - cloud=None, + cloud=mock.Mock(), + gpg=m_gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -1202,7 +1218,7 @@ def test_apt_v3_mirror_search_dns(self, m_get_hostname): assert mirrors["PRIMARY"] == pmir assert mirrors["SECURITY"] == smir - def test_apt_v3_add_mirror_keys(self, tmpdir): + def test_apt_v3_add_mirror_keys(self, tmpdir, m_gpg): """test_apt_v3_add_mirror_keys - Test adding key for mirrors""" arch = "amd64" cfg = { @@ -1225,10 +1241,10 @@ def test_apt_v3_add_mirror_keys(self, tmpdir): } with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockadd: - cc_apt_configure.add_mirror_keys(cfg, None) + cc_apt_configure.add_mirror_keys(cfg, None, gpg) calls = [ - mock.call("fakekey_primary", "primary", hardened=False), - mock.call("fakekey_security", "security", hardened=False), + mock.call("fakekey_primary", "primary", gpg, hardened=False), + mock.call("fakekey_security", "security", gpg, hardened=False), ] mockadd.assert_has_calls(calls, any_order=True) diff --git a/tests/unittests/config/test_cc_ansible.py b/tests/unittests/config/test_cc_ansible.py index 685dbd70f..271d9d037 100644 --- a/tests/unittests/config/test_cc_ansible.py +++ b/tests/unittests/config/test_cc_ansible.py 
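The test_cc_ansible hunks just below retarget every mock from M_PATH + "subp" to M_PATH + "subp.subp" (and "which" to "subp.which"), which matches cc_ansible now importing the subp module as a whole and calling subp.subp()/subp.which() as module attributes: mock.patch has to name the function through the module it is looked up on, not through the old from-import. A minimal, self-contained sketch of that lookup rule, using only the standard library (the module and helper names here are illustrative, not cloud-init code):

import subprocess
from unittest import mock


def run_date() -> str:
    # Stands in for production code that calls subp.subp(...) as a
    # module attribute rather than as a from-imported name.
    return subprocess.check_output(["date"]).decode()


# Patching "<module>.subprocess.check_output" replaces the function at the
# attribute the caller actually looks up at call time, so run_date() sees
# the mock instead of spawning a process.
with mock.patch(f"{__name__}.subprocess.check_output", return_value=b"fake"):
    assert run_date() == "fake"

The same reasoning applies to the "cloudinit.distros.subp.subp" target further down: any module that does `from cloudinit import subp` needs its call patched as an attribute of that shared subp module.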
@@ -287,8 +287,8 @@ def test_filter_args(self): ), ) def test_required_keys(self, cfg, exception, mocker): - mocker.patch(M_PATH + "subp", return_value=("", "")) - mocker.patch(M_PATH + "which", return_value=True) + mocker.patch(M_PATH + "subp.subp", return_value=("", "")) + mocker.patch(M_PATH + "subp.which", return_value=True) mocker.patch(M_PATH + "AnsiblePull.check_deps") mocker.patch( M_PATH + "AnsiblePull.get_version", @@ -319,28 +319,30 @@ def test_required_keys(self, cfg, exception, mocker): ["python3-pip"] ) - @mock.patch(M_PATH + "which", return_value=False) + @mock.patch(M_PATH + "subp.which", return_value=False) def test_deps_not_installed(self, m_which): """assert exception raised if package not installed""" with raises(ValueError): cc_ansible.AnsiblePullDistro(get_cloud().distro).check_deps() - @mock.patch(M_PATH + "which", return_value=True) + @mock.patch(M_PATH + "subp.which", return_value=True) def test_deps(self, m_which): """assert exception not raised if package installed""" cc_ansible.AnsiblePullDistro(get_cloud().distro).check_deps() - @mock.patch(M_PATH + "subp", return_value=("stdout", "stderr")) - @mock.patch(M_PATH + "which", return_value=False) + @mock.patch(M_PATH + "subp.subp", return_value=("stdout", "stderr")) + @mock.patch(M_PATH + "subp.which", return_value=False) def test_pip_bootstrap(self, m_which, m_subp): distro = get_cloud(mocked_distro=True).distro with mock.patch("builtins.__import__", side_effect=ImportError): cc_ansible.AnsiblePullPip(distro, "ansible").install("") distro.install_packages.assert_called_once() - @mock.patch(M_PATH + "which", return_value=True) - @mock.patch(M_PATH + "subp", return_value=("stdout", "stderr")) - @mock.patch("cloudinit.distros.subp", return_value=("stdout", "stderr")) + @mock.patch(M_PATH + "subp.which", return_value=True) + @mock.patch(M_PATH + "subp.subp", return_value=("stdout", "stderr")) + @mock.patch( + "cloudinit.distros.subp.subp", return_value=("stdout", "stderr") + ) @mark.parametrize( ("cfg", "expected"), ( @@ -406,7 +408,8 @@ def test_do_not_run(self, m_validate): assert not m_validate.called @mock.patch( - "cloudinit.config.cc_ansible.subp", side_effect=[(distro_version, "")] + "cloudinit.config.cc_ansible.subp.subp", + side_effect=[(distro_version, "")], ) def test_parse_version_distro(self, m_subp): """Verify that the expected version is returned""" @@ -424,8 +427,8 @@ def test_parse_version_pip(self, m_subp): expected = util.Version(2, 13, 2) assert received == expected - @mock.patch(M_PATH + "subp", return_value=("stdout", "stderr")) - @mock.patch(M_PATH + "which", return_value=True) + @mock.patch(M_PATH + "subp.subp", return_value=("stdout", "stderr")) + @mock.patch(M_PATH + "subp.which", return_value=True) def test_ansible_env_var(self, m_which, m_subp): cc_ansible.handle("", CFG_FULL_PULL, get_cloud(), []) diff --git a/tests/unittests/config/test_cc_apk_configure.py b/tests/unittests/config/test_cc_apk_configure.py index 47777b470..6d09c5738 100644 --- a/tests/unittests/config/test_cc_apk_configure.py +++ b/tests/unittests/config/test_cc_apk_configure.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-""" test_apk_configure +"""test_apk_configure Test creation of repositories file """ @@ -10,7 +10,7 @@ import pytest -from cloudinit import cloud, helpers, temp_utils, util +from cloudinit import cloud, helpers, util from cloudinit.config import cc_apk_configure from cloudinit.config.schema import ( SchemaValidationError, @@ -51,7 +51,7 @@ def test_no_config(self): class TestConfig(FilesystemMockingTestCase): def setUp(self): - super(TestConfig, self).setUp() + super().setUp() self.new_root = self.tmp_dir() self.new_root = self.reRoot(root=self.new_root) for dirname in ["tmp", "etc/apk"]: @@ -60,11 +60,14 @@ def setUp(self): self.name = "apk_configure" self.cloud = cloud.Cloud(None, self.paths, None, None, None) self.args = [] - temp_utils._TMPDIR = self.new_root + self.mock = mock.patch( + "cloudinit.temp_utils.get_tmp_ancestor", lambda *_: self.new_root + ) + self.mock.start() def tearDown(self): + self.mock.stop() super().tearDown() - temp_utils._TMPDIR = None @mock.patch(CC_APK + "._write_repositories_file") def test_no_repo_settings(self, m_write_repos): diff --git a/tests/unittests/config/test_cc_apt_configure.py b/tests/unittests/config/test_cc_apt_configure.py index a75acd3cd..7b4ce0128 100644 --- a/tests/unittests/config/test_cc_apt_configure.py +++ b/tests/unittests/config/test_cc_apt_configure.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -""" Tests for cc_apt_configure module """ +"""Tests for cc_apt_configure module""" import re from pathlib import Path @@ -298,6 +298,7 @@ class TestAptConfigure: ), ) @mock.patch(M_PATH + "get_apt_cfg") + @mock.patch.object(features, "APT_DEB822_SOURCE_LIST_FILE", True) def test_remove_source( self, m_get_apt_cfg, @@ -312,7 +313,6 @@ def test_remove_source( "sourceparts": f"{tmpdir}/etc/apt/sources.list.d/", } cloud = get_cloud(distro_name) - features.APT_DEB822_SOURCE_LIST_FILE = True sources_file = tmpdir.join("/etc/apt/sources.list") deb822_sources_file = tmpdir.join( f"/etc/apt/sources.list.d/{distro_name}.sources" diff --git a/tests/unittests/config/test_cc_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py index 46b2a6352..7013a95db 100644 --- a/tests/unittests/config/test_cc_ca_certs.py +++ b/tests/unittests/config/test_cc_ca_certs.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import re import shutil import tempfile @@ -379,6 +380,7 @@ def test_non_existent_cert_cfg(self): cc_ca_certs.disable_default_ca_certs(distro_name, conf) +@pytest.mark.usefixtures("clear_deprecation_log") class TestCACertsSchema: """Directly test schema rather than through handle.""" diff --git a/tests/unittests/config/test_cc_disk_setup.py b/tests/unittests/config/test_cc_disk_setup.py index 368706aae..734f5e431 100644 --- a/tests/unittests/config/test_cc_disk_setup.py +++ b/tests/unittests/config/test_cc_disk_setup.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import random import tempfile @@ -14,15 +15,13 @@ from tests.unittests.helpers import ( CiTestCase, ExitStack, - TestCase, mock, skipUnlessJsonSchema, ) -class TestIsDiskUsed(TestCase): - def setUp(self): - super(TestIsDiskUsed, self).setUp() +class TestIsDiskUsed: + def setup_method(self): self.patches = ExitStack() mod_name = "cloudinit.config.cc_disk_setup" self.enumerate_disk = self.patches.enter_context( @@ -32,14 +31,13 @@ def setUp(self): mock.patch("{0}.check_fs".format(mod_name)) ) - def tearDown(self): - super().tearDown() + def teardown_method(self): self.patches.close() def test_multiple_child_nodes_returns_true(self): self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2)) self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock()) - self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock())) + assert cc_disk_setup.is_disk_used(mock.MagicMock()) def test_valid_filesystem_returns_true(self): self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1)) @@ -48,44 +46,29 @@ def test_valid_filesystem_returns_true(self): "ext4", mock.MagicMock(), ) - self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock())) + assert cc_disk_setup.is_disk_used(mock.MagicMock()) def test_one_child_nodes_and_no_fs_returns_false(self): self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1)) self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock()) - self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock())) + assert not cc_disk_setup.is_disk_used(mock.MagicMock()) -class TestGetMbrHddSize(TestCase): - def setUp(self): - super(TestGetMbrHddSize, self).setUp() - self.patches = ExitStack() - self.subp = self.patches.enter_context( - mock.patch.object(cc_disk_setup.subp, "subp") - ) - - def tearDown(self): - super().tearDown() - self.patches.close() +class TestGetMbrHddSize: + def _test_for_sector_size(self, sector_size): + size_in_bytes = random.randint(10000, 10000000) * 512 + size_in_sectors = size_in_bytes / sector_size - def _configure_subp_mock(self, hdd_size_in_bytes, sector_size_in_bytes): def _subp(cmd, *args, **kwargs): - self.assertEqual(3, len(cmd)) + assert 3 == len(cmd) if "--getsize64" in cmd: - return hdd_size_in_bytes, None + return size_in_bytes, None elif "--getss" in cmd: - return sector_size_in_bytes, None + return sector_size, None raise RuntimeError("Unexpected blockdev command called") - self.subp.side_effect = _subp - - def _test_for_sector_size(self, sector_size): - size_in_bytes = random.randint(10000, 10000000) * 512 - size_in_sectors = size_in_bytes / sector_size - self._configure_subp_mock(size_in_bytes, sector_size) - self.assertEqual( - size_in_sectors, cc_disk_setup.get_hdd_size("/dev/sda1") - ) + with mock.patch.object(cc_disk_setup.subp, "subp", _subp): + assert size_in_sectors == cc_disk_setup.get_hdd_size("/dev/sda1") def test_size_for_512_byte_sectors(self): self._test_for_sector_size(512) @@ -100,36 +83,32 @@ def test_size_for_4096_byte_sectors(self): self._test_for_sector_size(4096) -class TestGetPartitionMbrLayout(TestCase): +class TestGetPartitionMbrLayout: def test_single_partition_using_boolean(self): - self.assertEqual( - ",,83", cc_disk_setup.get_partition_mbr_layout(1000, True) - ) + assert ",,83" == cc_disk_setup.get_partition_mbr_layout(1000, True) def test_single_partition_using_list(self): disk_size = random.randint(1000000, 1000000000000) - self.assertEqual( - ",,83", cc_disk_setup.get_partition_mbr_layout(disk_size, [100]) + 
assert ",,83" == cc_disk_setup.get_partition_mbr_layout( + disk_size, [100] ) def test_half_and_half(self): disk_size = random.randint(1000000, 1000000000000) expected_partition_size = int(float(disk_size) / 2) - self.assertEqual( - ",{0},83\n,,83".format(expected_partition_size), - cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50]), - ) + assert ",{0},83\n,,83".format( + expected_partition_size + ) == cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50]) def test_thirds_with_different_partition_type(self): disk_size = random.randint(1000000, 1000000000000) expected_partition_size = int(float(disk_size) * 0.33) - self.assertEqual( - ",{0},83\n,,82".format(expected_partition_size), - cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]]), - ) + assert ",{0},83\n,,82".format( + expected_partition_size + ) == cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]]) -class TestUpdateFsSetupDevices(TestCase): +class TestUpdateFsSetupDevices: def test_regression_1634678(self): # Cf. https://bugs.launchpad.net/cloud-init/+bug/1634678 fs_setup = { @@ -144,17 +123,14 @@ def test_regression_1634678(self): [fs_setup], lambda device: device ) - self.assertEqual( - { - "_origname": "/dev/xvdb1", - "partition": "auto", - "device": "/dev/xvdb1", - "overwrite": False, - "label": "test", - "filesystem": "ext4", - }, - fs_setup, - ) + assert { + "_origname": "/dev/xvdb1", + "partition": "auto", + "device": "/dev/xvdb1", + "overwrite": False, + "label": "test", + "filesystem": "ext4", + } == fs_setup def test_dotted_devname(self): fs_setup = { @@ -168,17 +144,14 @@ def test_dotted_devname(self): [fs_setup], lambda device: device ) - self.assertEqual( - { - "_origname": "ephemeral0.0", - "_partition": "auto", - "partition": "0", - "device": "ephemeral0", - "label": "test2", - "filesystem": "xfs", - }, - fs_setup, - ) + assert { + "_origname": "ephemeral0.0", + "_partition": "auto", + "partition": "0", + "device": "ephemeral0", + "label": "test2", + "filesystem": "xfs", + } == fs_setup def test_dotted_devname_populates_partition(self): fs_setup = { @@ -189,19 +162,16 @@ def test_dotted_devname_populates_partition(self): cc_disk_setup.update_fs_setup_devices( [fs_setup], lambda device: device ) - self.assertEqual( - { - "_origname": "ephemeral0.1", - "device": "ephemeral0", - "partition": "1", - "label": "test2", - "filesystem": "xfs", - }, - fs_setup, - ) + assert { + "_origname": "ephemeral0.1", + "device": "ephemeral0", + "partition": "1", + "label": "test2", + "filesystem": "xfs", + } == fs_setup -class TestPurgeDisk(TestCase): +class TestPurgeDisk: @mock.patch( "cloudinit.config.cc_disk_setup.read_parttbl", return_value=None ) @@ -215,7 +185,7 @@ def test_purge_disk_ptable(self, *args): expected = b"\0" * (1024 * 1024) - self.assertEqual(expected, actual) + assert expected == actual @mock.patch( @@ -245,17 +215,15 @@ def test_with_cmd(self, subp, *args): } ) - self.assertIn( + assert ( "extra_opts " "ignored because cmd was specified: mkfs -t ext4 -L with_cmd " - "/dev/xdb1", - self.logs.getvalue(), + "/dev/xdb1" in self.logs.getvalue() ) - self.assertIn( + assert ( "overwrite " "ignored because cmd was specified: mkfs -t ext4 -L with_cmd " - "/dev/xdb1", - self.logs.getvalue(), + "/dev/xdb1" in self.logs.getvalue() ) subp.assert_called_once_with( @@ -304,10 +272,10 @@ def test_mkswap(self, m_which, subp, *args): } ) - self.assertEqual( - [mock.call("mkfs.swap"), mock.call("mkswap")], - m_which.call_args_list, - ) + assert [ + mock.call("mkfs.swap"), + 
mock.call("mkswap"), + ] == m_which.call_args_list subp.assert_called_once_with( ["/sbin/mkswap", "-L", "swap", "-f", "/dev/xdb1"], shell=False ) diff --git a/tests/unittests/config/test_cc_final_message.py b/tests/unittests/config/test_cc_final_message.py index 191915d3b..e612e3dad 100644 --- a/tests/unittests/config/test_cc_final_message.py +++ b/tests/unittests/config/test_cc_final_message.py @@ -1,9 +1,11 @@ # This file is part of cloud-init. See LICENSE file for license information. -from unittest import mock +from logging import DEBUG, WARNING +from pathlib import Path import pytest from cloudinit.config.cc_final_message import handle +from tests.unittests.util import get_cloud class TestHandle: @@ -24,17 +26,15 @@ def test_boot_finished_written( file_is_written, expected_log_substring, caplog, + paths, tmpdir, ): - instance_dir = tmpdir.join("var/lib/cloud/instance") + instance_dir = Path(paths.get_ipath_cur()) if instance_dir_exists: - instance_dir.ensure_dir() - boot_finished = instance_dir.join("boot-finished") - - m_cloud = mock.Mock( - paths=mock.Mock(boot_finished=boot_finished.strpath) - ) + instance_dir.mkdir() + boot_finished = instance_dir / "boot-finished" + m_cloud = get_cloud(paths=paths) handle(None, {}, m_cloud, []) # We should not change the status of the instance directory @@ -43,3 +43,36 @@ def test_boot_finished_written( if expected_log_substring: assert expected_log_substring in caplog.text + + @pytest.mark.parametrize( + "dsname,datasource_list,expected_log,log_level", + [ + ("None", ["None"], "Used fallback datasource", DEBUG), + ("None", ["LXD", "None"], "Used fallback datasource", WARNING), + ("LXD", ["LXD", "None"], None, DEBUG), + ], + ) + def test_only_warn_when_datasourcenone_is_fallback_in_datasource_list( + self, + dsname, + datasource_list, + expected_log, + log_level, + caplog, + paths, + ): + """Only warn when None is a fallback in multi-item datasource_list. + + It is not a warning when datasource_list: [ None ] is configured. + """ + m_cloud = get_cloud(paths=paths) + m_cloud.datasource.dsname = dsname + Path(paths.get_ipath_cur()).mkdir() + with caplog.at_level(log_level): + handle(None, {}, m_cloud, []) + + # We should not change the status of the instance directory + if expected_log: + assert expected_log in caplog.text + else: + assert "Used fallback datasource" not in caplog.text diff --git a/tests/unittests/config/test_cc_growpart.py b/tests/unittests/config/test_cc_growpart.py index 3913542d0..2be728b15 100644 --- a/tests/unittests/config/test_cc_growpart.py +++ b/tests/unittests/config/test_cc_growpart.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import errno import logging @@ -13,19 +14,21 @@ import pytest -from cloudinit import cloud, subp, temp_utils +from cloudinit import cloud, distros, subp, temp_utils from cloudinit.config import cc_growpart from cloudinit.config.schema import ( SchemaValidationError, get_schema, validate_cloudconfig_schema, ) +from cloudinit.distros.bsd import BSD from cloudinit.subp import SubpResult from tests.unittests.helpers import ( TestCase, does_not_raise, skipUnlessJsonSchema, ) +from tests.unittests.util import MockDistro # growpart: # mode: auto # off, on, auto, 'growpart' @@ -212,7 +215,7 @@ def test_force_lang_check_tempfile(self, *args, **kwargs): diskdev = "/dev/sdb" partnum = 1 partdev = "/dev/sdb" - ret.resize(diskdev, partnum, partdev) + ret.resize(diskdev, partnum, partdev, None) mockobj.assert_has_calls( [ mock.call( @@ -316,15 +319,14 @@ def test_handle_with_no_growpart_entry(self): factory.assert_called_once_with( "auto", distro=self.distro, devices=["/"] ) - rsdevs.assert_called_once_with(myresizer, ["/"], self.distro.name) + rsdevs.assert_called_once_with(myresizer, ["/"], self.distro) class TestResize(unittest.TestCase): def setUp(self): - super(TestResize, self).setUp() + super().setUp() self.name = "growpart" - self.distro = mock.Mock() - self.distro.name = "ubuntu" + self.distro = MockDistro() self.log = logging.getLogger("TestResize") def test_simple_devices(self): @@ -349,8 +351,8 @@ def test_simple_devices(self): resize_calls = [] class myresizer: - def resize(self, diskdev, partnum, partdev): - resize_calls.append((diskdev, partnum, partdev)) + def resize(self, diskdev, partnum, partdev, fs): + resize_calls.append((diskdev, partnum, partdev, fs)) if partdev == "/dev/YYda2": return (1024, 2048) return (1024, 1024) # old size, new size @@ -364,13 +366,13 @@ def mystat(path): raise e return real_stat(path) - opinfo = cc_growpart.device_part_info + opinfo = self.distro.device_part_info try: - cc_growpart.device_part_info = simple_device_part_info + self.distro.device_part_info = simple_device_part_info os.stat = mystat resized = cc_growpart.resize_devices( - myresizer(), devs + enoent, self.distro.name + myresizer(), devs + enoent, self.distro ) def find(name, res): @@ -388,15 +390,81 @@ def find(name, res): self.assertEqual( cc_growpart.RESIZE.SKIPPED, find(enoent[0], resized)[1] ) - # self.assertEqual(resize_calls, - # [("/dev/XXda", "1", "/dev/XXda1"), - # ("/dev/YYda", "2", "/dev/YYda2")]) finally: - cc_growpart.device_part_info = opinfo + self.distro.device_part_info = opinfo os.stat = real_stat +class TestResizeZFS: + def _devent2dev_side_effect(self, value): + if value.startswith("zroot"): + return value, "zfs" + raise RuntimeError(f"unexpected value {value}") + + def _subp_side_effect(self, value, **kwargs): + if value[0] == "growpart": + raise subp.ProcessExecutionError() + elif value[0] == "zpool": + return ("1024\n", "") + raise subp.ProcessExecutionError() + + @pytest.fixture + def common_mocks(self, mocker): + # These are all "happy path" mocks which will get overridden + # when needed + mocker.patch( + "cloudinit.config.cc_growpart.devent2dev", + side_effect=self._devent2dev_side_effect, + ) + mocker.patch("cloudinit.util.is_container", return_value=False) + # Find /etc/rc.d/growfs + mocker.patch("os.path.isfile", return_value=True) + mocker.patch( + "cloudinit.config.cc_growpart.subp.subp", + side_effect=self._subp_side_effect, + ) + cls = distros.fetch("freebsd") + # patch ifconfig -a + mocker.patch( + 
"cloudinit.distros.networking.subp.subp", return_value=("", None) + ) + self.distro = cls("freebsd", {}, None) + + @pytest.mark.parametrize( + "dev, expected", + [ + ("zroot/ROOT/changed", cc_growpart.RESIZE.CHANGED), + ("zroot/ROOT/nochange", cc_growpart.RESIZE.NOCHANGE), + ], + ) + def test_zroot(self, dev, expected, common_mocks): + resize_calls = [] + + class MyResizer(cc_growpart.ResizeGrowFS): + def resize(self, diskdev, partnum, partdev, fs): + resize_calls.append((diskdev, partnum, partdev, fs)) + if partdev == "zroot/ROOT/changed": + return (1024, 2048) + return (1024, 1024) # old size, new size + + def get_status_from_device(device_name, resize_results): + for result in resize_results: + if result[0] == device_name: + return result[1] + raise ValueError( + f"Device {device_name} not found in {resize_results}" + ) + + resized = cc_growpart.resize_devices( + resizer=MyResizer(distro=self.distro), + devices=[dev], + distro=self.distro, + ) + assert expected == get_status_from_device(dev, resized) + + class TestGetSize: + # TODO: add tests for get_zfs_size() @pytest.mark.parametrize( "file_exists, expected", ( @@ -409,7 +477,7 @@ def test_get_size_behaves(self, file_exists, expected, tmp_path): tmp_file = tmp_path / "tmp.txt" if file_exists: tmp_file.write_bytes(b"0") - assert expected == cc_growpart.get_size(tmp_file) + assert expected == cc_growpart.get_size(tmp_file, None) class TestEncrypted: @@ -438,11 +506,13 @@ def _device_part_info_side_effect(self, value): def _devent2dev_side_effect(self, value): if value == "/fake_encrypted": - return "/dev/mapper/fake" + return "/dev/mapper/fake", "ext3" elif value == "/": - return "/dev/vdz" + return "/dev/vdz", "ext4" + elif value.startswith("zroot"): + return value, "zfs" elif value.startswith("/dev"): - return value + return value, None raise RuntimeError(f"unexpected value {value}") def _realpath_side_effect(self, value): @@ -468,10 +538,9 @@ def assert_no_resize_or_cleanup(self): def common_mocks(self, mocker): # These are all "happy path" mocks which will get overridden # when needed - mocker.patch( - "cloudinit.config.cc_growpart.device_part_info", - side_effect=self._device_part_info_side_effect, - ) + self.distro = MockDistro + original_device_part_info = self.distro.device_part_info + self.distro.device_part_info = self._device_part_info_side_effect mocker.patch("os.stat") mocker.patch("stat.S_ISBLK") mocker.patch("stat.S_ISCHR") @@ -502,15 +571,14 @@ def common_mocks(self, mocker): mocker.patch("pathlib.Path.exists", return_value=True) self.m_unlink = mocker.patch("pathlib.Path.unlink", autospec=True) - self.distro = mock.Mock() - self.distro.name = "ubuntu" - self.resizer = mock.Mock() self.resizer.resize = mock.Mock(return_value=(1024, 1024)) + yield + self.distro.device_part_info = original_device_part_info def test_resize_when_encrypted(self, common_mocks, caplog): info = cc_growpart.resize_devices( - self.resizer, ["/fake_encrypted"], self.distro.name + self.resizer, ["/fake_encrypted"], self.distro ) assert len(info) == 2 assert info[0][0] == "/dev/vdx1" @@ -529,9 +597,7 @@ def test_resize_when_encrypted(self, common_mocks, caplog): self.assert_resize_and_cleanup() def test_resize_when_unencrypted(self, common_mocks): - info = cc_growpart.resize_devices( - self.resizer, ["/"], self.distro.name - ) + info = cc_growpart.resize_devices(self.resizer, ["/"], self.distro) assert len(info) == 1 assert info[0][0] == "/" assert "encrypted" not in info[0][2] @@ -545,7 +611,7 @@ def test_encrypted_but_cryptsetup_not_found( 
return_value=None, ) info = cc_growpart.resize_devices( - self.resizer, ["/fake_encrypted"], self.distro.name + self.resizer, ["/fake_encrypted"], self.distro ) assert len(info) == 1 @@ -563,7 +629,7 @@ def _subp_side_effect(value, **kwargs): side_effect=_subp_side_effect, ) info = cc_growpart.resize_devices( - self.resizer, ["/fake_encrypted"], self.distro.name + self.resizer, ["/fake_encrypted"], self.distro ) assert len(info) == 1 assert info[0][0] == "/fake_encrypted" @@ -584,7 +650,7 @@ def _subp_side_effect(value, **kwargs): side_effect=_subp_side_effect, ) info = cc_growpart.resize_devices( - self.resizer, ["/fake_encrypted"], self.distro.name + self.resizer, ["/fake_encrypted"], self.distro ) assert len(info) == 1 assert info[0][0] == "/fake_encrypted" @@ -599,7 +665,7 @@ def test_missing_keydata(self, common_mocks, mocker, caplog): # on a system with an encrypted root partition mocker.patch("pathlib.Path.open", side_effect=FileNotFoundError()) info = cc_growpart.resize_devices( - self.resizer, ["/fake_encrypted"], self.distro.name + self.resizer, ["/fake_encrypted"], self.distro ) assert len(info) == 2 assert info[0][0] == "/dev/vdx1" @@ -628,7 +694,7 @@ def _subp_side_effect(value, **kwargs): ) info = cc_growpart.resize_devices( - self.resizer, ["/fake_encrypted"], self.distro.name + self.resizer, ["/fake_encrypted"], self.distro ) assert len(info) == 2 assert info[0][0] == "/dev/vdx1" @@ -648,7 +714,7 @@ def _subp_side_effect(value, **kwargs): def test_resize_skipped(self, common_mocks, mocker, caplog): mocker.patch("pathlib.Path.exists", return_value=False) info = cc_growpart.resize_devices( - self.resizer, ["/fake_encrypted"], self.distro.name + self.resizer, ["/fake_encrypted"], self.distro ) assert len(info) == 2 assert info[1] == ( @@ -672,31 +738,31 @@ def __init__(self, **kwds): class TestDevicePartInfo: @pytest.mark.parametrize( - "devpath, is_BSD, expected, raised_exception", + "devpath, expected, raised_exception", ( pytest.param( "/dev/vtbd0p2", - True, ("/dev/vtbd0", "2"), does_not_raise(), id="gpt_partition", ), pytest.param( "/dev/vbd0s3a", - True, ("/dev/vbd0", "3a"), does_not_raise(), id="bsd_mbr_slice_and_partition", ), + pytest.param( + "zroot/ROOT/default", + (), + pytest.raises(ValueError), + id="zfs_dataset", + ), ), ) - @mock.patch("cloudinit.util.is_BSD") - def test_device_part_info( - self, m_is_BSD, is_BSD, devpath, expected, raised_exception - ): - m_is_BSD.return_value = is_BSD + def test_device_part_info(self, devpath, expected, raised_exception): with raised_exception: - assert expected == cc_growpart.device_part_info(devpath) + assert expected == BSD.device_part_info(devpath) class TestGrowpartSchema: @@ -712,7 +778,8 @@ class TestGrowpartSchema: "Cloud config schema deprecations: " "growpart.mode: Changed in version 22.3. " "Specifying a boolean ``false`` value for " - "``mode`` is deprecated. Use ``off`` instead." + "``mode`` is deprecated. Use the string ``'off'`` " + "instead." ), ), ), diff --git a/tests/unittests/config/test_cc_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py index b4bd48df1..36ef7fd98 100644 --- a/tests/unittests/config/test_cc_grub_dpkg.py +++ b/tests/unittests/config/test_cc_grub_dpkg.py @@ -300,8 +300,8 @@ class TestGrubDpkgSchema: pytest.raises( SchemaValidationError, match=( - "Cloud config schema deprecations: grub-dpkg: An alias" - " for ``grub_dpkg`` Deprecated in version 22.2. Use " + "Cloud config schema deprecations: grub-dpkg:" + " Deprecated in version 22.2. Use " "``grub_dpkg`` instead."
), ), diff --git a/tests/unittests/config/test_cc_landscape.py b/tests/unittests/config/test_cc_landscape.py index 03a31e023..7ddbe77f3 100644 --- a/tests/unittests/config/test_cc_landscape.py +++ b/tests/unittests/config/test_cc_landscape.py @@ -3,6 +3,7 @@ import pytest +from cloudinit import subp from cloudinit.config import cc_landscape from cloudinit.config.schema import ( SchemaValidationError, @@ -14,8 +15,10 @@ LOG = logging.getLogger(__name__) +MPATH = "cloudinit.config.cc_landscape" -@mock.patch("cloudinit.config.cc_landscape.subp.subp") + +@mock.patch(f"{MPATH}.subp.subp") class TestLandscape: def test_skip_empty_landscape_cloudconfig(self, m_subp): """Empty landscape cloud-config section does no work.""" @@ -59,6 +62,9 @@ def test_handler_restarts_landscape_client(self, m_subp, tmpdir): ["landscape-client"] ) assert [ + mock.call( + ["landscape-config", "--silent", "--is-registered"], rcs=[5] + ), mock.call( [ "landscape-config", @@ -91,6 +97,9 @@ def test_handler_installs_client_from_ppa_and_supports_overrides( } } expected_calls = [ + mock.call( + ["landscape-config", "--silent", "--is-registered"], rcs=[5] + ), mock.call( [ "landscape-config", @@ -122,7 +131,6 @@ def test_handler_installs_client_from_ppa_and_supports_overrides( ["landscape-client"] ) assert expected_calls == m_subp.call_args_list - assert "RUN=1\n" == default_fn.read() def test_handler_writes_merged_client_config_file_with_defaults( self, m_subp, tmpdir @@ -136,6 +144,9 @@ def test_handler_writes_merged_client_config_file_with_defaults( mycloud.distro = mock.MagicMock() cfg = {"landscape": {"client": {}}} expected_calls = [ + mock.call( + ["landscape-config", "--silent", "--is-registered"], rcs=[5] + ), mock.call( [ "landscape-config", @@ -179,6 +190,9 @@ def test_handler_writes_merged_provided_cloudconfig_with_defaults( mycloud.distro = mock.MagicMock() cfg = {"landscape": {"client": {"computer_title": 'My" PC'}}} expected_calls = [ + mock.call( + ["landscape-config", "--silent", "--is-registered"], rcs=[5] + ), mock.call( [ "landscape-config", @@ -210,6 +224,37 @@ def test_handler_writes_merged_provided_cloudconfig_with_defaults( ) assert expected_calls == m_subp.call_args_list + @mock.patch(f"{MPATH}.merge_together") + def test_handler_client_failed_registering(self, m_merge_together, m_subp): + """landscape-client could not be registered""" + mycloud = get_cloud("ubuntu") + mycloud.distro = mock.MagicMock() + cfg = {"landscape": {"client": {"computer_title": 'My" PC'}}} + m_subp.side_effect = subp.ProcessExecutionError( + "Could not register client" + ) + match = ( + "Failure registering client:\nUnexpected error while" + " running command.\nCommand: -\nExit code: -\nReason: -\n" + "Stdout: Could not register client\nStderr: -" + ) + with pytest.raises(RuntimeError, match=match): + cc_landscape.handle("notimportant", cfg, mycloud, None) + + @mock.patch(f"{MPATH}.merge_together") + def test_handler_client_is_already_registered( + self, m_merge_together, m_subp, caplog + ): + """landscape-client is already registered""" + mycloud = get_cloud("ubuntu") + mycloud.distro = mock.MagicMock() + cfg = {"landscape": {"client": {"computer_title": 'My" PC'}}} + m_subp.side_effect = subp.ProcessExecutionError( + "Client already registered to Landscape", exit_code=0 + ) + cc_landscape.handle("notimportant", cfg, mycloud, None) + assert "Client already registered to Landscape" in caplog.text + class TestLandscapeSchema: @pytest.mark.parametrize( diff --git a/tests/unittests/config/test_cc_ntp.py 
b/tests/unittests/config/test_cc_ntp.py index 74ccf2de4..ead6f5213 100644 --- a/tests/unittests/config/test_cc_ntp.py +++ b/tests/unittests/config/test_cc_ntp.py @@ -138,16 +138,14 @@ def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self): servers = [] pools = ["10.0.0.1", "10.0.0.2"] (confpath, template_fn) = self._generate_template() - mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR" - with mock.patch(mock_path, self.new_root): - cc_ntp.write_ntp_config_template( - "ubuntu", - servers=servers, - pools=pools, - path=confpath, - template_fn=template_fn, - template=None, - ) + cc_ntp.write_ntp_config_template( + "ubuntu", + servers=servers, + pools=pools, + path=confpath, + template_fn=template_fn, + template=None, + ) self.assertEqual( "servers []\npools ['10.0.0.1', '10.0.0.2']\n", util.load_text_file(confpath), @@ -163,16 +161,14 @@ def test_write_ntp_config_template_defaults_pools_w_empty_lists(self): pools = cc_ntp.generate_server_names(distro) servers = [] (confpath, template_fn) = self._generate_template() - mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR" - with mock.patch(mock_path, self.new_root): - cc_ntp.write_ntp_config_template( - distro, - servers=servers, - pools=pools, - path=confpath, - template_fn=template_fn, - template=None, - ) + cc_ntp.write_ntp_config_template( + distro, + servers=servers, + pools=pools, + path=confpath, + template_fn=template_fn, + template=None, + ) self.assertEqual( "servers []\npools {0}\n".format(pools), util.load_text_file(confpath), @@ -249,6 +245,7 @@ def test_distro_ntp_client_configs(self): ) def _get_expected_pools(self, pools, distro, client): + expected_pools = None if client in ["ntp", "chrony"]: if client == "ntp" and distro == "alpine": # NTP for Alpine Linux is Busybox's ntp which does not @@ -264,6 +261,7 @@ def _get_expected_pools(self, pools, distro, client): return expected_pools def _get_expected_servers(self, servers, distro, client): + expected_servers = None if client in ["ntp", "chrony"]: if client == "ntp" and distro == "alpine": # NTP for Alpine Linux is Busybox's ntp which only supports @@ -670,8 +668,8 @@ def test_ntp_user_provided_config_with_template(self, m_install): } for distro in cc_ntp.distros: mycloud = self._get_cloud(distro) - mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR" - with mock.patch(mock_path, self.new_root): + mock_path = "cloudinit.config.cc_ntp.temp_utils.get_tmp_ancestor" + with mock.patch(mock_path, lambda *_: self.new_root): cc_ntp.handle("notimportant", cfg, mycloud, None) self.assertEqual( "servers []\npools ['mypool.org']\n%s" % custom, @@ -710,8 +708,8 @@ def test_ntp_user_provided_config_template_only( ) confpath = ntpconfig["confpath"] m_select.return_value = ntpconfig - mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR" - with mock.patch(mock_path, self.new_root): + mock_path = "cloudinit.config.cc_ntp.temp_utils.get_tmp_ancestor" + with mock.patch(mock_path, lambda *_: self.new_root): cc_ntp.handle("notimportant", {"ntp": cfg}, mycloud, None) self.assertEqual( "servers []\npools ['mypool.org']\n%s" % custom, diff --git a/tests/unittests/config/test_cc_package_update_upgrade_install.py b/tests/unittests/config/test_cc_package_update_upgrade_install.py index 08db05a03..ad3651ad7 100644 --- a/tests/unittests/config/test_cc_package_update_upgrade_install.py +++ b/tests/unittests/config/test_cc_package_update_upgrade_install.py @@ -300,16 +300,16 @@ class TestPackageUpdateUpgradeSchema: ( {"apt_update": False}, ( - "Cloud config schema 
deprecations: apt_update: " - "Default: ``false``. Deprecated in version 22.2. " + "Cloud config schema deprecations: apt_update: " + "Deprecated in version 22.2. " "Use ``package_update`` instead." ), ), ( {"apt_upgrade": False}, ( - "Cloud config schema deprecations: apt_upgrade: " - "Default: ``false``. Deprecated in version 22.2. " + "Cloud config schema deprecations: apt_upgrade: " + "Deprecated in version 22.2. " "Use ``package_upgrade`` instead." ), ), @@ -317,8 +317,7 @@ class TestPackageUpdateUpgradeSchema: {"apt_reboot_if_required": False}, ( "Cloud config schema deprecations: " - "apt_reboot_if_required: Default: ``false``. " - "Deprecated in version 22.2. Use " + "apt_reboot_if_required: Deprecated in version 22.2. Use " "``package_reboot_if_required`` instead." ), ), diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py index f6443af90..36daf9f89 100644 --- a/tests/unittests/config/test_cc_resizefs.py +++ b/tests/unittests/config/test_cc_resizefs.py @@ -113,10 +113,10 @@ def test_handle_warns_on_unknown_mount_info(self, m_log, m_get_mount_info): ) @mock.patch("cloudinit.config.cc_resizefs.LOG") - def test_handle_warns_on_undiscoverable_root_path_in_commandline( + def test_handle_warns_on_undiscoverable_root_path_in_command_line( self, m_log ): - """handle noops when the root path is not found on the commandline.""" + """handle noops when the root path is not found on the command line.""" cfg = {"resize_rootfs": True} exists_mock_path = "cloudinit.config.cc_resizefs.os.path.exists" @@ -421,8 +421,8 @@ def test_maybe_get_writable_device_path_non_block_on_container(self): self.logs.getvalue(), ) - def test_maybe_get_writable_device_path_returns_cmdline_root(self): - """When root device is UUID in kernel commandline, update devpath.""" + def test_maybe_get_writable_device_path_returns_command_line_root(self): + """When root device is UUID in kernel command_line, update devpath.""" # XXX Long-term we want to use FilesystemMocking test to avoid # touching os.stat. FakeStat = namedtuple( diff --git a/tests/unittests/config/test_cc_rh_subscription.py b/tests/unittests/config/test_cc_rh_subscription.py index 955b092ba..d811d16a5 100644 --- a/tests/unittests/config/test_cc_rh_subscription.py +++ b/tests/unittests/config/test_cc_rh_subscription.py @@ -184,7 +184,7 @@ class TestBadInput(CiTestCase): "rh_subscription": { "activation-key": "abcdef1234", "fookey": "bar", - "org": "123", + "org": "ABC", } } @@ -330,6 +330,20 @@ class TestRhSubscriptionSchema: {"rh_subscription": {"disable-repo": "name"}}, "'name' is not of type 'array'", ), + ( + { + "rh_subscription": { + "activation-key": "foobar", + "org": "ABC", + } + }, + None, + ), + ( + {"rh_subscription": {"activation-key": "foobar", "org": 314}}, + "Deprecated in version 24.2. Use of type integer for this" + " value is deprecated. Use a string instead.", + ), ], ) @skipUnlessJsonSchema() diff --git a/tests/unittests/config/test_cc_seed_random.py b/tests/unittests/config/test_cc_seed_random.py index c49bd45e9..76b9b796a 100644 --- a/tests/unittests/config/test_cc_seed_random.py +++ b/tests/unittests/config/test_cc_seed_random.py @@ -7,10 +7,12 @@ # Based on test_handler_set_hostname.py # # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import gzip import logging import tempfile from io import BytesIO +from unittest import mock import pytest @@ -21,28 +23,25 @@ get_schema, validate_cloudconfig_schema, ) -from tests.unittests.helpers import TestCase, skipUnlessJsonSchema +from tests.unittests.helpers import skipUnlessJsonSchema from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) -class TestRandomSeed(TestCase): - def setUp(self): - super(TestRandomSeed, self).setUp() +class TestRandomSeed: + def setup_method(self): self._seed_file = tempfile.mktemp() self.unapply = [] # by default 'which' has nothing in its path self.apply_patches([(subp, "which", self._which)]) - self.apply_patches([(subp, "subp", self._subp)]) self.subp_called = [] self.whichdata = {} - def tearDown(self): + def teardown_method(self): apply_patches([i for i in reversed(self.unapply)]) util.del_file(self._seed_file) - super().tearDown() def apply_patches(self, patches): ret = apply_patches(patches) @@ -74,7 +73,7 @@ def test_append_random(self): } cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), []) contents = util.load_text_file(self._seed_file) - self.assertEqual("tiny-tim-was-here", contents) + assert "tiny-tim-was-here" == contents def test_append_random_unknown_encoding(self): data = self._compress(b"tiny-toe") @@ -85,7 +84,7 @@ def test_append_random_unknown_encoding(self): "encoding": "special_encoding", } } - self.assertRaises( + pytest.raises( IOError, cc_seed_random.handle, "test", @@ -105,7 +104,7 @@ def test_append_random_gzip(self): } cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), []) contents = util.load_text_file(self._seed_file) - self.assertEqual("tiny-toe", contents) + assert "tiny-toe" == contents def test_append_random_gz(self): data = self._compress(b"big-toe") @@ -118,7 +117,7 @@ def test_append_random_gz(self): } cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), []) contents = util.load_text_file(self._seed_file) - self.assertEqual("big-toe", contents) + assert "big-toe" == contents def test_append_random_base64(self): data = atomic_helper.b64e("bubbles") @@ -131,7 +130,7 @@ def test_append_random_base64(self): } cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), []) contents = util.load_text_file(self._seed_file) - self.assertEqual("bubbles", contents) + assert "bubbles" == contents def test_append_random_b64(self): data = atomic_helper.b64e("kit-kat") @@ -144,7 +143,7 @@ def test_append_random_b64(self): } cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), []) contents = util.load_text_file(self._seed_file) - self.assertEqual("kit-kat", contents) + assert "kit-kat" == contents def test_append_random_metadata(self): cfg = { @@ -156,16 +155,23 @@ def test_append_random_metadata(self): c = get_cloud("ubuntu", metadata={"random_seed": "-so-was-josh"}) cc_seed_random.handle("test", cfg, c, []) contents = util.load_text_file(self._seed_file) - self.assertEqual("tiny-tim-was-here-so-was-josh", contents) + assert "tiny-tim-was-here-so-was-josh" == contents def test_seed_command_provided_and_available(self): c = get_cloud("ubuntu") self.whichdata = {"pollinate": "/usr/bin/pollinate"} cfg = {"random_seed": {"command": ["pollinate", "-q"]}} - cc_seed_random.handle("test", cfg, c, []) - - subp_args = [f["args"] for f in self.subp_called] - self.assertIn(["pollinate", "-q"], subp_args) + with mock.patch.object(cc_seed_random.subp, "subp") as subp: + cc_seed_random.handle("test", cfg, c, []) + + assert ( + mock.call( + ["pollinate", "-q"], 
+ update_env={"RANDOM_SEED_FILE": "/dev/urandom"}, + capture=False, + ) + in subp.call_args_list + ) def test_seed_command_not_provided(self): c = get_cloud("ubuntu") @@ -173,7 +179,7 @@ def test_seed_command_not_provided(self): cc_seed_random.handle("test", {}, c, []) # subp should not have been called as which would say not available - self.assertFalse(self.subp_called) + assert not self.subp_called def test_unavailable_seed_command_and_required_raises_error(self): c = get_cloud("ubuntu") @@ -184,17 +190,18 @@ def test_unavailable_seed_command_and_required_raises_error(self): "command_required": True, } } - self.assertRaises( - ValueError, cc_seed_random.handle, "test", cfg, c, [] - ) + pytest.raises(ValueError, cc_seed_random.handle, "test", cfg, c, []) def test_seed_command_and_required(self): c = get_cloud("ubuntu") self.whichdata = {"foo": "foo"} cfg = {"random_seed": {"command_required": True, "command": ["foo"]}} - cc_seed_random.handle("test", cfg, c, []) - - self.assertIn(["foo"], [f["args"] for f in self.subp_called]) + with mock.patch.object(cc_seed_random.subp, "subp") as m_subp: + cc_seed_random.handle("test", cfg, c, []) + assert ( + mock.call(["foo"], update_env=mock.ANY, capture=mock.ANY) + == m_subp.call_args + ) def test_file_in_environment_for_command(self): c = get_cloud("ubuntu") @@ -206,12 +213,14 @@ def test_file_in_environment_for_command(self): "file": self._seed_file, } } - cc_seed_random.handle("test", cfg, c, []) + with mock.patch.object(cc_seed_random.subp, "subp") as m_subp: + cc_seed_random.handle("test", cfg, c, []) - # this just instists that the first time subp was called, + # this just insists that the first time subp was called, # RANDOM_SEED_FILE was in the environment set up correctly - subp_env = [f["update_env"] for f in self.subp_called] - self.assertEqual(subp_env[0].get("RANDOM_SEED_FILE"), self._seed_file) + assert m_subp.call_args == mock.call( + ["foo"], update_env={"RANDOM_SEED_FILE": mock.ANY}, capture=False + ) def apply_patches(patches): diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py index 40df838a3..b0a435a2d 100644 --- a/tests/unittests/config/test_cc_set_passwords.py +++ b/tests/unittests/config/test_cc_set_passwords.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +import copy import logging from unittest import mock @@ -453,7 +454,10 @@ def test_chpasswd_parity(self, list_def, users_def): class TestExpire: @pytest.mark.parametrize("cfg", expire_cases) def test_expire(self, cfg, mocker, caplog): - features.EXPIRE_APPLIES_TO_HASHED_USERS = True + # Stable release sets EXPIRE_APPLIES_TO_HASHED_USERS=False + # This test wants True + mocker.patch.object(features, "EXPIRE_APPLIES_TO_HASHED_USERS", True) + cfg = copy.deepcopy(cfg) cloud = get_cloud() mocker.patch(f"{MODPATH}subp.subp") mocker.patch.object(cloud.distro, "chpasswd") @@ -479,7 +483,9 @@ def test_expire(self, cfg, mocker, caplog): def test_expire_old_behavior(self, cfg, mocker, caplog): # Previously expire didn't apply to hashed passwords. 
# Ensure we can preserve that case on older releases - features.EXPIRE_APPLIES_TO_HASHED_USERS = False + mocker.patch.object(features, "EXPIRE_APPLIES_TO_HASHED_USERS", False) + + cfg = copy.deepcopy(cfg) cloud = get_cloud() mocker.patch(f"{MODPATH}subp.subp") mocker.patch.object(cloud.distro, "chpasswd") diff --git a/tests/unittests/config/test_cc_ssh.py b/tests/unittests/config/test_cc_ssh.py index 544e0b67b..49327bb67 100644 --- a/tests/unittests/config/test_cc_ssh.py +++ b/tests/unittests/config/test_cc_ssh.py @@ -38,8 +38,10 @@ def publish_hostkey_test_setup(tmpdir): with open(filepath, "w") as f: f.write(" ".join(test_hostkeys[key_type])) - cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, "ssh_host_%s_key") - yield test_hostkeys, test_hostkey_files + with mock.patch.object( + cc_ssh, "KEY_FILE_TPL", os.path.join(hostkey_tmpdir, "ssh_host_%s_key") + ): + yield test_hostkeys, test_hostkey_files def _replace_options(user: Optional[str] = None) -> str: @@ -255,6 +257,7 @@ def test_handle_default_root( @mock.patch(MODPATH + "ug_util.normalize_users_groups") @mock.patch(MODPATH + "os.path.exists") @mock.patch(MODPATH + "util.fips_enabled", return_value=False) + @mock.patch.object(cc_ssh, "PUBLISH_HOST_KEYS", True) def test_handle_publish_hostkeys( self, m_fips, @@ -268,7 +271,6 @@ def test_handle_publish_hostkeys( ): """Test handle with various configs for ssh_publish_hostkeys.""" test_hostkeys, test_hostkey_files = publish_hostkey_test_setup - cc_ssh.PUBLISH_HOST_KEYS = True keys = ["key1"] user = "clouduser" # Return no matching keys for first glob, test keys for second. @@ -282,7 +284,6 @@ def test_handle_publish_hostkeys( m_path_exists.return_value = True m_nug.return_value = ({user: {"default": user}}, {}) cloud = get_cloud(distro="ubuntu", metadata={"public-keys": keys}) - cloud.datasource.publish_host_keys = mock.Mock() expected_calls = [] if expected_key_types is not None: @@ -294,14 +295,19 @@ def test_handle_publish_hostkeys( ] ) ] - cc_ssh.handle("name", cfg, cloud, None) - assert ( - expected_calls == cloud.datasource.publish_host_keys.call_args_list - ) + + with mock.patch.object( + cloud.datasource, "publish_host_keys", mock.Mock() + ): + cc_ssh.handle("name", cfg, cloud, None) + assert ( + expected_calls + == cloud.datasource.publish_host_keys.call_args_list + ) @pytest.mark.parametrize( "ssh_keys_group_exists,sshd_version,expected_private_permissions", - [(False, 0, 0), (True, 8, 0o640), (True, 10, 0o600)], + [(False, 9, 0o600), (True, 8, 0o640), (True, 10, 0o600)], ) @mock.patch(MODPATH + "subp.subp", return_value=("", "")) @mock.patch(MODPATH + "util.get_group_id", return_value=10) @@ -330,18 +336,17 @@ def test_ssh_hostkey_permissions( m_gid.return_value = 10 if ssh_keys_group_exists else -1 m_sshd_version.return_value = util.Version(sshd_version, 0) key_path = cc_ssh.KEY_FILE_TPL % "rsa" - cloud = get_cloud(distro="ubuntu") + cloud = get_cloud(distro="centos") cc_ssh.handle("name", {"ssh_genkeytypes": ["rsa"]}, cloud, []) if ssh_keys_group_exists: m_chown.assert_called_once_with(key_path, -1, 10) - assert m_chmod.call_args_list == [ - mock.call(key_path, expected_private_permissions), - mock.call(f"{key_path}.pub", 0o644), - ] else: - m_sshd_version.assert_not_called() m_chown.assert_not_called() - m_chmod.assert_not_called() + + assert m_chmod.call_args_list == [ + mock.call(key_path, expected_private_permissions), + mock.call(f"{key_path}.pub", 0o644), + ] @pytest.mark.parametrize("with_sshd_dconf", [False, True]) @mock.patch(MODPATH + "util.ensure_dir") diff --git 
a/tests/unittests/config/test_cc_ubuntu_autoinstall.py b/tests/unittests/config/test_cc_ubuntu_autoinstall.py index b130ecb62..f0533c560 100644 --- a/tests/unittests/config/test_cc_ubuntu_autoinstall.py +++ b/tests/unittests/config/test_cc_ubuntu_autoinstall.py @@ -64,7 +64,7 @@ def test_runtime_validation_errors(self, src_cfg, error_msg): cc_ubuntu_autoinstall.validate_config_schema(src_cfg) -@mock.patch(MODPATH + "subp") +@mock.patch(MODPATH + "subp.subp") class TestHandleAutoinstall: """Test cc_ubuntu_autoinstall handling of config.""" diff --git a/tests/unittests/config/test_cc_ubuntu_pro.py b/tests/unittests/config/test_cc_ubuntu_pro.py index f68a688f9..df47e7ae4 100644 --- a/tests/unittests/config/test_cc_ubuntu_pro.py +++ b/tests/unittests/config/test_cc_ubuntu_pro.py @@ -5,7 +5,6 @@ import sys from collections import namedtuple -import jsonschema import pytest from cloudinit import subp @@ -28,6 +27,11 @@ from tests.unittests.helpers import does_not_raise, mock, skipUnlessJsonSchema from tests.unittests.util import get_cloud +try: + import jsonschema +except ImportError: + jsonschema = None # type: ignore + # Module path used in mocks MPATH = "cloudinit.config.cc_ubuntu_pro" diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py index 3300b77b2..9fac84d77 100644 --- a/tests/unittests/config/test_cc_users_groups.py +++ b/tests/unittests/config/test_cc_users_groups.py @@ -97,6 +97,7 @@ def test_handle_users_in_cfg_calls_create_users_on_bsd( "lock_passwd": True, "groups": ["wheel"], "shell": "/bin/tcsh", + "homedir": "/home/freebsd", } } metadata = {} @@ -116,6 +117,7 @@ def test_handle_users_in_cfg_calls_create_users_on_bsd( groups="wheel", lock_passwd=True, shell="/bin/tcsh", + homedir="/home/freebsd", ), mock.call("me2", uid=1234, default=False), ], @@ -371,9 +373,20 @@ class TestUsersGroupsSchema: SchemaValidationError, match=( "Cloud config schema deprecations: " - "users.0.lock-passwd: Default: ``true`` " - "Deprecated in version 22.3. Use " - "``lock_passwd`` instead." + "users.0.lock-passwd: Deprecated in version 22.3." + " Use ``lock_passwd`` instead." + ), + ), + False, + ), + ( + {"users": [{"name": "bbsw", "no-create-home": True}]}, + pytest.raises( + SchemaValidationError, + match=( + "Cloud config schema deprecations: " + "users.0.no-create-home: Deprecated in version 24.2." + " Use ``no_create_home`` instead." ), ), False, @@ -394,13 +407,10 @@ class TestUsersGroupsSchema: SchemaValidationError, match=( "Cloud config schema deprecations: " - "users.0.groups.adm: When providing an object " - "for users.groups the ```` keys " - "are the groups to add this user to Deprecated" - " in version 23.1., users.0.groups.sudo: When " - "providing an object for users.groups the " - "```` keys are the groups to add " - "this user to Deprecated in version 23.1." + "users.0.groups.adm: Deprecated in version 23.1. " + "The use of ``object`` type is deprecated. Use " + "``string`` or ``array`` of ``string`` instead., " + "users.0.groups.sudo: Deprecated in version 23.1." ), ), False, @@ -456,10 +466,7 @@ class TestUsersGroupsSchema: SchemaValidationError, match=( "Cloud config schema deprecations: " - "user.groups.sbuild: When providing an object " - "for users.groups the ```` keys " - "are the groups to add this user to Deprecated" - " in version 23.1." + "user.groups.sbuild: Deprecated in version 23.1." 
), ), False, @@ -503,6 +510,36 @@ class TestUsersGroupsSchema: ), True, ), + ( + { + "users": [ + { + "name": "lima", + "uid": "1000", + "homedir": "/home/lima.linux", + "shell": "/bin/bash", + "sudo": "ALL=(ALL) NOPASSWD:ALL", + "lock_passwd": True, + "ssh-authorized-keys": ["ssh-ed25519 ..."], + } + ] + }, + pytest.raises( + SchemaValidationError, + match=( + "Cloud config schema deprecations: " + "users.0.ssh-authorized-keys: " + " Deprecated in version 18.3." + " Use ``ssh_authorized_keys`` instead." + ", " + "users.0.uid: " + " Changed in version 22.3." + " The use of ``string`` type is deprecated." + " Use an ``integer`` instead." + ), + ), + False, + ), ], ) @skipUnlessJsonSchema() diff --git a/tests/unittests/config/test_cc_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py index 1707860a1..e6a9109ee 100644 --- a/tests/unittests/config/test_cc_yum_add_repo.py +++ b/tests/unittests/config/test_cc_yum_add_repo.py @@ -31,6 +31,7 @@ def test_bad_config(self): "yum_repos": { "epel-testing": { "name": "Extra Packages for Enterprise Linux 5 - Testing", + # At least one of baseurl or metalink must be present. # Missing this should cause the repo not to be written # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch', "enabled": False, @@ -46,6 +47,43 @@ def test_bad_config(self): IOError, util.load_text_file, "/etc/yum.repos.d/epel_testing.repo" ) + def test_metalink_config(self): + cfg = { + "yum_repos": { + "epel-testing": { + "name": "Extra Packages for Enterprise Linux 5 - Testing", + "metalink": "http://blah.org/pub/epel/testing/5/$basearch", + "enabled": False, + "gpgcheck": True, + "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL", + "failovermethod": "priority", + }, + }, + } + self.patchUtils(self.tmp) + self.patchOS(self.tmp) + cc_yum_add_repo.handle("yum_add_repo", cfg, None, []) + contents = util.load_text_file("/etc/yum.repos.d/epel-testing.repo") + parser = configparser.ConfigParser() + parser.read_string(contents) + expected = { + "epel-testing": { + "name": "Extra Packages for Enterprise Linux 5 - Testing", + "failovermethod": "priority", + "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL", + "enabled": "0", + "metalink": "http://blah.org/pub/epel/testing/5/$basearch", + "gpgcheck": "1", + } + } + for section in expected: + self.assertTrue( + parser.has_section(section), + "Contains section {0}".format(section), + ) + for k, v in expected[section].items(): + self.assertEqual(parser.get(section, k), v) + def test_write_config(self): cfg = { "yum_repos": { diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py index ce55534be..184857583 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -18,7 +18,9 @@ from typing import List, Optional, Sequence, Set import pytest +import yaml +from cloudinit import features from cloudinit.config.schema import ( VERSIONED_USERDATA_SCHEMA_FILE, MetaSchema, @@ -28,6 +30,7 @@ annotated_cloudconfig_file, get_jsonschema_validator, get_meta_doc, + get_module_docs, get_schema, get_schema_dir, handle_schema_args, @@ -39,7 +42,7 @@ validate_cloudconfig_schema, ) from cloudinit.distros import OSFAMILIES -from cloudinit.safeyaml import load, load_with_marks +from cloudinit.safeyaml import load_with_marks from cloudinit.settings import FREQUENCIES from cloudinit.sources import DataSourceNotFoundException from cloudinit.templater import JinjaSyntaxParsingException @@ -296,14 +299,78 @@ def test_get_schema_coalesces_known_schema(self): assert [] == 
sorted(legacy_schema_keys) +MODULE_DATA_YAML_TMPL = """\ +{mod_id}: + name: {name} + title: My Module + description: + My amazing module description + examples: + - comment: "comment 1" + file: {examplefile} +""" + + +class TestGetModuleDocs: + def test_get_module_docs_loads_all_data_yaml_files_from_modules_dirs( + self, mocker, paths + ): + """get_module_docs aggregates all data.yaml module docs.""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) + modules_dir = Path(paths.docs_dir, "module-docs") + + assert {} == get_module_docs() + + mod1_dir = Path(modules_dir, "cc_mod1") + mod1_dir.mkdir(parents=True) + mod1_data = Path(mod1_dir, "data.yaml") + # Skip any subdir that does not contain a data.yaml + assert {} == get_module_docs() + # Create a data.yaml so the module subdir is no longer skipped + mod1_content = MODULE_DATA_YAML_TMPL.format( + mod_id="cc_mod1", + name="mod1", + examplefile=mod1_data, + ) + mod1_data.write_text(mod1_content) + expected = yaml.safe_load(mod1_content) + assert expected == get_module_docs() + mod2_dir = Path(modules_dir, "cc_mod2") + mod2_dir.mkdir(parents=True) + mod2_data = Path(mod2_dir, "data.yaml") + mod2_content = MODULE_DATA_YAML_TMPL.format( + mod_id="cc_mod2", + name="mod2", + examplefile=mod2_data, + ) + mod2_data.write_text(mod2_content) + expected.update(yaml.safe_load(mod2_content)) + assert expected == get_module_docs() + + def test_validate_data_file_schema(self, mocker, paths): + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) + root_dir = Path(__file__).parent.parent.parent.parent + for mod_data_f in root_dir.glob("doc/module-docs/*/data.yaml"): + docs_metadata = yaml.safe_load(mod_data_f.read_text()) + assert docs_metadata.get(mod_data_f.parent.stem), ( + f"Top-level key in {mod_data_f} doesn't match" + f" {mod_data_f.parent.stem}" + ) + assert ["description", "examples", "name", "title"] == sorted( + docs_metadata[mod_data_f.parent.stem].keys() + ) + + class TestLoadDoc: docs = get_module_variable("__doc__") + # TODO(remove when last __doc__ = load_meta_doc is removed) @pytest.mark.parametrize( "module_name", - ("cc_apt_pipelining",), # new style composite schema file + ("cc_zypper_add_repo",), ) - def test_report_docs_consolidated_schema(self, module_name): + def test_report_docs_consolidated_schema(self, module_name, mocker, paths): + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) doc = load_doc([module_name]) assert doc, "Unexpected empty docs for {}".format(module_name) assert self.docs[module_name] == doc @@ -354,11 +421,11 @@ class TestNetplanValidateNetworkSchema: ({"version": 1}, ""), ( {"version": 2}, - "Skipping netplan schema validation. No netplan available", + "Skipping netplan schema validation. No netplan API available", ), ( {"network": {"version": 2}}, - "Skipping netplan schema validation. No netplan available", + "Skipping netplan schema validation. No netplan API available", ), ), ) @@ -382,8 +449,9 @@ def test_network_config_schema_validation_false_when_skipped( column=12, message="incorrect YAML value: yes for dhcp value", ), - r"Invalid network-config provided:.*format-l1.c12: Invalid" - " netplan schema. incorrect YAML value: yes for dhcp value", + r"network-config failed schema validation!.*format-l1.c12: " "Invalid netplan schema. 
incorrect YAML value: yes for dhcp " + "value", ), ), ) @@ -447,8 +515,8 @@ def test_validateconfig_schema_non_strict_emits_warnings(self, caplog): assert "cloudinit.config.schema" == module assert logging.WARNING == log_level assert ( - "Invalid cloud-config provided:\np1: -1 is not of type 'string'" - == log_msg + "cloud-config failed schema validation!\n" + "p1: -1 is not of type 'string'" == log_msg ) @skipUnlessJsonSchema() @@ -468,8 +536,9 @@ def test_validateconfig_schema_sensitive(self, caplog): assert "cloudinit.config.schema" == module assert logging.WARNING == log_level assert ( - "Invalid cloud-config provided: Please run 'sudo cloud-init " - "schema --system' to see the schema errors." == log_msg + "cloud-config failed schema validation! You may run " + "'sudo cloud-init schema --system' to check the details." + == log_msg ) @skipUnlessJsonSchema() @@ -561,7 +630,7 @@ def test_validateconfig_strict_metaschema_do_not_raise_exception( }, }, {"a-b": "asdf"}, - "Deprecated cloud-config provided:\na-b: " + "Deprecated cloud-config provided: a-b: " "Deprecated in version 22.1.", ), ( @@ -582,7 +651,7 @@ def test_validateconfig_strict_metaschema_do_not_raise_exception( }, }, {"x": "+5"}, - "Deprecated cloud-config provided:\nx: " + "Deprecated cloud-config provided: x: " "Deprecated in version 22.1.", ), ( @@ -603,7 +672,7 @@ def test_validateconfig_strict_metaschema_do_not_raise_exception( }, }, {"x": "5"}, - "Deprecated cloud-config provided:\nx: " + "Deprecated cloud-config provided: x: " "Deprecated in version 22.1. ", ), ( @@ -624,7 +693,7 @@ def test_validateconfig_strict_metaschema_do_not_raise_exception( }, }, {"x": "5"}, - "Deprecated cloud-config provided:\nx: " + "Deprecated cloud-config provided: x: " "Deprecated in version 22.1.", ), ( @@ -640,7 +709,7 @@ def test_validateconfig_strict_metaschema_do_not_raise_exception( }, }, {"x": "+5"}, - "Deprecated cloud-config provided:\nx: " + "Deprecated cloud-config provided: x: " "Deprecated in version 22.1.", ), ( @@ -677,7 +746,7 @@ def test_validateconfig_strict_metaschema_do_not_raise_exception( }, }, {"x": "+5"}, - "Deprecated cloud-config provided:\nx: " + "Deprecated cloud-config provided: x: " "Deprecated in version 32.3.", ), ( @@ -702,7 +771,7 @@ def test_validateconfig_strict_metaschema_do_not_raise_exception( }, }, {"x": "+5"}, - "Deprecated cloud-config provided:\nx: Deprecated in " + "Deprecated cloud-config provided: x: Deprecated in " "version 27.2.", ), ( @@ -718,7 +787,7 @@ def test_validateconfig_strict_metaschema_do_not_raise_exception( }, }, {"a-b": "asdf"}, - "Deprecated cloud-config provided:\na-b: " + "Deprecated cloud-config provided: a-b: " "Deprecated in version 27.2.", ), pytest.param( @@ -736,8 +805,8 @@ def test_validateconfig_strict_metaschema_do_not_raise_exception( }, }, {"a-b": "asdf"}, - "Deprecated cloud-config provided:\na-b: Deprecated " - "in version 27.2.\na-b: Changed in version 22.2. " + "Deprecated cloud-config provided: a-b: Deprecated " + "in version 27.2., a-b: Changed in version 22.2. 
" "Drop ballast.", id="deprecated_pattern_property_without_description", ), @@ -746,12 +815,13 @@ def test_validateconfig_strict_metaschema_do_not_raise_exception( def test_validateconfig_logs_deprecations( self, schema, config, expected_msg, log_deprecations, caplog ): - validate_cloudconfig_schema( - config, - schema=schema, - strict_metaschema=True, - log_deprecations=log_deprecations, - ) + with mock.patch.object(features, "DEPRECATION_INFO_BOUNDARY", "devel"): + validate_cloudconfig_schema( + config, + schema=schema, + strict_metaschema=True, + log_deprecations=log_deprecations, + ) if expected_msg is None: return log_record = (M_PATH[:-1], DEPRECATED_LOG_LEVEL, expected_msg) @@ -777,7 +847,7 @@ def test_validateconfig_schema_of_example(self, schema_id, example): according to the unified schema of all config modules """ schema = get_schema() - config_load = load(example) + config_load = yaml.safe_load(example) # cloud-init-schema-v1 is permissive of additionalProperties at the # top-level. # To validate specific schemas against known documented examples @@ -922,7 +992,6 @@ def test_validateconfig_file_no_cloud_cfg( def test_validateconfig_file_raises_jinja_syntax_error( self, annotate, tmpdir, mocker, capsys ): - """ """ # will throw error because of space between last two }'s invalid_jinja_template = "## template: jinja\na:b\nc:{{ d } }" mocker.patch("os.path.exists", return_value=True) @@ -973,8 +1042,8 @@ class TestSchemaDocMarkdown: "frequency": "frequency", "distros": ["debian", "rhel"], "examples": [ - 'prop1:\n [don\'t, expand, "this"]', - "prop2: true", + '\nExample 1:\nprop1:\n [don\'t, expand, "this"]', + "\nExample 2:\nprop2: true", ], } @@ -986,8 +1055,11 @@ class TestSchemaDocMarkdown: {"activate_by_schema_keys": []}, ], ) - def test_get_meta_doc_returns_restructured_text(self, meta_update): + def test_get_meta_doc_returns_restructured_text( + self, meta_update, paths, mocker + ): """get_meta_doc returns restructured text for a cloudinit schema.""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) full_schema = deepcopy(self.required_schema) full_schema.update( { @@ -1020,17 +1092,20 @@ def test_get_meta_doc_returns_restructured_text(self, meta_update): " * **prop1:** (array of integer) prop-description", " .. tab-item:: Examples", " ::", - " # --- Example1 ---", + " Example 1:", " prop1:", ' [don\'t, expand, "this"]', - " # --- Example2 ---", + " Example 2:", " prop2: true", ] for line in [ln for ln in doc.splitlines() if ln.strip()]: assert line in expected_lines - def test_get_meta_doc_full_with_activate_by_schema_keys(self): + def test_get_meta_doc_full_with_activate_by_schema_keys( + self, paths, mocker + ): + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) full_schema = deepcopy(self.required_schema) full_schema.update( { @@ -1068,24 +1143,28 @@ def test_get_meta_doc_full_with_activate_by_schema_keys(self): " * **prop2:** (boolean) prop2-description.", " .. 
tab-item:: Examples", " ::", - " # --- Example1 ---", + " Example 1:", " prop1:", + " Example 2:", ' [don\'t, expand, "this"]', - " # --- Example2 ---", " prop2: true", ] for line in [ln for ln in doc.splitlines() if ln.strip()]: assert line in expected_lines - def test_get_meta_doc_handles_multiple_types(self): + def test_get_meta_doc_handles_multiple_types(self, paths, mocker): """get_meta_doc delimits multiple property types with a '/'.""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) schema = {"properties": {"prop1": {"type": ["string", "integer"]}}} assert "**prop1:** (string/integer)" in get_meta_doc(self.meta, schema) @pytest.mark.parametrize("multi_key", ["oneOf", "anyOf"]) - def test_get_meta_doc_handles_multiple_types_recursive(self, multi_key): + def test_get_meta_doc_handles_multiple_types_recursive( + self, multi_key, mocker, paths + ): """get_meta_doc delimits multiple property types with a '/'.""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) schema = { "properties": { "prop1": { @@ -1100,8 +1179,9 @@ def test_get_meta_doc_handles_multiple_types_recursive(self, multi_key): self.meta, schema ) - def test_references_are_flattened_in_schema_docs(self): + def test_references_are_flattened_in_schema_docs(self, paths, mocker): """get_meta_doc flattens and renders full schema definitions.""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) schema = { "$defs": { "flattenit": { @@ -1142,8 +1222,11 @@ def test_references_are_flattened_in_schema_docs(self): ), ), ) - def test_get_meta_doc_handles_enum_types(self, sub_schema, expected): + def test_get_meta_doc_handles_enum_types( + self, sub_schema, expected, mocker, paths + ): """get_meta_doc converts enum types to yaml and delimits with '/'.""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) schema = {"properties": {"prop1": sub_schema}} assert expected in get_meta_doc(self.meta, schema) @@ -1186,19 +1269,21 @@ def test_get_meta_doc_handles_enum_types(self, sub_schema, expected): ), ) def test_get_meta_doc_hidden_hides_specific_properties_from_docs( - self, schema, expected + self, schema, expected, paths, mocker ): """Docs are hidden for any property in the hidden list. Useful for hiding deprecated key schema. 
""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) assert "".join(expected) in get_meta_doc(self.meta, schema) @pytest.mark.parametrize("multi_key", ["oneOf", "anyOf"]) def test_get_meta_doc_handles_nested_multi_schema_property_types( - self, multi_key + self, multi_key, paths, mocker ): """get_meta_doc describes array items oneOf declarations in type.""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) schema = { "properties": { "prop1": { @@ -1214,8 +1299,11 @@ def test_get_meta_doc_handles_nested_multi_schema_property_types( ) @pytest.mark.parametrize("multi_key", ["oneOf", "anyOf"]) - def test_get_meta_doc_handles_types_as_list(self, multi_key): + def test_get_meta_doc_handles_types_as_list( + self, multi_key, paths, mocker + ): """get_meta_doc renders types which have a list value.""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) schema = { "properties": { "prop1": { @@ -1231,8 +1319,9 @@ def test_get_meta_doc_handles_types_as_list(self, multi_key): in get_meta_doc(self.meta, schema) ) - def test_get_meta_doc_handles_flattening_defs(self): + def test_get_meta_doc_handles_flattening_defs(self, paths, mocker): """get_meta_doc renders $defs.""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) schema = { "$defs": { "prop1object": { @@ -1247,14 +1336,15 @@ def test_get_meta_doc_handles_flattening_defs(self): in get_meta_doc(self.meta, schema) ) - def test_get_meta_doc_handles_string_examples(self): + def test_get_meta_doc_handles_string_examples(self, paths, mocker): """get_meta_doc properly indented examples as a list of strings.""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) full_schema = deepcopy(self.required_schema) full_schema.update( { "examples": [ - 'ex1:\n [don\'t, expand, "this"]', - "ex2: true", + 'Example 1:\nex1:\n [don\'t, expand, "this"]', + "Example 2:\nex2: true", ], "properties": { "prop1": { @@ -1270,16 +1360,17 @@ def test_get_meta_doc_handles_string_examples(self): " * **prop1:** (array of integer) prop-description.\n\n", " .. 
tab-item:: Examples\n\n", " ::\n\n\n", - " # --- Example1 ---\n\n", + " Example 1:\n", " prop1:\n", ' [don\'t, expand, "this"]\n', - " # --- Example2 ---\n\n", + " Example 2:\n", " prop2: true", ] assert "".join(expected) in get_meta_doc(self.meta, full_schema) - def test_get_meta_doc_properly_parse_description(self): + def test_get_meta_doc_properly_parse_description(self, paths, mocker): """get_meta_doc description properly formatted""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) schema = { "properties": { "p1": { @@ -1348,7 +1439,10 @@ def test_get_meta_doc_raises_key_errors(self, key): ), ], ) - def test_get_meta_doc_additional_keys(self, key, expectation): + def test_get_meta_doc_additional_keys( + self, key, expectation, paths, mocker + ): + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) schema = { "properties": { "prop1": { @@ -1364,8 +1458,9 @@ def test_get_meta_doc_additional_keys(self, key, expectation): with expectation: get_meta_doc(invalid_meta, schema) - def test_label_overrides_property_name(self): + def test_label_overrides_property_name(self, paths, mocker): """get_meta_doc overrides property name with label.""" + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) schema = { "properties": { "old_prop1": { @@ -1651,7 +1746,10 @@ def test_label_overrides_property_name(self): ), ], ) - def test_get_meta_doc_render_deprecated_info(self, schema, expected_lines): + def test_get_meta_doc_render_deprecated_info( + self, schema, expected_lines, paths, mocker + ): + mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) doc = get_meta_doc(self.meta, schema) for line in expected_lines: assert line in doc @@ -1669,29 +1767,6 @@ def test_annotated_cloudconfig_file_no_schema_errors(self): schema_errors=[], ) - def test_annotated_cloudconfig_file_with_non_dict_cloud_config(self): - """Error when empty non-dict cloud-config is provided. - - OurJSON validation when user-data is None type generates a bunch - schema validation errors of the format: - ('', "None is not of type 'object'"). Ignore those symptoms and - report the general problem instead. 
- """ - content = "\n\n\n" - expected = "\n".join( - [ - content, - "# Errors: -------------", - "# E1: Cloud-config is not a YAML dict.\n\n", - ] - ) - assert expected == annotated_cloudconfig_file( - None, - content, - schemamarks={}, - schema_errors=[SchemaProblem("", "None is not of type 'object'")], - ) - def test_annotated_cloudconfig_file_schema_annotates_and_adds_footer(self): """With schema_errors, error lines are annotated and a footer added.""" content = dedent( @@ -1816,9 +1891,10 @@ def test_main_exclusive_args(self, _read_cfg_paths, params, capsys): ) @mock.patch(M_PATH + "os.getuid", return_value=100) def test_main_ignores_schema_type( - self, _read_cfg_paths, _os_getuid, params, expectation, capsys + self, _os_getuid, read_cfg_paths, params, expectation, paths, capsys ): """Main ignores --schema-type param when --system or --docs present.""" + read_cfg_paths.return_value = paths params = list(itertools.chain(*[a.split() for a in params])) with mock.patch( "sys.argv", ["mycmd", "--schema-type", "network-config"] + params @@ -1869,8 +1945,13 @@ def test_main_invalid_flag_combo(self, _read_cfg_paths, capsys): "Cannot use --annotate with --docs\n" == err ) - def test_main_prints_docs(self, _read_cfg_paths, capsys): + def test_main_prints_docs(self, read_cfg_paths, paths, capsys): """When --docs parameter is provided, main generates documentation.""" + paths.docs_dir = Path( + Path(__file__).parent.parent.parent.parent, "doc/" + ) + read_cfg_paths.return_value = paths + myargs = ["mycmd", "--docs", "all"] with mock.patch("sys.argv", myargs): assert 0 == main(), "Expected 0 exit code" @@ -1889,8 +1970,7 @@ def test_main_prints_docs(self, _read_cfg_paths, capsys): b"network: {'version': 2, 'ethernets':" b" {'eth0': {'dhcp': true}}}" ), - "Skipping network-config schema validation. No network schema" - " for version: 2", + "Valid schema", ), ( "network-config", @@ -1902,8 +1982,10 @@ def test_main_prints_docs(self, _read_cfg_paths, capsys): ), ), ) + @mock.patch("cloudinit.net.netplan.available", return_value=False) def test_main_validates_config_file( self, + _netplan_available, _read_cfg_paths, schema_type, content, @@ -1978,7 +2060,6 @@ def test_main_processed_data_preference_over_raw_data( paths, capsys, ): - """""" paths.get_ipath = paths.get_ipath_cur read_cfg_paths.return_value = paths path_content_by_key = { @@ -2042,11 +2123,11 @@ def test_main_processed_data_preference_over_raw_data( id="netv1_schema_validated", ), pytest.param( - "network:\n version: 2\n ethernets:\n eth0:\n dhcp4:true\n", - "Skipping network-config schema validation." 
- " No network schema for version: 2", + "network:\n version: 2\n ethernets:\n eth0:\n" + " dhcp4: true\n", + " Valid schema network-config", does_not_raise(), - id="netv2_validation_is_skipped", + id="netv2_schema_validated_non_netplan", ), pytest.param( "network: {}\n", @@ -2073,10 +2154,12 @@ def test_main_processed_data_preference_over_raw_data( ) @mock.patch(M_PATH + "read_cfg_paths") @mock.patch(M_PATH + "os.getuid", return_value=0) + @mock.patch("cloudinit.net.netplan.available", return_value=False) def test_main_validates_system_userdata_vendordata_and_network_config( self, - _read_cfg_paths, + _netplan_available, _getuid, + _read_cfg_paths, read_cfg_paths, net_config, net_output, @@ -2162,7 +2245,8 @@ def _get_meta_doc_examples(file_glob="cloud-config*.txt"): class TestSchemaDocExamples: schema = get_schema() - net_schema = get_schema(schema_type=SchemaType.NETWORK_CONFIG) + net_schema_v1 = get_schema(schema_type=SchemaType.NETWORK_CONFIG_V1) + net_schema_v2 = get_schema(schema_type=SchemaType.NETWORK_CONFIG_V2) @pytest.mark.parametrize("example_path", _get_meta_doc_examples()) @skipUnlessJsonSchema() @@ -2176,8 +2260,22 @@ def test_cloud_config_schema_doc_examples(self, example_path): @skipUnlessJsonSchema() def test_network_config_schema_v1_doc_examples(self, example_path): validate_cloudconfig_schema( - config=load(open(example_path)), - schema=self.net_schema, + config=yaml.safe_load(open(example_path)), + schema=self.net_schema_v1, + schema_type=SchemaType.NETWORK_CONFIG_V1, + strict=True, + ) + + @pytest.mark.parametrize( + "example_path", + _get_meta_doc_examples(file_glob="network-config-v2*yaml"), + ) + @skipUnlessJsonSchema() + def test_network_config_schema_v2_doc_examples(self, example_path): + validate_cloudconfig_schema( + config=yaml.safe_load(open(example_path)), + schema=self.net_schema_v2, + schema_type=SchemaType.NETWORK_CONFIG_V2, strict=True, ) @@ -2238,16 +2336,108 @@ class TestNetworkSchema: net_schema = get_schema(schema_type=SchemaType.NETWORK_CONFIG) @pytest.mark.parametrize( - "src_config, expectation, log", + "src_config, schema_type_version, expectation, log", ( pytest.param( {"network": {"config": [], "version": 2}}, + SchemaType.NETWORK_CONFIG_V2, + pytest.raises( + SchemaValidationError, + match=re.escape( + "Additional properties are not allowed ('config' was " + "unexpected)" + ), + ), + "", + id="net_v2_invalid_config", + ), + pytest.param( + { + "network": { + "version": 2, + "ethernets": {"eno1": {"dhcp4": True}}, + } + }, + SchemaType.NETWORK_CONFIG_V2, does_not_raise(), - "Skipping netplan schema validation. 
No netplan available", - id="net_v2_skipped", + "", + id="net_v2_simple_example", + ), + pytest.param( + { + "version": 2, + "ethernets": {"eno1": {"dhcp4": True}}, + }, + SchemaType.NETWORK_CONFIG_V2, + does_not_raise(), + "", + id="net_v2_no_top_level", + ), + pytest.param( + { + "network": { + "version": 2, + "ethernets": { + "id0": { + "match": { + "macaddress": "00:11:22:33:44:55", + }, + "wakeonlan": True, + "dhcp4": True, + "addresses": [ + "192.168.14.2/24", + "2001:1::1/64", + ], + "gateway4": "192.168.14.1", + "gateway6": "2001:1::2", + "nameservers": { + "search": ["foo.local", "bar.local"], + "addresses": ["8.8.8.8"], + }, + "routes": [ + { + "to": "192.0.2.0/24", + "via": "11.0.0.1", + "metric": 3, + }, + ], + }, + "lom": { + "match": {"driver": "ixgbe"}, + "set-name": "lom1", + "dhcp6": True, + }, + "switchports": { + "match": {"name": "enp2*"}, + "mtu": 1280, + }, + }, + "bonds": { + "bond0": {"interfaces": ["id0", "lom"]}, + }, + "bridges": { + "br0": { + "interfaces": ["wlp1s0", "switchports"], + "dhcp4": True, + }, + }, + "vlans": { + "en-intra": { + "id": 1, + "link": "id0", + "dhcp4": "yes", + }, + }, + } + }, + SchemaType.NETWORK_CONFIG_V2, + does_not_raise(), + "", + id="net_v2_complex_example", ), pytest.param( {"network": {"version": 1}}, + SchemaType.NETWORK_CONFIG_V1, pytest.raises( SchemaValidationError, match=re.escape("'config' is a required property"), @@ -2257,6 +2447,7 @@ class TestNetworkSchema: ), pytest.param( {"network": {"version": 1, "config": []}}, + SchemaType.NETWORK_CONFIG_V1, does_not_raise(), "", id="config_key_required", @@ -2268,6 +2459,7 @@ class TestNetworkSchema: "config": [{"name": "me", "type": "typo"}], } }, + SchemaType.NETWORK_CONFIG_V1, pytest.raises( SchemaValidationError, match=( @@ -2280,6 +2472,7 @@ class TestNetworkSchema: ), pytest.param( {"network": {"version": 1, "config": [{"type": "physical"}]}}, + SchemaType.NETWORK_CONFIG_V1, pytest.raises( SchemaValidationError, match=r"network.config.0: 'name' is a required property.*", @@ -2294,6 +2487,7 @@ class TestNetworkSchema: "config": [{"type": "physical", "name": "a"}], } }, + SchemaType.NETWORK_CONFIG_V1, does_not_raise(), "", id="physical_with_name_succeeds", @@ -2307,6 +2501,7 @@ class TestNetworkSchema: ], } }, + SchemaType.NETWORK_CONFIG_V1, pytest.raises( SchemaValidationError, match=r"Additional properties are not allowed.*", @@ -2321,6 +2516,7 @@ class TestNetworkSchema: "config": [VALID_PHYSICAL_CONFIG], } }, + SchemaType.NETWORK_CONFIG_V1, does_not_raise(), "", id="physical_with_all_known_properties", @@ -2332,6 +2528,7 @@ class TestNetworkSchema: "config": [VALID_BOND_CONFIG], } }, + SchemaType.NETWORK_CONFIG_V1, does_not_raise(), "", id="bond_with_all_known_properties", @@ -2346,18 +2543,29 @@ class TestNetworkSchema: ], } }, + SchemaType.NETWORK_CONFIG_V1, does_not_raise(), "", id="GH-4710_mtu_none_and_str_address", ), ), ) - def test_network_schema(self, src_config, expectation, log, caplog): + @mock.patch("cloudinit.net.netplan.available", return_value=False) + def test_network_schema( + self, + _netplan_available, + src_config, + schema_type_version, + expectation, + log, + caplog, + ): + net_schema = get_schema(schema_type=schema_type_version) with expectation: validate_cloudconfig_schema( config=src_config, - schema=self.net_schema, - schema_type=SchemaType.NETWORK_CONFIG, + schema=net_schema, + schema_type=schema_type_version, strict=True, ) if log: @@ -2528,10 +2736,11 @@ def test_handle_schema_unable_to_read_cfg_paths( assert expected_log in caplog.text 
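+    # A hypothetical clarifying note (not in the original diff): each case
+    # below pins features.DEPRECATION_INFO_BOUNDARY via mocker.patch.object
+    # in the test body, to show how the boundary setting changes which
+    # deprecation messages are reported and annotated.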
@pytest.mark.parametrize( - "annotate, expected_output", + "annotate, deprecation_info_boundary, expected_output", [ - ( + pytest.param( True, + "devel", dedent( """\ #cloud-config @@ -2542,27 +2751,51 @@ def test_handle_schema_unable_to_read_cfg_paths( apt_reboot_if_required: true # D3 # Deprecations: ------------- - # D1: Default: ``false``. Deprecated in version 22.2. Use ``package_update`` instead. - # D2: Default: ``false``. Deprecated in version 22.2. Use ``package_upgrade`` instead. - # D3: Default: ``false``. Deprecated in version 22.2. Use ``package_reboot_if_required`` instead. + # D1: Deprecated in version 22.2. Use ``package_update`` instead. + # D2: Deprecated in version 22.2. Use ``package_upgrade`` instead. + # D3: Deprecated in version 22.2. Use ``package_reboot_if_required`` instead. Valid schema {cfg_file} """ # noqa: E501 ), + id="test_annotated_deprecation_info_boundary_devel_shows", ), - ( + pytest.param( + True, + "22.1", + dedent( + """\ + #cloud-config + packages: + - htop + apt_update: true # D1 + apt_upgrade: true # D2 + apt_reboot_if_required: true # D3 + + # Deprecations: ------------- + # D1: Deprecated in version 22.2. Use ``package_update`` instead. + # D2: Deprecated in version 22.2. Use ``package_upgrade`` instead. + # D3: Deprecated in version 22.2. Use ``package_reboot_if_required`` instead. + + Valid schema {cfg_file} + """ # noqa: E501 + ), + id="test_annotated_deprecation_info_boundary_below_unredacted", + ), + pytest.param( False, + "18.2", dedent( """\ Cloud config schema deprecations: \ -apt_reboot_if_required: Default: ``false``. Deprecated in version 22.2.\ - Use ``package_reboot_if_required`` instead., apt_update: Default: \ -``false``. Deprecated in version 22.2. Use ``package_update`` instead.,\ - apt_upgrade: Default: ``false``. Deprecated in version 22.2. Use \ -``package_upgrade`` instead.\ +apt_reboot_if_required: Deprecated in version 22.2. Use\ + ``package_reboot_if_required`` instead., apt_update: Deprecated in version\ + 22.2. Use ``package_update`` instead., apt_upgrade: Deprecated in version\ + 22.2. 
Use ``package_upgrade`` instead.\ Valid schema {cfg_file} """ # noqa: E501 ), + id="test_deprecation_info_boundary_does_unannotated_unredacted", ), ], ) @@ -2571,11 +2804,13 @@ def test_handle_schema_args_annotate_deprecated_config( self, read_cfg_paths, annotate, + deprecation_info_boundary, expected_output, paths, caplog, capsys, tmpdir, + mocker, ): paths.get_ipath = paths.get_ipath_cur read_cfg_paths.return_value = paths @@ -2593,6 +2828,9 @@ def test_handle_schema_args_annotate_deprecated_config( """ ) ) + mocker.patch.object( + features, "DEPRECATION_INFO_BOUNDARY", deprecation_info_boundary + ) args = self.Args( config_file=str(user_data_fn), schema_type="cloud-config", diff --git a/tests/unittests/conftest.py b/tests/unittests/conftest.py index 7748a3883..e0baa63b9 100644 --- a/tests/unittests/conftest.py +++ b/tests/unittests/conftest.py @@ -1,14 +1,34 @@ import builtins import glob import os +import pathlib +import shutil from pathlib import Path from unittest import mock import pytest from cloudinit import atomic_helper, log, util +from cloudinit.cmd.devel import logs +from cloudinit.gpg import GPG from tests.hypothesis import HAS_HYPOTHESIS -from tests.unittests.helpers import retarget_many_wrapper +from tests.unittests.helpers import example_netdev, retarget_many_wrapper + + +@pytest.fixture +def m_gpg(): + MockGPG = mock.Mock(spec=GPG) + MockGPG.configure_mock(**{"getkeybyid.return_value": "fakekey"}) + gpg = MockGPG() + gpg.list_keys = mock.Mock(return_value="") + gpg.getkeybyid = mock.Mock(return_value="") + + # to make tests for cc_apt_configure behave, we need the mocked GPG + # to actually behave like a context manager + gpg.__enter__ = GPG.__enter__ + gpg.__exit__ = GPG.__exit__ + yield gpg + FS_FUNCS = { os.path: [ @@ -22,6 +42,7 @@ os: [ ("listdir", 1), ("mkdir", 1), + ("rmdir", 1), ("lstat", 1), ("symlink", 2), ("stat", 1), @@ -49,32 +70,32 @@ ("write_file", 1), ("write_json", 1), ], + shutil: [ + ("rmtree", 1), + ], } @pytest.fixture def fake_filesystem(mocker, tmpdir): """Mocks fs functions to operate under `tmpdir`""" + # This allows fake_filesystem to be used with production code that + # creates temporary directories. Functions like TemporaryDirectory() + # attempt to create a directory under "/tmp" assuming that it already + # exists, but then it fails because of the retargeting that happens here. + tmpdir.mkdir("tmp") + for (mod, funcs) in FS_FUNCS.items(): for f, nargs in funcs: func = getattr(mod, f) trap_func = retarget_many_wrapper(str(tmpdir), nargs, func) mocker.patch.object(mod, f, trap_func) + yield str(tmpdir) @pytest.fixture(scope="session", autouse=True) -def disable_sysfs_net(request, tmpdir_factory): - """Avoid tests which read the undertying host's /syc/class/net. - - To allow unobscured reads of /sys/class/net on the host we can - parametrize the fixture with: - - @pytest.mark.parametrize("disable_sysfs_net", [False], indirect=True) - """ - if hasattr(request, "param") and getattr(request, "param") is False: - # Test disabled this fixture, perform no mocks. 
- yield - return +def disable_sysfs_net(tmpdir_factory): + """Avoid tests which read the underlying host's /sys/class/net.""" mock_sysfs = f"{tmpdir_factory.mktemp('sysfs')}/" with mock.patch( "cloudinit.net.get_sys_class_path", return_value=mock_sysfs @@ -82,6 +103,15 @@ def disable_sysfs_net(request, tmpdir_factory): yield mock_sysfs +@pytest.fixture(scope="class") +def disable_netdev_info(request): + """Avoid tests which read the underlying host's /sys/class/net.""" + with mock.patch( + "cloudinit.netinfo.netdev_info", return_value=example_netdev + ) as mock_netdev: + yield mock_netdev + @pytest.fixture(autouse=True) def disable_dns_lookup(request): if "allow_dns_lookup" in request.keywords: @@ -110,12 +140,21 @@ def dhclient_exists(): log.configure_root_logger() -@pytest.fixture(autouse=True) -def disable_root_logger_setup(request): - with mock.patch("cloudinit.cmd.main.configure_root_logger", autospec=True): +@pytest.fixture(autouse=True, scope="session") +def disable_root_logger_setup(): + with mock.patch("cloudinit.log.configure_root_logger", autospec=True): yield +@pytest.fixture +def clear_deprecation_log(): + """Clear any deprecation warnings before running tests.""" + # Since deprecations are de-duped, the existence (or non-existence) of + # a deprecation warning in a previous test can cause the next test to + # fail. + setattr(util.deprecate, "log", set()) + PYTEST_VERSION_TUPLE = tuple(map(int, pytest.__version__.split("."))) if PYTEST_VERSION_TUPLE < (3, 9, 0): @@ -130,3 +169,19 @@ def tmp_path(tmpdir): settings.register_profile("ci", max_examples=1000) settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "default")) + + +@pytest.fixture +def m_log_paths(mocker, tmp_path): + """Define logs.LogPaths for testing and mock get_log_paths with it.""" + paths = logs.LogPaths( + userdata_raw=tmp_path / "userdata_raw", + cloud_data=tmp_path / "cloud_data", + run_dir=tmp_path / "run_dir", + instance_data_sensitive=tmp_path + / "run_dir" + / "instance_data_sensitive", + ) + pathlib.Path(paths.run_dir).mkdir() + mocker.patch.object(logs, "get_log_paths", return_value=paths) + yield paths diff --git a/tests/unittests/distros/package_management/test_apt.py b/tests/unittests/distros/package_management/test_apt.py index 570bd7df0..5c039a230 100644 --- a/tests/unittests/distros/package_management/test_apt.py +++ b/tests/unittests/distros/package_management/test_apt.py @@ -1,13 +1,18 @@ # This file is part of cloud-init. See LICENSE file for license information. +import tempfile from itertools import count, cycle from unittest import mock import pytest -from cloudinit import subp +from cloudinit import helpers, subp +from cloudinit.distros.package_management import apt from cloudinit.distros.package_management.apt import APT_GET_COMMAND, Apt +from tests.unittests.helpers import get_mock_paths +from tests.unittests.util import FakeDataSource M_PATH = "cloudinit.distros.package_management.apt.Apt." 
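+# Clarifying comment (not in the original diff): module-level temporary
+# directory backing the patched APT_LOCK_FILES in the test class below.
+# TemporaryDirectory removes itself automatically at interpreter exit, so
+# individual tests do not need to call cleanup().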
+TMP_DIR = tempfile.TemporaryDirectory() @mock.patch.dict("os.environ", {}, clear=True) @@ -112,3 +117,44 @@ def test_search_stem(self, m_subp, m_which, mocker): "pkg5^", ], ) + + +@mock.patch.object( + apt, + "APT_LOCK_FILES", + [f"{TMP_DIR.name}/{FILE}" for FILE in apt.APT_LOCK_FILES], +) +class TestUpdatePackageSources: + # pytest does not collect test classes that define __init__, so build + # the shared MockPaths in setup_method instead. + def setup_method(self): + MockPaths = get_mock_paths(TMP_DIR.name) + self.MockPaths = MockPaths({}, FakeDataSource()) + + @mock.patch.object(apt.subp, "which", return_value=True) + @mock.patch.object(apt.subp, "subp") + def test_force_update_calls_twice(self, m_subp, m_which): + """Ensure that force=true calls apt update again""" + instance = apt.Apt(helpers.Runners(self.MockPaths)) + instance.update_package_sources() + instance.update_package_sources(force=True) + assert 2 == len(m_subp.call_args_list) + + @mock.patch.object(apt.subp, "which", return_value=True) + @mock.patch.object(apt.subp, "subp") + def test_force_update_twice_calls_twice(self, m_subp, m_which): + """Ensure that force=true calls apt update again when called twice""" + instance = apt.Apt(helpers.Runners(self.MockPaths)) + instance.update_package_sources(force=True) + instance.update_package_sources(force=True) + assert 2 == len(m_subp.call_args_list) + + @mock.patch.object(apt.subp, "which", return_value=True) + @mock.patch.object(apt.subp, "subp") + def test_no_force_update_calls_once(self, m_subp, m_which): + """Ensure that apt-get update calls are deduped unless expected""" + instance = apt.Apt(helpers.Runners(self.MockPaths)) + instance.update_package_sources() + instance.update_package_sources() + assert 1 == len(m_subp.call_args_list) diff --git a/tests/unittests/distros/test__init__.py b/tests/unittests/distros/test__init__.py index 49401086a..39583b137 100644 --- a/tests/unittests/distros/test__init__.py +++ b/tests/unittests/distros/test__init__.py @@ -55,6 +55,8 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): + with_logs = True + def setUp(self): super(TestGenericDistro, self).setUp() # Make a temp directory for tests to use. 
@@ -231,6 +233,47 @@ def test_sudoers_ensure_append(self): self.assertTrue(os.path.isdir("/b")) self.assertIn("josh", contents) self.assertEqual(2, contents.count("josh")) + self.assertIn( + "Added '#includedir /b' to /etc/sudoers", self.logs.getvalue() + ) + + def test_sudoers_ensure_append_sudoer_file(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + util.write_file("/etc/sudoers", "josh, josh\n") + d.ensure_sudo_dir("/b", "/etc/sudoers") + contents = util.load_text_file("/etc/sudoers") + self.assertIn("includedir /b", contents) + self.assertTrue(os.path.isdir("/b")) + self.assertIn("josh", contents) + self.assertEqual(2, contents.count("josh")) + + def test_usr_sudoers_ensure_new(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + util.write_file("/usr/etc/sudoers", "josh, josh\n") + d.ensure_sudo_dir("/b") + contents = util.load_text_file("/etc/sudoers") + self.assertIn("josh", contents) + self.assertEqual(2, contents.count("josh")) + self.assertIn("includedir /b", contents) + self.assertTrue(os.path.isdir("/b")) + self.assertIn( + "Using content from '/usr/etc/sudoers", self.logs.getvalue() + ) + + def test_usr_sudoers_ensure_no_etc_create_when_include_in_usr_etc(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + util.write_file("/usr/etc/sudoers", "#includedir /b") + d.ensure_sudo_dir("/b") + self.assertTrue(not os.path.exists("/etc/sudoers")) def test_sudoers_ensure_only_one_includedir(self): cls = distros.fetch("ubuntu") diff --git a/tests/unittests/distros/test_alpine.py b/tests/unittests/distros/test_alpine.py new file mode 100644 index 000000000..2c8daaeb1 --- /dev/null +++ b/tests/unittests/distros/test_alpine.py @@ -0,0 +1,78 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from unittest import mock + +import pytest + +from cloudinit import distros, util +from tests.unittests.helpers import TestCase + + +class TestAlpineBusyboxUserGroup: + @mock.patch("cloudinit.distros.alpine.subp.subp") + @mock.patch("cloudinit.distros.subp.which", return_value=False) + def test_busybox_add_group(self, m_which, m_subp): + distro = distros.fetch("alpine")("alpine", {}, None) + + group = "mygroup" + + distro.create_group(group) + + m_subp.assert_called_with(["addgroup", group]) + + @pytest.mark.usefixtures("fake_filesystem") + @mock.patch("cloudinit.distros.alpine.subp.subp") + @mock.patch("cloudinit.distros.subp.which", return_value=False) + def test_busybox_add_user(self, m_which, m_subp, tmpdir): + distro = distros.fetch("alpine")("alpine", {}, None) + + shadow_file = tmpdir.join("/etc/shadow") + shadow_file.dirpath().mkdir() + + user = "me2" + + # Need to place entry for user in /etc/shadow as + # "adduser" is stubbed and so will not create it. 
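+        # add_user(lock_passwd=True) is expected to rewrite this entry and
+        # blank the password-aging fields, as the assertion below checks.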
+ root_entry = "root::19848:0:::::" + shadow_file.write( + root_entry + "\n" + user + ":!:19848:0:99999:7:::" + "\n" + ) + + distro.shadow_fn = shadow_file + + distro.add_user(user, lock_passwd=True) + + m_subp.assert_called_with(["adduser", "-D", user]) + + contents = util.load_text_file(shadow_file) + expected = root_entry + "\n" + user + ":!:19848::::::" + "\n" + + assert contents == expected + + +class TestAlpineShadowUserGroup(TestCase): + distro = distros.fetch("alpine")("alpine", {}, None) + + @mock.patch("cloudinit.distros.alpine.subp.subp") + @mock.patch( + "cloudinit.distros.subp.which", return_value=("/usr/sbin/groupadd") + ) + def test_shadow_add_group(self, m_which, m_subp): + group = "mygroup" + + self.distro.create_group(group) + + m_subp.assert_called_with(["groupadd", group]) + + @mock.patch("cloudinit.distros.alpine.subp.subp") + @mock.patch( + "cloudinit.distros.subp.which", return_value=("/usr/sbin/useradd") + ) + def test_shadow_add_user(self, m_which, m_subp): + user = "me2" + + self.distro.add_user(user) + + m_subp.assert_called_with( + ["useradd", user, "-m"], logstring=["useradd", user, "-m"] + ) diff --git a/tests/unittests/distros/test_azurelinux.py b/tests/unittests/distros/test_azurelinux.py new file mode 100644 index 000000000..03c895bc2 --- /dev/null +++ b/tests/unittests/distros/test_azurelinux.py @@ -0,0 +1,25 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from tests.unittests.helpers import CiTestCase + +from . import _get_distro + +SYSTEM_INFO = { + "paths": { + "cloud_dir": "/var/lib/cloud/", + "templates_dir": "/etc/cloud/templates/", + }, + "network": {"renderers": "networkd"}, +} + + +class TestAzurelinux(CiTestCase): + with_logs = True + distro = _get_distro("azurelinux", SYSTEM_INFO) + expected_log_line = "Rely on Azure Linux default network config" + + def test_network_renderer(self): + self.assertEqual(self.distro._cfg["network"]["renderers"], "networkd") + + def test_get_distro(self): + self.assertEqual(self.distro.osfamily, "azurelinux") diff --git a/tests/unittests/distros/test_create_users.py b/tests/unittests/distros/test_create_users.py index 039723aaa..8fa7f0cc0 100644 --- a/tests/unittests/distros/test_create_users.py +++ b/tests/unittests/distros/test_create_users.py @@ -4,7 +4,8 @@ import pytest -from cloudinit import distros, ssh_util +from cloudinit import distros, features, ssh_util +from cloudinit.util import should_log_deprecation from tests.unittests.helpers import mock from tests.unittests.util import abstract_to_concrete @@ -142,7 +143,14 @@ def test_create_groups_with_dict_deprecated( ] assert m_subp.call_args_list == expected - assert caplog.records[0].levelname in ["WARNING", "DEPRECATED"] + expected_levels = ( + ["WARNING", "DEPRECATED"] + if should_log_deprecation( + "23.1", features.DEPRECATION_INFO_BOUNDARY + ) + else ["INFO"] + ) + assert caplog.records[0].levelname in expected_levels assert ( "The user foo_user has a 'groups' config value of type dict" in caplog.records[0].message @@ -170,11 +178,18 @@ def test_explicit_sudo_false(self, m_subp, dist, caplog): mock.call(["passwd", "-l", USER]), ] - assert caplog.records[1].levelname in ["WARNING", "DEPRECATED"] + expected_levels = ( + ["WARNING", "DEPRECATED"] + if should_log_deprecation( + "22.2", features.DEPRECATION_INFO_BOUNDARY + ) + else ["INFO"] + ) + assert caplog.records[1].levelname in expected_levels assert ( "The value of 'false' in user foo_user's 'sudo' " - "config is deprecated in 22.3 and scheduled to be removed" - " in 
27.3. Use 'null' instead." + "config is deprecated in 22.2 and scheduled to be removed" + " in 27.2. Use 'null' instead." ) in caplog.text def test_explicit_sudo_none(self, m_subp, dist, caplog): diff --git a/tests/unittests/distros/test_freebsd.py b/tests/unittests/distros/test_freebsd.py index 70f2c7c69..c4c067ead 100644 --- a/tests/unittests/distros/test_freebsd.py +++ b/tests/unittests/distros/test_freebsd.py @@ -65,6 +65,32 @@ def test_find_freebsd_part_gpt(self, mock_subp): res = find_freebsd_part("/dev/gpt/rootfs") self.assertEqual("vtbd0p3", res) + @mock.patch("cloudinit.subp.subp") + def test_find_freebsd_part_gptid(self, mock_subp): + glabel_out = """ + gpt/bootfs N/A vtbd0p1 + gpt/efiesp N/A vtbd0p2 + gpt/swapfs N/A vtbd0p3 +gptid/4cd084b4-7fb4-11ee-a7ba-002590ec5bf2 N/A vtbd0p4 +""" + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part( + "/dev/gptid/4cd084b4-7fb4-11ee-a7ba-002590ec5bf2" + ) + self.assertEqual("vtbd0p4", res) + + @mock.patch("cloudinit.subp.subp") + def test_find_freebsd_part_ufsid(self, mock_subp): + glabel_out = """ + gpt/bootfs N/A vtbd0p1 + gpt/efiesp N/A vtbd0p2 + gpt/swapfs N/A vtbd0p3 + ufsid/654e0663786f5131 N/A vtbd0p4 +""" + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/ufsid/654e0663786f5131") + self.assertEqual("vtbd0p4", res) + def test_get_path_dev_freebsd_label(self): mnt_list = """ /dev/label/rootfs / ufs rw 1 1 diff --git a/tests/unittests/distros/test_ifconfig.py b/tests/unittests/distros/test_ifconfig.py index c5c1dee90..f29504459 100644 --- a/tests/unittests/distros/test_ifconfig.py +++ b/tests/unittests/distros/test_ifconfig.py @@ -28,6 +28,13 @@ def test_is_vlan(self): ifs = Ifconfig().parse(self.ifs_txt) assert ifs["re0.33"].is_vlan + def test_netmask(self): + ifs = Ifconfig().parse(self.ifs_txt) + # netmasks are normalized from non-contiguous hex bitmasks to + # contiguous decimal bitmasks + assert ifs["bridge0"].inet["192.168.1.1"]["netmask"] == "255.255.255.0" + assert ifs["lo0"].inet["127.0.0.1"]["netmask"] == "255.0.0.0" + def test_description(self): """assert vnet0:11 is associated with jail: webirc""" ifs = Ifconfig().parse(self.ifs_txt) @@ -55,6 +62,24 @@ def test_duplicate_mac(self): ) +class TestIfconfigParserFreeBSDCIDR(TestCase): + def setUp(self): + super(TestIfconfigParserFreeBSDCIDR, self).setUp() + self.ifs_txt = readResource("netinfo/freebsd-ifconfig-cidr-output") + + def test_parse_freebsd(self): + """assert parsing works without any exceptions""" + Ifconfig().parse(self.ifs_txt) + + def test_netmask(self): + ifs = Ifconfig().parse(self.ifs_txt) + # netmasks are normalized from CIDR to contiguous decimal bitmasks + assert ( + ifs["vtnet0"].inet["198.51.100.13"]["netmask"] == "255.255.255.255" + ) + assert ifs["lo0"].inet["127.0.0.1"]["netmask"] == "255.0.0.0" + + class TestIfconfigParserOpenBSD(TestCase): def setUp(self): super(TestIfconfigParserOpenBSD, self).setUp() diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py index dfee37a8a..bf2d7b979 100644 --- a/tests/unittests/distros/test_netconfig.py +++ b/tests/unittests/distros/test_netconfig.py @@ -7,15 +7,9 @@ from textwrap import dedent from unittest import mock -from cloudinit import ( - distros, - features, - helpers, - safeyaml, - settings, - subp, - util, -) +import yaml + +from cloudinit import distros, features, helpers, settings, subp, util from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit.net.activators import IfUpDownActivator from 
tests.unittests.helpers import ( @@ -303,7 +297,7 @@ def setUp(self): def _get_distro(self, dname, renderers=None, activators=None): cls = distros.fetch(dname) - cfg = settings.CFG_BUILTIN + cfg = copy.deepcopy(settings.CFG_BUILTIN) cfg["system_info"]["distro"] = dname system_info_network_cfg = {} if renderers: @@ -735,7 +729,6 @@ def test_apply_network_config_rh(self): GATEWAY=192.168.1.254 IPADDR=192.168.1.5 NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -745,7 +738,6 @@ def test_apply_network_config_rh(self): """\ BOOTPROTO=dhcp DEVICE=eth1 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -776,7 +768,6 @@ def test_apply_network_config_ipv6_rh(self): IPV6_AUTOCONF=no IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 IPV6_FORCE_ACCEPT_RA=no - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -786,7 +777,6 @@ def test_apply_network_config_ipv6_rh(self): """\ BOOTPROTO=dhcp DEVICE=eth1 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -833,7 +823,6 @@ def test_vlan_render_unsupported(self): HWADDR=00:16:3e:60:7c:df IPADDR=192.10.1.2 NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -845,7 +834,6 @@ def test_vlan_render_unsupported(self): DEVICE=infra0 IPADDR=10.0.1.2 NETMASK=255.255.0.0 - NM_CONTROLLED=no ONBOOT=yes PHYSDEV=eth0 USERCTL=no @@ -881,7 +869,6 @@ def test_vlan_render(self): DEVICE=eth0 IPADDR=192.10.1.2 NETMASK=255.255.255.0 - NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -893,7 +880,6 @@ def test_vlan_render(self): DEVICE=eth0.1001 IPADDR=10.0.1.2 NETMASK=255.255.0.0 - NM_CONTROLLED=no ONBOOT=yes PHYSDEV=eth0 USERCTL=no @@ -1172,7 +1158,7 @@ def test_photon_network_config_v1_with_duplicates(self): [Address] Address=192.168.0.102/24""" - net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS) + net_cfg = yaml.safe_load(V1_NET_CFG_WITH_DUPS) expected = self.create_conf_dict(expected.splitlines()) expected_cfgs = { @@ -1297,7 +1283,132 @@ def test_mariner_network_config_v1_with_duplicates(self): [Address] Address=192.168.0.102/24""" - net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS) + net_cfg = yaml.safe_load(V1_NET_CFG_WITH_DUPS) + + expected = self.create_conf_dict(expected.splitlines()) + expected_cfgs = { + self.nwk_file_path("eth0"): expected, + } + + self._apply_and_verify( + self.distro.apply_network_config, net_cfg, expected_cfgs.copy() + ) + + +class TestNetCfgDistroAzureLinux(TestNetCfgDistroBase): + def setUp(self): + super().setUp() + self.distro = self._get_distro("azurelinux", renderers=["networkd"]) + + def create_conf_dict(self, contents): + content_dict = {} + for line in contents: + if line: + line = line.strip() + if line and re.search(r"^\[(.+)\]$", line): + content_dict[line] = [] + key = line + elif line: + assert key + content_dict[key].append(line) + + return content_dict + + def compare_dicts(self, actual, expected): + for k, v in actual.items(): + self.assertEqual(sorted(expected[k]), sorted(v)) + + def _apply_and_verify( + self, apply_fn, config, expected_cfgs=None, bringup=False + ): + if not expected_cfgs: + raise ValueError("expected_cfg must not be None") + + tmpd = None + with mock.patch("cloudinit.net.networkd.available") as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + actual = self.create_conf_dict(results[cfgpath].splitlines()) + self.compare_dicts(actual, expected) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def 
nwk_file_path(self, ifname): + return "/etc/systemd/network/10-cloud-init-%s.network" % ifname + + def net_cfg_1(self, ifname): + ret = ( + """\ + [Match] + Name=%s + [Network] + DHCP=no + [Address] + Address=192.168.1.5/24 + [Route] + Gateway=192.168.1.254""" + % ifname + ) + return ret + + def net_cfg_2(self, ifname): + ret = ( + """\ + [Match] + Name=%s + [Network] + DHCP=ipv4""" + % ifname + ) + return ret + + def test_azurelinux_network_config_v1(self): + tmp = self.net_cfg_1("eth0").splitlines() + expected_eth0 = self.create_conf_dict(tmp) + + tmp = self.net_cfg_2("eth1").splitlines() + expected_eth1 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path("eth0"): expected_eth0, + self.nwk_file_path("eth1"): expected_eth1, + } + + self._apply_and_verify( + self.distro.apply_network_config, V1_NET_CFG, expected_cfgs.copy() + ) + + def test_azurelinux_network_config_v2(self): + tmp = self.net_cfg_1("eth7").splitlines() + expected_eth7 = self.create_conf_dict(tmp) + + tmp = self.net_cfg_2("eth9").splitlines() + expected_eth9 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path("eth7"): expected_eth7, + self.nwk_file_path("eth9"): expected_eth9, + } + + self._apply_and_verify( + self.distro.apply_network_config, V2_NET_CFG, expected_cfgs.copy() + ) + + def test_azurelinux_network_config_v1_with_duplicates(self): + expected = """\ + [Match] + Name=eth0 + [Network] + DHCP=no + DNS=1.2.3.4 + Domains=test.com + [Address] + Address=192.168.0.102/24""" + + net_cfg = yaml.safe_load(V1_NET_CFG_WITH_DUPS) expected = self.create_conf_dict(expected.splitlines()) expected_cfgs = { diff --git a/tests/unittests/distros/test_networking.py b/tests/unittests/distros/test_networking.py index 9a8cf507c..e80b88586 100644 --- a/tests/unittests/distros/test_networking.py +++ b/tests/unittests/distros/test_networking.py @@ -5,9 +5,9 @@ from unittest import mock import pytest +import yaml from cloudinit import net -from cloudinit import safeyaml as yaml from cloudinit.distros.networking import ( BSDNetworking, LinuxNetworking, @@ -143,7 +143,7 @@ def test_calls_subp_return_true(self, m_subp, m_is_up): is_success = LinuxNetworking().try_set_link_up(devname) assert ( - mock.call(["ip", "link", "set", devname, "up"]) + mock.call(["ip", "link", "set", "dev", devname, "up"]) == m_subp.call_args_list[-1] ) assert is_success @@ -154,7 +154,7 @@ def test_calls_subp_return_false(self, m_subp, m_is_up): is_success = LinuxNetworking().try_set_link_up(devname) assert ( - mock.call(["ip", "link", "set", devname, "up"]) + mock.call(["ip", "link", "set", "dev", devname, "up"]) == m_subp.call_args_list[-1] ) assert not is_success @@ -356,7 +356,7 @@ def test_apply_renames( networking = LinuxNetworking() m_device_driver.return_value = "virtio_net" m_device_devid.return_value = "0x15d8" - netcfg = yaml.load(getattr(self, config_attr)) + netcfg = yaml.safe_load(getattr(self, config_attr)) with mock.patch.object( networking, "_rename_interfaces" @@ -381,7 +381,7 @@ def test_apply_v2_renames_skips_without_setname_or_mac( self, config_attr: str ): networking = LinuxNetworking() - netcfg = yaml.load(getattr(self, config_attr)) + netcfg = yaml.safe_load(getattr(self, config_attr)) with mock.patch.object( networking, "_rename_interfaces" ) as m_rename_interfaces: @@ -391,4 +391,4 @@ def test_apply_v2_renames_skips_without_setname_or_mac( def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self): networking = LinuxNetworking() with pytest.raises(RuntimeError): - 
networking.apply_network_config_names(yaml.load("version: 3")) + networking.apply_network_config_names(yaml.safe_load("version: 3")) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index bed234dfb..ab97973e0 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import functools import io @@ -53,6 +54,41 @@ "(is too short|should be non-empty|does not have enough properties)" ) +example_netdev = { + "eth0": { + "hwaddr": "00:16:3e:16:db:54", + "ipv4": [ + { + "bcast": "10.85.130.255", + "ip": "10.85.130.116", + "mask": "255.255.255.0", + "scope": "global", + } + ], + "ipv6": [ + { + "ip": "fd42:baa2:3dd:17a:216:3eff:fe16:db54/64", + "scope6": "global", + }, + {"ip": "fe80::216:3eff:fe16:db54/64", "scope6": "link"}, + ], + "up": True, + }, + "lo": { + "hwaddr": "", + "ipv4": [ + { + "bcast": "", + "ip": "127.0.0.1", + "mask": "255.0.0.0", + "scope": "host", + } + ], + "ipv6": [{"ip": "::1/128", "scope6": "host"}], + "up": True, + }, +} + # Makes the old path start # with new base instead of whatever @@ -161,6 +197,7 @@ def setUp(self): self.old_handlers = self.logger.handlers self.logger.handlers = [handler] self.old_level = logging.root.level + self.logger.level = logging.DEBUG if self.allowed_subp is True: subp.subp = _real_subp else: @@ -236,11 +273,8 @@ def tmp_cloud(self, distro, sys_cfg=None, metadata=None): self.new_root = self.tmp_dir() if not sys_cfg: sys_cfg = {} - tmp_paths = {} - for var in ["templates_dir", "run_dir", "cloud_dir"]: - tmp_paths[var] = self.tmp_path(var, dir=self.new_root) - util.ensure_dir(tmp_paths[var]) - self.paths = ch.Paths(tmp_paths) + MockPaths = get_mock_paths(self.new_root) + self.paths = MockPaths({}) cls = distros.fetch(distro) mydist = cls(distro, sys_cfg, self.paths) myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, self.paths) @@ -427,6 +461,24 @@ def _ensure_url_default_path(url): ) +def get_mock_paths(temp_dir): + class MockPaths(ch.Paths): + def __init__(self, path_cfgs: dict, ds=None): + super().__init__(path_cfgs=path_cfgs, ds=ds) + + self.cloud_dir: str = path_cfgs.get( + "cloud_dir", f"{temp_dir}/var/lib/cloud" + ) + self.run_dir: str = path_cfgs.get( + "run_dir", f"{temp_dir}/run/cloud/" + ) + self.template_dir: str = path_cfgs.get( + "templates_dir", f"{temp_dir}/etc/cloud/templates/" + ) + + return MockPaths + + class ResponsesTestCase(CiTestCase): def setUp(self): super().setUp() diff --git a/tests/unittests/net/network_configs.py b/tests/unittests/net/network_configs.py new file mode 100644 index 000000000..12846d335 --- /dev/null +++ b/tests/unittests/net/network_configs.py @@ -0,0 +1,4925 @@ +"""A (hopefully) temporary home for network config test data.""" + +import textwrap + +NETWORK_CONFIGS = { + "small_suse_dhcp6": { + "expected_sysconfig_opensuse": { + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=cf:d6:af:48:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DHCLIENT6_MODE=managed + LLADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + STARTMODE=auto""" + ), + }, + "yaml_v1": textwrap.dedent( + """ + version: 1 + config: + # Physical interfaces. 
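+        # eth99 stacks dhcp4, dhcp6 and a static subnet on a single
+        # MAC-matched NIC; eth1 below is matched by MAC only and carries
+        # no subnets, while a global 'nameserver' entry supplies
+        # host-wide DNS.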
+ - type: physical + name: eth99 + mac_address: c0:d6:9f:2c:e8:80 + subnets: + - type: dhcp4 + - type: dhcp6 + - type: static + address: 192.168.21.3/24 + dns_nameservers: + - 8.8.8.8 + - 8.8.4.4 + dns_search: barley.maas sach.maas + routes: + - gateway: 65.61.151.37 + netmask: 0.0.0.0 + network: 0.0.0.0 + metric: 10000 + - type: physical + name: eth1 + mac_address: cf:d6:af:48:e8:80 + - type: nameserver + address: + - 1.2.3.4 + - 5.6.7.8 + search: + - wark.maas + """ + ), + "yaml_v2": textwrap.dedent( + """ + version: 2 + ethernets: + eth1: + match: + macaddress: cf:d6:af:48:e8:80 + set-name: eth1 + eth99: + dhcp4: true + dhcp6: true + addresses: + - 192.168.21.3/24 + match: + macaddress: c0:d6:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 8.8.4.4 + search: + - barley.maas + - sach.maas + routes: + - metric: 10000 + to: 0.0.0.0/0 + via: 65.61.151.37 + set-name: eth99 + """ + ), + }, + "small_v1": { + "expected_networkd_eth99": textwrap.dedent( + """\ + [Match] + Name=eth99 + MACAddress=c0:d6:9f:2c:e8:80 + [Address] + Address=192.168.21.3/24 + [Network] + DHCP=ipv4 + Domains=barley.maas sach.maas + Domains=wark.maas + DNS=1.2.3.4 5.6.7.8 + DNS=8.8.8.8 8.8.4.4 + [Route] + Gateway=65.61.151.37 + Destination=0.0.0.0/0 + Metric=10000 + """ + ).rstrip(" "), + "expected_networkd_eth1": textwrap.dedent( + """\ + [Match] + Name=eth1 + MACAddress=cf:d6:af:48:e8:80 + [Network] + DHCP=no + Domains=wark.maas + DNS=1.2.3.4 5.6.7.8 + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + dns-nameservers 1.2.3.4 5.6.7.8 + dns-search wark.maas + + iface eth1 inet manual + + auto eth99 + iface eth99 inet dhcp + + # control-alias eth99 + iface eth99 inet static + address 192.168.21.3/24 + dns-nameservers 8.8.8.8 8.8.4.4 + dns-search barley.maas sach.maas + post-up route add default gw 65.61.151.37 metric 10000 || true + pre-down route del default gw 65.61.151.37 metric 10000 || true + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + eth1: + match: + macaddress: cf:d6:af:48:e8:80 + set-name: eth1 + eth99: + addresses: + - 192.168.21.3/24 + dhcp4: true + match: + macaddress: c0:d6:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 8.8.4.4 + search: + - barley.maas + - sach.maas + routes: + - metric: 10000 + to: 0.0.0.0/0 + via: 65.61.151.37 + set-name: eth99 + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=cf:d6:af:48:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + LLADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + STARTMODE=auto""" + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth1 + HWADDR=cf:d6:af:48:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEFROUTE=yes + DEVICE=eth99 + DHCLIENT_SET_DEFAULT_ROUTE=yes + DNS1=8.8.8.8 + DNS2=8.8.4.4 + DOMAIN="barley.maas sach.maas" + GATEWAY=65.61.151.37 + HWADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + METRIC=10000 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + }, + "expected_network_manager": { + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=CF:D6:AF:48:E8:80 + + """ + ), + "cloud-init-eth99.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth99 + uuid=b1b88000-1f03-5360-8377-1a2205efffb4 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:D6:9F:2C:E8:80 + + [ipv4] + method=auto + may-fail=false + address1=192.168.21.3/24 + route1=0.0.0.0/0,65.61.151.37 + dns=8.8.8.8;8.8.4.4; + dns-search=barley.maas;sach.maas; + + """ + ), + }, + "yaml": textwrap.dedent( + """ + version: 1 + config: + # Physical interfaces. + - type: physical + name: eth99 + mac_address: c0:d6:9f:2c:e8:80 + subnets: + - type: dhcp4 + - type: static + address: 192.168.21.3/24 + dns_nameservers: + - 8.8.8.8 + - 8.8.4.4 + dns_search: barley.maas sach.maas + routes: + - gateway: 65.61.151.37 + netmask: 0.0.0.0 + network: 0.0.0.0 + metric: 10000 + - type: physical + name: eth1 + mac_address: cf:d6:af:48:e8:80 + - type: nameserver + address: + - 1.2.3.4 + - 5.6.7.8 + search: + - wark.maas + """ + ), + }, + # We test a separate set of configs here because v2 doesn't support + # generic nameservers, so that aspect needs to be modified + "small_v2": { + "expected_networkd_eth99": textwrap.dedent( + """\ + [Match] + Name=eth99 + MACAddress=c0:d6:9f:2c:e8:80 + [Address] + Address=192.168.21.3/24 + [Network] + DHCP=ipv4 + Domains=barley.maas sach.maas + DNS=8.8.8.8 8.8.4.4 + [Route] + Gateway=65.61.151.37 + Destination=0.0.0.0/0 + Metric=10000 + """ + ).rstrip(" "), + "expected_networkd_eth1": textwrap.dedent( + """\ + [Match] + Name=eth1 + MACAddress=cf:d6:af:48:e8:80 + [Network] + DHCP=no + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + iface eth1 inet manual + + auto eth99 + iface eth99 inet dhcp + + # control-alias eth99 + iface eth99 inet static + address 192.168.21.3/24 + dns-nameservers 8.8.8.8 8.8.4.4 + dns-search barley.maas sach.maas + post-up route add default gw 65.61.151.37 metric 10000 || true + pre-down route del default gw 65.61.151.37 metric 10000 || true + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=cf:d6:af:48:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + LLADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + STARTMODE=auto""" + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth1 + HWADDR=cf:d6:af:48:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEFROUTE=yes + DEVICE=eth99 + DHCLIENT_SET_DEFAULT_ROUTE=yes + DNS1=8.8.8.8 + DNS2=8.8.4.4 + DOMAIN="barley.maas sach.maas" + GATEWAY=65.61.151.37 + HWADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + METRIC=10000 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + }, + "expected_network_manager": { + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=CF:D6:AF:48:E8:80 + + """ + ), + "cloud-init-eth99.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth99 + uuid=b1b88000-1f03-5360-8377-1a2205efffb4 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:D6:9F:2C:E8:80 + + [ipv4] + method=auto + may-fail=false + route1=0.0.0.0/0,65.61.151.37 + address1=192.168.21.3/24 + dns=8.8.8.8;8.8.4.4; + dns-search=barley.maas;sach.maas; + + """ + ), + }, + "yaml": textwrap.dedent( + """ + version: 2 + ethernets: + eth1: + match: + macaddress: cf:d6:af:48:e8:80 + set-name: eth1 + eth99: + addresses: + - 192.168.21.3/24 + dhcp4: true + match: + macaddress: c0:d6:9f:2c:e8:80 + nameservers: + addresses: + - 8.8.8.8 + - 8.8.4.4 + search: + - barley.maas + - sach.maas + routes: + - metric: 10000 + to: 0.0.0.0/0 + via: 65.61.151.37 + set-name: eth99 + """ + ), + }, + "v4_and_v6": { + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Network] + DHCP=yes + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet dhcp + + # control-alias iface0 + iface iface0 inet6 dhcp + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + dhcp4: true + dhcp6: true + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DHCLIENT6_MODE=managed + STARTMODE=auto""" + ) + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=auto + may-fail=true + + [ipv6] + method=auto + may-fail=true + + """ + ), + }, + "yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp4'} + - {'type': 'dhcp6'} + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp4: true + dhcp6: true + """ + ), + }, + "v1_ipv4_and_ipv6_static": { + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Link] + MTUBytes=8999 + [Network] + DHCP=no + [Address] + Address=192.168.14.2/24 + [Address] + Address=2001:1::1/64 + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet static + address 192.168.14.2/24 + mtu 9000 + + # control-alias iface0 + iface iface0 inet6 static + address 2001:1::1/64 + mtu 1500 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + ipv6-mtu: 1500 + mtu: 9000 + """ + ).rstrip(" "), + "yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + mtu: 8999 + subnets: + - type: static + address: 192.168.14.2/24 + mtu: 9000 + - type: static + address: 2001:1::1/64 + mtu: 1500 + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 + NETMASK=255.255.255.0 + STARTMODE=auto + MTU=9000 + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + IPADDR=192.168.14.2 + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + NETMASK=255.255.255.0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + MTU=9000 + IPV6_MTU=1500 + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=9000 + + [ipv4] + method=manual + may-fail=false + address1=192.168.14.2/24 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::1/64 + + """ + ), + }, + }, + "v2_ipv4_and_ipv6_static": { + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + mtu: 9000 + """ + ).rstrip(" "), + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Link] + MTUBytes=9000 + [Network] + DHCP=no + [Address] + Address=192.168.14.2/24 + [Address] + Address=2001:1::1/64 + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet static + address 192.168.14.2/24 + mtu 9000 + + # control-alias iface0 + iface iface0 inet6 static + address 2001:1::1/64 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + mtu: 9000 + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 + NETMASK=255.255.255.0 + STARTMODE=auto + MTU=9000 + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + IPADDR=192.168.14.2 + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + NETMASK=255.255.255.0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + MTU=9000 + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=9000 + + [ipv4] + method=manual + may-fail=false + address1=192.168.14.2/24 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::1/64 + + """ + ), + }, + }, + "v6_and_v4": { + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DHCLIENT6_MODE=managed + STARTMODE=auto""" + ) + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv6] + method=auto + may-fail=true + + [ipv4] + method=auto + may-fail=true + + """ + ), + }, + "yaml": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - type: dhcp6 + - type: dhcp4 + """ + ).rstrip(" "), + # Do not include a yaml_v2 here as it renders exactly the same as + # the v4_and_v6 case, and that's fine + }, + "dhcpv6_only": { + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Network] + DHCP=ipv6 + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """ + ).rstrip(" "), + "yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp6: true + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv6] + method=auto + may-fail=false + + """ + ), + }, + }, + "dhcpv6_accept_ra": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 1 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + accept-ra: true + dhcp6: true + """ + ).rstrip(" "), + "yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: true + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: true + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=yes + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Network] + DHCP=ipv6 + IPv6AcceptRA=True + """ + ).rstrip(" "), + }, + "dhcpv6_reject_ra": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 0 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + accept-ra: false + dhcp6: true + """ + ).rstrip(" "), + 
"yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: false + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: false + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=no + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_networkd": textwrap.dedent( + """\ + [Match] + Name=iface0 + [Network] + DHCP=ipv6 + IPv6AcceptRA=False + """ + ).rstrip(" "), + }, + "ipv6_slaac": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + dhcp 0 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """ + ).rstrip(" "), + "yaml": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_slaac'} + """ + ).rstrip(" "), + # A yaml_v2 doesn't make sense here as the configuration looks exactly + # the same as the dhcpv6_only test. + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=info + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv6] + method=auto + may-fail=false + + [ipv4] + method=disabled + + """ + ), + }, + }, + "static6": { + "yaml_v1": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + accept-ra: 'no' + subnets: + - type: 'static6' + address: 2001:1::1/64 + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + accept-ra: false + addresses: + - 2001:1::1/64 + """ + ), + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + }, + "dhcpv6_stateless": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + dhcp 1 + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """ + ).rstrip(" "), + "yaml": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateless'} + """ + ).rstrip(" "), + # yaml_v2 makes no sense here as it would be the exact same + # configuration as the dhcpv6_only test + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=info + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + DHCPV6C_OPTIONS=-S + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv6] + method=auto + may-fail=false + + [ipv4] + method=disabled + + """ + ), + }, + }, + "dhcpv6_stateful": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + iface0: + accept-ra: true + dhcp6: true + """ + ).rstrip(" "), + "yaml": textwrap.dedent( + """\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateful'} + accept-ra: true + """ + ).rstrip(" "), + # yaml_v2 makes no sense here as it would be the exact same + # configuration as the dhcpv6_only test + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FAILURE_FATAL=yes + IPV6_FORCE_ACCEPT_RA=yes + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + }, + "wakeonlan_disabled": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet dhcp + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + ethernets: + iface0: + dhcp4: true + wakeonlan: false + version: 2 + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=iface0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=auto + may-fail=false + + """ + ), + }, + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp4: true + wakeonlan: false + """ + ).rstrip(" "), + }, + "wakeonlan_enabled": { + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet dhcp + ethernet-wol g + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ + network: + ethernets: + iface0: + dhcp4: true + wakeonlan: true + version: 2 + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + ETHTOOL_OPTS="wol g" + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=iface0 + ETHTOOL_OPTS="wol g" + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-iface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init iface0 + uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 + autoconnect-priority=120 + type=ethernet + interface-name=iface0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + wake-on-lan=64 + + [ipv4] + method=auto + may-fail=false + + """ + ), + }, + "yaml_v2": textwrap.dedent( + """\ + version: 2 + ethernets: + iface0: + dhcp4: true + wakeonlan: true + """ + ).rstrip(" "), + }, + "large_v1": { + "expected_eni": """\ +auto lo +iface lo inet loopback + dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4 + dns-search barley.maas wark.maas foobar.maas + +iface eth0 inet manual + +auto eth1 +iface eth1 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto eth2 +iface eth2 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +iface eth3 inet manual + +iface eth4 inet manual + +# control-manual eth5 +iface eth5 inet dhcp + +auto ib0 +iface ib0 inet static + address 192.168.200.7/24 + mtu 9000 + hwaddress a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + +auto bond0 +iface bond0 inet6 dhcp + bond-mode active-backup + bond-slaves none + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + hwaddress aa:bb:cc:dd:ee:ff + +auto br0 +iface br0 inet static + address 192.168.14.2/24 + bridge_ageing 250 + bridge_bridgeprio 22 + bridge_fd 1 + bridge_gcint 2 + bridge_hello 1 + bridge_maxage 10 + bridge_pathcost eth3 50 + bridge_pathcost eth4 75 + bridge_portprio eth3 28 + bridge_portprio eth4 14 + bridge_ports eth3 eth4 + bridge_stp off + bridge_waitport 1 eth3 + bridge_waitport 2 eth4 + hwaddress bb:bb:bb:bb:bb:aa + +# control-alias br0 +iface br0 inet6 static + address 2001:1::1/64 + post-up route add -A inet6 default gw 2001:4800:78ff:1b::1 || true + pre-down route del -A inet6 default gw 2001:4800:78ff:1b::1 || true + +auto bond0.200 +iface bond0.200 inet dhcp + vlan-raw-device bond0 + vlan_id 200 + +auto eth0.101 +iface eth0.101 inet static + address 192.168.0.2/24 + dns-nameservers 192.168.0.10 10.23.23.134 + dns-search barley.maas sacchromyces.maas brettanomyces.maas + gateway 192.168.0.1 + mtu 1500 + hwaddress aa:bb:cc:dd:ee:11 + vlan-raw-device eth0 + vlan_id 101 + +# control-alias eth0.101 +iface eth0.101 inet static + address 192.168.2.10/24 + +post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +""", + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + eth0: + match: + macaddress: c0:d6:9f:2c:e8:80 + set-name: eth0 + eth1: + match: + macaddress: aa:d6:9f:2c:e8:80 + set-name: eth1 + eth2: + match: + macaddress: c0:bb:9f:2c:e8:80 + set-name: eth2 + eth3: + match: + macaddress: 66:bb:9f:2c:e8:80 + set-name: eth3 + eth4: + match: + macaddress: 98:bb:9f:2c:e8:80 + set-name: eth4 + eth5: + dhcp4: true + match: + macaddress: 98:bb:9f:2c:e8:8a + set-name: eth5 + bonds: + bond0: + dhcp6: true + interfaces: + - eth1 + - eth2 + macaddress: aa:bb:cc:dd:ee:ff + parameters: + mii-monitor-interval: 100 + mode: active-backup + transmit-hash-policy: layer3+4 + bridges: + br0: + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + interfaces: + - eth3 + - eth4 + macaddress: bb:bb:bb:bb:bb:aa + nameservers: + addresses: + - 8.8.8.8 + - 4.4.4.4 + - 8.8.4.4 + search: + - barley.maas + - wark.maas + - foobar.maas + parameters: + ageing-time: 250 + forward-delay: 1 + hello-time: 1 + max-age: 10 + path-cost: + eth3: 50 + eth4: 75 + 
port-priority: + eth3: 28 + eth4: 14 + priority: 22 + stp: false + routes: + - to: ::/0 + via: 2001:4800:78ff:1b::1 + vlans: + bond0.200: + dhcp4: true + id: 200 + link: bond0 + eth0.101: + addresses: + - 192.168.0.2/24 + - 192.168.2.10/24 + id: 101 + link: eth0 + macaddress: aa:bb:cc:dd:ee:11 + mtu: 1500 + nameservers: + addresses: + - 192.168.0.10 + - 10.23.23.134 + search: + - barley.maas + - sacchromyces.maas + - brettanomyces.maas + routes: + - to: 0.0.0.0/0 + via: 192.168.0.1 + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_MODULE_OPTS="mode=active-backup """ + """xmit_hash_policy=layer3+4 """ + """miimon=100" + BONDING_SLAVE_0=eth1 + BONDING_SLAVE_1=eth2 + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + LLADDR=aa:bb:cc:dd:ee:ff + STARTMODE=auto""" + ), + "ifcfg-bond0.200": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + ETHERDEVICE=bond0 + STARTMODE=auto + VLAN_ID=200""" + ), + "ifcfg-br0": textwrap.dedent( + """\ + BRIDGE_AGEINGTIME=250 + BOOTPROTO=static + IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 + LLADDRESS=bb:bb:bb:bb:bb:aa + NETMASK=255.255.255.0 + BRIDGE_PRIORITY=22 + BRIDGE_PORTS='eth3 eth4' + STARTMODE=auto + BRIDGE_STP=off""" + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=c0:d6:9f:2c:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth0.101": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.0.2 + IPADDR1=192.168.2.10 + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ETHERDEVICE=eth0 + STARTMODE=auto + VLAN_ID=101""" + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:d6:9f:2c:e8:80 + STARTMODE=hotplug""" + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=c0:bb:9f:2c:e8:80 + STARTMODE=hotplug""" + ), + "ifcfg-eth3": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=66:bb:9f:2c:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth4": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=98:bb:9f:2c:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth5": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + LLADDR=98:bb:9f:2c:e8:8a + STARTMODE=manual""" + ), + "ifcfg-ib0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + IPADDR=192.168.200.7 + MTU=9000 + NETMASK=255.255.255.0 + STARTMODE=auto + TYPE=InfiniBand""" + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup """ + """xmit_hash_policy=layer3+4 """ + """miimon=100" + BONDING_SLAVE0=eth1 + BONDING_SLAVE1=eth2 + BOOTPROTO=none + DEVICE=bond0 + DHCPV6C=yes + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff + ONBOOT=yes + TYPE=Bond + USERCTL=no""" + ), + "ifcfg-bond0.200": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=bond0.200 + DHCLIENT_SET_DEFAULT_ROUTE=no + ONBOOT=yes + PHYSDEV=bond0 + USERCTL=no + VLAN=yes""" + ), + "ifcfg-br0": textwrap.dedent( + """\ + AGEING=250 + BOOTPROTO=none + DEFROUTE=yes + DEVICE=br0 + IPADDR=192.168.14.2 + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + IPV6_DEFAULTGW=2001:4800:78ff:1b::1 + MACADDR=bb:bb:bb:bb:bb:aa + NETMASK=255.255.255.0 + ONBOOT=yes + PRIO=22 + STP=no + TYPE=Bridge + USERCTL=no""" + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth0 + HWADDR=c0:d6:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth0.101": textwrap.dedent( + """\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0.101 + DNS1=192.168.0.10 + 
DNS2=10.23.23.134 + DOMAIN="barley.maas sacchromyces.maas brettanomyces.maas" + GATEWAY=192.168.0.1 + IPADDR=192.168.0.2 + IPADDR1=192.168.2.10 + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + PHYSDEV=eth0 + USERCTL=no + VLAN=yes""" + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth1 + HWADDR=aa:d6:9f:2c:e8:80 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth2 + HWADDR=c0:bb:9f:2c:e8:80 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth3": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth3 + HWADDR=66:bb:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth4": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth4 + HWADDR=98:bb:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth5": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=eth5 + DHCLIENT_SET_DEFAULT_ROUTE=no + HWADDR=98:bb:9f:2c:e8:8a + ONBOOT=no + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-ib0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=ib0 + HWADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + IPADDR=192.168.200.7 + MTU=9000 + NETMASK=255.255.255.0 + ONBOOT=yes + TYPE=InfiniBand + USERCTL=no""" + ), + }, + "expected_network_manager": { + "cloud-init-eth3.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth3 + uuid=b7e95dda-7746-5bf8-bf33-6e5f3c926790 + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=66:BB:9F:2C:E8:80 + + """ + ), + "cloud-init-eth5.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth5 + uuid=5fda13c7-9942-5e90-a41b-1d043bd725dc + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=98:BB:9F:2C:E8:8A + + [ipv4] + method=auto + may-fail=false + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-ib0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init ib0 + uuid=11a1dda7-78b4-5529-beba-d9b5f549ad7b + autoconnect-priority=120 + type=infiniband + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [infiniband] + transport-mode=datagram + mtu=9000 + mac-address=A0:00:02:20:FE:80:00:00:00:00:00:00:EC:0D:9A:03:00:15:E2:C1 + + [ipv4] + method=manual + may-fail=false + address1=192.168.200.7/24 + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-bond0.200.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0.200 + uuid=88984a9c-ff22-5233-9267-86315e0acaa7 + autoconnect-priority=120 + type=vlan + interface-name=bond0.200 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=200 + parent=54317911-f840-516b-a10d-82cb4c1f075c + + [ipv4] + method=auto + may-fail=false + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:D6:9F:2C:E8:80 + + """ + ), + "cloud-init-eth4.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth4 + uuid=e27e4959-fb50-5580-b9a4-2073554627b9 + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=98:BB:9F:2C:E8:80 + + """ + ), + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:D6:9F:2C:E8:80 + + """ + ), + "cloud-init-br0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init br0 + uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + autoconnect-priority=120 + type=bridge + interface-name=br0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bridge] + stp=false + priority=22 + mac-address=BB:BB:BB:BB:BB:AA + + [ipv4] + method=manual + may-fail=false + address1=192.168.14.2/24 + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + [ipv6] + method=manual + may-fail=false + address1=2001:1::1/64 + route1=::/0,2001:4800:78ff:1b::1 + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth0.101.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0.101 + uuid=b5acec5e-db80-5935-8b02-0d5619fc42bf + autoconnect-priority=120 + type=vlan + interface-name=eth0.101 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=101 + parent=1dd9a779-d327-56e1-8454-c65e2556c12c + + [ipv4] + method=manual + may-fail=false + address1=192.168.0.2/24 + gateway=192.168.0.1 + address2=192.168.2.10/24 + dns=192.168.0.10;10.23.23.134; + dns-search=barley.maas;sacchromyces.maas;brettanomyces.maas; + + """ + ), + "cloud-init-bond0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0 + uuid=54317911-f840-516b-a10d-82cb4c1f075c + autoconnect-priority=120 + type=bond + interface-name=bond0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bond] + mode=active-backup + miimon=100 + xmit_hash_policy=layer3+4 + + [ipv6] + method=auto + may-fail=false + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth2.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth2 + uuid=5559a242-3421-5fdd-896e-9cb8313d5804 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:BB:9F:2C:E8:80 + + """ + ), + }, + "yaml": textwrap.dedent( + """ + version: 1 + config: + # Physical interfaces. 
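+        # eth0-eth4 are plain MAC-matched NICs consumed by the vlan,
+        # bond and bridge definitions further down; eth5 is the only one
+        # with its own (manually controlled) dhcp subnet.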
+ - type: physical + name: eth0 + mac_address: c0:d6:9f:2c:e8:80 + - type: physical + name: eth1 + mac_address: aa:d6:9f:2c:e8:80 + - type: physical + name: eth2 + mac_address: c0:bb:9f:2c:e8:80 + - type: physical + name: eth3 + mac_address: 66:bb:9f:2c:e8:80 + - type: physical + name: eth4 + mac_address: 98:bb:9f:2c:e8:80 + # specify how ifupdown should treat iface + # control is one of ['auto', 'hotplug', 'manual'] + # with manual meaning ifup/ifdown should not affect the iface + # useful for things like iscsi root + dhcp + - type: physical + name: eth5 + mac_address: 98:bb:9f:2c:e8:8a + subnets: + - type: dhcp + control: manual + # VLAN interface. + - type: vlan + name: eth0.101 + vlan_link: eth0 + vlan_id: 101 + mac_address: aa:bb:cc:dd:ee:11 + mtu: 1500 + subnets: + - type: static + # When 'mtu' matches device-level mtu, no warnings + mtu: 1500 + address: 192.168.0.2/24 + gateway: 192.168.0.1 + dns_nameservers: + - 192.168.0.10 + - 10.23.23.134 + dns_search: + - barley.maas + - sacchromyces.maas + - brettanomyces.maas + - type: static + address: 192.168.2.10/24 + # Bond. + - type: bond + name: bond0 + # if 'mac_address' is omitted, the MAC is taken from + # the first slave. + mac_address: aa:bb:cc:dd:ee:ff + bond_interfaces: + - eth1 + - eth2 + params: + bond-mode: active-backup + bond_miimon: 100 + bond-xmit-hash-policy: "layer3+4" + subnets: + - type: dhcp6 + # A Bond VLAN. + - type: vlan + name: bond0.200 + vlan_link: bond0 + vlan_id: 200 + subnets: + - type: dhcp4 + # An infiniband + - type: infiniband + name: ib0 + mac_address: >- + a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + subnets: + - type: static + address: 192.168.200.7/24 + mtu: 9000 + # A bridge. + - type: bridge + name: br0 + bridge_interfaces: + - eth3 + - eth4 + ipv4_conf: + rp_filter: 1 + proxy_arp: 0 + forwarding: 1 + ipv6_conf: + autoconf: 1 + disable_ipv6: 1 + use_tempaddr: 1 + forwarding: 1 + # basically anything in /proc/sys/net/ipv6/conf/.../ + mac_address: bb:bb:bb:bb:bb:aa + params: + bridge_ageing: 250 + bridge_bridgeprio: 22 + bridge_fd: 1 + bridge_gcint: 2 + bridge_hello: 1 + bridge_maxage: 10 + bridge_maxwait: 0 + bridge_pathcost: + - eth3 50 + - eth4 75 + bridge_portprio: + - eth3 28 + - eth4 14 + bridge_stp: 'off' + bridge_waitport: + - 1 eth3 + - 2 eth4 + subnets: + - type: static + address: 192.168.14.2/24 + - type: static + address: 2001:1::1/64 # default to /64 + routes: + - gateway: 2001:4800:78ff:1b::1 + netmask: '::' + network: '::' + # A global nameserver. + - type: nameserver + address: 8.8.8.8 + search: barley.maas + # global nameservers and search in list form + - type: nameserver + address: + - 4.4.4.4 + - 8.8.4.4 + search: + - wark.maas + - foobar.maas + # A global route. 
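+              # Not tied to any interface; in the ENI output above this
+              # renders as standalone post-up/pre-down route commands.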
+ - type: route + destination: 10.0.0.0/8 + gateway: 11.0.0.1 + metric: 3 + """ + ).lstrip(), + }, + "large_v2": { + "expected_eni": """\ +auto lo +iface lo inet loopback + dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4 + dns-search barley.maas wark.maas foobar.maas + +iface eth0 inet manual + +auto eth1 +iface eth1 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto eth2 +iface eth2 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +iface eth3 inet manual + +iface eth4 inet manual + +# control-manual eth5 +iface eth5 inet dhcp + +auto ib0 +iface ib0 inet static + address 192.168.200.7/24 + mtu 9000 + hwaddress a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + +auto bond0 +iface bond0 inet6 dhcp + bond-mode active-backup + bond-slaves none + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + hwaddress aa:bb:cc:dd:ee:ff + +auto br0 +iface br0 inet static + address 192.168.14.2/24 + bridge_ageing 250 + bridge_bridgeprio 22 + bridge_fd 1 + bridge_gcint 2 + bridge_hello 1 + bridge_maxage 10 + bridge_pathcost eth3 50 + bridge_pathcost eth4 75 + bridge_portprio eth3 28 + bridge_portprio eth4 14 + bridge_ports eth3 eth4 + bridge_stp off + bridge_waitport 1 eth3 + bridge_waitport 2 eth4 + hwaddress bb:bb:bb:bb:bb:aa + +# control-alias br0 +iface br0 inet6 static + address 2001:1::1/64 + post-up route add -A inet6 default gw 2001:4800:78ff:1b::1 || true + pre-down route del -A inet6 default gw 2001:4800:78ff:1b::1 || true + +auto bond0.200 +iface bond0.200 inet dhcp + vlan-raw-device bond0 + vlan_id 200 + +auto eth0.101 +iface eth0.101 inet static + address 192.168.0.2/24 + dns-nameservers 192.168.0.10 10.23.23.134 + dns-search barley.maas sacchromyces.maas brettanomyces.maas + gateway 192.168.0.1 + mtu 1500 + hwaddress aa:bb:cc:dd:ee:11 + vlan-raw-device eth0 + vlan_id 101 + +# control-alias eth0.101 +iface eth0.101 inet static + address 192.168.2.10/24 + +post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +""", + "expected_sysconfig_opensuse": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_MODULE_OPTS="mode=active-backup """ + """xmit_hash_policy=layer3+4 """ + """miimon=100" + BONDING_SLAVE_0=eth1 + BONDING_SLAVE_1=eth2 + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + LLADDR=aa:bb:cc:dd:ee:ff + STARTMODE=auto""" + ), + "ifcfg-bond0.200": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + ETHERDEVICE=bond0 + STARTMODE=auto + VLAN_ID=200""" + ), + "ifcfg-br0": textwrap.dedent( + """\ + BRIDGE_AGEINGTIME=250 + BOOTPROTO=static + IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 + LLADDRESS=bb:bb:bb:bb:bb:aa + NETMASK=255.255.255.0 + BRIDGE_PRIORITY=22 + BRIDGE_PORTS='eth3 eth4' + STARTMODE=auto + BRIDGE_STP=off""" + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=c0:d6:9f:2c:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth0.101": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.0.2 + IPADDR1=192.168.2.10 + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ETHERDEVICE=eth0 + STARTMODE=auto + VLAN_ID=101""" + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:d6:9f:2c:e8:80 + STARTMODE=hotplug""" + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=c0:bb:9f:2c:e8:80 + STARTMODE=hotplug""" + ), + "ifcfg-eth3": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=66:bb:9f:2c:e8:80 + STARTMODE=auto""" 
+ ), + "ifcfg-eth4": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=98:bb:9f:2c:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth5": textwrap.dedent( + """\ + BOOTPROTO=dhcp4 + LLADDR=98:bb:9f:2c:e8:8a + STARTMODE=manual""" + ), + "ifcfg-ib0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + IPADDR=192.168.200.7 + MTU=9000 + NETMASK=255.255.255.0 + STARTMODE=auto + TYPE=InfiniBand""" + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup """ + """xmit_hash_policy=layer3+4 """ + """miimon=100" + BONDING_SLAVE0=eth1 + BONDING_SLAVE1=eth2 + BOOTPROTO=none + DEVICE=bond0 + DHCPV6C=yes + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff + ONBOOT=yes + TYPE=Bond + USERCTL=no""" + ), + "ifcfg-bond0.200": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=bond0.200 + DHCLIENT_SET_DEFAULT_ROUTE=no + ONBOOT=yes + PHYSDEV=bond0 + USERCTL=no + VLAN=yes""" + ), + "ifcfg-br0": textwrap.dedent( + """\ + AGEING=250 + BOOTPROTO=none + DEFROUTE=yes + DEVICE=br0 + IPADDR=192.168.14.2 + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + IPV6_DEFAULTGW=2001:4800:78ff:1b::1 + MACADDR=bb:bb:bb:bb:bb:aa + NETMASK=255.255.255.0 + ONBOOT=yes + PRIO=22 + STP=no + TYPE=Bridge + USERCTL=no""" + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth0 + HWADDR=c0:d6:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth0.101": textwrap.dedent( + """\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0.101 + DNS1=192.168.0.10 + DNS2=10.23.23.134 + DOMAIN="barley.maas sacchromyces.maas brettanomyces.maas" + GATEWAY=192.168.0.1 + IPADDR=192.168.0.2 + IPADDR1=192.168.2.10 + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + PHYSDEV=eth0 + USERCTL=no + VLAN=yes""" + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth1 + HWADDR=aa:d6:9f:2c:e8:80 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth2 + HWADDR=c0:bb:9f:2c:e8:80 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth3": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth3 + HWADDR=66:bb:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth4": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth4 + HWADDR=98:bb:9f:2c:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-eth5": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DEVICE=eth5 + DHCLIENT_SET_DEFAULT_ROUTE=no + HWADDR=98:bb:9f:2c:e8:8a + ONBOOT=no + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-ib0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=ib0 + HWADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + IPADDR=192.168.200.7 + MTU=9000 + NETMASK=255.255.255.0 + ONBOOT=yes + TYPE=InfiniBand + USERCTL=no""" + ), + }, + "expected_network_manager": { + "cloud-init-eth3.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth3 + uuid=b7e95dda-7746-5bf8-bf33-6e5f3c926790 + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=66:BB:9F:2C:E8:80 + + """ + ), + "cloud-init-eth5.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. 
Changes will be lost. + + [connection] + id=cloud-init eth5 + uuid=5fda13c7-9942-5e90-a41b-1d043bd725dc + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=98:BB:9F:2C:E8:8A + + [ipv4] + method=auto + may-fail=false + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-ib0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init ib0 + uuid=11a1dda7-78b4-5529-beba-d9b5f549ad7b + autoconnect-priority=120 + type=infiniband + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [infiniband] + transport-mode=datagram + mtu=9000 + mac-address=A0:00:02:20:FE:80:00:00:00:00:00:00:EC:0D:9A:03:00:15:E2:C1 + + [ipv4] + method=manual + may-fail=false + address1=192.168.200.7/24 + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-bond0.200.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0.200 + uuid=88984a9c-ff22-5233-9267-86315e0acaa7 + autoconnect-priority=120 + type=vlan + interface-name=bond0.200 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=200 + parent=54317911-f840-516b-a10d-82cb4c1f075c + + [ipv4] + method=auto + may-fail=false + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:D6:9F:2C:E8:80 + + """ + ), + "cloud-init-eth4.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth4 + uuid=e27e4959-fb50-5580-b9a4-2073554627b9 + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=98:BB:9F:2C:E8:80 + + """ + ), + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:D6:9F:2C:E8:80 + + """ + ), + "cloud-init-br0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init br0 + uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + autoconnect-priority=120 + type=bridge + interface-name=br0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bridge] + stp=false + priority=22 + mac-address=BB:BB:BB:BB:BB:AA + + [ipv4] + method=manual + may-fail=false + address1=192.168.14.2/24 + dns=8.8.8.8;4.4.4.4;8.8.4.4; + dns-search=barley.maas;wark.maas;foobar.maas; + + [ipv6] + route1=::/0,2001:4800:78ff:1b::1 + method=manual + may-fail=false + address1=2001:1::1/64 + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth0.101.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth0.101 + uuid=b5acec5e-db80-5935-8b02-0d5619fc42bf + autoconnect-priority=120 + type=vlan + interface-name=eth0.101 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=101 + parent=1dd9a779-d327-56e1-8454-c65e2556c12c + + [ipv4] + method=manual + may-fail=false + address1=192.168.0.2/24 + route1=0.0.0.0/0,192.168.0.1 + address2=192.168.2.10/24 + dns=192.168.0.10;10.23.23.134; + dns-search=barley.maas;sacchromyces.maas;brettanomyces.maas; + + """ + ), + "cloud-init-bond0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0 + uuid=54317911-f840-516b-a10d-82cb4c1f075c + autoconnect-priority=120 + type=bond + interface-name=bond0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bond] + mode=active-backup + miimon=100 + xmit_hash_policy=layer3+4 + + [ipv6] + method=auto + may-fail=false + dns-search=barley.maas;wark.maas;foobar.maas; + + """ + ), + "cloud-init-eth2.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth2 + uuid=5559a242-3421-5fdd-896e-9cb8313d5804 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=C0:BB:9F:2C:E8:80 + + """ + ), + }, + "yaml": textwrap.dedent( + """ + version: 2 + ethernets: + eth0: + match: + macaddress: c0:d6:9f:2c:e8:80 + set-name: eth0 + eth1: + match: + macaddress: aa:d6:9f:2c:e8:80 + set-name: eth1 + eth2: + match: + macaddress: c0:bb:9f:2c:e8:80 + set-name: eth2 + eth3: + match: + macaddress: 66:bb:9f:2c:e8:80 + set-name: eth3 + eth4: + match: + macaddress: 98:bb:9f:2c:e8:80 + set-name: eth4 + eth5: + dhcp4: true + match: + macaddress: 98:bb:9f:2c:e8:8a + set-name: eth5 + bonds: + bond0: + dhcp6: true + interfaces: + - eth1 + - eth2 + macaddress: aa:bb:cc:dd:ee:ff + parameters: + mii-monitor-interval: 100 + mode: active-backup + transmit-hash-policy: layer3+4 + bridges: + br0: + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + interfaces: + - eth3 + - eth4 + macaddress: bb:bb:bb:bb:bb:aa + nameservers: + addresses: + - 8.8.8.8 + - 4.4.4.4 + - 8.8.4.4 + search: + - barley.maas + - wark.maas + - foobar.maas + parameters: + ageing-time: 250 + forward-delay: 1 + hello-time: 1 + max-age: 10 + path-cost: + eth3: 50 + eth4: 75 + port-priority: + eth3: 28 + eth4: 14 + priority: 22 + stp: false + routes: + - to: ::/0 + via: 2001:4800:78ff:1b::1 + vlans: + bond0.200: + dhcp4: true + id: 200 + link: bond0 + eth0.101: + addresses: + - 192.168.0.2/24 + - 192.168.2.10/24 + id: 101 + link: eth0 + macaddress: aa:bb:cc:dd:ee:11 + mtu: 1500 + nameservers: + addresses: + - 192.168.0.10 + - 10.23.23.134 + search: + - barley.maas + - sacchromyces.maas + - brettanomyces.maas + routes: + - to: 0.0.0.0/0 + via: 192.168.0.1 + """ + ), + }, + "bond_v1": { + "yaml": textwrap.dedent( + """ + version: 1 + config: + - type: physical + name: bond0s0 + mac_address: aa:bb:cc:dd:e8:00 + - type: physical + name: bond0s1 + mac_address: aa:bb:cc:dd:e8:01 + - type: bond + name: bond0 + mac_address: aa:bb:cc:dd:e8:ff + mtu: 9000 + bond_interfaces: + - bond0s0 + - bond0s1 + params: + bond-mode: active-backup + bond_miimon: 100 + bond-xmit-hash-policy: "layer3+4" + bond-num-grat-arp: 5 + bond-downdelay: 10 + bond-updelay: 20 + bond-fail-over-mac: active + bond-primary: bond0s0 + bond-primary-reselect: always + subnets: + - type: 
static + address: 192.168.0.2/24 + gateway: 192.168.0.1 + routes: + - gateway: 192.168.0.3 + netmask: 255.255.255.0 + network: 10.1.3.0 + - type: static + address: 192.168.1.2/24 + - type: static + address: 2001:1::1/92 + routes: + - gateway: 2001:67c:1562::1 + network: "2001:67c::" + netmask: "ffff:ffff::" + - gateway: 3001:67c:15::1 + network: "3001:67c::" + netmask: "ffff:ffff::" + metric: 10000 + """ + ), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + bond0s0: + match: + macaddress: aa:bb:cc:dd:e8:00 + set-name: bond0s0 + bond0s1: + match: + macaddress: aa:bb:cc:dd:e8:01 + set-name: bond0s1 + bonds: + bond0: + addresses: + - 192.168.0.2/24 + - 192.168.1.2/24 + - 2001:1::1/92 + interfaces: + - bond0s0 + - bond0s1 + macaddress: aa:bb:cc:dd:e8:ff + mtu: 9000 + parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitous-arp: 5 + mii-monitor-interval: 100 + mode: active-backup + primary: bond0s0 + primary-reselect-policy: always + transmit-hash-policy: layer3+4 + up-delay: 20 + routes: + - to: 0.0.0.0/0 + via: 192.168.0.1 + - to: 10.1.3.0/24 + via: 192.168.0.3 + - to: 2001:67c::/32 + via: 2001:67c:1562::1 + - metric: 10000 + to: 3001:67c::/32 + via: 3001:67c:15::1 + """ + ), + "expected_eni": textwrap.dedent( + """\ +auto lo +iface lo inet loopback + +auto bond0s0 +iface bond0s0 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0s1 +iface bond0s1 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0 +iface bond0 inet static + address 192.168.0.2/24 + gateway 192.168.0.1 + bond-downdelay 10 + bond-fail-over-mac active + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-slaves none + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + hwaddress aa:bb:cc:dd:e8:ff + mtu 9000 + post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true + pre-down route del -net 10.1.3.0/24 gw 192.168.0.3 || true + +# control-alias bond0 +iface bond0 inet static + address 192.168.1.2/24 + +# control-alias bond0 +iface bond0 inet6 static + address 2001:1::1/92 + post-up route add -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true + pre-down route del -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true + post-up route add -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ +|| true + pre-down route del -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ +|| true + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" + BONDING_SLAVE_0=bond0s0 + BONDING_SLAVE_1=bond0s1 + BOOTPROTO=static + LLADDR=aa:bb:cc:dd:e8:ff + IPADDR=192.168.0.2 + IPADDR1=192.168.1.2 + IPADDR6=2001:1::1/92 + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + STARTMODE=auto + """ + ), + "ifcfg-bond0s0": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:bb:cc:dd:e8:00 + STARTMODE=hotplug + """ + ), + "ifcfg-bond0s1": 
textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:bb:cc:dd:e8:01 + STARTMODE=hotplug + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" + BONDING_SLAVE0=bond0s0 + BONDING_SLAVE1=bond0s1 + BOOTPROTO=none + DEFROUTE=yes + DEVICE=bond0 + GATEWAY=192.168.0.1 + MACADDR=aa:bb:cc:dd:e8:ff + IPADDR=192.168.0.2 + IPADDR1=192.168.1.2 + IPV6ADDR=2001:1::1/92 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + TYPE=Bond + USERCTL=no + """ + ), + "ifcfg-bond0s0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=bond0s0 + HWADDR=aa:bb:cc:dd:e8:00 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """ + ), + "route6-bond0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + 2001:67c::/32 via 2001:67c:1562::1 dev bond0 + 3001:67c::/32 via 3001:67c:15::1 metric 10000 dev bond0 + """ + ), + "route-bond0": textwrap.dedent( + """\ + ADDRESS0=10.1.3.0 + GATEWAY0=192.168.0.3 + NETMASK0=255.255.255.0 + """ + ), + "ifcfg-bond0s1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=bond0s1 + HWADDR=aa:bb:cc:dd:e8:01 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-bond0s0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0s0 + uuid=09d0b5b9-67e7-5577-a1af-74d1cf17a71e + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:00 + + """ + ), + "cloud-init-bond0s1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0s1 + uuid=4d9aca96-b515-5630-ad83-d13daac7f9d0 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:01 + + """ + ), + "cloud-init-bond0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init bond0 + uuid=54317911-f840-516b-a10d-82cb4c1f075c + autoconnect-priority=120 + type=bond + interface-name=bond0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bond] + mode=active-backup + miimon=100 + xmit_hash_policy=layer3+4 + num_grat_arp=5 + downdelay=10 + updelay=20 + fail_over_mac=active + primary_reselect=always + primary=bond0s0 + + [ipv4] + method=manual + may-fail=false + address1=192.168.0.2/24 + gateway=192.168.0.1 + route1=10.1.3.0/24,192.168.0.3 + address2=192.168.1.2/24 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::1/92 + route1=2001:67c::/32,2001:67c:1562::1 + route2=3001:67c::/32,3001:67c:15::1 + + """ + ), + }, + }, + "bond_v2": { + "yaml": textwrap.dedent( + """ + version: 2 + ethernets: + bond0s0: + match: + driver: "virtio_net" + macaddress: aa:bb:cc:dd:e8:00 + set-name: bond0s0 + bond0s1: + set-name: bond0s1 + match: + driver: "e1000" + macaddress: aa:bb:cc:dd:e8:01 + bonds: + bond0: + addresses: + - 192.168.0.2/24 + - 192.168.1.2/24 + - 2001:1::1/92 + interfaces: + - bond0s0 + - bond0s1 + macaddress: aa:bb:cc:dd:e8:ff + mtu: 9000 + parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitous-arp: 5 + mii-monitor-interval: 100 + mode: active-backup + primary: bond0s0 + primary-reselect-policy: always + transmit-hash-policy: layer3+4 + up-delay: 20 + routes: + - to: 0.0.0.0/0 + via: 192.168.0.1 + - to: 10.1.3.0/24 + via: 192.168.0.3 + - to: 2001:67c::/32 + via: 2001:67c:1562::1 + - metric: 10000 + to: 3001:67c::/32 + via: 3001:67c:15::1 + """ + ), + "expected_netplan": textwrap.dedent( + """ + network: + version: 2 + ethernets: + bond0s0: + match: + driver: virtio_net + macaddress: aa:bb:cc:dd:e8:00 + set-name: bond0s0 + bond0s1: + match: + driver: e1000 + macaddress: aa:bb:cc:dd:e8:01 + set-name: bond0s1 + bonds: + bond0: + addresses: + - 192.168.0.2/24 + - 192.168.1.2/24 + - 2001:1::1/92 + interfaces: + - bond0s0 + - bond0s1 + macaddress: aa:bb:cc:dd:e8:ff + mtu: 9000 + parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitous-arp: 5 + mii-monitor-interval: 100 + mode: active-backup + primary: bond0s0 + primary-reselect-policy: always + transmit-hash-policy: layer3+4 + up-delay: 20 + routes: + - to: 0.0.0.0/0 + via: 192.168.0.1 + - to: 10.1.3.0/24 + via: 192.168.0.3 + - to: 2001:67c::/32 + via: 2001:67c:1562::1 + - metric: 10000 + to: 3001:67c::/32 + via: 3001:67c:15::1 + """ + ), + "expected_eni": textwrap.dedent( + """\ +auto lo +iface lo inet loopback + +auto bond0s0 +iface bond0s0 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond_miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + +auto bond0s1 +iface bond0s1 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond_miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + +auto bond0 +iface bond0 inet static + address 192.168.0.2/24 + gateway 192.168.0.1 + bond-downdelay 10 + bond-fail-over-mac active + bond_miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-slaves none + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + hwaddress aa:bb:cc:dd:e8:ff + mtu 9000 + post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true + pre-down route del -net 10.1.3.0/24 gw 
192.168.0.3 || true + +# control-alias bond0 +iface bond0 inet static + address 192.168.1.2/24 + +# control-alias bond0 +iface bond0 inet6 static + address 2001:1::1/92 + post-up route add -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true + pre-down route del -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true + post-up route add -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ +|| true + pre-down route del -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ +|| true + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" + BONDING_SLAVE_0=bond0s0 + BONDING_SLAVE_1=bond0s1 + BOOTPROTO=static + LLADDR=aa:bb:cc:dd:e8:ff + IPADDR=192.168.0.2 + IPADDR1=192.168.1.2 + IPADDR6=2001:1::1/92 + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + STARTMODE=auto + """ + ), + "ifcfg-bond0s0": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:bb:cc:dd:e8:00 + STARTMODE=hotplug + """ + ), + "ifcfg-bond0s1": textwrap.dedent( + """\ + BOOTPROTO=none + LLADDR=aa:bb:cc:dd:e8:01 + STARTMODE=hotplug + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" + BONDING_SLAVE0=bond0s0 + BONDING_SLAVE1=bond0s1 + BOOTPROTO=none + DEFROUTE=yes + DEVICE=bond0 + GATEWAY=192.168.0.1 + MACADDR=aa:bb:cc:dd:e8:ff + IPADDR=192.168.0.2 + IPADDR1=192.168.1.2 + IPV6ADDR=2001:1::1/92 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + TYPE=Bond + USERCTL=no + """ + ), + "ifcfg-bond0s0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=bond0s0 + HWADDR=aa:bb:cc:dd:e8:00 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """ + ), + "route6-bond0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + 2001:67c::/32 via 2001:67c:1562::1 dev bond0 + 3001:67c::/32 via 3001:67c:15::1 metric 10000 dev bond0 + """ + ), + "route-bond0": textwrap.dedent( + """\ + ADDRESS0=10.1.3.0 + GATEWAY0=192.168.0.3 + NETMASK0=255.255.255.0 + """ + ), + "ifcfg-bond0s1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=bond0s1 + HWADDR=aa:bb:cc:dd:e8:01 + MASTER=bond0 + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-bond0s0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0s0 + uuid=09d0b5b9-67e7-5577-a1af-74d1cf17a71e + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:00 + + """ + ), + "cloud-init-bond0s1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init bond0s1 + uuid=4d9aca96-b515-5630-ad83-d13daac7f9d0 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:01 + + """ + ), + "cloud-init-bond0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0 + uuid=54317911-f840-516b-a10d-82cb4c1f075c + autoconnect-priority=120 + type=bond + interface-name=bond0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bond] + mode=active-backup + miimon=100 + xmit_hash_policy=layer3+4 + num_grat_arp=5 + downdelay=10 + updelay=20 + fail_over_mac=active + primary_reselect=always + primary=bond0s0 + + [ipv4] + method=manual + may-fail=false + address1=192.168.0.2/24 + route1=0.0.0.0/0,192.168.0.1 + route2=10.1.3.0/24,192.168.0.3 + address2=192.168.1.2/24 + + [ipv6] + route1=2001:67c::/32,2001:67c:1562::1 + route2=3001:67c::/32,3001:67c:15::1 + method=manual + may-fail=false + address1=2001:1::1/92 + + """ + ), + }, + }, + "vlan_v1": { + "yaml": textwrap.dedent( + """ + version: 1 + config: + - type: physical + name: en0 + mac_address: aa:bb:cc:dd:e8:00 + - type: vlan + mtu: 2222 + name: en0.99 + vlan_link: en0 + vlan_id: 99 + subnets: + - type: static + address: '192.168.2.2/24' + - type: static + address: '192.168.1.2/24' + gateway: 192.168.1.1 + - type: static + address: 2001:1::bbbb/96 + routes: + - gateway: 2001:1::1 + netmask: '::' + network: '::' + """ + ), + "expected_sysconfig_opensuse": { + # TODO RJS: unknown proper BOOTPROTO setting ask Marius + "ifcfg-en0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=aa:bb:cc:dd:e8:00 + STARTMODE=auto""" + ), + "ifcfg-en0.99": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.2.2 + IPADDR1=192.168.1.2 + IPADDR6=2001:1::bbbb/96 + MTU=2222 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + STARTMODE=auto + ETHERDEVICE=en0 + VLAN_ID=99 + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-en0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=en0 + HWADDR=aa:bb:cc:dd:e8:00 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-en0.99": textwrap.dedent( + """\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=en0.99 + GATEWAY=192.168.1.1 + IPADDR=192.168.2.2 + IPADDR1=192.168.1.2 + IPV6ADDR=2001:1::bbbb/96 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + IPV6_DEFAULTGW=2001:1::1 + MTU=2222 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + PHYSDEV=en0 + USERCTL=no + VLAN=yes""" + ), + }, + "expected_network_manager": { + "cloud-init-en0.99.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init en0.99 + uuid=f594e2ed-f107-51df-b225-1dc530a5356b + autoconnect-priority=120 + type=vlan + interface-name=en0.99 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=99 + parent=e0ca478b-8d84-52ab-8fae-628482c629b5 + + [ipv4] + method=manual + may-fail=false + address1=192.168.2.2/24 + address2=192.168.1.2/24 + gateway=192.168.1.1 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::bbbb/96 + route1=::/0,2001:1::1 + + """ + ), + "cloud-init-en0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init en0 + uuid=e0ca478b-8d84-52ab-8fae-628482c629b5 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:00 + + """ + ), + }, + }, + "vlan_v2": { + "yaml": textwrap.dedent( + """ + version: 2 + ethernets: + en0: + match: + macaddress: aa:bb:cc:dd:e8:00 + set-name: en0 + vlans: + en0.99: + addresses: + - 192.168.2.2/24 + - 192.168.1.2/24 + - 2001:1::bbbb/96 + id: 99 + link: en0 + mtu: 2222 + routes: + - to: 0.0.0.0/0 + via: 192.168.1.1 + - to: ::/0 + via: 2001:1::1 + + """ + ), + "expected_sysconfig_opensuse": { + # TODO RJS: unknown proper BOOTPROTO setting ask Marius + "ifcfg-en0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=aa:bb:cc:dd:e8:00 + STARTMODE=auto""" + ), + "ifcfg-en0.99": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.2.2 + IPADDR1=192.168.1.2 + IPADDR6=2001:1::bbbb/96 + MTU=2222 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + STARTMODE=auto + ETHERDEVICE=en0 + VLAN_ID=99 + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-en0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=en0 + HWADDR=aa:bb:cc:dd:e8:00 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + "ifcfg-en0.99": textwrap.dedent( + """\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=en0.99 + GATEWAY=192.168.1.1 + IPADDR=192.168.2.2 + IPADDR1=192.168.1.2 + IPV6ADDR=2001:1::bbbb/96 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + IPV6_DEFAULTGW=2001:1::1 + MTU=2222 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ONBOOT=yes + PHYSDEV=en0 + USERCTL=no + VLAN=yes""" + ), + }, + "expected_network_manager": { + "cloud-init-en0.99.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init en0.99 + uuid=f594e2ed-f107-51df-b225-1dc530a5356b + autoconnect-priority=120 + type=vlan + interface-name=en0.99 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=99 + parent=e0ca478b-8d84-52ab-8fae-628482c629b5 + + [ipv4] + method=manual + may-fail=false + address1=192.168.2.2/24 + route1=0.0.0.0/0,192.168.1.1 + address2=192.168.1.2/24 + + [ipv6] + route1=::/0,2001:1::1 + method=manual + may-fail=false + address1=2001:1::bbbb/96 + + """ + ), + "cloud-init-en0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init en0 + uuid=e0ca478b-8d84-52ab-8fae-628482c629b5 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=AA:BB:CC:DD:E8:00 + + """ + ), + }, + }, + "bridge": { + "yaml_v1": textwrap.dedent( + """ + version: 1 + config: + - type: physical + name: eth0 + mac_address: '52:54:00:12:34:00' + subnets: + - type: static + address: 2001:1::100/96 + - type: physical + name: eth1 + mac_address: '52:54:00:12:34:01' + subnets: + - type: static + address: 2001:1::101/96 + - type: bridge + name: br0 + bridge_interfaces: + - eth0 + - eth1 + params: + bridge_stp: 0 + bridge_bridgeprio: 22 + subnets: + - type: static + address: 192.168.2.2/24""" + ), + "yaml_v2": textwrap.dedent( + """ + version: 2 + ethernets: + eth0: + addresses: + - 2001:1::100/96 + match: + macaddress: '52:54:00:12:34:00' + set-name: eth0 + eth1: + addresses: + - 2001:1::101/96 + match: + macaddress: '52:54:00:12:34:01' + set-name: eth1 + bridges: + br0: + addresses: + - 192.168.2.2/24 + interfaces: + - eth0 + - eth1 + parameters: + priority: 22 + stp: false + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-br0": textwrap.dedent( + """\ + BOOTPROTO=static + IPADDR=192.168.2.2 + NETMASK=255.255.255.0 + STARTMODE=auto + BRIDGE_STP=off + BRIDGE_PRIORITY=22 + BRIDGE_PORTS='eth0 eth1' + """ + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=52:54:00:12:34:00 + IPADDR6=2001:1::100/96 + STARTMODE=auto + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=52:54:00:12:34:01 + IPADDR6=2001:1::101/96 + STARTMODE=auto + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-br0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=br0 + IPADDR=192.168.2.2 + NETMASK=255.255.255.0 + ONBOOT=yes + PRIO=22 + STP=no + TYPE=Bridge + USERCTL=no + """ + ), + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth0 + HWADDR=52:54:00:12:34:00 + IPV6ADDR=2001:1::100/96 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + BRIDGE=br0 + DEVICE=eth1 + HWADDR=52:54:00:12:34:01 + IPV6ADDR=2001:1::101/96 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-br0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init br0 + uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + autoconnect-priority=120 + type=bridge + interface-name=br0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bridge] + stp=false + priority=22 + + [ipv4] + method=manual + may-fail=false + address1=192.168.2.2/24 + + """ + ), + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=52:54:00:12:34:00 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::100/96 + + """ + ), + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + slave-type=bridge + master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=52:54:00:12:34:01 + + [ipv6] + method=manual + may-fail=false + address1=2001:1::101/96 + + """ + ), + }, + }, + "manual": { + "yaml": textwrap.dedent( + """ + version: 1 + config: + - type: physical + name: eth0 + mac_address: '52:54:00:12:34:00' + subnets: + - type: static + address: 192.168.1.2/24 + control: manual + - type: physical + name: eth1 + mtu: 1480 + mac_address: 52:54:00:12:34:aa + subnets: + - type: manual + - type: physical + name: eth2 + mac_address: 52:54:00:12:34:ff + subnets: + - type: manual + control: manual + """ + ), + "expected_eni": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + # control-manual eth0 + iface eth0 inet static + address 192.168.1.2/24 + + auto eth1 + iface eth1 inet manual + mtu 1480 + + # control-manual eth2 + iface eth2 inet manual + """ + ), + "expected_netplan": textwrap.dedent( + """\ + + network: + version: 2 + ethernets: + eth0: + addresses: + - 192.168.1.2/24 + match: + macaddress: '52:54:00:12:34:00' + set-name: eth0 + eth1: + match: + macaddress: 52:54:00:12:34:aa + mtu: 1480 + set-name: eth1 + eth2: + match: + macaddress: 52:54:00:12:34:ff + set-name: eth2 + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=52:54:00:12:34:00 + IPADDR=192.168.1.2 + NETMASK=255.255.255.0 + STARTMODE=manual + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=52:54:00:12:34:aa + MTU=1480 + STARTMODE=auto + """ + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=52:54:00:12:34:ff + STARTMODE=manual + """ + ), + }, + "expected_sysconfig_rhel": { + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth0 + HWADDR=52:54:00:12:34:00 + IPADDR=192.168.1.2 + NETMASK=255.255.255.0 + ONBOOT=no + TYPE=Ethernet + USERCTL=no + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth1 + HWADDR=52:54:00:12:34:aa + MTU=1480 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + "ifcfg-eth2": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth2 + HWADDR=52:54:00:12:34:ff + ONBOOT=no + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=52:54:00:12:34:00 + + [ipv4] + method=manual + may-fail=false + address1=192.168.1.2/24 + + """ + ), + "cloud-init-eth1.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth1 + uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=1480 + mac-address=52:54:00:12:34:AA + + [ipv4] + method=auto + may-fail=true + + """ + ), + "cloud-init-eth2.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth2 + uuid=5559a242-3421-5fdd-896e-9cb8313d5804 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=52:54:00:12:34:FF + + [ipv4] + method=auto + may-fail=true + + """ + ), + }, + }, + "v1-dns": { + "expected_networkd": textwrap.dedent( + """\ + [Address] + Address=192.168.1.20/16 + + [Match] + MACAddress=11:22:33:44:55:66 + Name=interface0 + + [Network] + DHCP=no + DNS=1.1.1.1 3.3.3.3 + Domains=aaaa cccc + + [Route] + Gateway=192.168.1.1 + """ + ), + "expected_eni": textwrap.dedent( + """\ + # This file is generated from information provided by the datasource. Changes + # to it will not persist across an instance reboot. To disable cloud-init's + # network configuration capabilities, write a file + # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: + # network: {config: disabled} + auto lo + iface lo inet loopback + dns-nameservers 2.2.2.2 + dns-search bbbb + + iface lo inet6 loopback + dns-nameservers FEDC::1 + dns-search bbbb + + auto interface0 + iface interface0 inet static + address 192.168.1.20/16 + dns-nameservers 1.1.1.1 3.3.3.3 + dns-search aaaa cccc + gateway 192.168.1.1 + """ # noqa: E501 + ), + "expected_netplan": textwrap.dedent( + """\ + # This file is generated from information provided by the datasource. Changes + # to it will not persist across an instance reboot. To disable cloud-init's + # network configuration capabilities, write a file + # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: + # network: {config: disabled} + network: + version: 2 + ethernets: + interface0: + addresses: + - 192.168.1.20/16 + match: + macaddress: 11:22:33:44:55:66 + nameservers: + addresses: + - 1.1.1.1 + - 3.3.3.3 + search: + - aaaa + - cccc + routes: + - to: default + via: 192.168.1.1 + set-name: interface0 + """ # noqa: E501 + ), + "expected_sysconfig_opensuse": { + "ifcfg-interface0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + BOOTPROTO=static + IPADDR=192.168.1.20 + LLADDR=11:22:33:44:55:66 + NETMASK=255.255.0.0 + STARTMODE=auto + """ + ) + }, + "expected_sysconfig_rhel": { + "ifcfg-eth0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + BOOTPROTO=none + DEFROUTE=yes + DEVICE=interface0 + DNS1=1.1.1.1 + DNS2=3.3.3.3 + DOMAIN=aaaa cccc + GATEWAY=192.168.1.1 + HWADDR=11:22:33:44:55:66 + IPADDR=192.168.1.20 + NETMASK=255.255.0.0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ), + }, + "expected_network_manager": { + "cloud-init-interface0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init interface0 + uuid=8b6862ed-dbd6-5830-93f7-a91451c13828 + autoconnect-priority=120 + type=ethernet + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mac-address=11:22:33:44:55:66 + + [ipv4] + method=manual + may-fail=false + address1=192.168.1.20/16 + gateway=192.168.1.1 + dns=3.3.3.3;1.1.1.1; + dns-search=cccc;aaaa; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 1 + config: + - type: physical + name: interface0 + mac_address: "11:22:33:44:55:66" + subnets: + - type: static + address: 192.168.1.20/16 + gateway: 192.168.1.1 + dns_nameservers: + - 3.3.3.3 + dns_search: + - cccc + - type: nameserver + interface: interface0 + address: + - 1.1.1.1 + search: + - aaaa + - type: nameserver + address: + - 2.2.2.2 + - FEDC::1 + search: + - bbbb + """ + ), + }, + "v2-dev-name-via-mac-lookup": { + "expected_sysconfig_rhel": { + "ifcfg-eth0": textwrap.dedent( + """\ + BOOTPROTO=none + DEVICE=eth0 + HWADDR=cf:d6:af:48:e8:80 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no""" + ), + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + nic0: + match: + macaddress: 'cf:d6:af:48:e8:80' + """ + ), + }, + "v2-mixed-routes": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=500 + + [ipv4] + method=auto + may-fail=true + route1=169.254.42.42/32,62.210.0.1 + route1_options=mtu=400 + route2=169.254.42.43/32,62.210.0.2 + route2_options=mtu=200 + address1=192.168.1.20/16 + dns=8.8.8.8; + dns-search=lab;home; + + [ipv6] + route1=::/0,fe80::dc00:ff:fe20:186 + route1_options=mtu=300 + route2=fe80::dc00:ff:fe20:188/64,fe80::dc00:ff:fe20:187 + route2_options=mtu=100 + method=auto + may-fail=true + address1=2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns=FEDC::1; + dns-search=lab;home; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + dhcp4: true + dhcp6: true + mtu: 500 + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + routes: + - to: 169.254.42.42/32 + via: 62.210.0.1 + mtu: 400 + - via: fe80::dc00:ff:fe20:186 + to: ::/0 + mtu: 300 + - to: 169.254.42.43/32 + via: 62.210.0.2 + mtu: 200 + - via: fe80::dc00:ff:fe20:187 + to: fe80::dc00:ff:fe20:188 + mtu: 100 + addresses: + - 192.168.1.20/16 + - 2001:bc8:1210:232:dc00:ff:fe20:185/64 + """ + ), + }, + "v2-dns": { + "expected_networkd": textwrap.dedent( + """\ + [Address] + Address=192.168.1.20/16 + + [Address] + Address=2001:bc8:1210:232:dc00:ff:fe20:185/64 + + [Match] + Name=eth0 + + [Network] + DHCP=no + DNS=8.8.8.8 FEDC::1 + Domains=lab home + """ + ), + "expected_eni": textwrap.dedent( + """\ + # This file is generated from information provided by the datasource. Changes + # to it will not persist across an instance reboot. 
To disable cloud-init's + # network configuration capabilities, write a file + # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: + # network: {config: disabled} + auto lo + iface lo inet loopback + + auto eth0 + iface eth0 inet static + address 192.168.1.20/16 + dns-nameservers 8.8.8.8 + dns-search lab home + + # control-alias eth0 + iface eth0 inet6 static + address 2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns-nameservers FEDC::1 + dns-search lab home + """ # noqa: E501 + ), + "expected_sysconfig_opensuse": { + "ifcfg-eth0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + BOOTPROTO=static + IPADDR=192.168.1.20 + IPADDR6=2001:bc8:1210:232:dc00:ff:fe20:185/64 + NETMASK=255.255.0.0 + STARTMODE=auto + """ + ) + }, + "expected_sysconfig_rhel": { + "ifcfg-eth0": textwrap.dedent( + """\ + # Created by cloud-init automatically, do not edit. + # + BOOTPROTO=none + DEVICE=eth0 + DNS1=8.8.8.8 + DNS2=FEDC::1 + DOMAIN="lab home" + IPADDR=192.168.1.20 + IPV6ADDR=2001:bc8:1210:232:dc00:ff:fe20:185/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + NETMASK=255.255.0.0 + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """ + ) + }, + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=manual + may-fail=false + address1=192.168.1.20/16 + dns=8.8.8.8; + dns-search=lab;home; + + [ipv6] + method=manual + may-fail=false + address1=2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns=FEDC::1; + dns-search=lab;home; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + addresses: + - 192.168.1.20/16 + - 2001:bc8:1210:232:dc00:ff:fe20:185/64 + """ + ), + }, + "v2-dns-no-if-ips": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=auto + may-fail=true + dns=8.8.8.8; + dns-search=lab;home; + + [ipv6] + method=auto + may-fail=true + dns=FEDC::1; + dns-search=lab;home; + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + dhcp4: true + dhcp6: true + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + """ + ), + }, + "v2-dns-no-dhcp": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + nameservers: + search: [lab, home] + addresses: [8.8.8.8, "FEDC::1"] + """ + ), + }, + "v2-route-no-gateway": { + "expected_network_manager": { + "cloud-init-eth0.nmconnection": textwrap.dedent( + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init eth0 + uuid=1dd9a779-d327-56e1-8454-c65e2556c12c + autoconnect-priority=120 + type=ethernet + interface-name=eth0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + + [ipv4] + method=auto + may-fail=false + route1=0.0.0.0/0 + + """ + ) + }, + "yaml": textwrap.dedent( + """\ + version: 2 + ethernets: + eth0: + dhcp4: true + routes: + - to: "0.0.0.0/0" + """ + ), + }, +} diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py index c38ee676d..e7259d41d 100644 --- a/tests/unittests/net/test_dhcp.py +++ b/tests/unittests/net/test_dhcp.py @@ -29,6 +29,7 @@ from tests.unittests.helpers import ( CiTestCase, ResponsesTestCase, + example_netdev, mock, populate_dir, ) @@ -138,6 +139,7 @@ def test_parse_multiple_leases(self): @pytest.mark.usefixtures("dhclient_exists") +@pytest.mark.usefixtures("disable_netdev_info") class TestDHCPRFC3442(CiTestCase): def test_parse_lease_finds_rfc3442_classless_static_routes(self): """IscDhclient().get_newest_lease() returns @@ -222,6 +224,7 @@ def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4): eph.obtain_lease() expected_kwargs = { "interface": "wlp3s0", + "interface_addrs_before_dhcp": example_netdev, "ip": "192.168.2.74", "prefix_or_mask": "255.255.255.0", "broadcast": "192.168.2.255", @@ -251,6 +254,7 @@ def test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4): eph.obtain_lease() expected_kwargs = { "interface": "wlp3s0", + "interface_addrs_before_dhcp": example_netdev, "ip": "192.168.2.74", "prefix_or_mask": "255.255.255.0", "broadcast": "192.168.2.255", @@ -288,6 +292,17 @@ def test_parse_static_routes_single_ip_handles_trailing_semicolon(self): IscDhclient.parse_static_routes(rfc3442), ) + def test_unknown_121(self): + for unknown121 in [ + "0:a:0:0:1:20:a8:3f:81:10:a:0:0:1:20:a9:fe:a9:fe:a:0:0:1", + "0:a:0:0:1:20:a8:3f:81:10:a:0:0:1:20:a9:fe:a9:fe:a:0:0:1;", + ]: + assert IscDhclient.parse_static_routes(unknown121) == [ + ("0.0.0.0/0", "10.0.0.1"), + ("168.63.129.16/32", "10.0.0.1"), + ("169.254.169.254/32", "10.0.0.1"), + ] + def test_parse_static_routes_default_route(self): rfc3442 = "0,130,56,240,1" self.assertEqual( @@ -651,7 +666,7 @@ def test_dhcp_discovery_ib( def test_dhcp_output_error_stream( self, m_wait, m_which, m_subp, m_kill, m_remove, tmpdir ): - """ "dhcp_log_func is called with the output and error streams of + """dhcp_log_func is called with the output and error streams of dhclient when the callable is passed.""" dhclient_err = "FAKE DHCLIENT ERROR" dhclient_out = "FAKE DHCLIENT OUT" @@ -799,6 +814,7 @@ def test_multiple_files(self): ) +@pytest.mark.usefixtures("disable_netdev_info") class TestEphemeralDhcpNoNetworkSetup(ResponsesTestCase): @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") def test_ephemeral_dhcp_no_network_if_url_connectivity(self, m_dhcp): @@ -846,6 +862,7 @@ def test_ephemeral_dhcp_setup_network_if_url_connectivity( NoDHCPLeaseMissingDhclientError, ], ) +@pytest.mark.usefixtures("disable_netdev_info") class TestEphemeralDhcpLeaseErrors: @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") def test_obtain_lease_raises_error(self, m_dhcp, error_class): @@ -1199,6 +1216,70 @@ def test_parse_lease_dump(self): assert "255.255.240.0" == parsed_lease["subnet-mask"] assert "192.168.0.1" == parsed_lease["routers"] + @pytest.mark.parametrize( + "lease, parsed", + ( + pytest.param( + """ + + domain_name='us-east-2.compute.internal' + + domain_name_servers='192.168.0.2' + + """, + { + 
"domain_name": "us-east-2.compute.internal", + "domain_name_servers": "192.168.0.2", + }, + id="lease_has_empty_lines", + ), + pytest.param( + """ + domain_name='us-east-2.compute.internal' + not-a-kv-pair + domain_name_servers='192.168.0.2' + """, + { + "domain_name": "us-east-2.compute.internal", + "domain_name_servers": "192.168.0.2", + }, + id="lease_has_values_that_arent_key_value_pairs", + ), + pytest.param( + """ + domain_name='us-east=2.compute.internal' + """, + { + "domain_name": "us-east=2.compute.internal", + }, + id="lease_has_kv_pair_including_equals_sign_in_value", + ), + ), + ) + def test_parse_lease_dump_resilience(self, lease, parsed): + with mock.patch("cloudinit.net.dhcp.util.load_binary_file"): + Dhcpcd.parse_dhcpcd_lease(dedent(lease), "eth0") + + def test_parse_lease_dump_fails(self): + def _raise(): + raise ValueError() + + lease = mock.Mock() + lease.strip = _raise + + with pytest.raises(InvalidDHCPLeaseFileError): + with mock.patch("cloudinit.net.dhcp.util.load_binary_file"): + Dhcpcd.parse_dhcpcd_lease(lease, "eth0") + + with pytest.raises(InvalidDHCPLeaseFileError): + with mock.patch("cloudinit.net.dhcp.util.load_binary_file"): + lease = dedent( + """ + fail + """ + ) + Dhcpcd.parse_dhcpcd_lease(lease, "eth0") + @pytest.mark.parametrize( "lease_file, option_245", ( diff --git a/tests/unittests/net/test_dns.py b/tests/unittests/net/test_dns.py index ce398299a..304f6878b 100644 --- a/tests/unittests/net/test_dns.py +++ b/tests/unittests/net/test_dns.py @@ -2,7 +2,8 @@ from unittest import mock -from cloudinit import safeyaml +import yaml + from cloudinit.net import network_state @@ -15,7 +16,7 @@ def test_system_mac_address_does_not_break_dns_parsing( by_mac_state.return_value = {"00:11:22:33:44:55": "foobar"} by_mac_init.return_value = {"00:11:22:33:44:55": "foobar"} state = network_state.parse_net_config_data( - safeyaml.load( + yaml.safe_load( """\ version: 2 ethernets: diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py index 2dd144681..e9e932e02 100644 --- a/tests/unittests/net/test_init.py +++ b/tests/unittests/net/test_init.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import copy import errno @@ -20,6 +21,7 @@ from tests.unittests.helpers import ( CiTestCase, ResponsesTestCase, + example_netdev, random_string, ) from tests.unittests.util import MockDistro @@ -42,10 +44,7 @@ def test_sys_dev_path_without_path(self): class TestReadSysNet: @pytest.fixture(autouse=True) - @pytest.mark.parametrize( - "disable_sysfs_net", [False], indirect=["disable_sysfs_net"] - ) - def setup(self, disable_sysfs_net, tmpdir_factory): + def setup(self, tmpdir_factory): # We mock invididual numbered tmpdirs here because these tests write # to the sysfs directory and stale test artifacts break later tests. 
mock_sysfs = f"{tmpdir_factory.mktemp('sysfs', numbered=True)}/" @@ -767,6 +766,7 @@ def test_interface_has_own_mac_strict_errors_on_absent_assign_type(self): @mock.patch("cloudinit.net.subp.subp") +@pytest.mark.usefixtures("disable_netdev_info") class TestEphemeralIPV4Network(CiTestCase): with_logs = True @@ -791,7 +791,11 @@ def test_ephemeral_ipv4_network_errors_on_missing_params(self, m_subp): params = copy.deepcopy(required_params) params[key] = None with self.assertRaises(ValueError) as context_manager: - EphemeralIPv4Network(MockDistro(), **params) + EphemeralIPv4Network( + MockDistro(), + interface_addrs_before_dhcp=example_netdev, + **params, + ) error = context_manager.exception self.assertIn("Cannot init network on", str(error)) self.assertEqual(0, m_subp.call_count) @@ -802,6 +806,7 @@ def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp): "interface": "eth0", "ip": "192.168.2.2", "broadcast": "192.168.2.255", + "interface_addrs_before_dhcp": example_netdev, } invalid_masks = ("invalid", "invalid.", "123.123.123") for error_val in invalid_masks: @@ -833,23 +838,8 @@ def test_ephemeral_ipv4_network_performs_teardown(self, m_subp): ], update_env={"LANG": "C"}, ), - mock.call( - ["ip", "-family", "inet", "link", "set", "dev", "eth0", "up"], - ), ] expected_teardown_calls = [ - mock.call( - [ - "ip", - "-family", - "inet", - "link", - "set", - "dev", - "eth0", - "down", - ], - ), mock.call( [ "ip", @@ -868,6 +858,7 @@ def test_ephemeral_ipv4_network_performs_teardown(self, m_subp): "ip": "192.168.2.2", "prefix_or_mask": "255.255.255.0", "broadcast": "192.168.2.255", + "interface_addrs_before_dhcp": example_netdev, } with EphemeralIPv4Network(MockDistro(), **params): self.assertEqual(expected_setup_calls, m_subp.call_args_list) @@ -894,6 +885,7 @@ def side_effect(args, **kwargs): ip="1.1.1.1", prefix_or_mask="255.255.255.0", broadcast="1.1.1.255", + interface_addrs_before_dhcp=example_netdev, static_routes=[ ("2.2.2.2/32", "9.9.9.9"), ("3.3.3.3/32", "8.8.8.8"), @@ -915,18 +907,6 @@ def side_effect(args, **kwargs): "eth0", ], ), - mock.call( - [ - "ip", - "-family", - "inet", - "link", - "set", - "dev", - "eth0", - "down", - ], - ), mock.call( [ "ip", @@ -943,28 +923,6 @@ def side_effect(args, **kwargs): for teardown in expected_teardown_calls: assert teardown in m_subp.call_args_list - @mock.patch("cloudinit.net.readurl") - def test_ephemeral_ipv4_no_network_if_url_connectivity( - self, m_readurl, m_subp - ): - """No network setup is performed if we can successfully connect to - connectivity_url.""" - params = { - "interface": "eth0", - "ip": "192.168.2.2", - "prefix_or_mask": "255.255.255.0", - "broadcast": "192.168.2.255", - "connectivity_url_data": {"url": "http://example.org/index.html"}, - } - - with EphemeralIPv4Network(MockDistro(), **params): - self.assertEqual( - [mock.call(url="http://example.org/index.html", timeout=5)], - m_readurl.call_args_list, - ) - # Ensure that no teardown happens: - m_subp.assert_has_calls([]) - def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp): """EphemeralIPv4Network handles exception when address is setup. 
@@ -972,37 +930,20 @@ def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp): """ params = { "interface": "eth0", - "ip": "192.168.2.2", + "ip": "10.85.130.116", "prefix_or_mask": "255.255.255.0", "broadcast": "192.168.2.255", + "interface_addrs_before_dhcp": example_netdev, } m_subp.side_effect = ProcessExecutionError( "", "RTNETLINK answers: File exists", 2 ) - expected_calls = [ - mock.call( - [ - "ip", - "-family", - "inet", - "addr", - "add", - "192.168.2.2/24", - "broadcast", - "192.168.2.255", - "dev", - "eth0", - ], - update_env={"LANG": "C"}, - ) - ] + expected_calls = [] with EphemeralIPv4Network(MockDistro(), **params): pass - self.assertEqual(expected_calls, m_subp.call_args_list) - self.assertIn( - "Skip ephemeral network setup, eth0 already has address", - self.logs.getvalue(), - ) + assert expected_calls == m_subp.call_args_list + assert "Skip bringing up network link" in self.logs.getvalue() + assert "Skip adding ip address" in self.logs.getvalue() def test_ephemeral_ipv4_network_with_prefix(self, m_subp): """EphemeralIPv4Network takes a valid prefix to setup the network.""" @@ -1011,6 +952,7 @@ def test_ephemeral_ipv4_network_with_prefix(self, m_subp): "ip": "192.168.2.2", "prefix_or_mask": "24", "broadcast": "192.168.2.255", + "interface_addrs_before_dhcp": example_netdev, } for prefix_val in ["24", 16]: # prefix can be int or string params["prefix_or_mask"] = prefix_val @@ -1063,6 +1005,7 @@ def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp): "prefix_or_mask": "255.255.255.0", "broadcast": "192.168.2.255", "router": "192.168.2.1", + "interface_addrs_before_dhcp": example_netdev, } # Empty response from ip route gw check m_subp.return_value = subp.SubpResult("", "") @@ -1082,16 +1025,13 @@ def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp): ], update_env={"LANG": "C"}, ), - mock.call( - ["ip", "-family", "inet", "link", "set", "dev", "eth0", "up"], - ), mock.call(["ip", "route", "show", "0.0.0.0/0"]), mock.call( [ "ip", "-4", "route", - "add", + "replace", "192.168.2.1", "dev", "eth0", @@ -1104,7 +1044,7 @@ def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp): "ip", "-4", "route", - "add", + "replace", "default", "via", "192.168.2.1", @@ -1148,6 +1088,7 @@ def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): ("0.0.0.0/0", "192.168.2.1"), ], "router": "192.168.2.1", + "interface_addrs_before_dhcp": example_netdev, } expected_setup_calls = [ mock.call( @@ -1165,9 +1106,6 @@ def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): ], update_env={"LANG": "C"}, ), - mock.call( - ["ip", "-family", "inet", "link", "set", "dev", "eth0", "up"], - ), mock.call( [ "ip", @@ -1236,18 +1174,6 @@ def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): mock.call( ["ip", "-4", "route", "del", "192.168.2.1/32", "dev", "eth0"], ), - mock.call( - [ - "ip", - "-family", - "inet", - "link", - "set", - "dev", - "eth0", - "down", - ], - ), mock.call( [ "ip", @@ -1894,3 +1820,29 @@ class TestIsIpNetwork: ) def test_is_ip_network(self, func, arg, expected_return): assert func(arg) == expected_return + + +class TestIsIpInSubnet: + """Tests for net.is_ip_in_subnet().""" + + @pytest.mark.parametrize( + "func,ip,subnet,expected_return", + ( + (net.is_ip_in_subnet, "192.168.1.1", "2001:67c::1/64", False), + (net.is_ip_in_subnet, "2001:67c::1", "192.168.1.1/24", False), + (net.is_ip_in_subnet, "192.168.1.1", "192.168.1.1/24", True), + (net.is_ip_in_subnet, "192.168.1.1", 
"192.168.1.1/32", True), + (net.is_ip_in_subnet, "192.168.1.2", "192.168.1.1/24", True), + (net.is_ip_in_subnet, "192.168.1.2", "192.168.1.1/32", False), + (net.is_ip_in_subnet, "192.168.2.2", "192.168.1.1/24", False), + (net.is_ip_in_subnet, "192.168.2.2", "192.168.1.1/32", False), + (net.is_ip_in_subnet, "2001:67c1::1", "2001:67c1::1/64", True), + (net.is_ip_in_subnet, "2001:67c1::1", "2001:67c1::1/128", True), + (net.is_ip_in_subnet, "2001:67c1::2", "2001:67c1::1/64", True), + (net.is_ip_in_subnet, "2001:67c1::2", "2001:67c1::1/128", False), + (net.is_ip_in_subnet, "2002:67c1::1", "2001:67c1::1/8", True), + (net.is_ip_in_subnet, "2002:67c1::1", "2001:67c1::1/16", False), + ), + ) + def test_is_ip_in_subnet(self, func, ip, subnet, expected_return): + assert func(ip, subnet) == expected_return diff --git a/tests/unittests/net/test_net_rendering.py b/tests/unittests/net/test_net_rendering.py index 06feab891..3e1490b27 100644 --- a/tests/unittests/net/test_net_rendering.py +++ b/tests/unittests/net/test_net_rendering.py @@ -29,8 +29,8 @@ from pathlib import Path import pytest +import yaml -from cloudinit import safeyaml from cloudinit.net.netplan import Renderer as NetplanRenderer from cloudinit.net.network_manager import Renderer as NetworkManagerRenderer from cloudinit.net.network_state import NetworkState, parse_net_config_data @@ -55,7 +55,7 @@ def _check_netplan( if network_state.version == 2: renderer = NetplanRenderer(config={"netplan_path": netplan_path}) renderer.render_network_state(network_state) - assert safeyaml.load(netplan_path.read_text()) == expected_config, ( + assert yaml.safe_load(netplan_path.read_text()) == expected_config, ( f"Netplan config generated at {netplan_path} does not match v2 " "config defined for this test." ) @@ -89,7 +89,7 @@ def _check_network_manager(network_state: NetworkState, tmp_path: Path): [("no_matching_mac_v2", Renderer.Netplan | Renderer.NetworkManager)], ) def test_convert(test_name, renderers, tmp_path): - network_config = safeyaml.load( + network_config = yaml.safe_load( Path(ARTIFACT_DIR, f"{test_name}.yaml").read_text() ) network_state = parse_net_config_data(network_config["network"]) diff --git a/tests/unittests/net/test_netplan.py b/tests/unittests/net/test_netplan.py index 28b0891d9..86bb32b10 100644 --- a/tests/unittests/net/test_netplan.py +++ b/tests/unittests/net/test_netplan.py @@ -17,25 +17,37 @@ def renderer(tmp_path): class TestNetplanRenderer: - @pytest.mark.parametrize("write_config", [True, False]) - def test_skip_netplan_generate(self, renderer, write_config, mocker): - """Check `netplan generate` is called if netplan config has changed.""" + @pytest.mark.parametrize( + "orig_config", ["", "{'orig_cfg': true}", "{'new_cfg': true}"] + ) + def test_skip_netplan_generate(self, renderer, orig_config, mocker): + """Check `netplan generate` called when netplan config has changed.""" header = "\n" - content = "foo" + new_config = "{'new_cfg': true}" renderer_mocks = mocker.patch.multiple( renderer, - _render_content=mocker.Mock(return_value=content), + _render_content=mocker.Mock(return_value=new_config), _netplan_generate=mocker.DEFAULT, _net_setup_link=mocker.DEFAULT, ) - if write_config: + if orig_config: util.ensure_dir(os.path.dirname(renderer.netplan_path)) with open(renderer.netplan_path, "w") as f: f.write(header) - f.write(content) - + f.write(orig_config) renderer.render_network_state(mocker.Mock()) - + config_changed = bool(orig_config != new_config) assert renderer_mocks["_netplan_generate"].call_args_list == [ - 
mock.call(run=True, same_content=write_config) + mock.call(run=True, config_changed=config_changed) ] + + +class TestNetplanAPIWriteYAMLFile: + def test_no_netplan_python_api(self, caplog): + """Skip when no netplan available.""" + with mock.patch("builtins.__import__", side_effect=ImportError): + netplan.netplan_api_write_yaml_file("network: {version: 2}") + assert ( + "No netplan python module. Fallback to write" + f" {netplan.CLOUDINIT_NETPLAN_FILE}" in caplog.text + ) diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py index d2063231d..eaad90dc8 100644 --- a/tests/unittests/net/test_network_state.py +++ b/tests/unittests/net/test_network_state.py @@ -3,8 +3,9 @@ from unittest import mock import pytest +import yaml -from cloudinit import safeyaml, util +from cloudinit import util from cloudinit.net import network_state from cloudinit.net.netplan import Renderer as NetplanRenderer from cloudinit.net.renderers import NAME_TO_RENDERER @@ -214,8 +215,8 @@ def test_v2_warns_deprecated_gateways( In netplan targets we perform a passthrough and the warning is not needed. """ - util.deprecate._log = set() # type: ignore - ncfg = safeyaml.load( + util.deprecate.__dict__["log"] = set() + ncfg = yaml.safe_load( cfg.format( gateway4="gateway4: 10.54.0.1", gateway6="gateway6: 2a00:1730:fff9:100::1", @@ -241,8 +242,8 @@ def test_v2_warns_deprecated_gateways( class TestNetworkStateParseNameservers: def _parse_network_state_from_config(self, config): with mock.patch("cloudinit.net.network_state.get_interfaces_by_mac"): - yaml = safeyaml.load(config) - return network_state.parse_net_config_data(yaml["network"]) + config = yaml.safe_load(config) + return network_state.parse_net_config_data(config["network"]) def test_v1_nameservers_valid(self): config = self._parse_network_state_from_config( diff --git a/tests/unittests/net/test_networkd.py b/tests/unittests/net/test_networkd.py index bb781b983..15708f51f 100644 --- a/tests/unittests/net/test_networkd.py +++ b/tests/unittests/net/test_networkd.py @@ -1,9 +1,11 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+from configparser import ConfigParser from string import Template from unittest import mock import pytest +import yaml from cloudinit import safeyaml from cloudinit.net import network_state, networkd @@ -195,6 +197,7 @@ [Route] Gateway=10.0.0.1 +GatewayOnLink=yes [Route] Gateway=2a01:4f8:10a:19d2::2 @@ -231,6 +234,7 @@ [Route] Gateway=192.168.254.254 +GatewayOnLink=yes [Route] Gateway=fec0::ffff @@ -243,12 +247,210 @@ """ +V1_CONFIG_MULTI_SUBNETS_NOT_ONLINK = """ +network: + version: 1 + config: + - type: physical + name: eth0 + mac_address: 'ae:98:25:fa:36:9e' + subnets: + - type: static + address: '10.0.0.2' + netmask: '255.255.255.0' + gateway: '10.0.0.1' + - type: static6 + address: '2a01:4f8:10a:19d2::4/64' + gateway: '2a01:4f8:10a:19d2::2' + - type: nameserver + address: + - '100.100.100.100' + search: + - 'rgrunbla.github.beta.tailscale.net' +""" + +V1_CONFIG_MULTI_SUBNETS_NOT_ONLINK_RENDERED = """\ +[Address] +Address=10.0.0.2/24 + +[Address] +Address=2a01:4f8:10a:19d2::4/64 + +[Match] +MACAddress=ae:98:25:fa:36:9e +Name=eth0 + +[Network] +DHCP=no +DNS=100.100.100.100 +Domains=rgrunbla.github.beta.tailscale.net + +[Route] +Gateway=10.0.0.1 + +[Route] +Gateway=2a01:4f8:10a:19d2::2 + +""" + +V2_CONFIG_MULTI_SUBNETS_NOT_ONLINK = """ +network: + version: 2 + ethernets: + eth0: + addresses: + - 192.168.1.1/24 + - fec0::1/64 + gateway4: 192.168.1.254 + gateway6: "fec0::ffff" + routes: + - to: 169.254.1.1/32 + - to: "fe80::1/128" +""" + +V2_CONFIG_MULTI_SUBNETS_NOT_ONLINK_RENDERED = """\ +[Address] +Address=192.168.1.1/24 + +[Address] +Address=fec0::1/64 + +[Match] +Name=eth0 + +[Network] +DHCP=no + +[Route] +Gateway=192.168.1.254 + +[Route] +Gateway=fec0::ffff + +[Route] +Destination=169.254.1.1/32 + +[Route] +Destination=fe80::1/128 + +""" + +V1_CONFIG_MULTI_SUBNETS_ONLINK = """ +network: + version: 1 + config: + - type: physical + name: eth0 + mac_address: 'ae:98:25:fa:36:9e' + subnets: + - type: static + address: '10.0.0.2' + netmask: '255.255.255.0' + gateway: '192.168.0.1' + - type: static6 + address: '2a01:4f8:10a:19d2::4/64' + gateway: '2000:4f8:10a:19d2::2' + - type: nameserver + address: + - '100.100.100.100' + search: + - 'rgrunbla.github.beta.tailscale.net' +""" + +V1_CONFIG_MULTI_SUBNETS_ONLINK_RENDERED = """\ +[Address] +Address=10.0.0.2/24 + +[Address] +Address=2a01:4f8:10a:19d2::4/64 + +[Match] +MACAddress=ae:98:25:fa:36:9e +Name=eth0 + +[Network] +DHCP=no +DNS=100.100.100.100 +Domains=rgrunbla.github.beta.tailscale.net + +[Route] +Gateway=192.168.0.1 +GatewayOnLink=yes + +[Route] +Gateway=2000:4f8:10a:19d2::2 +GatewayOnLink=yes + +""" + +V2_CONFIG_MULTI_SUBNETS_ONLINK = """ +network: + version: 2 + ethernets: + eth0: + addresses: + - 192.168.1.1/32 + - fec0::1/128 + gateway4: 192.168.254.254 + gateway6: "fec0::ffff" + routes: + - to: 169.254.1.1/32 + - to: "fe80::1/128" +""" + +V2_CONFIG_MULTI_SUBNETS_ONLINK_RENDERED = """\ +[Address] +Address=192.168.1.1/32 + +[Address] +Address=fec0::1/128 + +[Match] +Name=eth0 + +[Network] +DHCP=no + +[Route] +Gateway=192.168.254.254 +GatewayOnLink=yes + +[Route] +Gateway=fec0::ffff +GatewayOnLink=yes + +[Route] +Destination=169.254.1.1/32 + +[Route] +Destination=fe80::1/128 + +""" + +V1_CONFIG_ACCEPT_RA_YAML = """\ +network: + version: 1 + config: + - type: physical + name: eth0 + mac_address: "00:11:22:33:44:55" +""" + +V2_CONFIG_ACCEPT_RA_YAML = """\ +network: + version: 2 + ethernets: + eth0: + match: + macaddress: "00:11:22:33:44:55" +""" + class TestNetworkdRenderState: def _parse_network_state_from_config(self, config): with 
mock.patch("cloudinit.net.network_state.get_interfaces_by_mac"): - yaml = safeyaml.load(config) - return network_state.parse_net_config_data(yaml["network"]) + config = yaml.safe_load(config) + return network_state.parse_net_config_data(config["network"]) def test_networkd_render_with_set_name(self): with mock.patch("cloudinit.net.get_interfaces_by_mac"): @@ -364,3 +566,127 @@ def test_networkd_render_v2_multi_subnets(self): rendered_content = renderer._render_content(ns) assert rendered_content["eth0"] == V2_CONFIG_MULTI_SUBNETS_RENDERED + + def test_networkd_render_v1_multi_subnets_not_onlink(self): + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config( + V1_CONFIG_MULTI_SUBNETS_NOT_ONLINK + ) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) + + assert ( + rendered_content["eth0"] + == V1_CONFIG_MULTI_SUBNETS_NOT_ONLINK_RENDERED + ) + + def test_networkd_render_v2_multi_subnets_not_onlink(self): + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config( + V2_CONFIG_MULTI_SUBNETS_NOT_ONLINK + ) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) + + assert ( + rendered_content["eth0"] + == V2_CONFIG_MULTI_SUBNETS_NOT_ONLINK_RENDERED + ) + + def test_networkd_render_v1_multi_subnets_onlink(self): + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config( + V1_CONFIG_MULTI_SUBNETS_ONLINK + ) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) + + assert ( + rendered_content["eth0"] == V1_CONFIG_MULTI_SUBNETS_ONLINK_RENDERED + ) + + def test_networkd_render_v2_multi_subnets_onlink(self): + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config( + V2_CONFIG_MULTI_SUBNETS_ONLINK + ) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) + + assert ( + rendered_content["eth0"] == V2_CONFIG_MULTI_SUBNETS_ONLINK_RENDERED + ) + + @pytest.mark.parametrize("version", ["v1", "v2"]) + @pytest.mark.parametrize( + "address", ["4", "6", "10.0.0.10/24", "2001:db8::1/64"] + ) + @pytest.mark.parametrize("accept_ra", [True, False, None]) + def test_networkd_render_accept_ra(self, version, address, accept_ra): + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + # network-config v1 inputs + if version == "v1": + config = yaml.safe_load(V1_CONFIG_ACCEPT_RA_YAML) + if address == "4" or address == "6": + config["network"]["config"][0]["subnets"] = [ + {"type": f"dhcp{address}"} + ] + else: + config["network"]["config"][0]["subnets"] = [ + {"type": "static", "address": address} + ] + if accept_ra is not None: + config["network"]["config"][0]["accept-ra"] = accept_ra + # network-config v2 inputs + elif version == "v2": + config = yaml.safe_load(V2_CONFIG_ACCEPT_RA_YAML) + if address == "4" or address == "6": + config["network"]["ethernets"]["eth0"][ + f"dhcp{address}" + ] = True + else: + config["network"]["ethernets"]["eth0"]["addresses"] = [ + address + ] + if isinstance(accept_ra, bool): + config["network"]["ethernets"]["eth0"][ + "accept-ra" + ] = accept_ra + else: + raise ValueError(f"Unknown network-config version: {version}") + config = safeyaml.dumps(config) + + # render + ns = self._parse_network_state_from_config(config) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) + + # dump the input/output for debugging test failures + print(config) + 
print(rendered_content["eth0"]) + + # validate the rendered content + c = ConfigParser() + c.read_string(rendered_content["eth0"]) + + if address in ["4", "6"]: + expected_dhcp = f"ipv{address}" + expected_address = None + else: + expected_dhcp = False + expected_address = address + try: + got_dhcp = c.getboolean("Network", "DHCP") + except ValueError: + got_dhcp = c.get("Network", "DHCP", fallback=None) + got_address = c.get("Address", "Address", fallback=None) + got_accept_ra = c.getboolean("Network", "IPv6AcceptRA", fallback=None) + assert ( + got_dhcp == expected_dhcp + ), f"DHCP={got_dhcp}, expected {expected_dhcp}" + assert ( + got_address == expected_address + ), f"Address={got_address}, expected {expected_address}" + assert ( + got_accept_ra == accept_ra + ), f"IPv6AcceptRA={got_accept_ra}, expected {accept_ra}" diff --git a/tests/unittests/reporting/test_reporting_hyperv.py b/tests/unittests/reporting/test_reporting_hyperv.py index e9f2b213f..31b0408d1 100644 --- a/tests/unittests/reporting/test_reporting_hyperv.py +++ b/tests/unittests/reporting/test_reporting_hyperv.py @@ -11,6 +11,11 @@ from cloudinit import util from cloudinit.reporting import events, instantiated_handler_registry from cloudinit.reporting.handlers import HyperVKvpReportingHandler + +# TODO: Importing `errors` here is a hack to avoid a circular import. +# Without it, we have an azure->errors->identity->azure import loop, but +# long term we should restructure these modules to avoid the issue. +from cloudinit.sources.azure import errors # noqa: F401 from cloudinit.sources.helpers import azure from tests.unittests.helpers import CiTestCase diff --git a/tests/unittests/runs/test_merge_run.py b/tests/unittests/runs/test_merge_run.py index 7b1559b96..e7f32d03e 100644 --- a/tests/unittests/runs/test_merge_run.py +++ b/tests/unittests/runs/test_merge_run.py @@ -22,7 +22,21 @@ def test_none_ds(self): cfg = { "datasource_list": ["None"], "cloud_init_modules": ["write_files"], - "system_info": {"paths": {"run_dir": new_root}}, + "system_info": { + "paths": {"run_dir": new_root}, + "package_mirrors": [ + { + "arches": ["i386", "amd64", "blah"], + "failsafe": { + "primary": "http://my.archive.mydomain.com/ubuntu", + "security": ( + "http://my.security.mydomain.com/ubuntu" + ), + }, + "search": {"primary": [], "security": []}, + }, + ], + }, } ud = helpers.readResource("user_data.1.txt") cloud_cfg = safeyaml.dumps(cfg) diff --git a/tests/unittests/runs/test_simple_run.py b/tests/unittests/runs/test_simple_run.py index eec2db00b..7cb5a28e7 100644 --- a/tests/unittests/runs/test_simple_run.py +++ b/tests/unittests/runs/test_simple_run.py @@ -2,6 +2,7 @@ import copy import os +from unittest import mock from cloudinit import atomic_helper, safeyaml, stages, util from cloudinit.config.modules import Modules @@ -45,6 +46,15 @@ def setUp(self): self.patchOS(self.new_root) self.patchUtils(self.new_root) + self.m_doc = mock.patch( + "cloudinit.config.schema.get_meta_doc", return_value={} + ) + self.m_doc.start() + + def tearDown(self): + self.m_doc.stop() + super().tearDown() + def test_none_ds_populates_var_lib_cloud(self): """Init and run_section default behavior creates appropriate dirs.""" # Now start verifying what's created diff --git a/tests/unittests/sources/azure/__init__.py b/tests/unittests/sources/azure/__init__.py new file mode 100644 index 000000000..da6365a59 --- /dev/null +++ b/tests/unittests/sources/azure/__init__.py @@ -0,0 +1 @@ +# This file is part of cloud-init. See LICENSE file for license information.
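# --- Reviewer sketch (illustrative only, not part of this patch) ---
# The accept-ra test above validates rendered systemd-networkd output by
# round-tripping it through configparser rather than comparing raw
# strings, which keeps the assertions independent of key ordering. A
# minimal standalone version of the same idea, with a hand-written unit
# as input:
from configparser import ConfigParser

rendered = """\
[Match]
Name=eth0

[Network]
DHCP=ipv4
IPv6AcceptRA=True
"""

parser = ConfigParser()
parser.read_string(rendered)

# getboolean() raises ValueError for non-boolean values such as "ipv4",
# which is why the test above falls back to a plain get() in that case.
assert parser.get("Network", "DHCP") == "ipv4"
assert parser.getboolean("Network", "IPv6AcceptRA") is True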
diff --git a/tests/unittests/sources/azure/test_imds.py b/tests/unittests/sources/azure/test_imds.py index 45185449d..eb5119b82 100644 --- a/tests/unittests/sources/azure/test_imds.py +++ b/tests/unittests/sources/azure/test_imds.py @@ -10,6 +10,10 @@ import requests import responses +# TODO: Importing `errors` here is a hack to avoid a circular import. +# Without it, we have an azure->errors->identity->azure import loop, but +# long term we should restructure these modules to avoid the issue. +from cloudinit.sources.azure import errors as _errors # noqa: F401 from cloudinit.sources.azure import imds from cloudinit.url_helper import UrlError, readurl @@ -56,8 +60,8 @@ def mock_requests_session_request(): @pytest.fixture(autouse=True) -def mock_time(): - with mock.patch.object(imds, "time", autospec=True) as m: +def mock_time_monotonic(): + with mock.patch.object(imds, "monotonic", autospec=True) as m: m.time_current = 0.0 m.time_increment = 1.0 @@ -129,6 +133,25 @@ def regex_for_http_error(error): return f".*{error!s}.*" +class TestHeaders: + default_url = ( + "http://169.254.169.254/metadata/instance?" + "api-version=2021-08-01&extended=true" + ) + + def test_headers_cb(self): + headers = imds.headers_cb(self.default_url) + assert list(headers.keys()) == ["Metadata", "x-ms-client-request-id"] + assert headers.get("Metadata") == "true" + uuid = headers.get("x-ms-client-request-id") + match = re.search( + "^[a-zA-Z0-9]{8}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-" + "[a-zA-Z0-9]{4}-[a-zA-Z0-9]{12}$", + uuid, + ) + assert match + + class TestFetchMetadataWithApiFallback: default_url = ( "http://169.254.169.254/metadata/instance?" @@ -140,7 +163,6 @@ class TestFetchMetadataWithApiFallback: # Early versions of responses do not appreciate the parameters... base_url = "http://169.254.169.254/metadata/instance" - headers = {"Metadata": "true"} timeout = 30 @pytest.mark.parametrize("retry_deadline", [0.0, 1.0, 60.0]) @@ -168,7 +190,7 @@ def test_basic( mock.call( self.default_url, timeout=self.timeout, - headers=self.headers, + headers_cb=imds.headers_cb, exception_cb=mock.ANY, infinite=True, log_req_resp=True, @@ -213,7 +235,7 @@ def test_basic_fallback( mock.call( self.default_url, timeout=self.timeout, - headers=self.headers, + headers_cb=imds.headers_cb, exception_cb=mock.ANY, infinite=True, log_req_resp=True, @@ -221,7 +243,7 @@ def test_basic_fallback( mock.call( self.fallback_url, timeout=self.timeout, - headers=self.headers, + headers_cb=imds.headers_cb, exception_cb=mock.ANY, infinite=True, log_req_resp=True, @@ -232,7 +254,11 @@ def test_basic_fallback( ( "cloudinit.url_helper", logging.DEBUG, - StringMatch(r"\[0/infinite\] open.*"), + StringMatch( + r"\[0/infinite\] open.*Metadata.*true" + ".*x-ms-client-request-id.*[a-zA-Z0-9]{8}-[a-zA-Z0-9]{4}-" + "[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{12}.*" + ), ), ( LOG_PATH, @@ -575,7 +601,7 @@ def test_non_json_repsonse( mock.call( self.default_url, timeout=self.timeout, - headers=self.headers, + headers_cb=imds.headers_cb, exception_cb=mock.ANY, infinite=True, log_req_resp=True, @@ -639,7 +665,6 @@ class TestFetchReprovisionData: "http://169.254.169.254/metadata/" "reprovisiondata?api-version=2019-06-01" ) - headers = {"Metadata": "true"} timeout = 30 # Early versions of responses do not appreciate the parameters...
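# --- Reviewer sketch (illustrative only, not part of this patch) ---
# The TestHeaders case above pins down the shape of imds.headers_cb.
# Passing a callable via `headers_cb=` (instead of a static `headers=`
# dict) lets the URL helper recompute headers per request, so each
# retry carries a fresh correlation id. A sketch of that idea, not
# cloud-init's actual implementation:
import re
import uuid

def headers_cb(url: str) -> dict:
    # str(uuid.uuid4()) satisfies the 8-4-4-4-12 pattern the test checks.
    return {
        "Metadata": "true",
        "x-ms-client-request-id": str(uuid.uuid4()),
    }

headers = headers_cb("http://169.254.169.254/metadata/instance")
assert list(headers) == ["Metadata", "x-ms-client-request-id"]
assert re.fullmatch(
    r"[a-zA-Z0-9]{8}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}"
    r"-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{12}",
    headers["x-ms-client-request-id"],
)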
@@ -663,7 +688,7 @@ def test_basic( mock.call( self.url, timeout=self.timeout, - headers=self.headers, + headers_cb=imds.headers_cb, exception_cb=mock.ANY, infinite=True, log_req_resp=False, diff --git a/tests/unittests/sources/helpers/__init__.py b/tests/unittests/sources/helpers/__init__.py new file mode 100644 index 000000000..da6365a59 --- /dev/null +++ b/tests/unittests/sources/helpers/__init__.py @@ -0,0 +1 @@ +# This file is part of cloud-init. See LICENSE file for license information. diff --git a/tests/unittests/sources/helpers/test_netlink.py b/tests/unittests/sources/helpers/test_netlink.py index 5eabf1042..b68c8006c 100644 --- a/tests/unittests/sources/helpers/test_netlink.py +++ b/tests/unittests/sources/helpers/test_netlink.py @@ -33,7 +33,7 @@ def int_to_bytes(i): - """convert integer to binary: eg: 1 to \x01""" + r"""convert integer to binary: eg: 1 to \x01""" hex_value = "{0:x}".format(i) hex_value = "0" * (len(hex_value) % 2) + hex_value return codecs.decode(hex_value, "hex_codec") @@ -44,7 +44,7 @@ class TestCreateBoundNetlinkSocket(CiTestCase): def test_socket_error_on_create(self, m_socket): """create_bound_netlink_socket catches socket creation exception""" - """NetlinkCreateSocketError is raised when socket creation errors.""" + # NetlinkCreateSocketError is raised when socket creation errors. m_socket.side_effect = socket.error("Fake socket failure") with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr: create_bound_netlink_socket() diff --git a/tests/unittests/sources/helpers/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py index 4d85ec3c6..7ae164140 100644 --- a/tests/unittests/sources/helpers/test_openstack.py +++ b/tests/unittests/sources/helpers/test_openstack.py @@ -112,3 +112,122 @@ def test_subnet_dns(self): assert expected == openstack.convert_net_json( network_json=net_json, known_macs=macs ) + + def test_bond_mac(self): + """Verify the bond mac address is assigned correctly.""" + network_json = { + "links": [ + { + "id": "ens1f0np0", + "name": "ens1f0np0", + "type": "phy", + "ethernet_mac_address": "xx:xx:xx:xx:xx:00", + "mtu": 9000, + }, + { + "id": "ens1f1np1", + "name": "ens1f1np1", + "type": "phy", + "ethernet_mac_address": "xx:xx:xx:xx:xx:01", + "mtu": 9000, + }, + { + "id": "bond0", + "name": "bond0", + "type": "bond", + "bond_links": ["ens1f0np0", "ens1f1np1"], + "mtu": 9000, + "ethernet_mac_address": "xx:xx:xx:xx:xx:00", + "bond_mode": "802.3ad", + "bond_xmit_hash_policy": "layer3+4", + "bond_miimon": 100, + }, + { + "id": "bond0.123", + "name": "bond0.123", + "type": "vlan", + "vlan_link": "bond0", + "vlan_id": 123, + "vlan_mac_address": "xx:xx:xx:xx:xx:00", + }, + ], + "networks": [ + { + "id": "publicnet-ipv4", + "type": "ipv4", + "link": "bond0.123", + "ip_address": "x.x.x.x", + "netmask": "255.255.255.0", + "routes": [ + { + "network": "0.0.0.0", + "netmask": "0.0.0.0", + "gateway": "x.x.x.1", + } + ], + "network_id": "00000000-0000-0000-0000-000000000000", + } + ], + "services": [{"type": "dns", "address": "1.1.1.1"}], + } + expected = { + "config": [ + { + "mac_address": "xx:xx:xx:xx:xx:00", + "mtu": 9000, + "name": "ens1f0np0", + "subnets": [], + "type": "physical", + }, + { + "mac_address": "xx:xx:xx:xx:xx:01", + "mtu": 9000, + "name": "ens1f1np1", + "subnets": [], + "type": "physical", + }, + { + "bond_interfaces": ["ens1f0np0", "ens1f1np1"], + "mtu": 9000, + "name": "bond0", + "mac_address": "xx:xx:xx:xx:xx:00", + "params": { + "bond-miimon": 100, + "bond-mode": "802.3ad", + "bond-xmit_hash_policy": 
"layer3+4", + }, + "subnets": [], + "type": "bond", + }, + { + "name": "bond0.123", + "subnets": [ + { + "address": "x.x.x.x", + "ipv4": True, + "netmask": "255.255.255.0", + "routes": [ + { + "gateway": "x.x.x.1", + "netmask": "0.0.0.0", + "network": "0.0.0.0", + } + ], + "type": "static", + } + ], + "type": "vlan", + "vlan_id": 123, + "vlan_link": "bond0", + }, + {"address": "1.1.1.1", "type": "nameserver"}, + ], + "version": 1, + } + macs = { + "xx:xx:xx:xx:xx:00": "ens1f0np0", + "xx:xx:xx:xx:xx:01": "ens1f1np1", + } + assert expected == openstack.convert_net_json( + network_json=network_json, known_macs=macs + ) diff --git a/tests/unittests/sources/test___init__.py b/tests/unittests/sources/test___init__.py index f33d00103..2c214aeed 100644 --- a/tests/unittests/sources/test___init__.py +++ b/tests/unittests/sources/test___init__.py @@ -44,8 +44,8 @@ ("foo bonding.max_bonds=0 ds=nocloud-net bar", "nocloud-net"), ), ) -def test_ds_detect_kernel_commandline(m_cmdline, expected_ds): - """check commandline match""" +def test_ds_detect_kernel_command_line(m_cmdline, expected_ds): + """check command line match""" with mock.patch( "cloudinit.util.get_cmdline", return_value=m_cmdline, diff --git a/tests/unittests/sources/test_akamai.py b/tests/unittests/sources/test_akamai.py index 8b593dee8..2480269f6 100644 --- a/tests/unittests/sources/test_akamai.py +++ b/tests/unittests/sources/test_akamai.py @@ -1,5 +1,5 @@ from contextlib import suppress -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Dict, List, Optional, Union import pytest @@ -17,7 +17,7 @@ class TestDataSourceAkamai: """ def _get_datasource( - self, ds_cfg: Dict[str, Any] = None, local: bool = False + self, ds_cfg: Optional[Dict[str, Any]] = None, local: bool = False ) -> Union[DataSourceAkamai, DataSourceAkamaiLocal]: """ Creates a test DataSource configured as provided @@ -224,7 +224,7 @@ def test_get_network_context_managers( get_interfaces_by_mac, local_stage: bool, ds_cfg: Dict[str, Any], - expected_manager_config: List[Tuple[Tuple[bool, bool], bool]], + expected_manager_config: List, expected_interface: str, ): """ diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py index 01140720c..2639302b2 100644 --- a/tests/unittests/sources/test_aliyun.py +++ b/tests/unittests/sources/test_aliyun.py @@ -4,6 +4,7 @@ import os from unittest import mock +import pytest import responses from cloudinit import helpers @@ -186,6 +187,7 @@ def test_with_mock_server(self, m_is_aliyun, m_resolv): @mock.patch("cloudinit.net.find_fallback_nic") @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") + @pytest.mark.usefixtures("disable_netdev_info") def test_aliyun_local_with_mock_server( self, m_is_bsd, diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index aa67396ae..3c0e10aa0 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import copy import datetime @@ -28,6 +29,7 @@ from tests.unittests.helpers import ( CiTestCase, ExitStack, + example_netdev, mock, populate_dir, resourceLocation, @@ -97,6 +99,11 @@ def mock_device_driver(): yield m +@pytest.fixture(autouse=True) +def mock_netinfo(disable_netdev_info): + pass + + @pytest.fixture def mock_generate_fallback_config(): with mock.patch( @@ -115,6 +122,15 @@ def mock_time(): yield m +@pytest.fixture +def mock_monotonic(): + with mock.patch( + MOCKPATH + "monotonic", + autospec=True, + ) as m: + yield m + + @pytest.fixture def mock_dmi_read_dmi_data(): def fake_read(key: str) -> str: @@ -718,14 +734,20 @@ class TestGenerateNetworkConfig: "match": {"name": "eth1"}, "dhcp6": False, "dhcp4": True, - "dhcp4-overrides": {"route-metric": 200}, + "dhcp4-overrides": { + "route-metric": 200, + "use-dns": False, + }, }, "eth2": { "set-name": "eth2", "match": {"name": "eth2"}, "dhcp6": False, "dhcp4": True, - "dhcp4-overrides": {"route-metric": 300}, + "dhcp4-overrides": { + "route-metric": 300, + "use-dns": False, + }, }, }, "version": 2, @@ -952,7 +974,7 @@ def test_single_ipv4_nic_configuration( "dhcp6": False, "match": {"name": "eth0"}, "set-name": "eth0", - } + }, }, "version": 2, } @@ -1533,7 +1555,7 @@ def test_network_config_set_from_imds(self): "dhcp6": False, "dhcp4": True, "dhcp4-overrides": {"route-metric": 100}, - } + }, }, "version": 2, } @@ -1562,14 +1584,14 @@ def test_network_config_set_from_imds_route_metric_for_secondary_nic(self): "match": {"name": "eth1"}, "dhcp6": False, "dhcp4": True, - "dhcp4-overrides": {"route-metric": 200}, + "dhcp4-overrides": {"route-metric": 200, "use-dns": False}, }, "eth2": { "set-name": "eth2", "match": {"name": "eth2"}, "dhcp6": False, "dhcp4": True, - "dhcp4-overrides": {"route-metric": 300}, + "dhcp4-overrides": {"route-metric": 300, "use-dns": False}, }, }, "version": 2, @@ -1602,7 +1624,7 @@ def test_network_config_set_from_imds_for_secondary_nic_no_ip(self): "dhcp6": False, "dhcp4": True, "dhcp4-overrides": {"route-metric": 100}, - } + }, }, "version": 2, } @@ -2818,13 +2840,21 @@ def test_read_azure_ovf_with_savable_type(self): self.assertTrue(cfg["PreprovisionedVm"]) self.assertEqual("Savable", cfg["PreprovisionedVMType"]) - def test_read_azure_ovf_with_proxy_guest_agent(self): + def test_read_azure_ovf_with_proxy_guest_agent_true(self): """The read_azure_ovf method should set ProvisionGuestProxyAgent cfg flag to True.""" content = construct_ovf_env(provision_guest_proxy_agent=True) ret = dsaz.read_azure_ovf(content) cfg = ret[2] - self.assertTrue(cfg["ProvisionGuestProxyAgent"]) + assert cfg["ProvisionGuestProxyAgent"] is True + + def test_read_azure_ovf_with_proxy_guest_agent_false(self): + """The read_azure_ovf method should set ProvisionGuestProxyAgent + cfg flag to False.""" + content = construct_ovf_env(provision_guest_proxy_agent=False) + ret = dsaz.read_azure_ovf(content) + cfg = ret[2] + assert cfg["ProvisionGuestProxyAgent"] is False @pytest.mark.parametrize( @@ -2984,6 +3014,7 @@ def test_wait_for_nic_attach_multinic_attach( ip="10.0.0.4", prefix_or_mask="32", broadcast="255.255.255.255", + interface_addrs_before_dhcp=example_netdev, router="10.0.0.1", static_routes=[ ("0.0.0.0/0", "10.0.0.1"), @@ -3013,6 +3044,7 @@ def test_wait_for_nic_attach_multinic_attach( ip="10.0.0.4", prefix_or_mask="32", broadcast="255.255.255.255", + interface_addrs_before_dhcp=example_netdev, router="10.0.0.1", static_routes=None, ) @@ -3528,10 +3560,11 @@ def 
test_retry_times_out( mock_kvp_report_failure_to_host, mock_sleep, mock_time, + mock_monotonic, error_class, error_reason, ): - mock_time.side_effect = [ + mock_monotonic.side_effect = [ 0.0, # start 60.1, # duration check for host error report 60.11, # loop check @@ -3577,6 +3610,7 @@ def test_primary(self, azure_ds, static_routes): ip="10.0.0.4", prefix_or_mask="32", broadcast="255.255.255.255", + interface_addrs_before_dhcp=example_netdev, router="10.0.0.1", static_routes=static_routes, ) @@ -3590,6 +3624,7 @@ def test_primary_via_wireserver_specified_in_option_245(self, azure_ds): ip="10.0.0.4", prefix_or_mask="32", broadcast="255.255.255.255", + interface_addrs_before_dhcp=example_netdev, router="10.0.0.1", static_routes=[("1.2.3.4/32", "10.0.0.1")], ) @@ -3612,6 +3647,7 @@ def test_secondary(self, azure_ds, static_routes): ip="10.0.0.4", prefix_or_mask="32", broadcast="255.255.255.255", + interface_addrs_before_dhcp=example_netdev, router="10.0.0.1", static_routes=static_routes, ) @@ -3732,7 +3768,7 @@ def test_no_pps(self): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", timeout=30, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, exception_cb=mock.ANY, infinite=True, log_req_resp=True, @@ -3811,7 +3847,7 @@ def test_stale_pps(self, pps_type): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=True, timeout=30, @@ -3820,7 +3856,7 @@ def test_stale_pps(self, pps_type): "http://169.254.169.254/metadata/reprovisiondata?" "api-version=2019-06-01", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, log_req_resp=False, infinite=True, timeout=30, @@ -3829,7 +3865,7 @@ def test_stale_pps(self, pps_type): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=True, timeout=30, @@ -3874,7 +3910,7 @@ def test_running_pps(self): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=True, timeout=30, @@ -3883,7 +3919,7 @@ def test_running_pps(self): "http://169.254.169.254/metadata/reprovisiondata?" "api-version=2019-06-01", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, log_req_resp=False, infinite=True, timeout=30, @@ -3892,7 +3928,7 @@ def test_running_pps(self): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=True, timeout=30, @@ -3992,7 +4028,7 @@ def test_savable_pps(self): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=True, timeout=30, @@ -4001,7 +4037,7 @@ def test_savable_pps(self): "http://169.254.169.254/metadata/reprovisiondata?" "api-version=2019-06-01", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, log_req_resp=False, infinite=True, timeout=30, @@ -4010,7 +4046,7 @@ def test_savable_pps(self): "http://169.254.169.254/metadata/instance?" 
"api-version=2021-08-01&extended=true", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=True, timeout=30, @@ -4150,7 +4186,7 @@ def test_savable_pps_early_unplug(self, fabric_side_effect): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=True, timeout=30, @@ -4159,7 +4195,7 @@ def test_savable_pps_early_unplug(self, fabric_side_effect): "http://169.254.169.254/metadata/reprovisiondata?" "api-version=2019-06-01", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=False, timeout=30, @@ -4168,7 +4204,7 @@ def test_savable_pps_early_unplug(self, fabric_side_effect): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=True, timeout=30, @@ -4262,7 +4298,7 @@ def test_recovery_pps(self, pps_type): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=True, timeout=30, @@ -4271,7 +4307,7 @@ def test_recovery_pps(self, pps_type): "http://169.254.169.254/metadata/reprovisiondata?" "api-version=2019-06-01", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=False, timeout=30, @@ -4280,7 +4316,7 @@ def test_recovery_pps(self, pps_type): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=True, timeout=30, @@ -4388,7 +4424,7 @@ def test_os_disk_pps(self, mock_sleep, subp_side_effect): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", exception_cb=mock.ANY, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, infinite=True, log_req_resp=True, timeout=30, @@ -4451,7 +4487,7 @@ def test_imds_failure_results_in_provisioning_failure(self): "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", timeout=30, - headers={"Metadata": "true"}, + headers_cb=imds.headers_cb, exception_cb=mock.ANY, infinite=True, log_req_resp=True, @@ -4502,6 +4538,7 @@ def test_errors( mock_imds_fetch_metadata_with_api_fallback, mock_kvp_report_failure_to_host, mock_time, + mock_monotonic, monkeypatch, report_failure, reported_error_type, @@ -4512,7 +4549,7 @@ def test_errors( ) azure_ds._route_configured_for_imds = route_configured_for_imds mock_imds_fetch_metadata_with_api_fallback.side_effect = exception - mock_time.return_value = 0.0 + mock_monotonic.return_value = 0.0 max_connection_errors = None if route_configured_for_imds else 11 assert ( diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py index dcb1cc71e..0a6959658 100644 --- a/tests/unittests/sources/test_azure_helper.py +++ b/tests/unittests/sources/test_azure_helper.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import os import re diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py index d1cf54478..ea690a000 100644 --- a/tests/unittests/sources/test_ec2.py +++ b/tests/unittests/sources/test_ec2.py @@ -1,8 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import copy import json -import threading +import logging +from typing import List from unittest import mock import pytest @@ -12,7 +14,8 @@ from cloudinit import helpers from cloudinit.net import netplan from cloudinit.sources import DataSourceEc2 as ec2 -from tests.unittests import helpers as test_helpers +from cloudinit.sources import NicOrder +from tests.unittests.helpers import example_netdev from tests.unittests.util import MockDistro DYNAMIC_METADATA = { @@ -293,7 +296,7 @@ def disable_is_resolvable(): def _register_ssh_keys(rfunc, base_url, keys_data): - """handle ssh key inconsistencies. + r"""handle ssh key inconsistencies. public-keys in the ec2 metadata is inconsistently formated compared to other entries. @@ -329,7 +332,7 @@ def _register_ssh_keys(rfunc, base_url, keys_data): def register_mock_metaserver(base_url, data, responses_mock=None): - """Register with responses a ec2 metadata like service serving 'data'. + r"""Register with responses a ec2 metadata like service serving 'data'. If given a dictionary, it will populate urls under base_url for that dictionary. For example, input of @@ -343,7 +346,7 @@ def register_mock_metaserver(base_url, data, responses_mock=None): """ responses_mock = responses_mock or responses - def register_helper(register, base_url, body): + def register_helper(base_url, body): if not isinstance(base_url, str): register(base_url, body) return @@ -364,69 +367,55 @@ def register_helper(register, base_url, body): suffix += "/" vals.append(suffix) url = base_url + "/" + suffix - register_helper(register, url, v) + register_helper(url, v) register(base_url, "\n".join(vals) + "\n") register(base_url + "/", "\n".join(vals) + "\n") elif body is None: register(base_url, "not found", status=404) - def myreg(*argc, **kwargs): - url, body = argc + def register(url, body, status=200): method = responses.PUT if "latest/api/token" in url else responses.GET - status = kwargs.get("status", 200) return responses_mock.add(method, url, body, status=status) - register_helper(myreg, base_url, data) + register_helper(base_url, data) -class TestEc2(test_helpers.ResponsesTestCase): - with_logs = True - maxDiff = None +class TestEc2: + datasource = ec2.DataSourceEc2 + metadata_addr = datasource.metadata_urls[0] valid_platform_data = { "uuid": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", - "uuid_source": "dmi", "serial": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", } - def setUp(self): - super(TestEc2, self).setUp() - self.datasource = ec2.DataSourceEc2 - self.metadata_addr = self.datasource.metadata_urls[0] - self.tmp = self.tmp_dir() - def data_url(self, version, data_item="meta-data"): """Return a metadata url based on the version provided.""" return "/".join([self.metadata_addr, version, data_item]) - def _patch_add_cleanup(self, mpath, *args, **kwargs): - p = mock.patch(mpath, *args, **kwargs) - p.start() - self.addCleanup(p.stop) - def _setup_ds( - self, sys_cfg, platform_data, md, md_version=None, distro=None + self, + sys_cfg, + platform_data, + md, + *, + mocker, + tmpdir, + md_version=None, + distro=None ): self.uris = [] distro = distro or mock.MagicMock() - 
distro.get_tmp_exec_path = self.tmp_dir - paths = helpers.Paths({"run_dir": self.tmp}) + distro.get_tmp_exec_path = tmpdir + paths = helpers.Paths({"run_dir": tmpdir}) if sys_cfg is None: sys_cfg = {} ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) - event = threading.Event() - p = mock.patch("time.sleep", event.wait) - p.start() - - def _mock_sleep(): - event.set() - p.stop() - - self.addCleanup(_mock_sleep) + mocker.patch("time.sleep") if not md_version: md_version = ds.min_metadata_version if platform_data is not None: - self._patch_add_cleanup( + mocker.patch( "cloudinit.sources.DataSourceEc2._collect_platform_data", return_value=platform_data, ) @@ -436,7 +425,7 @@ def _mock_sleep(): ds.min_metadata_version ] + ds.extended_metadata_versions token_url = self.data_url("latest", data_item="api/token") - register_mock_metaserver(token_url, "API-TOKEN", self.responses) + register_mock_metaserver(token_url, "API-TOKEN", responses) for version in all_versions: metadata_url = self.data_url(version) + "/" if version == md_version: @@ -444,13 +433,13 @@ def _mock_sleep(): register_mock_metaserver( metadata_url, md.get("md", DEFAULT_METADATA), - self.responses, + responses, ) userdata_url = self.data_url( version, data_item="user-data" ) register_mock_metaserver( - userdata_url, md.get("ud", ""), self.responses + userdata_url, md.get("ud", ""), responses ) identity_url = self.data_url( version, data_item="dynamic/instance-identity" @@ -458,7 +447,7 @@ def _mock_sleep(): register_mock_metaserver( identity_url, md.get("id", DYNAMIC_METADATA), - self.responses, + responses, ) else: instance_id_url = metadata_url + "instance-id" @@ -467,21 +456,26 @@ def _mock_sleep(): register_mock_metaserver( instance_id_url, DEFAULT_METADATA["instance-id"], - self.responses, + responses, ) else: # Register 404s for all unrequested extended versions register_mock_metaserver( - instance_id_url, None, self.responses + instance_id_url, None, responses ) return ds - def test_network_config_property_returns_version_2_network_data(self): + @responses.activate + def test_network_config_property_returns_version_2_network_data( + self, mocker, tmpdir + ): """network_config property returns network version 2 for metadata""" ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) find_fallback_path = M_PATH_NET + "find_fallback_nic" with mock.patch(find_fallback_path) as m_find_fallback: @@ -508,9 +502,10 @@ def test_network_config_property_returns_version_2_network_data(self): m_get_interfaces_by_mac.return_value = {mac1: "eth9"} m_find_fallback.return_value = "eth9" m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) + assert expected == ds.network_config - def test_network_config_property_set_dhcp4(self): + @responses.activate + def test_network_config_property_set_dhcp4(self, mocker, tmpdir): """network_config property configures dhcp4 on nics with local-ipv4s. 
Only one device is configured based on get_interfaces_by_mac even when @@ -520,6 +515,8 @@ def test_network_config_property_set_dhcp4(self): platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) find_fallback_path = M_PATH_NET + "find_fallback_nic" with mock.patch(find_fallback_path) as m_find_fallback: @@ -550,9 +547,12 @@ def test_network_config_property_set_dhcp4(self): m_get_interfaces_by_mac.return_value = {mac1: "eth9"} m_find_fallback.return_value = "eth9" m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) + assert expected == ds.network_config - def test_network_config_property_secondary_private_ips(self): + @responses.activate + def test_network_config_property_secondary_private_ips( + self, mocker, tmpdir + ): """network_config property configures any secondary ipv4 addresses. Only one device is configured based on get_interfaces_by_mac even when @@ -562,6 +562,8 @@ def test_network_config_property_secondary_private_ips(self): platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, md={"md": SECONDARY_IP_METADATA_2018_09_24}, + mocker=mocker, + tmpdir=tmpdir, ) find_fallback_path = M_PATH_NET + "find_fallback_nic" with mock.patch(find_fallback_path) as m_find_fallback: @@ -593,20 +595,28 @@ def test_network_config_property_secondary_private_ips(self): m_get_interfaces_by_mac.return_value = {mac1: "eth9"} m_find_fallback.return_value = "eth9" m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) + assert expected == ds.network_config - def test_network_config_property_is_cached_in_datasource(self): + @responses.activate + def test_network_config_property_is_cached_in_datasource( + self, mocker, tmpdir + ): """network_config property is cached in DataSourceEc2.""" ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) ds._network_config = {"cached": "data"} - self.assertEqual({"cached": "data"}, ds.network_config) + assert {"cached": "data"} == ds.network_config + @responses.activate @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") - def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): + def test_network_config_cached_property_refreshed_on_upgrade( + self, m_dhcp, caplog, mocker, tmpdir + ): """Refresh the network_config Ec2 cache if network key is absent. 
This catches an upgrade issue where obj.pkl contained stale metadata @@ -618,30 +628,46 @@ def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, md={"md": old_metadata}, + mocker=mocker, + tmpdir=tmpdir, ) - self.assertTrue(ds.get_data()) + assert True is ds.get_data() + + def _remove_md(resp_list: List) -> None: + for index, url in enumerate(resp_list): + try: + url = url.url + except AttributeError: + # Can be removed when Bionic is EOL + url = url["url"] + if url.startswith( + "http://169.254.169.254/2009-04-04/meta-data/" + ): + del resp_list[index] # Workaround https://github.com/getsentry/responses/issues/212 - if hasattr(self.responses, "_urls"): + if hasattr(responses, "_urls"): # Can be removed when Bionic is EOL - for index, url in enumerate(self.responses._urls): - if url["url"].startswith( - "http://169.254.169.254/2009-04-04/meta-data/" - ): - del self.responses._urls[index] - elif hasattr(self.responses, "_matches"): + _remove_md(responses._urls) + elif hasattr(responses, "_default_mock") and hasattr( + responses._default_mock, "_urls" + ): + # Can be removed when Bionic is EOL + _remove_md(responses._default_mock._urls) + elif hasattr(responses, "_matches"): # Can be removed when Focal is EOL - for index, response in enumerate(self.responses._matches): - if response.url.startswith( - "http://169.254.169.254/2009-04-04/meta-data/" - ): - del self.responses._matches[index] + _remove_md(responses._matches) + elif hasattr(responses, "_default_mock") and hasattr( + responses._default_mock, "_matches" + ): + # Can be removed when Focal is EOL + _remove_md(responses._default_mock._matches) # Provide new revision of metadata that contains network data register_mock_metaserver( "http://169.254.169.254/2009-04-04/meta-data/", DEFAULT_METADATA, - self.responses, + responses, ) mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac" @@ -649,11 +675,8 @@ def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac: m_get_interfaces_by_mac.return_value = {mac1: "eth9"} nc = ds.network_config # Will re-crawl network metadata - self.assertIsNotNone(nc) - self.assertIn( - "Refreshing stale metadata from prior to upgrade", - self.logs.getvalue(), - ) + assert None is not nc + assert "Refreshing stale metadata from prior to upgrade" in caplog.text expected = { "version": 2, "ethernets": { @@ -665,9 +688,12 @@ def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): } }, } - self.assertEqual(expected, ds.network_config) + assert expected == ds.network_config - def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): + @responses.activate + def test_ec2_get_instance_id_refreshes_identity_on_upgrade( + self, mocker, tmpdir + ): """get_instance-id gets DataSourceEc2Local.identity if not present. 
This handles an upgrade case where the old pickled datasource didn't @@ -679,6 +705,8 @@ def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) # Mock 404s on all versions except latest all_versions = [ @@ -688,27 +716,28 @@ def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): register_mock_metaserver( "http://[fd00:ec2::254]/{0}/meta-data/instance-id".format(ver), None, - self.responses, + responses, ) ds.metadata_address = "http://[fd00:ec2::254]" register_mock_metaserver( "{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]), DEFAULT_METADATA, - self.responses, + responses, ) # Register dynamic/instance-identity document which we now read. register_mock_metaserver( "{0}/{1}/dynamic/".format(ds.metadata_address, all_versions[-1]), DYNAMIC_METADATA, - self.responses, + responses, ) ds._cloud_name = ec2.CloudNames.AWS # Setup cached metadata on the Datasource ds.metadata = DEFAULT_METADATA - self.assertEqual("my-identity-id", ds.get_instance_id()) + assert "my-identity-id" == ds.get_instance_id() - def test_classic_instance_true(self): + @responses.activate + def test_classic_instance_true(self, mocker, tmpdir): """If no vpc-id in metadata, is_classic_instance must return true.""" md_copy = copy.deepcopy(DEFAULT_METADATA) ifaces_md = md_copy.get("network", {}).get("interfaces", {}) @@ -720,26 +749,36 @@ def test_classic_instance_true(self): platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md={"md": md_copy}, + mocker=mocker, + tmpdir=tmpdir, ) - self.assertTrue(ds.get_data()) - self.assertTrue(ds.is_classic_instance()) + assert True is ds.get_data() + assert True is ds.is_classic_instance() - def test_classic_instance_false(self): + @responses.activate + def test_classic_instance_false(self, mocker, tmpdir): """If vpc-id in metadata, is_classic_instance must return false.""" ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) - self.assertTrue(ds.get_data()) - self.assertFalse(ds.is_classic_instance()) + assert True is ds.get_data() + assert False is ds.is_classic_instance() - def test_aws_inaccessible_imds_service_fails_with_retries(self): + @responses.activate + def test_aws_inaccessible_imds_service_fails_with_retries( + self, mocker, tmpdir + ): """Inaccessibility of http://169.254.169.254 are retried.""" ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md=None, + tmpdir=tmpdir, + mocker=mocker, ) conn_error = requests.exceptions.ConnectionError( @@ -752,127 +791,139 @@ def test_aws_inaccessible_imds_service_fails_with_retries(self): with mock.patch("cloudinit.url_helper.readurl") as m_readurl: # yikes, this endpoint needs help m_readurl.side_effect = ( - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, conn_error, conn_error, mock_success, ) with mock.patch("cloudinit.url_helper.time.sleep"): - self.assertTrue(ds.wait_for_metadata_service()) + assert True is ds.wait_for_metadata_service() # Just one /latest/api/token request - self.assertEqual(19, len(m_readurl.call_args_list)) + 
assert 3 == len(m_readurl.call_args_list) for readurl_call in m_readurl.call_args_list: - self.assertIn("latest/api/token", readurl_call[0][0]) + assert "latest/api/token" in readurl_call[0][0] - def test_aws_token_403_fails_without_retries(self): + @responses.activate + def test_aws_token_403_fails_without_retries(self, caplog, mocker, tmpdir): """Verify that 403s fetching AWS tokens are not retried.""" ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md=None, + mocker=mocker, + tmpdir=tmpdir, ) token_url = self.data_url("latest", data_item="api/token") - self.responses.add(responses.PUT, token_url, status=403) - self.assertFalse(ds.get_data()) + responses.add(responses.PUT, token_url, status=403) + assert False is ds.get_data() # Just one /latest/api/token request - logs = self.logs.getvalue() expected_logs = [ - "WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is" - " disabled. Aborting.", - "WARNING: IMDS's HTTP endpoint is probably disabled", + ( + mock.ANY, + logging.WARNING, + "Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is" + " disabled. Aborting.", + ), + ( + mock.ANY, + logging.WARNING, + "IMDS's HTTP endpoint is probably disabled", + ), ] for log in expected_logs: - self.assertIn(log, logs) + assert log in caplog.record_tuples - def test_aws_token_redacted(self): + @responses.activate + def test_aws_token_redacted(self, caplog, mocker, tmpdir): """Verify that aws tokens are redacted when logged.""" ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) - self.assertTrue(ds.get_data()) - all_logs = self.logs.getvalue().splitlines() + assert True is ds.get_data() + all_logs = caplog.text.splitlines() REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] logs_with_token = [log for log in all_logs if "API-TOKEN" in log] - self.assertEqual(1, len(logs_with_redacted_ttl)) - self.assertEqual(83, len(logs_with_redacted)) - self.assertEqual(0, len(logs_with_token)) + assert 1 == len(logs_with_redacted_ttl) + assert 83 == len(logs_with_redacted) + assert 0 == len(logs_with_token) + @responses.activate @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") - def test_valid_platform_with_strict_true(self, m_dhcp): + def test_valid_platform_with_strict_true(self, m_dhcp, mocker, tmpdir): """Valid platform data should return true with strict_id true.""" ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) ret = ds.get_data() - self.assertTrue(ret) - self.assertEqual(0, m_dhcp.call_count) - self.assertEqual("aws", ds.cloud_name) - self.assertEqual("ec2", ds.platform_type) - self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform) - - def test_valid_platform_with_strict_false(self): + assert True is ret + assert 0 == m_dhcp.call_count + assert "aws" == ds.cloud_name + assert "ec2" == ds.platform_type + assert "metadata (%s)" % ds.metadata_address == ds.subplatform + + @responses.activate + def test_valid_platform_with_strict_false(self, mocker, tmpdir): """Valid platform data should return true with strict_id false.""" ds = 
self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) ret = ds.get_data() - self.assertTrue(ret) + assert True is ret - def test_unknown_platform_with_strict_true(self): + @responses.activate + def test_unknown_platform_with_strict_true(self, mocker, tmpdir): """Unknown platform data with strict_id true should return False.""" uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" ds = self._setup_ds( - platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, + platform_data={"uuid": uuid, "serial": ""}, sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) ret = ds.get_data() - self.assertFalse(ret) + assert False is ret - def test_unknown_platform_with_strict_false(self): + @responses.activate + def test_unknown_platform_with_strict_false(self, mocker, tmpdir): """Unknown platform data with strict_id false should return True.""" uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" ds = self._setup_ds( - platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, + platform_data={"uuid": uuid, "serial": ""}, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) ret = ds.get_data() - self.assertTrue(ret) + assert True is ret - def test_ec2_local_returns_false_on_non_aws(self): + @responses.activate + def test_ec2_local_returns_false_on_non_aws(self, caplog, mocker, tmpdir): """DataSourceEc2Local returns False when platform is not AWS.""" self.datasource = ec2.DataSourceEc2Local ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) platform_attrs = [ attr @@ -884,16 +935,18 @@ def test_ec2_local_returns_false_on_non_aws(self): if platform_name not in ["aws", "outscale"]: ds._cloud_name = platform_name ret = ds.get_data() - self.assertEqual("ec2", ds.platform_type) - self.assertFalse(ret) + assert "ec2" == ds.platform_type + assert False is ret message = ( "Local Ec2 mode only supported on ('aws', 'outscale')," " not {0}".format(platform_name) ) - self.assertIn(message, self.logs.getvalue()) + assert message in caplog.text @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") - def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): + def test_ec2_local_returns_false_on_bsd( + self, m_is_freebsd, caplog, mocker, tmpdir + ): """DataSourceEc2Local returns False on BSD. FreeBSD dhclient doesn't support dhclient -sf to run in a sandbox. 
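# --- Reviewer sketch (illustrative only, not part of this patch) ---
# The conversions above replace CiTestCase's self.logs buffer with
# pytest's built-in caplog fixture: caplog.text holds the formatted
# output, while caplog.record_tuples holds (logger, level, message)
# triples, where mock.ANY can stand in for fields a test ignores.
import logging
from unittest import mock

def test_warning_is_logged(caplog):
    logging.getLogger("cloudinit.test").warning("endpoint disabled")
    assert "endpoint disabled" in caplog.text
    assert (
        mock.ANY,
        logging.WARNING,
        "endpoint disabled",
    ) in caplog.record_tuples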
@@ -904,21 +957,33 @@ def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md={"md": DEFAULT_METADATA}, + mocker=mocker, + tmpdir=tmpdir, ) ret = ds.get_data() - self.assertFalse(ret) - self.assertIn( - "FreeBSD doesn't support running dhclient with -sf", - self.logs.getvalue(), + assert False is ret + assert ( + "FreeBSD doesn't support running dhclient with -sf" in caplog.text ) + @responses.activate + @pytest.mark.usefixtures("disable_netdev_info") @mock.patch("cloudinit.net.ephemeral.EphemeralIPv6Network") @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network") @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") def test_ec2_local_performs_dhcp_on_non_bsd( - self, m_is_bsd, m_dhcp, m_fallback_nic, m_net4, m_net6 + self, + m_is_bsd, + m_dhcp, + m_fallback_nic, + m_net4, + m_net6, + caplog, + mocker, + tmpdir, + disable_netdev_info, ): """Ec2Local returns True for valid platform data on non-BSD with dhcp. @@ -942,88 +1007,103 @@ def test_ec2_local_performs_dhcp_on_non_bsd( sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md={"md": DEFAULT_METADATA}, distro=MockDistro("", {}, {}), + mocker=mocker, + tmpdir=tmpdir, ) ret = ds.get_data() - self.assertTrue(ret) + assert True is ret m_dhcp.assert_called_once_with(ds.distro, "eth9", None) m_net4.assert_called_once_with( ds.distro, broadcast="192.168.2.255", + interface_addrs_before_dhcp=example_netdev, interface="eth9", ip="192.168.2.9", prefix_or_mask="255.255.255.0", router="192.168.2.1", static_routes=None, ) - self.assertIn("Crawl of metadata service ", self.logs.getvalue()) + assert "Crawl of metadata service " in caplog.text - def test_get_instance_tags(self): + @responses.activate + def test_get_instance_tags(self, mocker, tmpdir): ds = self._setup_ds( platform_data=self.valid_platform_data, sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, md={"md": TAGS_METADATA_2021_03_23}, + mocker=mocker, + tmpdir=tmpdir, ) - self.assertTrue(ds.get_data()) - self.assertIn("tags", ds.metadata) - self.assertIn("instance", ds.metadata["tags"]) + assert True is ds.get_data() + assert "tags" in ds.metadata + assert "instance" in ds.metadata["tags"] instance_tags = ds.metadata["tags"]["instance"] - self.assertEqual(instance_tags["Application"], "test") - self.assertEqual(instance_tags["Environment"], "production") + assert instance_tags["Application"] == "test" + assert instance_tags["Environment"] == "production" -class TestGetSecondaryAddresses(test_helpers.CiTestCase): +class TestGetSecondaryAddresses: mac = "06:17:04:d7:26:ff" - with_logs = True def test_md_with_no_secondary_addresses(self): """Empty list is returned when nic metadata contains no secondary ip""" - self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac)) + assert [] == ec2.get_secondary_addresses(NIC2_MD, self.mac) def test_md_with_secondary_v4_and_v6_addresses(self): """All secondary addresses are returned from nic metadata""" - self.assertEqual( - [ - "172.31.45.70/20", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac), - ) - - def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self): + assert [ + "172.31.45.70/20", + "2600:1f16:292:100:f152:2222:3333:4444/128", + 
"2600:1f16:292:100:f153:12a3:c37c:11f9/128", + ] == ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac) + + def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults( + self, caplog + ): """Any invalid subnet-ipv(4|6)-cidr-block values use defaults""" invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP) invalid_cidr_md["subnet-ipv4-cidr-block"] = "something-unexpected" invalid_cidr_md["subnet-ipv6-cidr-block"] = "not/sure/what/this/is" - self.assertEqual( - [ - "172.31.45.70/24", - "2600:1f16:292:100:f152:2222:3333:4444/128", - "2600:1f16:292:100:f153:12a3:c37c:11f9/128", - ], - ec2.get_secondary_addresses(invalid_cidr_md, self.mac), - ) + assert [ + "172.31.45.70/24", + "2600:1f16:292:100:f152:2222:3333:4444/128", + "2600:1f16:292:100:f153:12a3:c37c:11f9/128", + ] == ec2.get_secondary_addresses(invalid_cidr_md, self.mac) expected_logs = [ - "WARNING: Could not parse subnet-ipv4-cidr-block" - " something-unexpected for mac 06:17:04:d7:26:ff." - " ipv4 network config prefix defaults to /24", - "WARNING: Could not parse subnet-ipv6-cidr-block" - " not/sure/what/this/is for mac 06:17:04:d7:26:ff." - " ipv6 network config prefix defaults to /128", + ( + mock.ANY, + logging.WARNING, + "Could not parse subnet-ipv4-cidr-block" + " something-unexpected for mac 06:17:04:d7:26:ff." + " ipv4 network config prefix defaults to /24", + ), + ( + mock.ANY, + logging.WARNING, + "Could not parse subnet-ipv6-cidr-block" + " not/sure/what/this/is for mac 06:17:04:d7:26:ff." + " ipv6 network config prefix defaults to /128", + ), ] - logs = self.logs.getvalue() for log in expected_logs: - self.assertIn(log, logs) + assert log in caplog.record_tuples class TestBuildNicOrder: @pytest.mark.parametrize( - ["macs_metadata", "macs", "expected"], + ["macs_metadata", "macs_to_nics", "default_nic_order", "expected"], [ - pytest.param({}, [], {}, id="all_empty"), + pytest.param({}, {}, NicOrder.MAC, {}, id="all_empty"), pytest.param( - {}, ["0a:f7:8d:96:f2:a1"], {}, id="empty_macs_metadata" + {}, {}, NicOrder.NIC_NAME, {}, id="all_empty_sort_by_nic_name" + ), + pytest.param( + {}, + {"0a:f7:8d:96:f2:a1": "eth0"}, + NicOrder.MAC, + {}, + id="empty_macs_metadata", ), pytest.param( { @@ -1032,7 +1112,8 @@ class TestBuildNicOrder: "mac": "0a:0d:dd:44:cd:7b", } }, - [], + {}, + NicOrder.MAC, {}, id="empty_macs", ), @@ -1045,8 +1126,9 @@ class TestBuildNicOrder: "mac": "0a:f7:8d:96:f2:a1", }, }, - ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], - {"0a:f7:8d:96:f2:a1": 0, "0a:0d:dd:44:cd:7b": 1}, + {"0a:f7:8d:96:f2:a1": "eth0", "0a:0d:dd:44:cd:7b": "eth1"}, + NicOrder.MAC, + {"0a:0d:dd:44:cd:7b": 0, "0a:f7:8d:96:f2:a1": 1}, id="no-device-number-info", ), pytest.param( @@ -1058,7 +1140,8 @@ class TestBuildNicOrder: "mac": "0a:f7:8d:96:f2:a1", }, }, - ["0a:f7:8d:96:f2:a1"], + {"0a:f7:8d:96:f2:a1": "eth0"}, + NicOrder.MAC, {"0a:f7:8d:96:f2:a1": 0}, id="no-device-number-info-subset", ), @@ -1073,7 +1156,8 @@ class TestBuildNicOrder: "mac": "0a:f7:8d:96:f2:a1", }, }, - ["0a:f7:8d:96:f2:a1", "0a:0d:dd:44:cd:7b"], + {"0a:0d:dd:44:cd:7b": "eth0", "0a:f7:8d:96:f2:a1": "eth1"}, + NicOrder.MAC, {"0a:0d:dd:44:cd:7b": 0, "0a:f7:8d:96:f2:a1": 1}, id="device-numbers", ), @@ -1095,11 +1179,12 @@ class TestBuildNicOrder: "mac": "0a:f7:8d:96:f2:a1", }, }, - [ - "0a:f7:8d:96:f2:a1", - "0a:0d:dd:44:cd:7b", - "0a:f7:8d:96:f2:a2", - ], + { + "0a:0d:dd:44:cd:7b": "eth0", + "0a:f7:8d:96:f2:a1": "eth1", + "0a:f7:8d:96:f2:a2": "eth2", + }, + NicOrder.MAC, { "0a:0d:dd:44:cd:7b": 0, "0a:f7:8d:96:f2:a1": 1, @@ -1121,14 +1206,15 @@ 
class TestBuildNicOrder: }, "0a:f7:8d:96:f2:a2": { "device-number": "1", - "mac": "0a:f7:8d:96:f2:a1", + "mac": "0a:f7:8d:96:f2:a2", }, }, - [ - "0a:f7:8d:96:f2:a1", - "0a:0d:dd:44:cd:7b", - "0a:f7:8d:96:f2:a2", - ], + { + "0a:0d:dd:44:cd:7b": "eth0", + "0a:f7:8d:96:f2:a1": "eth1", + "0a:f7:8d:96:f2:a2": "eth2", + }, + NicOrder.MAC, { "0a:0d:dd:44:cd:7b": 0, "0a:f7:8d:96:f2:a1": 1, @@ -1145,32 +1231,199 @@ class TestBuildNicOrder: "mac": "0a:f7:8d:96:f2:a1", }, }, - ["0a:f7:8d:96:f2:a9"], + {"0a:f7:8d:96:f2:a9": "eth0"}, + NicOrder.MAC, {}, id="macs-not-in-md", ), + pytest.param( + {}, + {"0a:f7:8d:96:f2:a1": "eth0"}, + NicOrder.NIC_NAME, + {}, + id="empty_macs_metadata_sort_by_nic_name", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "device-number": "0", + "mac": "0a:0d:dd:44:cd:7b", + } + }, + {}, + NicOrder.NIC_NAME, + {}, + id="empty_macs_sort_by_nic_name", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + {"0a:f7:8d:96:f2:a1": "eth0", "0a:0d:dd:44:cd:7b": "eth1"}, + NicOrder.NIC_NAME, + {"0a:f7:8d:96:f2:a1": 0, "0a:0d:dd:44:cd:7b": 1}, + id="no-device-number-info-sort-by-nic-name", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + {"0a:f7:8d:96:f2:a1": "eth0"}, + NicOrder.NIC_NAME, + {"0a:f7:8d:96:f2:a1": 0}, + id="no-device-number-info-subset-sort-by-nic-name", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "device-number": "0", + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "device-number": "1", + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + {"0a:0d:dd:44:cd:7b": "eth0", "0a:f7:8d:96:f2:a1": "eth1"}, + NicOrder.NIC_NAME, + {"0a:0d:dd:44:cd:7b": 0, "0a:f7:8d:96:f2:a1": 1}, + id="device-numbers-sort-by-nic-name", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "network-card": "1", + "device-number": "1", + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "network-card": "0", + "device-number": "0", + "mac": "0a:f7:8d:96:f2:a1", + }, + "0a:f7:8d:96:f2:a2": { + "network-card": "2", + "device-number": "1", + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + { + "0a:f7:8d:96:f2:a1": "eth0", + "0a:0d:dd:44:cd:7b": "eth1", + "0a:f7:8d:96:f2:a2": "eth2", + }, + NicOrder.MAC, + { + "0a:f7:8d:96:f2:a1": 0, + "0a:0d:dd:44:cd:7b": 1, + "0a:f7:8d:96:f2:a2": 2, + }, + id="network-cardes-sort-by-nic-name", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "network-card": "0", + "device-number": "0", + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "network-card": "1", + "device-number": "1", + "mac": "0a:f7:8d:96:f2:a1", + }, + "0a:f7:8d:96:f2:a2": { + "device-number": "1", + "mac": "0a:f7:8d:96:f2:a2", + }, + }, + { + "0a:0d:dd:44:cd:7b": "eth0", + "0a:f7:8d:96:f2:a1": "eth1", + "0a:f7:8d:96:f2:a2": "eth2", + }, + NicOrder.NIC_NAME, + { + "0a:0d:dd:44:cd:7b": 0, + "0a:f7:8d:96:f2:a1": 1, + "0a:f7:8d:96:f2:a2": 2, + }, + id="network-card-partially-missing-sort-by-nic-name", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + {"0a:f7:8d:96:f2:a9": "eth0"}, + NicOrder.NIC_NAME, + {}, + id="macs-not-in-md-sort-by-nic-name", + ), + pytest.param( + { + "0a:0d:dd:44:cd:7b": { + "mac": "0a:0d:dd:44:cd:7b", + }, + "0a:f7:8d:96:f2:a1": { + "mac": "0a:f7:8d:96:f2:a1", + }, + "0a:f7:8d:96:f2:a2": { + "mac": "0a:f7:8d:96:f2:a1", + }, + }, + { + "0a:f7:8d:96:f2:a1": 
"eth0", + "0a:0d:dd:44:cd:7b": "eth1", + "0a:f7:8d:96:f2:a2": "eth2", + }, + NicOrder.NIC_NAME, + { + "0a:f7:8d:96:f2:a1": 0, + "0a:0d:dd:44:cd:7b": 1, + "0a:f7:8d:96:f2:a2": 2, + }, + id="no-device-number-info-subset-sort-by-nic-name", + ), ], ) - def test_build_nic_order(self, macs_metadata, macs, expected): - assert expected == ec2._build_nic_order(macs_metadata, macs) + def test_build_nic_order( + self, macs_metadata, macs_to_nics, default_nic_order, expected + ): + assert expected == ec2._build_nic_order( + macs_metadata, macs_to_nics, default_nic_order + ) -class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): - def setUp(self): - super(TestConvertEc2MetadataNetworkConfig, self).setUp() - self.mac1 = "06:17:04:d7:26:09" +class TestConvertEc2MetadataNetworkConfig: + MAC1 = "06:17:04:d7:26:09" + + @classmethod + def get_network_metadata(cls): interface_dict = copy.deepcopy( - DEFAULT_METADATA["network"]["interfaces"]["macs"][self.mac1] + DEFAULT_METADATA["network"]["interfaces"]["macs"][cls.MAC1] ) # These tests are written assuming the base interface doesn't have IPv6 interface_dict.pop("ipv6s") - self.network_metadata = { - "interfaces": {"macs": {self.mac1: interface_dict}} - } + return {"interfaces": {"macs": {cls.MAC1: interface_dict}}} def test_convert_ec2_metadata_network_config_skips_absent_macs(self): """Any mac absent from metadata is skipped by network config.""" - macs_to_nics = {self.mac1: "eth9", "DE:AD:BE:EF:FF:FF": "vitualnic2"} + macs_to_nics = {self.MAC1: "eth9", "DE:AD:BE:EF:FF:FF": "vitualnic2"} # DE:AD:BE:EF:FF:FF represented by OS but not in metadata expected = { @@ -1185,18 +1438,15 @@ def test_convert_ec2_metadata_network_config_skips_absent_macs(self): }, } distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - self.network_metadata, distro, macs_to_nics - ), + assert expected == ec2.convert_ec2_metadata_network_config( + self.get_network_metadata(), distro, macs_to_nics ) def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self): """Config dhcp6 when ipv6s is in metadata for a mac.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] + macs_to_nics = {self.MAC1: "eth9"} + network_metadata_ipv6 = copy.deepcopy(self.get_network_metadata()) + nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.MAC1] nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" nic1_metadata.pop("public-ipv4s") expected = { @@ -1211,18 +1461,15 @@ def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self): }, } distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), + assert expected == ec2.convert_ec2_metadata_network_config( + network_metadata_ipv6, distro, macs_to_nics ) def test_convert_ec2_metadata_network_config_local_only_dhcp4(self): """Config dhcp4 when there are no public addresses in public-ipv4s.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] + macs_to_nics = {self.MAC1: "eth9"} + network_metadata_ipv6 = copy.deepcopy(self.get_network_metadata()) + nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.MAC1] nic1_metadata["local-ipv4s"] = "172.3.3.15" nic1_metadata.pop("public-ipv4s") expected = { @@ -1237,18 +1484,15 @@ def 
test_convert_ec2_metadata_network_config_local_only_dhcp4(self): }, } distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), + assert expected == ec2.convert_ec2_metadata_network_config( + network_metadata_ipv6, distro, macs_to_nics ) def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): """Config dhcp4 on fallback_nic when there are no ipv4 addresses.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] + macs_to_nics = {self.MAC1: "eth9"} + network_metadata_ipv6 = copy.deepcopy(self.get_network_metadata()) + nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.MAC1] nic1_metadata["public-ipv4s"] = "" # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config. @@ -1264,18 +1508,15 @@ def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): }, } distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, distro, macs_to_nics - ), + assert expected == ec2.convert_ec2_metadata_network_config( + network_metadata_ipv6, distro, macs_to_nics ) def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] + macs_to_nics = {self.MAC1: "eth9"} + network_metadata_both = copy.deepcopy(self.get_network_metadata()) + nic1_metadata = network_metadata_both["interfaces"]["macs"][self.MAC1] nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" nic1_metadata.pop("public-ipv4s") nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc @@ -1291,11 +1532,8 @@ def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): }, } distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), + assert expected == ec2.convert_ec2_metadata_network_config( + network_metadata_both, distro, macs_to_nics ) def test_convert_ec2_metadata_network_config_multi_nics_ipv4(self): @@ -1303,11 +1541,11 @@ def test_convert_ec2_metadata_network_config_multi_nics_ipv4(self): Source-routing configured for secondary NICs (routing-policy and extra routing table).""" mac2 = "06:17:04:d7:26:08" - macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} - network_metadata_both = copy.deepcopy(self.network_metadata) + macs_to_nics = {self.MAC1: "eth9", mac2: "eth10"} + network_metadata_both = copy.deepcopy(self.get_network_metadata()) # Add 2nd nic info network_metadata_both["interfaces"]["macs"][mac2] = NIC2_MD - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] + nic1_metadata = network_metadata_both["interfaces"]["macs"][self.MAC1] nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc @@ -1349,11 +1587,8 @@ def test_convert_ec2_metadata_network_config_multi_nics_ipv4(self): distro.dhcp_client.dhcp_discovery.return_value = { "routers": "172.31.1.0" } - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), + assert 
expected == ec2.convert_ec2_metadata_network_config( + network_metadata_both, distro, macs_to_nics ) def test_convert_ec2_metadata_network_config_multi_nics_ipv4_ipv6_multi_ip( @@ -1363,13 +1598,13 @@ def test_convert_ec2_metadata_network_config_multi_nics_ipv4_ipv6_multi_ip( Source-routing configured for secondary NICs (routing-policy and extra routing table).""" mac2 = "06:17:04:d7:26:08" - macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} - network_metadata_both = copy.deepcopy(self.network_metadata) + macs_to_nics = {self.MAC1: "eth9", mac2: "eth10"} + network_metadata_both = copy.deepcopy(self.get_network_metadata()) # Add 2nd nic info network_metadata_both["interfaces"]["macs"][ mac2 ] = NIC2_MD_IPV4_IPV6_MULTI_IP - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] + nic1_metadata = network_metadata_both["interfaces"]["macs"][self.MAC1] nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc @@ -1426,11 +1661,8 @@ def test_convert_ec2_metadata_network_config_multi_nics_ipv4_ipv6_multi_ip( distro.dhcp_client.dhcp_discovery.return_value = { "routers": "172.31.1.0" } - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), + assert expected == ec2.convert_ec2_metadata_network_config( + network_metadata_both, distro, macs_to_nics ) def test_convert_ec2_metadata_network_config_multi_nics_ipv6_only(self): @@ -1486,9 +1718,9 @@ def test_convert_ec2_metadata_network_config_multi_nics_ipv6_only(self): def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists.""" - macs_to_nics = {self.mac1: "eth9"} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] + macs_to_nics = {self.MAC1: "eth9"} + network_metadata_both = copy.deepcopy(self.get_network_metadata()) + nic1_metadata = network_metadata_both["interfaces"]["macs"][self.MAC1] nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" expected = { "version": 2, @@ -1502,11 +1734,8 @@ def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): }, } distro = mock.Mock() - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, distro, macs_to_nics - ), + assert expected == ec2.convert_ec2_metadata_network_config( + network_metadata_both, distro, macs_to_nics ) def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self): @@ -1525,29 +1754,41 @@ def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self): patch_path = M_PATH_NET + "get_interfaces_by_mac" distro = mock.Mock() with mock.patch(patch_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {self.mac1: "eth9"} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - self.network_metadata, distro - ), + m_get_interfaces_by_mac.return_value = {self.MAC1: "eth9"} + assert expected == ec2.convert_ec2_metadata_network_config( + self.get_network_metadata(), distro ) -class TesIdentifyPlatform(test_helpers.CiTestCase): +class TestIdentifyPlatform: def collmock(self, **kwargs): """return non-special _collect_platform_data updated with changes.""" unspecial = { "asset_tag": "3857-0037-2746-7462-1818-3997-77", "serial": "H23-C4J3JV-R6", "uuid": 
"81c7e555-6471-4833-9551-1ab366c4cfd2", - "uuid_source": "dmi", "vendor": "tothecloud", "product_name": "cloudproduct", } unspecial.update(**kwargs) return unspecial + @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") + def test_identify_aws(self, m_collect): + """aws should be identified if uuid starts with ec2""" + m_collect.return_value = self.collmock( + uuid="ec2E1916-9099-7CAF-FD21-012345ABCDEF" + ) + assert ec2.CloudNames.AWS == ec2.identify_platform() + + @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") + def test_identify_aws_endian(self, m_collect): + """aws should be identified if uuid starts with ec2""" + m_collect.return_value = self.collmock( + uuid="45E12AEC-DCD1-B213-94ED-012345ABCDEF" + ) + assert ec2.CloudNames.AWS == ec2.identify_platform() + @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_identify_aliyun(self, m_collect): """aliyun should be identified if product name equals to @@ -1556,7 +1797,7 @@ def test_identify_aliyun(self, m_collect): m_collect.return_value = self.collmock( product_name="Alibaba Cloud ECS" ) - self.assertEqual(ec2.CloudNames.ALIYUN, ec2.identify_platform()) + assert ec2.CloudNames.ALIYUN == ec2.identify_platform() @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_identify_zstack(self, m_collect): @@ -1564,7 +1805,7 @@ def test_identify_zstack(self, m_collect): ends in .zstack.io """ m_collect.return_value = self.collmock(asset_tag="123456.zstack.io") - self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) + assert ec2.CloudNames.ZSTACK == ec2.identify_platform() @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_identify_zstack_full_domain_only(self, m_collect): @@ -1572,19 +1813,19 @@ def test_identify_zstack_full_domain_only(self, m_collect): full domain boundary. 
""" m_collect.return_value = self.collmock(asset_tag="123456.buzzstack.io") - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + assert ec2.CloudNames.UNKNOWN == ec2.identify_platform() @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_identify_e24cloud(self, m_collect): """e24cloud identified if vendor is e24cloud""" m_collect.return_value = self.collmock(vendor="e24cloud") - self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform()) + assert ec2.CloudNames.E24CLOUD == ec2.identify_platform() @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_identify_e24cloud_negative(self, m_collect): """e24cloud identified if vendor is e24cloud""" m_collect.return_value = self.collmock(vendor="e24cloudyday") - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + assert ec2.CloudNames.UNKNOWN == ec2.identify_platform() # Outscale @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") @@ -1594,7 +1835,7 @@ def test_identify_outscale(self, m_collect): vendor="3DS Outscale".lower(), product_name="3DS Outscale VM".lower(), ) - self.assertEqual(ec2.CloudNames.OUTSCALE, ec2.identify_platform()) + assert ec2.CloudNames.OUTSCALE == ec2.identify_platform() @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_false_on_wrong_sys_vendor(self, m_collect): @@ -1603,7 +1844,7 @@ def test_false_on_wrong_sys_vendor(self, m_collect): vendor="Not 3DS Outscale".lower(), product_name="3DS Outscale VM".lower(), ) - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + assert ec2.CloudNames.UNKNOWN == ec2.identify_platform() @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_false_on_wrong_product_name(self, m_collect): @@ -1612,4 +1853,4 @@ def test_false_on_wrong_product_name(self, m_collect): vendor="3DS Outscale".lower(), product_name="Not 3DS Outscale VM".lower(), ) - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + assert ec2.CloudNames.UNKNOWN == ec2.identify_platform() diff --git a/tests/unittests/sources/test_gce.py b/tests/unittests/sources/test_gce.py index 6fc31ddc7..1617f6943 100644 --- a/tests/unittests/sources/test_gce.py +++ b/tests/unittests/sources/test_gce.py @@ -101,6 +101,7 @@ def _set_mock_metadata(self, gce_meta=None, *, check_headers=None): gce_meta = GCE_META def _request_callback(request): + recursive = False url_path = urlparse(request.url).path if url_path.startswith("/computeMetadata/v1/"): path = url_path.split("/computeMetadata/v1/")[1:][0] diff --git a/tests/unittests/sources/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py index 850fbf88a..bee486f4d 100644 --- a/tests/unittests/sources/test_ibmcloud.py +++ b/tests/unittests/sources/test_ibmcloud.py @@ -416,7 +416,6 @@ def test_get_data_processes_read_md(self): self.assertEqual({}, self.ds.metadata) self.assertEqual("ud", self.ds.userdata_raw) self.assertEqual("net", self.ds.network_json) - self.assertEqual("vd", self.ds.vendordata_pure) self.assertEqual("uuid", self.ds.system_uuid) self.assertEqual("ibmcloud", self.ds.cloud_name) self.assertEqual("ibmcloud", self.ds.platform_type) diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py index d617219e3..de8ded4a3 100644 --- a/tests/unittests/sources/test_init.py +++ b/tests/unittests/sources/test_init.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import copy import inspect @@ -904,9 +905,8 @@ def test_clear_cached_attrs_noops_on_clean_cache(self): def test_clear_cached_attrs_skips_non_attr_class_attributes(self): """Skip any cached_attr_defaults which aren't class attributes.""" self.datasource._dirty_cache = True - self.datasource.clear_cached_attrs() - for attr in ("ec2_metadata", "network_json"): - self.assertFalse(hasattr(self.datasource, attr)) + self.datasource.clear_cached_attrs(attr_defaults=(("some", "value"),)) + self.assertFalse(hasattr(self.datasource, "some")) def test_clear_cached_attrs_of_custom_attrs(self): """Custom attr_values can be passed to clear_cached_attrs.""" diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py index efc248838..b71234567 100644 --- a/tests/unittests/sources/test_lxd.py +++ b/tests/unittests/sources/test_lxd.py @@ -333,13 +333,13 @@ def test_network_config_when_unset(self, lxd_ds): assert NETWORK_V1 == lxd_ds.network_config assert LXD_V1_METADATA == lxd_ds._crawled_metadata + @mock.patch.object(lxd, "generate_network_config", return_value=NETWORK_V1) def test_network_config_crawled_metadata_no_network_config( - self, lxd_ds_no_network_config + self, m_generate, lxd_ds_no_network_config ): """network_config is correctly computed when _network_config is unset and _crawled_metadata does not contain network_config. """ - lxd.generate_network_config = mock.Mock(return_value=NETWORK_V1) assert UNSET == lxd_ds_no_network_config._crawled_metadata assert UNSET == lxd_ds_no_network_config._network_config assert None is lxd_ds_no_network_config.userdata_raw @@ -349,7 +349,7 @@ def test_network_config_crawled_metadata_no_network_config( LXD_V1_METADATA_NO_NETWORK_CONFIG == lxd_ds_no_network_config._crawled_metadata ) - assert 1 == lxd.generate_network_config.call_count + assert 1 == m_generate.call_count class TestIsPlatformViable: @@ -650,8 +650,10 @@ def fake_get(url): mock_status_code = mock.PropertyMock(return_value=404) type(m_resp).ok = mock_ok type(m_resp).status_code = mock_status_code - mock_text = mock.PropertyMock(return_value=content) - type(m_resp).text = mock_text + mock_content = mock.PropertyMock( + return_value=content.encode("utf-8") + ) + type(m_resp).content = mock_content return m_resp m_session_get.side_effect = fake_get diff --git a/tests/unittests/sources/test_maas.py b/tests/unittests/sources/test_maas.py index 2e037981a..d6e1658e4 100644 --- a/tests/unittests/sources/test_maas.py +++ b/tests/unittests/sources/test_maas.py @@ -116,7 +116,7 @@ def my_readurl(*args, **kwargs): short = url[len(prefix) :] if short not in data: raise url_helper.UrlError("not found", code=404, url=url) - return url_helper.StringResponse(data[short]) + return url_helper.StringResponse(data[short], url) # Now do the actual call of the code under test. 
with mock.patch("cloudinit.url_helper.readurl") as mock_readurl: diff --git a/tests/unittests/sources/test_opennebula.py b/tests/unittests/sources/test_opennebula.py index 3482e3170..ecb0fa76b 100644 --- a/tests/unittests/sources/test_opennebula.py +++ b/tests/unittests/sources/test_opennebula.py @@ -43,7 +43,7 @@ class TestOpenNebulaDataSource(CiTestCase): parsed_user = None - allowed_subp = ["bash"] + allowed_subp = ["bash", "sh"] def setUp(self): super(TestOpenNebulaDataSource, self).setUp() @@ -1023,7 +1023,7 @@ def test_multiple_nics(self): class TestParseShellConfig: - @pytest.mark.allow_subp_for("bash") + @pytest.mark.allow_subp_for("bash", "sh") def test_no_seconds(self): cfg = "\n".join(["foo=bar", "SECONDS=2", "xx=foo"]) # we could test 'sleep 2', but that would make the test run slower. diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py index 97cc8c94e..380fe3408 100644 --- a/tests/unittests/sources/test_openstack.py +++ b/tests/unittests/sources/test_openstack.py @@ -20,6 +20,7 @@ from cloudinit.sources import convert_vendordata from cloudinit.sources.helpers import openstack from tests.unittests import helpers as test_helpers +from tests.unittests import util as test_util from tests.unittests.helpers import mock BASE_URL = "http://169.254.169.254" @@ -136,7 +137,7 @@ def get_request_callback(request): responses_mock.add_callback( responses.GET, - re.compile(r"http://169.254.169.254/.*"), + re.compile(r"http://(169.254.169.254|\[fe80::a9fe:a9fe\])/.*"), callback=get_request_callback, ) @@ -315,8 +316,6 @@ def test_datasource(self, m_dhcp): self.assertEqual(EC2_META, ds_os.ec2_metadata) self.assertEqual(USER_DATA, ds_os.userdata_raw) self.assertEqual(2, len(ds_os.files)) - self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) - self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure) self.assertIsNone(ds_os.vendordata_raw) m_dhcp.assert_not_called() @@ -324,6 +323,7 @@ def test_datasource(self, m_dhcp): @test_helpers.mock.patch( "cloudinit.net.ephemeral.maybe_perform_dhcp_discovery" ) + @pytest.mark.usefixtures("disable_netdev_info") def test_local_datasource(self, m_dhcp, m_net): """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data.""" _register_uris( @@ -362,8 +362,6 @@ def test_local_datasource(self, m_dhcp, m_net): self.assertEqual(EC2_META, ds_os_local.ec2_metadata) self.assertEqual(USER_DATA, ds_os_local.userdata_raw) self.assertEqual(2, len(ds_os_local.files)) - self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure) - self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure) self.assertIsNone(ds_os_local.vendordata_raw) m_dhcp.assert_called_with(distro, "eth9", None) @@ -388,10 +386,10 @@ def test_bad_datasource_meta(self): found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) - self.assertIn( - "InvalidMetaDataException: Broken metadata address" - " http://169.254.169.25", + self.assertRegex( self.logs.getvalue(), + r"InvalidMetaDataException: Broken metadata address" + r" http://(169.254.169.254|\[fe80::a9fe:a9fe\])", ) def test_no_datasource(self): @@ -506,7 +504,9 @@ def test_wb__crawl_metadata_does_not_persist(self): responses_mock=self.responses, ) ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + settings.CFG_BUILTIN, + test_util.MockDistro(), + helpers.Paths({"run_dir": self.tmp}), ) crawled_data = ds_os._crawl_metadata() self.assertEqual(UNSET, ds_os.ec2_metadata) diff --git a/tests/unittests/sources/test_oracle.py 
b/tests/unittests/sources/test_oracle.py index 7c455096c..6b53e2c01 100644 --- a/tests/unittests/sources/test_oracle.py +++ b/tests/unittests/sources/test_oracle.py @@ -731,7 +731,7 @@ def test_metadata_returned( assert vnics_data == metadata.vnics_data @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) - @mock.patch("cloudinit.url_helper.time.time", side_effect=count(0, 1)) + @mock.patch("cloudinit.url_helper.time.monotonic", side_effect=count(0, 1)) @mock.patch("cloudinit.url_helper.readurl", side_effect=UrlError) def test_retry(self, m_readurl, m_time): # Since wait_for_url has its own retry tests, just verify that we @@ -756,7 +756,7 @@ def test_retry(self, m_readurl, m_time): ) @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) - @mock.patch("cloudinit.url_helper.time.time", side_effect=[0, 11]) + @mock.patch("cloudinit.url_helper.time.monotonic", side_effect=[0, 11]) @mock.patch( "cloudinit.sources.DataSourceOracle.wait_for_url", return_value=("http://hi", b'{"some": "value"}'), @@ -768,7 +768,7 @@ def test_fetch_vnics_max_wait(self, m_wait_for_url, m_time): assert m_wait_for_url.call_args_list[-1][1]["max_wait"] == 19 @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) - @mock.patch("cloudinit.url_helper.time.time", side_effect=[0, 1000]) + @mock.patch("cloudinit.url_helper.time.monotonic", side_effect=[0, 1000]) @mock.patch( "cloudinit.sources.DataSourceOracle.wait_for_url", return_value=("http://hi", b'{"some": "value"}'), diff --git a/tests/unittests/sources/test_smartos.py b/tests/unittests/sources/test_smartos.py index 4bcec4fcb..b4d7dbb19 100644 --- a/tests/unittests/sources/test_smartos.py +++ b/tests/unittests/sources/test_smartos.py @@ -5,6 +5,8 @@ # # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init + """This is a testcase for the SmartOS datasource. It replicates a serial console and acts like the SmartOS console does in diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py index e16733ad5..a6a805e09 100644 --- a/tests/unittests/sources/test_upcloud.py +++ b/tests/unittests/sources/test_upcloud.py @@ -4,12 +4,14 @@ import json +import pytest + from cloudinit import helpers, importer, settings, sources from cloudinit.sources.DataSourceUpCloud import ( DataSourceUpCloud, DataSourceUpCloudLocal, ) -from tests.unittests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, example_netdev, mock UC_METADATA = json.loads( """ @@ -216,6 +218,7 @@ def get_ds(self, get_sysinfo=_mock_dmi): ds._get_sysinfo = get_sysinfo return ds + @pytest.mark.usefixtures("disable_netdev_info") @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata") @mock.patch("cloudinit.net.find_fallback_nic") @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") @@ -245,6 +248,7 @@ def test_network_configured_metadata( m_net.assert_called_once_with( ds.distro, broadcast="10.6.3.255", + interface_addrs_before_dhcp=example_netdev, interface="eth1", ip="10.6.3.27", prefix_or_mask="22", diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py index 90daf6df8..21a6ff2ee 100644 --- a/tests/unittests/sources/test_vmware.py +++ b/tests/unittests/sources/test_vmware.py @@ -63,6 +63,62 @@ - echo "Hello, world." 
""" +VMW_IPV4_ROUTEINFO = { + "destination": "0.0.0.0", + "flags": "G", + "gateway": "10.85.130.1", + "genmask": "0.0.0.0", + "iface": "eth0", + "metric": "50", +} +VMW_IPV4_NETDEV_ADDR = { + "bcast": "10.85.130.255", + "ip": "10.85.130.116", + "mask": "255.255.255.0", + "scope": "global", +} +VMW_IPV4_NETIFACES_ADDR = { + "broadcast": "10.85.130.255", + "netmask": "255.255.255.0", + "addr": "10.85.130.116", +} +VMW_IPV6_ROUTEINFO = { + "destination": "::/0", + "flags": "UG", + "gateway": "2001:67c:1562:8007::1", + "iface": "eth0", + "metric": "50", +} +VMW_IPV6_NETDEV_ADDR = { + "ip": "fd42:baa2:3dd:17a:216:3eff:fe16:db54/64", + "scope6": "global", +} +VMW_IPV6_NETIFACES_ADDR = { + "netmask": "ffff:ffff:ffff:ffff::/64", + "addr": "fd42:baa2:3dd:17a:216:3eff:fe16:db54", +} +VMW_IPV6_NETDEV_PEER_ADDR = { + "ip": "fd42:baa2:3dd:17a:216:3eff:fe16:db54", + "scope6": "global", +} +VMW_IPV6_NETIFACES_PEER_ADDR = { + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128", + "addr": "fd42:baa2:3dd:17a:216:3eff:fe16:db54", +} + + +def generate_test_netdev_data(ipv4=None, ipv6=None): + ipv4 = ipv4 or [] + ipv6 = ipv6 or [] + return { + "eth0": { + "hwaddr": "00:16:3e:16:db:54", + "ipv4": ipv4, + "ipv6": ipv6, + "up": True, + }, + } + @pytest.fixture(autouse=True) def common_patches(): @@ -74,8 +130,8 @@ def common_patches(): is_FreeBSD=mock.Mock(return_value=False), ), mock.patch( - "cloudinit.sources.DataSourceVMware.netifaces.interfaces", - return_value=[], + "cloudinit.netinfo.netdev_info", + return_value={}, ), mock.patch( "cloudinit.sources.DataSourceVMware.getfqdn", @@ -108,6 +164,22 @@ def test_no_data_access_method(self): ret = ds.get_data() self.assertFalse(ret) + def test_convert_to_netifaces_ipv4_format(self): + netifaces_format = DataSourceVMware.convert_to_netifaces_ipv4_format( + VMW_IPV4_NETDEV_ADDR + ) + self.assertEqual(netifaces_format, VMW_IPV4_NETIFACES_ADDR) + + def test_convert_to_netifaces_ipv6_format(self): + netifaces_format = DataSourceVMware.convert_to_netifaces_ipv6_format( + VMW_IPV6_NETDEV_ADDR + ) + self.assertEqual(netifaces_format, VMW_IPV6_NETIFACES_ADDR) + netifaces_format = DataSourceVMware.convert_to_netifaces_ipv6_format( + VMW_IPV6_NETDEV_PEER_ADDR + ) + self.assertEqual(netifaces_format, VMW_IPV6_NETIFACES_PEER_ADDR) + @mock.patch("cloudinit.sources.DataSourceVMware.get_default_ip_addrs") def test_get_host_info_ipv4(self, m_fn_ipaddr): m_fn_ipaddr.return_value = ("10.10.10.1", None) @@ -152,6 +224,124 @@ def test_get_host_info_dual(self, m_fn_ipaddr): host_info[DataSourceVMware.LOCAL_IPV6] == "2001:db8::::::8888" ) + # TODO migrate this entire test suite to pytest then parameterize + @mock.patch("cloudinit.netinfo.route_info") + @mock.patch("cloudinit.netinfo.netdev_info") + def test_get_default_ip_addrs_ipv4only( + self, + m_netdev_info, + m_route_info, + ): + """Test get_default_ip_addrs use cases""" + m_route_info.return_value = { + "ipv4": [VMW_IPV4_ROUTEINFO], + "ipv6": [], + } + m_netdev_info.return_value = generate_test_netdev_data( + ipv4=[VMW_IPV4_NETDEV_ADDR] + ) + ipv4, ipv6 = DataSourceVMware.get_default_ip_addrs() + self.assertEqual(ipv4, "10.85.130.116") + self.assertEqual(ipv6, None) + + @mock.patch("cloudinit.netinfo.route_info") + @mock.patch("cloudinit.netinfo.netdev_info") + def test_get_default_ip_addrs_ipv6only( + self, + m_netdev_info, + m_route_info, + ): + m_route_info.return_value = { + "ipv4": [], + "ipv6": [VMW_IPV6_ROUTEINFO], + } + m_netdev_info.return_value = generate_test_netdev_data( + ipv6=[VMW_IPV6_NETDEV_ADDR] + ) + ipv4, 
ipv6 = DataSourceVMware.get_default_ip_addrs() + self.assertEqual(ipv4, None) + self.assertEqual(ipv6, "fd42:baa2:3dd:17a:216:3eff:fe16:db54/64") + + @mock.patch("cloudinit.netinfo.route_info") + @mock.patch("cloudinit.netinfo.netdev_info") + def test_get_default_ip_addrs_dualstack( + self, + m_netdev_info, + m_route_info, + ): + m_route_info.return_value = { + "ipv4": [VMW_IPV4_ROUTEINFO], + "ipv6": [VMW_IPV6_ROUTEINFO], + } + m_netdev_info.return_value = generate_test_netdev_data( + ipv4=[VMW_IPV4_NETDEV_ADDR], + ipv6=[VMW_IPV6_NETDEV_ADDR], + ) + ipv4, ipv6 = DataSourceVMware.get_default_ip_addrs() + self.assertEqual(ipv4, "10.85.130.116") + self.assertEqual(ipv6, "fd42:baa2:3dd:17a:216:3eff:fe16:db54/64") + + @mock.patch("cloudinit.netinfo.route_info") + @mock.patch("cloudinit.netinfo.netdev_info") + def test_get_default_ip_addrs_multiaddr( + self, + m_netdev_info, + m_route_info, + ): + m_route_info.return_value = { + "ipv4": [VMW_IPV4_ROUTEINFO], + "ipv6": [], + } + m_netdev_info.return_value = generate_test_netdev_data( + ipv4=[ + VMW_IPV4_NETDEV_ADDR, + { + "bcast": "10.85.131.255", + "ip": "10.85.131.117", + "mask": "255.255.255.0", + "scope": "global", + }, + ], + ipv6=[ + VMW_IPV6_NETDEV_ADDR, + { + "ip": "fe80::216:3eff:fe16:db54/64", + "scope6": "link", + }, + ], + ) + ipv4, ipv6 = DataSourceVMware.get_default_ip_addrs() + self.assertEqual(ipv4, None) + self.assertEqual(ipv6, None) + + @mock.patch("cloudinit.netinfo.route_info") + @mock.patch("cloudinit.netinfo.netdev_info") + def test_get_default_ip_addrs_nodefault( + self, + m_netdev_info, + m_route_info, + ): + m_route_info.return_value = { + "ipv4": [ + { + "destination": "185.125.188.0", + "flags": "G", + "gateway": "10.85.130.1", + "genmask": "0.0.0.255", + "iface": "eth0", + "metric": "50", + }, + ], + "ipv6": [], + } + m_netdev_info.return_value = generate_test_netdev_data( + ipv4=[VMW_IPV4_NETDEV_ADDR], + ipv6=[VMW_IPV6_NETDEV_ADDR], + ) + ipv4, ipv6 = DataSourceVMware.get_default_ip_addrs() + self.assertEqual(ipv4, None) + self.assertEqual(ipv6, None) + @mock.patch("cloudinit.sources.DataSourceVMware.get_host_info") def test_wait_on_network(self, m_fn): metadata = { diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py index 7fa02b1c9..e5f1c39ec 100644 --- a/tests/unittests/sources/test_vultr.py +++ b/tests/unittests/sources/test_vultr.py @@ -5,7 +5,7 @@ # Vultr Metadata API: # https://www.vultr.com/metadata/ -import json +import copy from cloudinit import helpers, settings from cloudinit.net.dhcp import NoDHCPLeaseError @@ -13,6 +13,17 @@ from cloudinit.sources.helpers import vultr from tests.unittests.helpers import CiTestCase, mock +VENDOR_DATA = """\ +#cloud-config +package_upgrade: true +disable_root: 0 +ssh_pwauth: 1 +chpasswd: + expire: false + list: + - root:$6$SxXx...k2mJNIzZB5vMCDBlYT1 +""" + # Vultr metadata test data VULTR_V1_1 = { "bgp": { @@ -58,18 +69,7 @@ "startup-script": "echo No configured startup script", "raid1-script": "", "user-data": [], - "vendor-data": [ - { - "package_upgrade": "true", - "disable_root": 0, - "ssh_pwauth": 1, - "chpasswd": { - "expire": False, - "list": ["root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/"], - }, - "system_info": {"default_user": {"name": "root"}}, - } - ], + "vendor-data": VENDOR_DATA, } VULTR_V1_2 = { @@ -130,22 +130,9 @@ "user-defined": [], "startup-script": "echo No configured startup script", "user-data": [], - "vendor-data": [ - { - "package_upgrade": "true", - "disable_root": 0, - "ssh_pwauth": 1, - "chpasswd": { - "expire": 
False, - "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"], - }, - "system_info": {"default_user": {"name": "root"}}, - } - ], + "vendor-data": VENDOR_DATA, } -VULTR_V1_3 = None - SSH_KEYS_1 = ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"] CLOUD_INTERFACES = { @@ -190,20 +177,6 @@ FILTERED_INTERFACES = ["eth1", "eth2", "eth0"] -# Expected generated objects - -# Expected config -EXPECTED_VULTR_CONFIG = { - "package_upgrade": "true", - "disable_root": 0, - "ssh_pwauth": 1, - "chpasswd": { - "expire": False, - "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"], - }, - "system_info": {"default_user": {"name": "root"}}, -} - # Expected network config object from generator EXPECTED_VULTR_NETWORK_1 = { "version": 1, @@ -271,28 +244,9 @@ } -FINAL_INTERFACE_USED = "" - - class TestDataSourceVultr(CiTestCase): def setUp(self): - global VULTR_V1_3 super(TestDataSourceVultr, self).setUp() - - # Create v3 - VULTR_V1_3 = VULTR_V1_2.copy() - VULTR_V1_3["cloud_interfaces"] = CLOUD_INTERFACES.copy() - VULTR_V1_3["interfaces"] = [] - - # Stored as a dict to make it easier to maintain - raw1 = json.dumps(VULTR_V1_1["vendor-data"][0]) - raw2 = json.dumps(VULTR_V1_2["vendor-data"][0]) - - # Make expected format - VULTR_V1_1["vendor-data"] = [raw1] - VULTR_V1_2["vendor-data"] = [raw2] - VULTR_V1_3["vendor-data"] = [raw2] - self.tmp = self.tmp_dir() # Test the datasource itself @@ -330,8 +284,8 @@ def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap): # Test vendor config self.assertEqual( - EXPECTED_VULTR_CONFIG, - json.loads(vendordata[0].replace("#cloud-config", "")), + VENDOR_DATA, + vendordata, ) self.maxDiff = orig_val @@ -339,6 +293,15 @@ def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap): # Test network config generation self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config) + def _get_metadata(self): + # Create v1_3 + vultr_v1_3 = VULTR_V1_2.copy() + vultr_v1_3["cloud_interfaces"] = CLOUD_INTERFACES.copy() + vultr_v1_3["interfaces"] = [] + vultr_v1_3["vendor-data"] = copy.deepcopy(VULTR_V1_2["vendor-data"]) + + return vultr_v1_3 + # Test the datasource with new network config type @mock.patch("cloudinit.net.get_interfaces_by_mac") @mock.patch("cloudinit.sources.helpers.vultr.is_vultr") @@ -346,7 +309,7 @@ def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap): def test_datasource_cloud_interfaces( self, mock_getmeta, mock_isvultr, mock_netmap ): - mock_getmeta.return_value = VULTR_V1_3 + mock_getmeta.return_value = self._get_metadata() mock_isvultr.return_value = True mock_netmap.return_value = INTERFACE_MAP @@ -375,7 +338,7 @@ def test_network_config(self, mock_netmap): @mock.patch("cloudinit.net.get_interfaces_by_mac") def test_private_network_config(self, mock_netmap): mock_netmap.return_value = INTERFACE_MAP - interf = VULTR_V1_2["interfaces"].copy() + interf = copy.deepcopy(VULTR_V1_2["interfaces"]) # Test configuring self.assertEqual( @@ -384,27 +347,10 @@ def test_private_network_config(self, mock_netmap): # Test unconfigured interf[1]["unconfigured"] = True - expected = EXPECTED_VULTR_NETWORK_2.copy() + expected = copy.deepcopy(EXPECTED_VULTR_NETWORK_2) expected["config"].pop(2) self.assertEqual(expected, vultr.generate_network_config(interf)) - # Override ephemeral for proper unit testing - def ephemeral_init( - self, distro, iface="", connectivity_url_data=None, tmp_dir=None - ): - global FINAL_INTERFACE_USED - FINAL_INTERFACE_USED = iface - if iface == "eth0": - return - raise NoDHCPLeaseError("Generic for testing") - - # Override 
ephemeral for proper unit testing - def ephemeral_init_always( - self, iface="", connectivity_url_data=None, tmp_dir=None - ): - global FINAL_INTERFACE_USED - FINAL_INTERFACE_USED = iface - # Override ephemeral for proper unit testing def override_enter(self): return @@ -415,7 +361,8 @@ def override_exit(self, excp_type, excp_value, excp_traceback): # Test interface seeking to ensure we are able to find the correct one @mock.patch( - "cloudinit.net.ephemeral.EphemeralDHCPv4.__init__", ephemeral_init + "cloudinit.net.ephemeral.EphemeralDHCPv4.__init__", + side_effect=(NoDHCPLeaseError("Generic for testing"), None), ) @mock.patch( "cloudinit.net.ephemeral.EphemeralDHCPv4.__enter__", override_enter @@ -431,6 +378,7 @@ def test_interface_seek( mock_interface_list, mock_read_metadata, mock_isvultr, + mock_eph_init, ): mock_read_metadata.return_value = {} mock_isvultr.return_value = True @@ -447,36 +395,4 @@ def test_interface_seek( except Exception: pass - self.assertEqual(FINAL_INTERFACE_USED, INTERFACES[3]) - - # Test route checking sucessful DHCPs - @mock.patch( - "cloudinit.net.ephemeral.EphemeralDHCPv4.__init__", - ephemeral_init_always, - ) - @mock.patch( - "cloudinit.net.ephemeral.EphemeralDHCPv4.__enter__", override_enter - ) - @mock.patch( - "cloudinit.net.ephemeral.EphemeralDHCPv4.__exit__", override_exit - ) - @mock.patch("cloudinit.sources.helpers.vultr.get_interface_list") - @mock.patch("cloudinit.sources.helpers.vultr.is_vultr") - @mock.patch("cloudinit.sources.helpers.vultr.read_metadata") - def test_interface_seek_route_check( - self, mock_read_metadata, mock_isvultr, mock_interface_list - ): - mock_read_metadata.return_value = {} - mock_interface_list.return_value = FILTERED_INTERFACES - mock_isvultr.return_value = True - - source = DataSourceVultr.DataSourceVultr( - settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) - ) - - try: - source._get_data() - except Exception: - pass - - self.assertEqual(FINAL_INTERFACE_USED, INTERFACES[3]) + assert mock_eph_init.call_args[1]["iface"] == FILTERED_INTERFACES[1] diff --git a/tests/unittests/sources/test_wsl.py b/tests/unittests/sources/test_wsl.py index 9f5c40fcd..31c5c897e 100644 --- a/tests/unittests/sources/test_wsl.py +++ b/tests/unittests/sources/test_wsl.py @@ -4,6 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. import logging +import os from copy import deepcopy from email.mime.multipart import MIMEMultipart from pathlib import PurePath @@ -13,6 +14,7 @@ from cloudinit import util from cloudinit.sources import DataSourceWSL as wsl +from tests.unittests.distros import _get_distro from tests.unittests.helpers import does_not_raise, mock INSTANCE_NAME = "Noble-MLKit" @@ -250,42 +252,53 @@ def join_payloads_from_content_type( class TestWSLDataSource: - @mock.patch("cloudinit.sources.DataSourceWSL.instance_name") - @mock.patch("cloudinit.sources.DataSourceWSL.cloud_init_data_dir") - def test_metadata_id_default(self, m_seed_dir, m_iname, tmpdir, paths): + @pytest.fixture(autouse=True) + def setup(self, mocker, tmpdir): + mocker.patch( + "cloudinit.sources.DataSourceWSL.instance_name", + return_value=INSTANCE_NAME, + ) + mocker.patch( + "cloudinit.sources.DataSourceWSL.find_home", + return_value=PurePath(tmpdir), + ) + mocker.patch( + "cloudinit.sources.DataSourceWSL.subp.which", + return_value="/usr/bin/wslpath", + ) + + def test_metadata_id_default(self, tmpdir, paths): """ Validates that instance-id is properly set, indepedent of the existence of user-data. 
""" - m_iname.return_value = INSTANCE_NAME - m_seed_dir.return_value = PurePath(tmpdir) ds = wsl.DataSourceWSL( sys_cfg=SAMPLE_CFG, - distro=None, + distro=_get_distro("ubuntu"), paths=paths, ) ds.get_data() assert ds.get_instance_id() == wsl.DEFAULT_INSTANCE_ID - @mock.patch("cloudinit.sources.DataSourceWSL.instance_name") - @mock.patch("cloudinit.sources.DataSourceWSL.cloud_init_data_dir") - def test_metadata_id(self, m_seed_dir, m_iname, tmpdir, paths): + def test_metadata_id(self, tmpdir, paths): """ Validates that instance-id is properly set, indepedent of the existence of user-data. """ - m_iname.return_value = INSTANCE_NAME - m_seed_dir.return_value = PurePath(tmpdir) SAMPLE_ID = "Nice-ID" - tmpdir.join(f"{INSTANCE_NAME}.meta-data").write( + metadata_path = tmpdir.join( + ".cloud-init", f"{INSTANCE_NAME}.meta-data" + ) + metadata_path.dirpath().mkdir() + metadata_path.write( f'{{"instance-id":"{SAMPLE_ID}"}}', ) ds = wsl.DataSourceWSL( sys_cfg=SAMPLE_CFG, - distro=None, + distro=_get_distro("ubuntu"), paths=paths, ) ds.get_data() @@ -293,19 +306,15 @@ def test_metadata_id(self, m_seed_dir, m_iname, tmpdir, paths): assert ds.get_instance_id() == SAMPLE_ID @mock.patch("cloudinit.util.lsb_release") - @mock.patch("cloudinit.sources.DataSourceWSL.instance_name") - @mock.patch("cloudinit.sources.DataSourceWSL.cloud_init_data_dir") - def test_get_data_cc(self, m_seed_dir, m_iname, m_gld, paths, tmpdir): - m_gld.return_value = SAMPLE_LINUX_DISTRO - m_iname.return_value = INSTANCE_NAME - m_seed_dir.return_value = PurePath(tmpdir) - tmpdir.join(f"{INSTANCE_NAME}.user-data").write( - "#cloud-config\nwrite_files:\n- path: /etc/wsl.conf" - ) + def test_get_data_cc(self, m_lsb_release, paths, tmpdir): + m_lsb_release.return_value = SAMPLE_LINUX_DISTRO + data_path = tmpdir.join(".cloud-init", f"{INSTANCE_NAME}.user-data") + data_path.dirpath().mkdir() + data_path.write("#cloud-config\nwrite_files:\n- path: /etc/wsl.conf") ds = wsl.DataSourceWSL( sys_cfg=SAMPLE_CFG, - distro=None, + distro=_get_distro("ubuntu"), paths=paths, ) @@ -320,19 +329,15 @@ def test_get_data_cc(self, m_seed_dir, m_iname, m_gld, paths, tmpdir): assert "wsl.conf" in cast(str, userdata) @mock.patch("cloudinit.util.lsb_release") - @mock.patch("cloudinit.sources.DataSourceWSL.instance_name") - @mock.patch("cloudinit.sources.DataSourceWSL.cloud_init_data_dir") - def test_get_data_sh(self, m_seed_dir, m_iname, m_gld, tmpdir, paths): - m_gld.return_value = SAMPLE_LINUX_DISTRO - m_iname.return_value = INSTANCE_NAME - m_seed_dir.return_value = PurePath(tmpdir) + def test_get_data_sh(self, m_lsb_release, tmpdir, paths): + m_lsb_release.return_value = SAMPLE_LINUX_DISTRO COMMAND = "echo Hello cloud-init on WSL!" 
- tmpdir.join(f"{INSTANCE_NAME}.user-data").write( - f"#!/bin/sh\n{COMMAND}\n" - ) + data_path = tmpdir.join(".cloud-init", f"{INSTANCE_NAME}.user-data") + data_path.dirpath().mkdir() + data_path.write(f"#!/bin/sh\n{COMMAND}\n") ds = wsl.DataSourceWSL( sys_cfg=SAMPLE_CFG, - distro=None, + distro=_get_distro("ubuntu"), paths=paths, ) @@ -349,31 +354,31 @@ def test_get_data_sh(self, m_seed_dir, m_iname, m_gld, tmpdir, paths): assert COMMAND in userdata @mock.patch("cloudinit.util.get_linux_distro") - @mock.patch("cloudinit.sources.DataSourceWSL.instance_name") - @mock.patch("cloudinit.sources.DataSourceWSL.cloud_init_data_dir") - def test_data_precedence(self, m_seed_dir, m_iname, m_gld, tmpdir, paths): - m_gld.return_value = SAMPLE_LINUX_DISTRO - m_iname.return_value = INSTANCE_NAME - m_seed_dir.return_value = PurePath(tmpdir) + def test_data_precedence(self, m_get_linux_dist, tmpdir, paths): + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + + # Set up basic user data: + # This is the most specific: should win over the other user-data files. # Also, notice the file name casing: should be irrelevant. - tmpdir.join("ubuntu-24.04.user-data").write( - "#cloud-config\nwrite_files:\n- path: /etc/wsl.conf" - ) + user_file = tmpdir.join(".cloud-init", "ubuntu-24.04.user-data") + user_file.dirpath().mkdir() + user_file.write("#cloud-config\nwrite_files:\n- path: /etc/wsl.conf") distro_file = tmpdir.join(".cloud-init", "Ubuntu-all.user-data") - distro_file.dirpath().mkdir() distro_file.write("#!/bin/sh\n\necho Hello World\n") generic_file = tmpdir.join(".cloud-init", "default.user-data") generic_file.write("#cloud-config\npackages:\n- g++-13\n") + # Run the datasource ds = wsl.DataSourceWSL( sys_cfg=SAMPLE_CFG, - distro=None, + distro=_get_distro("ubuntu"), paths=paths, ) + # Assert user data is properly loaded assert ds.get_data() is True ud = ds.get_userdata() @@ -394,3 +399,84 @@ def test_data_precedence(self, m_seed_dir, m_iname, m_gld, tmpdir, paths): ) assert "" == shell_script + + # Additionally set up some UP4W agent data: + + # Now the winner should be the merge of the agent and Landscape data. 
+ ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + os.makedirs(ubuntu_pro_tmp, exist_ok=True) + + agent_file = ubuntu_pro_tmp.join("agent.yaml") + agent_file.write( + """#cloud-config +landscape: + client: + account_name: agenttest +ubuntu_advantage: + token: testtoken""" + ) + + # Run the datasource + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + # Assert agent combines with existing user data + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + assert "wsl.conf" in userdata + assert "packages" not in userdata + assert "ubuntu_advantage" in userdata + assert "landscape" in userdata + assert "agenttest" in userdata + + # Additionally set up some Landscape provided user data + landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) + landscape_file.write( + """#cloud-config +landscape: + client: + account_name: landscapetest +package_update: true""" + ) + + # Run the datasource + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + # Assert Landscape and Agent combine, with Agent taking precedence + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + + assert "wsl.conf" not in userdata + assert "packages" not in userdata + assert "ubuntu_advantage" in userdata + assert "package_update" in userdata, ( + "package_update entry should not be overridden by agent data" + " nor ignored" + ) + assert "landscape" in userdata + assert ( + "landscapetest" not in userdata and "agenttest" in userdata + ), "Landscape account name should have been overridden by agent data" diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 565788fc5..10cacf4b9 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -252,7 +252,9 @@ def test_invalid_content(self, m_read, tmpdir): key = "cloud-config-url" url = "http://example.com/foo" cmdline = "ro %s=%s bar=1" % (key, url) - m_read.return_value = url_helper.StringResponse(b"unexpected blob") + m_read.return_value = url_helper.StringResponse( + b"unexpected blob", "http://example.com/" + ) fpath = tmpdir.join("ccfile") lvl, msg = main.attempt_cmdline_url( @@ -288,7 +290,9 @@ def test_valid_content(self, m_read, tmpdir): payload = b"#cloud-config\nmydata: foo\nbar: wark\n" cmdline = "ro %s=%s bar=1" % ("cloud-config-url", url) - m_read.return_value = url_helper.StringResponse(payload) + m_read.return_value = url_helper.StringResponse( + payload, "http://example.com" + ) fpath = tmpdir.join("ccfile") lvl, msg = main.attempt_cmdline_url( fpath, network=True, cmdline=cmdline diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py index aae2e9f4e..198b71b93 100644 --- a/tests/unittests/test_atomic_helper.py +++ b/tests/unittests/test_atomic_helper.py @@ -5,34 +5,33 @@ import stat from cloudinit import atomic_helper -from tests.unittests.helpers import CiTestCase -class TestAtomicHelper(CiTestCase): - def test_basic_usage(self): +class TestAtomicHelper: + def test_basic_usage(self, tmp_path): """write_file takes bytes if no omode.""" - path = self.tmp_path("test_basic_usage") + path = tmp_path / "test_basic_usage" contents = b"Hey there\n" atomic_helper.write_file(path,
contents) self.check_file(path, contents) - def test_string(self): + def test_string(self, tmp_path): """write_file can take a string with mode w.""" - path = self.tmp_path("test_string") + path = tmp_path / "test_string" contents = "Hey there\n" atomic_helper.write_file(path, contents, omode="w") self.check_file(path, contents, omode="r") - def test_file_permissions(self): + def test_file_permissions(self, tmp_path): """write_file with mode 400 works correctly.""" - path = self.tmp_path("test_file_permissions") + path = tmp_path / "test_file_permissions" contents = b"test_file_perms" atomic_helper.write_file(path, contents, mode=0o400) self.check_file(path, contents, perms=0o400) - def test_file_preserve_permissions(self): + def test_file_preserve_permissions(self, tmp_path): """create a file with mode 700, then write_file with mode 644.""" - path = self.tmp_path("test_file_preserve_permissions") + path = tmp_path / "test_file_preserve_permissions" contents = b"test_file_perms" with open(path, mode="wb") as f: f.write(b"test file preserve permissions") @@ -40,26 +39,32 @@ def test_file_preserve_permissions(self): atomic_helper.write_file(path, contents, preserve_mode=True) self.check_file(path, contents, perms=0o700) - def test_write_json(self): + def test_write_json(self, tmp_path): """write_json output is readable json.""" - path = self.tmp_path("test_write_json") + path = tmp_path / "test_write_json" data = {"key1": "value1", "key2": ["i1", "i2"]} atomic_helper.write_json(path, data) with open(path, "r") as fp: found = json.load(fp) - self.assertEqual(data, found) + assert data == found self.check_perms(path, 0o644) def check_file(self, path, content, omode=None, perms=0o644): if omode is None: omode = "rb" - self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.isfile(path)) + assert os.path.exists(path) + assert os.path.isfile(path) with open(path, omode) as fp: found = fp.read() - self.assertEqual(content, found) + assert content == found self.check_perms(path, perms) def check_perms(self, path, perms): file_stat = os.stat(path) - self.assertEqual(perms, stat.S_IMODE(file_stat.st_mode)) + assert perms == stat.S_IMODE(file_stat.st_mode) + + def test_write_file_ensure_dirs(self, tmp_path): + path = tmp_path / "ensure_dirs" / "ensure/dir" + contents = b"Hey there\n" + atomic_helper.write_file(path, contents) + self.check_file(path, contents) diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index 4f3f3a8e4..95bdf4514 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -5,7 +5,7 @@ import copy import errno import os -from textwrap import dedent +import re import pytest @@ -25,72 +25,65 @@ path_map, ) from cloudinit.settings import PER_ALWAYS, PER_INSTANCE, PER_ONCE -from tests.unittests.helpers import CiTestCase, mock, skipUnlessJinja +from tests.unittests.helpers import mock, skipUnlessJinja from tests.unittests.util import FakeDataSource INSTANCE_DATA_FILE = "instance-data-sensitive.json" +MPATH = "cloudinit.handlers.jinja_template." -class TestJinjaTemplatePartHandler(CiTestCase): - - with_logs = True - - mpath = "cloudinit.handlers.jinja_template." 
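The `TestAtomicHelper` migration above swaps `CiTestCase`'s `self.tmp_path()` helper for pytest's built-in `tmp_path` fixture, which hands each test a fresh per-test `pathlib.Path` and needs no setUp/tearDown bookkeeping. A minimal sketch of the fixture in use (an illustrative test, not part of the suite):

import json

# tmp_path is provided by pytest itself; each test gets its own directory.
def test_write_json_roundtrip(tmp_path):
    path = tmp_path / "data.json"
    data = {"key1": "value1", "key2": ["i1", "i2"]}
    path.write_text(json.dumps(data))
    # The directory is cleaned up by pytest after a few test runs.
    assert json.loads(path.read_text()) == data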
- - def setUp(self): - super(TestJinjaTemplatePartHandler, self).setUp() - self.tmp = self.tmp_dir() - self.run_dir = os.path.join(self.tmp, "run_dir") - util.ensure_dir(self.run_dir) - self.paths = helpers.Paths( - {"cloud_dir": self.tmp, "run_dir": self.run_dir} +class TestJinjaTemplatePartHandler: + @pytest.fixture + def setup(self, tmp_path): + yield helpers.Paths( + {"cloud_dir": tmp_path, "run_dir": tmp_path / "run_dir"} ) - def test_jinja_template_part_handler_defaults(self): + def test_jinja_template_part_handler_defaults(self, paths): """On init, paths are saved and subhandler types are empty.""" - h = JinjaTemplatePartHandler(self.paths) - self.assertEqual(["## template: jinja"], h.prefixes) - self.assertEqual(3, h.handler_version) - self.assertEqual(self.paths, h.paths) - self.assertEqual({}, h.sub_handlers) - - def test_jinja_template_part_handler_looks_up_sub_handler_types(self): + h = JinjaTemplatePartHandler(paths) + assert ["## template: jinja"] == h.prefixes + assert 3 == h.handler_version + assert paths == h.paths + assert {} == h.sub_handlers + + def test_jinja_template_part_handler_looks_up_sub_handler_types( + self, paths + ): """When sub_handlers are passed, init lists types of subhandlers.""" - script_handler = ShellScriptPartHandler(self.paths) - cloudconfig_handler = CloudConfigPartHandler(self.paths) + script_handler = ShellScriptPartHandler(paths) + cloudconfig_handler = CloudConfigPartHandler(paths) h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[script_handler, cloudconfig_handler] - ) - self.assertCountEqual( - [ - "text/cloud-config", - "text/cloud-config-jsonp", - "text/x-shellscript", - ], - h.sub_handlers, + paths, sub_handlers=[script_handler, cloudconfig_handler] ) - - def test_jinja_template_part_handler_looks_up_subhandler_types(self): + expected = [ + "text/cloud-config", + "text/cloud-config-jsonp", + "text/x-shellscript", + ] + assert sorted(expected) == sorted(h.sub_handlers) + + def test_jinja_template_part_handler_looks_up_subhandler_types( + self, paths + ): """When sub_handlers are passed, init lists types of subhandlers.""" - script_handler = ShellScriptPartHandler(self.paths) - cloudconfig_handler = CloudConfigPartHandler(self.paths) + script_handler = ShellScriptPartHandler(paths) + cloudconfig_handler = CloudConfigPartHandler(paths) h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[script_handler, cloudconfig_handler] + paths, sub_handlers=[script_handler, cloudconfig_handler] ) - self.assertCountEqual( - [ - "text/cloud-config", - "text/cloud-config-jsonp", - "text/x-shellscript", - ], - h.sub_handlers, - ) - - def test_jinja_template_handle_noop_on_content_signals(self): + expected = [ + "text/cloud-config", + "text/cloud-config-jsonp", + "text/x-shellscript", + ] + assert sorted(expected) == sorted(h.sub_handlers) + + def test_jinja_template_handle_noop_on_content_signals(self, paths): """Perform no part handling when content type is CONTENT_SIGNALS.""" - script_handler = ShellScriptPartHandler(self.paths) + script_handler = ShellScriptPartHandler(paths) - h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) + h = JinjaTemplatePartHandler(paths, sub_handlers=[script_handler]) with mock.patch.object(script_handler, "handle_part") as m_handle_part: h.handle_part( data="data", @@ -103,16 +96,18 @@ def test_jinja_template_handle_noop_on_content_signals(self): m_handle_part.assert_not_called() @skipUnlessJinja() - def test_jinja_template_handle_subhandler_v2_with_clean_payload(self): + def 
test_jinja_template_handle_subhandler_v2_with_clean_payload( + self, paths + ): """Call version 2 subhandler.handle_part with stripped payload.""" - script_handler = ShellScriptPartHandler(self.paths) - self.assertEqual(2, script_handler.handler_version) + script_handler = ShellScriptPartHandler(paths) + assert 2 == script_handler.handler_version # Create required instance data json file - instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) + instance_json = os.path.join(paths.run_dir, INSTANCE_DATA_FILE) instance_data = {"topkey": "echo himom"} util.write_file(instance_json, atomic_helper.json_dumps(instance_data)) - h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) + h = JinjaTemplatePartHandler(paths, sub_handlers=[script_handler]) with mock.patch.object(script_handler, "handle_part") as m_part: # ctype with leading '!' not in handlers.CONTENT_SIGNALS h.handle_part( @@ -128,18 +123,18 @@ def test_jinja_template_handle_subhandler_v2_with_clean_payload(self): ) @skipUnlessJinja() - def test_jinja_template_handle_subhandler_v3_with_clean_payload(self): + def test_jinja_template_handle_subhandler_v3_with_clean_payload( + self, paths + ): """Call version 3 subhandler.handle_part with stripped payload.""" - cloudcfg_handler = CloudConfigPartHandler(self.paths) - self.assertEqual(3, cloudcfg_handler.handler_version) + cloudcfg_handler = CloudConfigPartHandler(paths) + assert 3 == cloudcfg_handler.handler_version # Create required instance-data.json file - instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) + instance_json = os.path.join(paths.run_dir, INSTANCE_DATA_FILE) instance_data = {"topkey": {"sub": "runcmd: [echo hi]"}} util.write_file(instance_json, atomic_helper.json_dumps(instance_data)) - h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[cloudcfg_handler] - ) + h = JinjaTemplatePartHandler(paths, sub_handlers=[cloudcfg_handler]) with mock.patch.object(cloudcfg_handler, "handle_part") as m_part: # ctype with leading '!' not in handlers.CONTENT_SIGNALS h.handle_part( @@ -159,11 +154,13 @@ def test_jinja_template_handle_subhandler_v3_with_clean_payload(self): "headers", ) - def test_jinja_template_handle_errors_on_missing_instance_data_json(self): + def test_jinja_template_handle_errors_on_missing_instance_data_json( + self, paths + ): """If instance-data is absent, raise an error from handle_part.""" - script_handler = ShellScriptPartHandler(self.paths) - h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) - with self.assertRaises(JinjaLoadError) as context_manager: + script_handler = ShellScriptPartHandler(paths) + h = JinjaTemplatePartHandler(paths, sub_handlers=[script_handler]) + with pytest.raises(JinjaLoadError) as context_manager: h.handle_part( data="data", ctype="!" + handlers.CONTENT_START, @@ -173,24 +170,25 @@ def test_jinja_template_handle_errors_on_missing_instance_data_json(self): headers="headers", ) script_file = os.path.join(script_handler.script_dir, "part01") - self.assertEqual( + assert ( "Cannot render jinja template vars. 
Instance data not yet present" - " at {}/{}".format(self.run_dir, INSTANCE_DATA_FILE), - str(context_manager.exception), + " at {}/{}".format(paths.run_dir, INSTANCE_DATA_FILE) + == str(context_manager.value) ) - self.assertFalse( - os.path.exists(script_file), - "Unexpected file created %s" % script_file, + assert not os.path.exists(script_file), ( + "Unexpected file created %s" % script_file ) - def test_jinja_template_handle_errors_on_unreadable_instance_data(self): + def test_jinja_template_handle_errors_on_unreadable_instance_data( + self, paths + ): """If instance-data is unreadable, raise an error from handle_part.""" - script_handler = ShellScriptPartHandler(self.paths) - instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) + script_handler = ShellScriptPartHandler(paths) + instance_json = os.path.join(paths.run_dir, INSTANCE_DATA_FILE) util.write_file(instance_json, atomic_helper.json_dumps({})) - h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) - with mock.patch(self.mpath + "load_text_file") as m_load: - with self.assertRaises(JinjaLoadError) as context_manager: + h = JinjaTemplatePartHandler(paths, sub_handlers=[script_handler]) + with mock.patch(MPATH + "load_text_file") as m_load: + with pytest.raises(JinjaLoadError) as context_manager: m_load.side_effect = OSError(errno.EACCES, "Not allowed") h.handle_part( data="data", @@ -201,24 +199,23 @@ def test_jinja_template_handle_errors_on_unreadable_instance_data(self): headers="headers", ) script_file = os.path.join(script_handler.script_dir, "part01") - self.assertEqual( + assert ( "Cannot render jinja template vars. No read permission on " - "'{}/{}'. Try sudo".format(self.run_dir, INSTANCE_DATA_FILE), - str(context_manager.exception), + "'{}/{}'. Try sudo".format(paths.run_dir, INSTANCE_DATA_FILE) + == str(context_manager.value) ) - self.assertFalse( - os.path.exists(script_file), - "Unexpected file created %s" % script_file, + assert not os.path.exists(script_file), ( + "Unexpected file created %s" % script_file ) @skipUnlessJinja() - def test_jinja_template_handle_renders_jinja_content(self): + def test_jinja_template_handle_renders_jinja_content(self, paths, caplog): """When present, render jinja variables from instance data""" - script_handler = ShellScriptPartHandler(self.paths) - instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) + script_handler = ShellScriptPartHandler(paths) + instance_json = os.path.join(paths.run_dir, INSTANCE_DATA_FILE) instance_data = {"topkey": {"subkey": "echo himom"}} util.write_file(instance_json, atomic_helper.json_dumps(instance_data)) - h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) + h = JinjaTemplatePartHandler(paths, sub_handlers=[script_handler]) h.handle_part( data="data", ctype="!" 
+ handlers.CONTENT_START, @@ -232,24 +229,24 @@ def test_jinja_template_handle_renders_jinja_content(self): headers="headers", ) script_file = os.path.join(script_handler.script_dir, "part01") - self.assertNotIn( + assert ( "Instance data not yet present at {}/{}".format( - self.run_dir, INSTANCE_DATA_FILE - ), - self.logs.getvalue(), - ) - self.assertEqual( - "#!/bin/bash\necho himom", util.load_text_file(script_file) + paths.run_dir, INSTANCE_DATA_FILE + ) + not in caplog.text ) + assert "#!/bin/bash\necho himom" == util.load_text_file(script_file) @skipUnlessJinja() - def test_jinja_template_handle_renders_jinja_content_missing_keys(self): + def test_jinja_template_handle_renders_jinja_content_missing_keys( + self, paths, caplog + ): """When specified jinja variable is undefined, log a warning.""" - script_handler = ShellScriptPartHandler(self.paths) - instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) + script_handler = ShellScriptPartHandler(paths) + instance_json = os.path.join(paths.run_dir, INSTANCE_DATA_FILE) instance_data = {"topkey": {"subkey": "echo himom"}} util.write_file(instance_json, atomic_helper.json_dumps(instance_data)) - h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) + h = JinjaTemplatePartHandler(paths, sub_handlers=[script_handler]) h.handle_part( data="data", ctype="!" + handlers.CONTENT_START, @@ -259,14 +256,12 @@ def test_jinja_template_handle_renders_jinja_content_missing_keys(self): headers="headers", ) script_file = os.path.join(script_handler.script_dir, "part01") - self.assertTrue( - os.path.exists(script_file), - "Missing expected file %s" % script_file, + assert os.path.exists(script_file), ( + "Missing expected file %s" % script_file ) - self.assertIn( - "WARNING: Could not render jinja template variables in file" - " 'part01': 'goodtry'\n", - self.logs.getvalue(), + assert ( + "Could not render jinja template variables in file" + " 'part01': 'goodtry'\n" in caplog.text ) @@ -353,59 +348,50 @@ def test_convert_instance_data_decodes_decode_paths(self): assert expected_data == converted_data -class TestRenderJinjaPayload(CiTestCase): - - with_logs = True - +class TestRenderJinjaPayload: @skipUnlessJinja() - def test_render_jinja_payload_logs_jinja_vars_on_debug(self): + def test_render_jinja_payload_logs_jinja_vars_on_debug(self, caplog): """When debug is True, log available jinja variables.""" payload = ( "## template: jinja\n#!/bin/sh\necho hi from {{ v1.hostname }}" ) instance_data = {"v1": {"hostname": "foo"}, "instance-id": "iid"} - expected_log = dedent( - """\ - DEBUG: Converted jinja variables - { - "hostname": "foo", - "instance-id": "iid", - "instance_id": "iid", - "v1": { - "hostname": "foo" - } - } - """ - ) - self.assertEqual( + assert ( render_jinja_payload( payload=payload, payload_fn="myfile", instance_data=instance_data, debug=True, - ), - "#!/bin/sh\necho hi from foo", + ) + == "#!/bin/sh\necho hi from foo" + ) + expected_log = ( + '.*Converted jinja variables\\n.*{\\n.*"hostname": "foo",\\n.*' + '"instance-id": "iid",\\n.*"instance_id": "iid",\\n.*' + '"v1": {\\n.*"hostname": "foo"\\n.*}' ) - self.assertEqual(expected_log, self.logs.getvalue()) + assert re.match(expected_log, caplog.text, re.DOTALL) @skipUnlessJinja() - def test_render_jinja_payload_replaces_missing_variables_and_warns(self): + def test_render_jinja_payload_replaces_missing_variables_and_warns( + self, caplog + ): """Warn on missing jinja variables and replace the absent variable.""" payload = "## template: jinja\n#!/bin/sh\necho hi 
from {{ NOTHERE }}" instance_data = {"v1": {"hostname": "foo"}, "instance-id": "iid"} - self.assertEqual( + assert ( render_jinja_payload( payload=payload, payload_fn="myfile", instance_data=instance_data, - ), - "#!/bin/sh\necho hi from CI_MISSING_JINJA_VAR/NOTHERE", + ) + == "#!/bin/sh\necho hi from CI_MISSING_JINJA_VAR/NOTHERE" ) expected_log = ( - "WARNING: Could not render jinja template variables in file" + "Could not render jinja template variables in file" " 'myfile': 'NOTHERE'" ) - self.assertIn(expected_log, self.logs.getvalue()) + assert expected_log in caplog.text class TestShellScriptByFrequencyHandlers: diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index 247624502..3a92d29e2 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -2,8 +2,10 @@ import contextlib import io +import json import logging import os +import sys from collections import namedtuple import pytest @@ -15,14 +17,29 @@ mock = test_helpers.mock M_PATH = "cloudinit.cmd.main." - - -@pytest.fixture(autouse=False) -def mock_get_user_data_file(mocker, tmpdir): - yield mocker.patch( - "cloudinit.cmd.devel.logs._get_user_data_file", - return_value=tmpdir.join("cloud"), - ) +Tmpdir = namedtuple("Tmpdir", ["tmpdir", "link_d", "data_d"]) +FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"]) + + +@pytest.fixture(autouse=True, scope="module") +def disable_setup_logging(): + # setup_basic_logging can change the logging level to WARNING, so + # ensure it is always mocked + with mock.patch(f"{M_PATH}log.setup_basic_logging", autospec=True): + yield + + +@pytest.fixture() +def mock_status_wrapper(mocker, tmpdir): + link_d = os.path.join(tmpdir, "link") + data_d = os.path.join(tmpdir, "data") + with mocker.patch( + "cloudinit.cmd.main.read_cfg_paths", + return_value=mock.Mock(get_cpath=lambda _: data_d), + ), mocker.patch( + "cloudinit.cmd.main.os.path.normpath", return_value=link_d + ): + yield Tmpdir(tmpdir, link_d, data_d) class TestCLI: @@ -51,43 +68,40 @@ def _call_main(self, sysv_args=None): ), ], ) - def test_status_wrapper_errors(self, action, name, match, caplog, tmpdir): - data_d = tmpdir.join("data") - link_d = tmpdir.join("link") - FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"]) + def test_status_wrapper_errors( + self, action, name, match, caplog, mock_status_wrapper + ): my_action = mock.Mock() myargs = FakeArgs((action, my_action), False, "bogusmode") with pytest.raises(ValueError, match=match): - cli.status_wrapper(name, myargs, data_d, link_d) + cli.status_wrapper(name, myargs) assert [] == my_action.call_args_list @mock.patch("cloudinit.cmd.main.atomic_helper.write_json") def test_status_wrapper_init_local_writes_fresh_status_info( self, m_json, - tmpdir, + mock_status_wrapper, ): """When running in init-local mode, status_wrapper writes status.json. Old status and results artifacts are also removed. """ - data_d = tmpdir.join("data") - link_d = tmpdir.join("link") + data_d = mock_status_wrapper.data_d + link_d = mock_status_wrapper.link_d # Write old artifacts which will be removed or updated. 
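# (Illustrative aside, assuming the fixtures defined above: status_wrapper
# no longer accepts data_d/link_d arguments. It derives data_d from
# read_cfg_paths().get_cpath(...) and link_d via os.path.normpath(...),
# both of which mock_status_wrapper patches, so a minimal invocation is:
#
#     myargs = FakeArgs(("name", lambda *_: ("SomeDatasource", [])), True, "bogusmode")
#     rc = cli.status_wrapper("init", myargs)
#
# The loop below seeds stale status.json/result.json files in both
# directories to prove a fresh init-local run replaces them.)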
for _dir in data_d, link_d: test_helpers.populate_dir( str(_dir), {"status.json": "old", "result.json": "old"} ) - FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"]) - def myaction(name, args): # Return an error to watch status capture them return "SomeDatasource", ["an error"] myargs = FakeArgs(("ignored_name", myaction), True, "bogusmode") - cli.status_wrapper("init", myargs, data_d, link_d) + cli.status_wrapper("init", myargs) # No errors reported in status status_v1 = m_json.call_args_list[1][0][1]["v1"] assert status_v1.keys() == { @@ -109,23 +123,23 @@ def myaction(name, args): @mock.patch("cloudinit.cmd.main.atomic_helper.write_json") def test_status_wrapper_init_local_honor_cloud_dir( - self, m_json, mocker, tmpdir + self, m_json, mocker, mock_status_wrapper ): """When running in init-local mode, status_wrapper honors cloud_dir.""" - cloud_dir = tmpdir.join("cloud") + cloud_dir = mock_status_wrapper.tmpdir.join("cloud") paths = helpers.Paths({"cloud_dir": str(cloud_dir)}) - mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) - data_d = cloud_dir.join("data") - link_d = tmpdir.join("link") - - FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"]) + mocker.patch( + "cloudinit.config.schema.read_cfg_paths", return_value=paths + ) + data_d = mock_status_wrapper.data_d + link_d = mock_status_wrapper.link_d def myaction(name, args): # Return an error to watch status capture them return "SomeDatasource", ["an_error"] myargs = FakeArgs(("ignored_name", myaction), True, "bogusmode") - cli.status_wrapper("init", myargs, link_d=link_d) # No explicit data_d + cli.status_wrapper("init", myargs) # No explicit data_d # Access cloud_dir directly status_v1 = m_json.call_args_list[1][0][1]["v1"] @@ -193,7 +207,7 @@ def test_all_subcommands_represented_in_help(self, capsys): ), ), ) - @mock.patch("cloudinit.cmd.main.setup_basic_logging") + @mock.patch("cloudinit.cmd.main.log.setup_basic_logging") def test_subcommands_log_to_stderr_via_setup_basic_logging( self, setup_basic_logging, subcommand, log_to_stderr, mocks ): @@ -233,9 +247,12 @@ def test_modules_subcommand_parser(self, m_status_wrapper, subcommand): "schema", ], ) - @mock.patch("cloudinit.stages.Init._read_cfg", return_value={}) def test_conditional_subcommands_from_entry_point_sys_argv( - self, m_read_cfg, subcommand, capsys, mock_get_user_data_file, tmpdir + self, + subcommand, + capsys, + m_log_paths, + mock_status_wrapper, ): """Subcommands from entry-point are properly parsed from sys.argv.""" expected_error = f"usage: cloud-init {subcommand}" @@ -256,7 +273,9 @@ def test_conditional_subcommands_from_entry_point_sys_argv( "status", ], ) - def test_subcommand_parser(self, subcommand, mock_get_user_data_file): + def test_subcommand_parser( + self, subcommand, m_log_paths, mock_status_wrapper + ): """cloud-init `subcommand` calls its subparser.""" # Provide -h param to `subcommand` to avoid having to mock behavior. 
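# (Sketch, not part of the patch: argparse responds to "-h" by printing
# usage and raising SystemExit before any subcommand action runs, so no
# datasource behavior needs mocking. The shape of the check is roughly:
#
#     with contextlib.redirect_stdout(out):
#         with pytest.raises(SystemExit):
#             self._call_main(["cloud-init", subcommand, "-h"])
#
# The exact assertion below may differ; the point is that "-h"
# short-circuits execution.)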
out = io.StringIO() @@ -300,16 +319,15 @@ def test_wb_schema_subcommand_parser(self, m_read_cfg, capsys): ["all"], [ "**Supported distros:** all", - "**Supported distros:** almalinux, alpine, centos, " - "cloudlinux, cos, debian, eurolinux, fedora, freebsd, " - "mariner, miraclelinux, " - "openbsd, openeuler, OpenCloudOS, openmandriva, " - "opensuse, opensuse-microos, opensuse-tumbleweed, " - "opensuse-leap, photon, rhel, rocky, sle_hpc, " - "sle-micro, sles, TencentOS, ubuntu, virtuozzo", + "**Supported distros:** almalinux, alpine, azurelinux, " + "centos, cloudlinux, cos, debian, eurolinux, fedora, " + "freebsd, mariner, miraclelinux, openbsd, openeuler, " + "OpenCloudOS, openmandriva, opensuse, opensuse-microos, " + "opensuse-tumbleweed, opensuse-leap, photon, rhel, rocky, " + "sle_hpc, sle-micro, sles, TencentOS, ubuntu, virtuozzo", " **resize_rootfs:** ", "(``true``/``false``/``noblock``)", - "runcmd:\n - [ ls, -l, / ]\n", + "runcmd:\n - [ls, -l, /]\n", ], False, id="all_spot_check", @@ -342,7 +360,13 @@ def test_wb_schema_subcommand_parser(self, m_read_cfg, capsys): ) @mock.patch("cloudinit.stages.Init._read_cfg", return_value={}) def test_wb_schema_subcommand( - self, m_read_cfg, args, expected_doc_sections, is_error + self, + m_read_cfg, + args, + expected_doc_sections, + is_error, + mocker, + request, ): """Validate that doc content has correct values.""" @@ -356,6 +380,12 @@ def test_wb_schema_subcommand( if is_error else contextlib.redirect_stdout ) + paths = helpers.Paths( + {"docs_dir": os.path.join(request.config.rootdir, "doc")} + ) + mocker.patch( + "cloudinit.config.schema.read_cfg_paths", return_value=paths + ) with redirecter(out_or_err): self._call_main(["cloud-init", "schema", "--docs"] + args) out_or_err = out_or_err.getvalue() @@ -386,3 +416,136 @@ def test_features_hook_subcommand(self, m_features): assert "features" == parseargs.action[0] assert False is parseargs.debug assert False is parseargs.force + + +class TestSignalHandling: + @mock.patch("cloudinit.cmd.main.atomic_helper.write_json") + def test_status_wrapper_signal_sys_exit( + self, + m_json, + mocker, + mock_status_wrapper, + ): + """make sure that when sys.exit(N) is called, the correct code is + returned + """ + for code in [1, 2, 3, 4]: + rc = cli.status_wrapper( + "init", + FakeArgs( + ( + None, + # silence pylint false positive + # https://github.com/pylint-dev/pylint/issues/9557 + lambda *_: sys.exit(code), # pylint: disable=W0640 + ), + False, + "bogusmode", + ), + ) + assert 1 == rc + + # assert that the status shows errors + assert ( + f"sys.exit({code}) called" + in m_json.call_args[0][1]["v1"]["init"]["errors"] + ) + + @mock.patch("cloudinit.cmd.main.atomic_helper.write_json") + def test_status_wrapper_no_signal_sys_exit( + self, + m_json, + mock_status_wrapper, + ): + """if sys.exit(0) is called, make sure that cloud-init doesn't log a + warning""" + # call status_wrapper() with the required args + rc = cli.status_wrapper( + "init", + FakeArgs( + ( + None, + lambda *_: sys.exit(0), + ), + False, + "bogusmode", + ), + ) + assert 0 == rc + assert not m_json.call_args[0][1]["v1"]["init"]["errors"] + + @mock.patch("cloudinit.cmd.main.atomic_helper.write_json") + def test_status_wrapper_signal_warnings( + self, + m_json, + mock_status_wrapper, + ): + """If a stage is started and status.json already has a start time but + no end time for that stage, this is an unknown state - make sure that + a warning is logged. 
+ """ + + # Write a status.json to the mocked temporary directory + for dir in mock_status_wrapper.data_d, mock_status_wrapper.link_d: + test_helpers.populate_dir( + str(dir), + { + "status.json": json.dumps( + { + "v1": { + "stage": "init", + "datasource": ( + "DataSourceNoCloud " + "[seed=/var/.../seed/nocloud-net]" + "[dsmode=net]" + ), + "init": { + "errors": [], + "recoverable_errors": {}, + "start": 124.567, + "finished": None, + }, + "init-local": { + "errors": [], + "recoverable_errors": {}, + "start": 100.0, + "finished": 100.00001, + }, + "modules-config": { + "errors": [], + "recoverable_errors": {}, + "start": None, + "finished": None, + }, + "modules-final": { + "errors": [], + "recoverable_errors": {}, + "start": None, + "finished": None, + }, + } + } + ) + }, + ) + # call status_wrapper() with the required args + cli.status_wrapper( + "init", + FakeArgs( + ( + None, + lambda *_: ("SomeDataSource", []), + ), + False, + "bogusmode", + ), + ) + + # assert that the status shows recoverable errors + assert ( + "Unexpected start time found for Network Stage. " + "Was this stage restarted?" + in m_json.call_args[0][1]["v1"]["init"]["recoverable_errors"][ + "WARNING" + ] + ) diff --git a/tests/unittests/test_conftest.py b/tests/unittests/test_conftest.py index e9f7a4324..d1a4be23f 100644 --- a/tests/unittests/test_conftest.py +++ b/tests/unittests/test_conftest.py @@ -38,23 +38,27 @@ def test_subp_usage_can_be_conditionally_reenabled(self): assert "allowed: whoami" in str(excinfo.value) subp.subp(["whoami"]) - @pytest.mark.allow_subp_for("whoami", "bash") + @pytest.mark.allow_subp_for("whoami", "sh") def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self): with pytest.raises(UnexpectedSubpError) as excinfo: subp.subp(["some", "args"]) - assert "allowed: whoami,bash" in str(excinfo.value) - subp.subp(["bash", "-c", "true"]) + assert "allowed: whoami,sh" in str(excinfo.value) + subp.subp(["sh", "-c", "true"]) subp.subp(["whoami"]) @pytest.mark.allow_all_subp - @pytest.mark.allow_subp_for("bash") + @pytest.mark.allow_subp_for("sh") def test_both_marks_raise_an_error(self): with pytest.raises(UnexpectedSubpError, match="marked both"): - subp.subp(["bash"]) + subp.subp(["sh"]) class TestDisableSubpUsageInTestSubclass(CiTestCase): - """Test that disable_subp_usage doesn't impact CiTestCase's subp logic.""" + """Test that disable_subp_usage doesn't impact CiTestCase's subp logic. + + Once the rest of the CiTestCase tests are removed, this class + should be removed as well. 
+ """ def test_using_subp_raises_exception(self): with pytest.raises(Exception): @@ -68,6 +72,6 @@ def test_subp_usage_can_be_reenabled(self): _old_allowed_subp = self.allow_subp self.allowed_subp = True try: - subp.subp(["bash", "-c", "true"]) + subp.subp(["sh", "-c", "true"]) finally: self.allowed_subp = _old_allowed_subp diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 823aab58a..14be6fa48 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -23,7 +23,7 @@ from cloudinit import user_data as ud from cloudinit import util from cloudinit.config.modules import Modules -from cloudinit.settings import PER_INSTANCE +from cloudinit.settings import DEFAULT_RUN_DIR, PER_INSTANCE from tests.unittests import helpers from tests.unittests.util import FakeDataSource @@ -482,6 +482,7 @@ def test_mime_text_plain(self, init_tmp, caplog): ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES=True, EXPIRE_APPLIES_TO_HASHED_USERS=False, NETPLAN_CONFIG_ROOT_READ_ONLY=True, + DEPRECATION_INFO_BOUNDARY="devel", NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH=False, APT_DEB822_SOURCE_LIST_FILE=True, ) @@ -513,6 +514,7 @@ def test_shellscript(self, init_tmp, tmpdir, caplog): "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES": True, "EXPIRE_APPLIES_TO_HASHED_USERS": False, "NETPLAN_CONFIG_ROOT_READ_ONLY": True, + "DEPRECATION_INFO_BOUNDARY": "devel", "NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH": False, "APT_DEB822_SOURCE_LIST_FILE": True, }, @@ -826,7 +828,7 @@ def mocks(self, mocker): def test_only_builtin_gets_builtin(self, mocker): mocker.patch(f"{MPATH}.read_runtime_config", return_value={}) mocker.patch(f"{MPATH}.util.read_conf_with_confd") - config = stages.fetch_base_config() + config = stages.fetch_base_config(DEFAULT_RUN_DIR) assert util.get_builtin_cfg() == config def test_conf_d_overrides_defaults(self, mocker): @@ -839,7 +841,7 @@ def test_conf_d_overrides_defaults(self, mocker): return_value={test_key: test_value}, ) mocker.patch(f"{MPATH}.read_runtime_config", return_value={}) - config = stages.fetch_base_config() + config = stages.fetch_base_config(DEFAULT_RUN_DIR) assert config.get(test_key) == test_value builtin[test_key] = test_value assert config == builtin @@ -853,7 +855,7 @@ def test_confd_with_template(self, mocker, tmp_path: Path): mocker.patch("cloudinit.stages.CLOUD_CONFIG", cfg_path) mocker.patch(f"{MPATH}.util.get_builtin_cfg", return_value={}) config = stages.fetch_base_config( - instance_data_file=instance_data_path + DEFAULT_RUN_DIR, instance_data_file=instance_data_path ) assert config == {"key": "template_value"} @@ -869,7 +871,7 @@ def test_cmdline_overrides_defaults(self, mocker): return_value=cmdline, ) mocker.patch(f"{MPATH}.read_runtime_config") - config = stages.fetch_base_config() + config = stages.fetch_base_config(DEFAULT_RUN_DIR) assert config.get(test_key) == test_value builtin[test_key] = test_value assert config == builtin @@ -888,7 +890,7 @@ def test_cmdline_overrides_confd_runtime_and_defaults(self, mocker): return_value=cmdline, ) - config = stages.fetch_base_config() + config = stages.fetch_base_config(DEFAULT_RUN_DIR) assert config == {"key1": "value1", "key2": "other2", "key3": "other3"} def test_order_precedence_is_builtin_system_runtime_cmdline(self, mocker): @@ -905,7 +907,7 @@ def test_order_precedence_is_builtin_system_runtime_cmdline(self, mocker): ) mocker.patch(f"{MPATH}.read_runtime_config", return_value=runtime) - config = stages.fetch_base_config() + config = stages.fetch_base_config(DEFAULT_RUN_DIR) assert config == { 
"key1": "cmdline1", diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 105825b69..e71e853f3 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -3,13 +3,16 @@ import copy import os from collections import namedtuple +from logging import getLogger from pathlib import Path +from tempfile import mkdtemp from textwrap import dedent from uuid import uuid4 import pytest +import yaml -from cloudinit import atomic_helper, safeyaml, subp, util +from cloudinit import atomic_helper, subp, util from cloudinit.sources import DataSourceIBMCloud as ds_ibm from cloudinit.sources import DataSourceOracle as ds_oracle from cloudinit.sources import DataSourceSmartOS as ds_smartos @@ -21,6 +24,8 @@ populate_dir_with_ts, ) +LOG = getLogger(__name__) + UNAME_MYSYS = "Linux #83-Ubuntu SMP Wed Jan 18 14:10:15 UTC 2017 x86_64" UNAME_PPC64EL = ( "Linux #106-Ubuntu SMP mon Jun 26 17:53:54 UTC 2017 " @@ -62,6 +67,146 @@ ] +DEFAULT_CLOUD_CONFIG = """\ +# The top level settings are used as module +# and base configuration. +# A set of users which may be applied and/or used by various modules +# when a 'default' entry is found it will reference the 'default_user' +# from the distro configuration specified below +users: + - default + +# If this is set, 'root' will not be able to ssh in and they +# will get a message to login instead as the default $user +disable_root: true + +# This will cause the set+update hostname module to not operate (if true) +preserve_hostname: false + +# If you use datasource_list array, keep array items in a single line. +# If you use multi line array, ds-identify script won't read array items. +# Example datasource config +# datasource: +# Ec2: +# metadata_urls: [ 'blah.com' ] +# timeout: 5 # (defaults to 50 seconds) +# max_wait: 10 # (defaults to 120 seconds) + +# The modules that run in the 'init' stage +cloud_init_modules: + - migrator + - seed_random + - bootcmd + - write-files + - growpart + - resizefs + - disk_setup + - mounts + - set_hostname + - update_hostname + - update_etc_hosts + - ca-certs + - rsyslog + - users-groups + - ssh + +# The modules that run in the 'config' stage +cloud_config_modules: + - wireguard + - snap + - ubuntu_autoinstall + - ssh-import-id + - keyboard + - locale + - set-passwords + - grub-dpkg + - apt-pipelining + - apt-configure + - ubuntu-advantage + - ntp + - timezone + - disable-ec2-metadata + - runcmd + - byobu + +# The modules that run in the 'final' stage +cloud_final_modules: + - package-update-upgrade-install + - fan + - landscape + - lxd + - ubuntu-drivers + - write-files-deferred + - puppet + - chef + - ansible + - mcollective + - salt-minion + - reset_rmc + - refresh_rmc_and_interface + - rightscale_userdata + - scripts-vendor + - scripts-per-once + - scripts-per-boot + - scripts-per-instance + - scripts-user + - ssh-authkey-fingerprints + - keys-to-console + - install-hotplug + - phone-home + - final-message + - power-state-change + +# System and/or distro specific settings +# (not accessible to handlers/transforms) +system_info: + # This will affect which distro class gets used + distro: ubuntu + # Default user name + that default users groups (if added/used) + default_user: + name: ubuntu + lock_passwd: True + gecos: Ubuntu + groups: [adm, audio, cdrom, floppy, lxd, netdev, plugdev, sudo, video] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/bash + network: + renderers: ['netplan', 'eni', 'sysconfig'] + activators: ['netplan', 'eni', 'network-manager', 'networkd'] 
+ # Automatically discover the best ntp_client + ntp_client: auto + # Other config here will be given to the distro class and/or path classes + paths: + cloud_dir: /var/lib/cloud/ + templates_dir: /etc/cloud/templates/ + package_mirrors: + - arches: [i386, amd64] + failsafe: + primary: http://archive.ubuntu.com/ubuntu + security: http://security.ubuntu.com/ubuntu + search: + primary: + - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/ + - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/ + - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/ + security: [] + - arches: [arm64, armel, armhf] + failsafe: + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports + search: + primary: + - http://%(ec2_region)s.ec2.ports.ubuntu.com/ubuntu-ports/ + - http://%(availability_zone)s.clouds.ports.ubuntu.com/ubuntu-ports/ + - http://%(region)s.clouds.ports.ubuntu.com/ubuntu-ports/ + security: [] + - arches: [default] + failsafe: + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports + ssh_svcname: ssh +""" + POLICY_FOUND_ONLY = "search,found=all,maybe=none,notfound=disabled" POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled" DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=disabled" @@ -99,16 +244,22 @@ "RET": "container-other", "ret": 0, } +IS_CONTAINER_OTHER_ENV = {"SYSTEMD_VIRTUALIZATION": "container:container-other"} MOCK_NOT_LXD_DATASOURCE = {"name": "dscheck_LXD", "ret": 1} MOCK_VIRT_IS_KVM = {"name": "detect_virt", "RET": "kvm", "ret": 0} +KVM_ENV = {"SYSTEMD_VIRTUALIZATION": "vm:kvm"} # qemu support for LXD is only for host systems > 5.10 kernel as lxd # passes `hv_passthrough` which causes systemd < v.251 to misinterpret CPU # as "qemu" instead of "kvm" MOCK_VIRT_IS_KVM_QEMU = {"name": "detect_virt", "RET": "qemu", "ret": 0} +IS_KVM_QEMU_ENV = {"SYSTEMD_VIRTUALIZATION": "vm:qemu"} MOCK_VIRT_IS_VMWARE = {"name": "detect_virt", "RET": "vmware", "ret": 0} +IS_VMWARE_ENV = {"SYSTEMD_VIRTUALIZATION": "vm:vmware"} 
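# (Illustrative sketch, not part of the patch: the *_ENV dicts above flow
# through a VALID_CFG entry's "env_vars" key into subp's update_env, so the
# mocked ds-identify reads SYSTEMD_VIRTUALIZATION rather than invoking
# systemd-detect-virt. A hypothetical entry of that shape:
#
#     "SomePlatform-env": {
#         "ds": "Ec2",
#         "env_vars": KVM_ENV,          # {"SYSTEMD_VIRTUALIZATION": "vm:kvm"}
#         "no_mocks": ["detect_virt"],  # force the env-var code path
#     }
#
# Real entries like "GCE_ENV" and "OpenStack-SAPCCloud-env" below follow
# this pattern.)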
# currently, SmartOS hypervisor "bhyve" is unknown by systemd-detect-virt. MOCK_VIRT_IS_VM_OTHER = {"name": "detect_virt", "RET": "vm-other", "ret": 0} +IS_VM_OTHER = {"SYSTEMD_VIRTUALIZATION": "vm:vm-other"} MOCK_VIRT_IS_XEN = {"name": "detect_virt", "RET": "xen", "ret": 0} +IS_XEN_ENV = {"SYSTEMD_VIRTUALIZATION": "vm:xen"} MOCK_VIRT_IS_WSL = {"name": "detect_virt", "RET": "wsl", "ret": 0} MOCK_UNAME_IS_PPC64 = {"name": "uname", "out": UNAME_PPC64EL, "ret": 0} MOCK_UNAME_IS_FREEBSD = {"name": "uname", "out": UNAME_FREEBSD, "ret": 0} @@ -153,6 +304,9 @@ class DsIdentifyBase(CiTestCase): dsid_path = cloud_init_project_dir("tools/ds-identify") allowed_subp = ["sh"] + # set to true to write out the mocked ds-identify for inspection + debug_mode = False + def call( self, rootd=None, @@ -164,6 +318,7 @@ def call( policy_dmi=DI_DEFAULT_POLICY, policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI, ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT, + env_vars=None, ): if args is None: args = [] @@ -173,6 +328,10 @@ def call( if files is None: files = {} + cloudcfg = "etc/cloud/cloud.cfg" + if cloudcfg not in files: + files[cloudcfg] = DEFAULT_CLOUD_CONFIG + if rootd is None: rootd = self.tmp_dir() @@ -251,12 +410,26 @@ def write_mock(data): endlines = [func + " " + " ".join(['"%s"' % s for s in args])] + mocked_ds_identify = "\n".join(head + mocklines + endlines) + "\n" with open(wrap, "w") as fp: - fp.write("\n".join(head + mocklines + endlines) + "\n") + fp.write(mocked_ds_identify) + + # debug_mode forces this test to write the mocked ds-identify script to + # a file for inspection + if self.debug_mode: + tempdir = mkdtemp() + dir = f"{tempdir}/ds-identify" + LOG.debug("Writing mocked ds-identify to %s for debugging.", dir) + with open(dir, "w") as fp: + fp.write(mocked_ds_identify)
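# (Note on the debug hook above: every call() creates a fresh mkdtemp()
# directory that nothing cleans up, so debug_mode should stay False except
# while locally inspecting a generated wrapper, e.g. by re-running the
# file logged by LOG.debug with `sh -x <tempdir>/ds-identify`, where
# <tempdir> is a placeholder for the logged location.)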
rc = 0 try: - out, err = subp.subp(["sh", "-c", ". %s" % wrap], capture=True) + out, err = subp.subp( + ["sh", "-c", ". %s" % wrap], + update_env=env_vars if env_vars else {}, + capture=True, + ) except subp.ProcessExecutionError as e: rc = e.exit_code out = e.stdout @@ -267,7 +440,7 @@ def write_mock(data): if os.path.exists(cfg_out): contents = util.load_text_file(cfg_out) try: - cfg = safeyaml.load(contents) + cfg = yaml.safe_load(contents) except Exception as e: cfg = {"_INVALID_YAML": contents, "_EXCEPTION": str(e)} @@ -281,6 +454,7 @@ def _call_via_dict(self, data, rootd=None, **kwargs): "mocks", "func", "args", + "env_vars", "policy_dmi", "policy_no_dmi", "files", @@ -294,8 +468,9 @@ def _call_via_dict(self, data, rootd=None, **kwargs): def _test_ds_found(self, name): data = copy.deepcopy(VALID_CFG[name]) + return self._check_via_dict( - data, RC_FOUND, dslist=[data.get("ds"), DS_NONE] + data, RC_FOUND, dslist=[data.pop("ds"), DS_NONE] ) def _test_ds_not_found(self, name): @@ -308,7 +483,7 @@ def _check_via_dict(self, data, rc, dslist=None, **kwargs): try: self.assertEqual(rc, ret.rc) if dslist is not None: - self.assertEqual(dslist, ret.cfg["datasource_list"]) + self.assertEqual(dslist, ret.cfg.get("datasource_list")) good = True finally: if not good: @@ -378,6 +553,7 @@ def test_maas_not_detected_2(self): config = "LXD-kvm-not-MAAS-2" self._test_ds_found(config) + @pytest.mark.xfail(reason="GH-4796") def test_maas_not_detected_3(self): """Don't incorrectly identify maas @@ -395,6 +571,21 @@ config = "LXD-kvm-not-MAAS-3" self._test_ds_found(config) + def test_flow_sequence_control(self): + """ensure that an invalid key in the flow_sequence tests produces no + datasource list match + + this test serves as the control for the test_flow_sequence cases + """ + data = copy.deepcopy(VALID_CFG["flow_sequence-control"]) + self._check_via_dict(data, RC_NOT_FOUND) + + def test_flow_sequence(self): + """correctly identify flow sequences""" + for i in range(1, 10): + data = copy.deepcopy(VALID_CFG[f"flow_sequence-{i}"]) + self._check_via_dict(data, RC_FOUND, dslist=[data.get("ds")]) + def test_azure_invalid_configuration(self): """Don't detect incorrect config when invalid datasource_list provided @@ -419,6 +610,20 @@ def test_aws_ec2_hvm(self): """EC2: hvm instances use dmi serial and uuid starting with 'ec2'.""" self._test_ds_found("Ec2-hvm") + def test_aws_ec2_hvm_env(self): + """EC2: hvm instances use dmi serial and uuid starting with 'ec2' + + test using SYSTEMD_VIRTUALIZATION, not systemd-detect-virt + """ + self._test_ds_found("Ec2-hvm-env") + + def test_aws_ec2_hvm_endian(self): + """EC2: hvm instances use system-uuid and may have swapped + endianness""" + self._test_ds_found("Ec2-hvm-swap-endianness") + def test_aws_ec2_xen(self): """EC2: sys/hypervisor/uuid starts with ec2.""" self._test_ds_found("Ec2-xen") @@ -439,6 +644,13 @@ def test_gce_by_product_name(self): """GCE identifies itself with product_name.""" self._test_ds_found("GCE") + def test_gce_by_product_name_env(self): + """GCE identifies itself with product_name. + + Uses SYSTEMD_VIRTUALIZATION + """ + self._test_ds_found("GCE_ENV") + def test_gce_by_serial(self): """Older gce compute instances must be identified by serial.""" self._test_ds_found("GCE-serial") @@ -461,6 +673,20 @@ def test_lxd_kvm_jammy(self): """ self._test_ds_found("LXD-kvm-qemu-kernel-gt-5.10") + def test_lxd_kvm_jammy_env(self): + """LXD KVM on host systems with a kernel > 5.10 needs to match "qemu". 
+ LXD provides `hv_passthrough` when launching kvm instances when host + kernel is > 5.10. This results in systemd being unable to detect the + virtualized CPUID="Linux KVM Hv" as type "kvm" and results in + systemd-detect-virt returning "qemu" in this case. + + Assert ds-identify can match systemd-detect-virt="qemu" and + /sys/class/dmi/id/board_name = LXD. + Once systemd 251 is available on a target distro, the virtualized + CPUID will be represented properly as "kvm" + """ + self._test_ds_found("LXD-kvm-qemu-kernel-gt-5.10-env") + def test_lxd_containers(self): """LXD containers will have /dev/lxd/socket at generator time.""" self._test_ds_found("LXD") @@ -509,6 +735,13 @@ def test_config_drive_interacts_with_ibmcloud_config_disk(self): ret.cfg.get("datasource_list"), ["ConfigDrive", "None"] ) + @pytest.mark.xfail( + reason=("not supported: yaml parser implemented in POSIX shell") + ) + def test_multiline_yaml(self): + """Multi-line yaml is unsupported""" + self._test_ds_found("LXD-kvm-not-azure") + def test_ibmcloud_template_userdata_in_provisioning(self): """Template provisioned with user-data during provisioning stage. @@ -661,6 +894,10 @@ def test_openstack_sap_ccloud(self): """SAP Converged Cloud identification""" self._test_ds_found("OpenStack-SAPCCloud") + def test_openstack_sap_ccloud_env(self): + """SAP Converged Cloud identification""" + self._test_ds_found("OpenStack-SAPCCloud-env") + def test_openstack_huawei_cloud(self): """Open Huawei Cloud identification.""" self._test_ds_found("OpenStack-HuaweiCloud") @@ -677,7 +914,7 @@ def test_openstack_on_non_intel_is_maybe(self): """On non-Intel, openstack without dmi info is maybe. nova does not identify itself on platforms other than intel. - https://bugs.launchpad.net/cloud-init/+bugs?field.tag=dsid-nova""" + https://bugs.launchpad.net/cloud-init/+bugs?field.tag=dsid-nova""" data = copy.deepcopy(VALID_CFG["OpenStack"]) del data["files"][P_PRODUCT_NAME] @@ -828,12 +1065,22 @@ def test_smartos_lxbrand(self): """SmartOS cloud identified on lxbrand container.""" self._test_ds_found("SmartOS-lxbrand") + def test_smartos_lxbrand_env(self): + """SmartOS cloud identified on lxbrand container.""" + self._test_ds_found("SmartOS-lxbrand-env") + def test_smartos_lxbrand_requires_socket(self): """SmartOS cloud should not be identified if no socket file.""" mycfg = copy.deepcopy(VALID_CFG["SmartOS-lxbrand"]) del mycfg["files"][ds_smartos.METADATA_SOCKFILE] self._check_via_dict(mycfg, rc=RC_NOT_FOUND, policy_dmi="disabled") + def test_smartos_lxbrand_requires_socket_env(self): + """SmartOS cloud should not be identified if no socket file.""" + mycfg = copy.deepcopy(VALID_CFG["SmartOS-lxbrand-env"]) + del mycfg["files"][ds_smartos.METADATA_SOCKFILE] + self._check_via_dict(mycfg, rc=RC_NOT_FOUND, policy_dmi="disabled") + def test_path_env_gets_set_from_main(self): """PATH environment should always have some tokens when main is run. @@ -1111,14 +1358,29 @@ def test_no_fs_mounts(self): """Negative test by lack of host filesystem mount points.""" self._test_ds_not_found("WSL-no-host-mounts") - def test_no_cloudinitdir(self): + def test_no_userprofile(self): + """Negative test by failing to read the %USERPROFILE% environment + variable. 
+ """ + data = copy.deepcopy(VALID_CFG["WSL-supported"]) + data["mocks"].append( + { + "name": "WSL_run_cmd", + "ret": 0, + "RET": "\r\n", + }, + ) + return self._check_via_dict(data, RC_NOT_FOUND) + + def test_no_cloudinitdir_in_userprofile(self): """Negative test by not finding %USERPROFILE%/.cloud-init.""" data = copy.deepcopy(VALID_CFG["WSL-supported"]) + userprofile = self.tmp_dir() data["mocks"].append( { - "name": "WSL_cloudinit_dir_in", - "ret": 1, - "RET": "", + "name": "WSL_profile_dir", + "ret": 0, + "RET": userprofile, }, ) return self._check_via_dict(data, RC_NOT_FOUND) @@ -1126,27 +1388,58 @@ def test_no_cloudinitdir(self): def test_empty_cloudinitdir(self): """Negative test by lack of host filesystem mount points.""" data = copy.deepcopy(VALID_CFG["WSL-supported"]) - cloudinitdir = self.tmp_dir() + userprofile = self.tmp_dir() data["mocks"].append( { - "name": "WSL_cloudinit_dir_in", + "name": "WSL_profile_dir", "ret": 0, - "RET": cloudinitdir, + "RET": userprofile, }, ) + cloudinitdir = os.path.join(userprofile, ".cloud-init") + os.mkdir(cloudinitdir) return self._check_via_dict(data, RC_NOT_FOUND) + def test_found_fail_due_instance_name_parsing(self): + """WSL datasource detection fail due parsing error even though the file + exists. + """ + data = copy.deepcopy(VALID_CFG["WSL-supported-debian"]) + userprofile = self.tmp_dir() + data["mocks"].append( + { + "name": "WSL_profile_dir", + "ret": 0, + "RET": userprofile, + }, + ) + + # Forcing WSL_linux2win_path to return a path we'll fail to parse + # (missing one / in the begining of the path). + for i, m in enumerate(data["mocks"]): + if m["name"] == "WSL_linux2win_path": + data["mocks"][i]["RET"] = "/wsl.localhost/cant-findme" + + cloudinitdir = os.path.join(userprofile, ".cloud-init") + os.mkdir(cloudinitdir) + filename = os.path.join(cloudinitdir, "cant-findme.user-data") + Path(filename).touch() + self._check_via_dict(data, RC_NOT_FOUND) + Path(filename).unlink() + def test_found_via_userdata_version_codename(self): - """WLS datasource detected by VERSION_CODENAME when no VERSION_ID""" + """WSL datasource detected by VERSION_CODENAME when no VERSION_ID""" data = copy.deepcopy(VALID_CFG["WSL-supported-debian"]) - cloudinitdir = self.tmp_dir() + userprofile = self.tmp_dir() data["mocks"].append( { - "name": "WSL_cloudinit_dir_in", + "name": "WSL_profile_dir", "ret": 0, - "RET": cloudinitdir, + "RET": userprofile, }, ) + cloudinitdir = os.path.join(userprofile, ".cloud-init") + os.mkdir(cloudinitdir) filename = os.path.join(cloudinitdir, "debian-trixie.user-data") Path(filename).touch() self._check_via_dict(data, RC_FOUND, dslist=[data.get("ds"), DS_NONE]) @@ -1157,15 +1450,23 @@ def test_found_via_userdata(self): WSL datasource is found on applicable userdata files in cloudinitdir. 
""" data = copy.deepcopy(VALID_CFG["WSL-supported"]) - cloudinitdir = self.tmp_dir() + userprofile = self.tmp_dir() data["mocks"].append( { - "name": "WSL_cloudinit_dir_in", + "name": "WSL_profile_dir", "ret": 0, - "RET": cloudinitdir, + "RET": userprofile, }, ) + cloudinitdir = os.path.join(userprofile, ".cloud-init") + os.mkdir(cloudinitdir) + up4wcloudinitdir = os.path.join(userprofile, ".ubuntupro/.cloud-init") + os.makedirs(up4wcloudinitdir, exist_ok=True) userdata_files = [ + os.path.join( + up4wcloudinitdir, MOCK_WSL_INSTANCE_DATA["name"] + ".user-data" + ), + os.path.join(up4wcloudinitdir, "agent.yaml"), os.path.join( cloudinitdir, MOCK_WSL_INSTANCE_DATA["name"] + ".user-data" ), @@ -1298,6 +1599,21 @@ def _print_run_output(rc, out, err, cfg, files): P_PRODUCT_UUID: "EC23AEF5-54BE-4843-8D24-8C819F88453E\n", }, }, + "Ec2-hvm-swap-endianness": { + "ds": "Ec2", + "mocks": [{"name": "detect_virt", "RET": "kvm", "ret": 0}], + "files": { + P_PRODUCT_UUID: "AB232AEC-54BE-4843-8D24-8C819F88453E\n", + }, + }, + "Ec2-hvm-env": { + "ds": "Ec2", + "mocks": [{"name": "detect_virt_env", "RET": "vm:kvm", "ret": 0}], + "files": { + P_PRODUCT_SERIAL: "ec23aef5-54be-4843-8d24-8c819f88453e\n", + P_PRODUCT_UUID: "EC23AEF5-54BE-4843-8D24-8C819F88453E\n", + }, + }, "Ec2-xen": { "ds": "Ec2", "mocks": [MOCK_VIRT_IS_XEN], @@ -1318,6 +1634,12 @@ def _print_run_output(rc, out, err, cfg, files): "files": {P_PRODUCT_NAME: "Google Compute Engine\n"}, "mocks": [MOCK_VIRT_IS_KVM], }, + "GCE_ENV": { + "ds": "GCE", + "files": {P_PRODUCT_NAME: "Google Compute Engine\n"}, + "env_vars": KVM_ENV, + "no_mocks": ["detect_virt"], + }, "GCE-serial": { "ds": "GCE", "files": {P_PRODUCT_SERIAL: "GoogleCloud-8f2e88f\n"}, @@ -1356,7 +1678,137 @@ def _print_run_output(rc, out, err, cfg, files): "ds": "LXD", "files": { P_BOARD_NAME: "LXD\n", - "etc/cloud/cloud.cfg.d/92-broken-maas.cfg": ("MAAS: None"), + "etc/cloud/cloud.cfg.d/92-broken-maas.cfg": ("MAAS: None\n"), + }, + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], + "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD + }, + "flow_sequence-control": { + "ds": "None", + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], + "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD + "files": { + "etc/cloud/cloud.cfg": dedent( + """\ + "datasource-list": [ None ] \n + """ + ) + }, + }, + # no quotes, whitespace between all chars and at the end of line + "flow_sequence-1": { + "ds": "None", + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], + "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD + "files": { + "etc/cloud/cloud.cfg": dedent( + """\ + datasource_list : [ None ] \n + """ + ) + }, + }, + # double quotes + "flow_sequence-2": { + "ds": "None", + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], + "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD + "files": { + "etc/cloud/cloud.cfg": dedent( + """\ + "datasource_list": [None] + """ + ) + }, + }, + # single quotes + "flow_sequence-3": { + "ds": "None", + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], + "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD + "files": { + "etc/cloud/cloud.cfg": dedent( + """\ + 'datasource_list': [None] + """ + 
) + }, + }, + # no newlines + "flow_sequence-4": { + "ds": "None", + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], + "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD + "files": { + "etc/cloud/cloud.cfg": dedent("datasource_list: [ None ]") + }, + }, + # double quoted key, single quoted list member + "flow_sequence-5": { + "ds": "None", + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], + "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD + "files": { + "etc/cloud/cloud.cfg": dedent( + "\"datasource_list\": [ 'None' ] " + ) + }, + }, + # single quotes, whitespace before colon + "flow_sequence-6": { + "ds": "None", + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], + "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD + "files": { + "etc/cloud/cloud.cfg": dedent("'datasource_list' : [ None ] ") + }, + }, + "flow_sequence-7": { + "ds": "None", + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], + "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD + "files": { + "etc/cloud/cloud.cfg": dedent( + '"datasource_list" : [ None ] ' + ) + }, + }, + # tabs as part of whitespace between all chars + "flow_sequence-8": { + "ds": "None", + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], + "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD + "files": { + "etc/cloud/cloud.cfg": dedent( + '"datasource_list" \t\t : \t\t[\t \tNone \t \t ] \t\t ' + ) + }, + }, + # no quotes, no whitespace + "flow_sequence-9": { + "ds": "None", + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], + "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD + "files": {"etc/cloud/cloud.cfg": dedent("datasource_list: [None]")}, + }, + "LXD-kvm-not-azure": { + "ds": "Azure", + "files": { + P_BOARD_NAME: "LXD\n", + "etc/cloud/cloud.cfg.d/92-broken-azure.cfg": ( + "datasource_list:\n - Azure" + ), }, # /dev/lxd/sock does not exist and KVM virt-type "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM], @@ -1369,6 +1821,28 @@ def _print_run_output(rc, out, err, cfg, files): "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM_QEMU], "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD }, + # LXD host > 5.10 kvm launch virt==qemu + "LXD-kvm-qemu-kernel-gt-5.10-env": { + "ds": "LXD", + "files": { + P_BOARD_NAME: "LXD\n", + # this test is systemd-specific, but may run on non-systemd systems + # ensure that /run/systemd/ exists, such that this test will take + # the systemd branch on those systems as well + # + # https://github.com/canonical/cloud-init/issues/5095 + "/run/systemd/somefile": "", + }, + # /dev/lxd/sock does not exist and KVM virt-type + "mocks": [ + {"name": "is_socket_file", "ret": 1}, + ], + "env_vars": IS_KVM_QEMU_ENV, + "no_mocks": [ + "dscheck_LXD", + "detect_virt", + ], # Don't default mock dscheck_LXD + }, "LXD": { "ds": "LXD", # /dev/lxd/sock exists @@ -1404,9 +1878,9 @@ def _print_run_output(rc, out, err, cfg, files): # Also include a datasource list of more than just # [NoCloud, None], because that would automatically select # NoCloud without checking - "/etc/cloud/cloud.cfg": dedent( + "etc/cloud/cloud.cfg": dedent( 
"""\ - datasource_list: [ Azure, Openstack, NoCloud, None ] + datasource_list: [ Azure, OpenStack, NoCloud, None ] datasource: NoCloud: user-data: | @@ -1520,6 +1994,13 @@ def _print_run_output(rc, out, err, cfg, files): "files": {P_CHASSIS_ASSET_TAG: "SAP CCloud VM\n"}, "mocks": [MOCK_VIRT_IS_VMWARE], }, + "OpenStack-SAPCCloud-env": { + # SAP CCloud hosts use OpenStack on VMware + "ds": "OpenStack", + "files": {P_CHASSIS_ASSET_TAG: "SAP CCloud VM\n"}, + "env_vars": IS_VMWARE_ENV, + "no_mocks": ["detect_virt"], + }, "OpenStack-HuaweiCloud": { # Huawei Cloud hosts use OpenStack "ds": "OpenStack", @@ -1878,6 +2359,20 @@ def _print_run_output(rc, out, err, cfg, files): ], "files": {ds_smartos.METADATA_SOCKFILE: "would be a socket\n"}, }, + "SmartOS-lxbrand-env": { + "ds": "SmartOS", + "mocks": [ + { + "name": "uname", + "ret": 0, + "out": ("Linux BrandZ virtual linux x86_64"), + }, + {"name": "blkid", "ret": 2, "out": ""}, + ], + "no_mocks": ["detect_virt"], + "env_vars": IS_CONTAINER_OTHER_ENV, + "files": {ds_smartos.METADATA_SOCKFILE: "would be a socket\n"}, + }, "Ec2-ZStack": { "ds": "Ec2", "files": {P_CHASSIS_ASSET_TAG: "123456.zstack.io\n"}, @@ -2241,9 +2736,9 @@ def _print_run_output(rc, out, err, cfg, files): MOCK_VIRT_IS_WSL, MOCK_UNAME_IS_WSL, { - "name": "WSL_instance_name", + "name": "WSL_path", "ret": 0, - "RET": MOCK_WSL_INSTANCE_DATA["name"], + "RET": "//wsl.localhost/%s/" % MOCK_WSL_INSTANCE_DATA["name"], }, ], "files": { @@ -2264,9 +2759,9 @@ def _print_run_output(rc, out, err, cfg, files): MOCK_VIRT_IS_WSL, MOCK_UNAME_IS_WSL, { - "name": "WSL_instance_name", + "name": "WSL_path", "ret": 0, - "RET": MOCK_WSL_INSTANCE_DATA["name"], + "RET": "//wsl.localhost/%s/" % MOCK_WSL_INSTANCE_DATA["name"], }, ], "files": { diff --git a/tests/unittests/test_features.py b/tests/unittests/test_features.py index c9eff4070..e5e81fbff 100644 --- a/tests/unittests/test_features.py +++ b/tests/unittests/test_features.py @@ -19,6 +19,7 @@ def test_feature_without_override(self): ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES=True, EXPIRE_APPLIES_TO_HASHED_USERS=False, NETPLAN_CONFIG_ROOT_READ_ONLY=True, + DEPRECATION_INFO_BOUNDARY="devel", NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH=False, APT_DEB822_SOURCE_LIST_FILE=True, ): @@ -29,4 +30,5 @@ def test_feature_without_override(self): "NETPLAN_CONFIG_ROOT_READ_ONLY": True, "NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH": False, "APT_DEB822_SOURCE_LIST_FILE": True, + "DEPRECATION_INFO_BOUNDARY": "devel", } == features.get_features() diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py index caa2aeb4d..8110bbd45 100644 --- a/tests/unittests/test_gpg.py +++ b/tests/unittests/test_gpg.py @@ -1,10 +1,10 @@ +import os from unittest import mock import pytest from cloudinit import gpg, subp from cloudinit.subp import SubpResult -from tests.unittests.helpers import CiTestCase TEST_KEY_HUMAN = """ /etc/apt/cloud-init.gpg.d/my_key.gpg @@ -35,21 +35,44 @@ TEST_KEY_FINGERPRINT_MACHINE = "3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85" +@pytest.fixture() +def m_subp(): + with mock.patch.object( + gpg.subp, "subp", return_value=SubpResult("", "") + ) as m_subp, mock.patch.object(gpg.time, "sleep"): + yield m_subp + + +@pytest.fixture() +def m_which(): + with mock.patch.object(gpg.subp, "which") as m_which: + yield m_which + + +@pytest.fixture() +def m_sleep(): + with mock.patch("cloudinit.gpg.time.sleep") as sleep: + yield sleep + + class TestGPGCommands: def test_dearmor_bad_value(self): """This exception is handled by the callee. Ensure it is not caught internally. 
""" + gpg_instance = gpg.GPG() with mock.patch.object( subp, "subp", side_effect=subp.ProcessExecutionError ): with pytest.raises(subp.ProcessExecutionError): - gpg.dearmor("garbage key value") + gpg_instance.dearmor("garbage key value") - def test_gpg_list_args(self): + def test_gpg_list_args(self, m_subp): """Verify correct command gets called to list keys""" + gpg_instance = gpg.GPG() no_colons = [ "gpg", + "--no-options", "--with-fingerprint", "--no-default-keyring", "--list-keys", @@ -58,6 +81,7 @@ def test_gpg_list_args(self): ] colons = [ "gpg", + "--no-options", "--with-fingerprint", "--no-default-keyring", "--list-keys", @@ -65,80 +89,150 @@ def test_gpg_list_args(self): "--with-colons", "key", ] - with mock.patch.object( - subp, "subp", return_value=SubpResult("", "") - ) as m_subp: - gpg.list("key") - assert mock.call(colons, capture=True) == m_subp.call_args - - gpg.list("key", human_output=True) - test_calls = mock.call((no_colons), capture=True) - assert test_calls == m_subp.call_args - - def test_gpg_dearmor_args(self): + gpg_instance.list_keys("key") + assert ( + mock.call(colons, capture=True, update_env=gpg_instance.env) + == m_subp.call_args + ) + + gpg_instance = gpg.GPG() + gpg_instance.list_keys("key", human_output=True) + assert m_subp.call_args == mock.call( + no_colons, capture=True, update_env=gpg_instance.env + ) + + def test_gpg_dearmor_args(self, m_subp): """Verify correct command gets called to dearmor keys""" - with mock.patch.object( - subp, "subp", return_value=SubpResult("", "") - ) as m_subp: - gpg.dearmor("key") - test_call = mock.call( - ["gpg", "--dearmor"], data="key", decode=False - ) - assert test_call == m_subp.call_args - - @mock.patch("cloudinit.gpg.time.sleep") - @mock.patch("cloudinit.gpg.subp.subp") - class TestReceiveKeys(CiTestCase): - """Test the recv_key method.""" - - def test_retries_on_subp_exc(self, m_subp, m_sleep): - """retry should be done on gpg receive keys failure.""" - retries = (1, 2, 4) - my_exc = subp.ProcessExecutionError( - stdout="", stderr="", exit_code=2, cmd=["mycmd"] - ) - m_subp.side_effect = (my_exc, my_exc, ("", "")) - gpg.recv_key("ABCD", "keyserver.example.com", retries=retries) - self.assertEqual( - [mock.call(1), mock.call(2)], m_sleep.call_args_list + gpg_instance = gpg.GPG() + gpg_instance.dearmor("key") + test_call = mock.call( + ["gpg", "--dearmor"], + data="key", + decode=False, + update_env=gpg_instance.env, + ) + assert test_call == m_subp.call_args + + +class TestReceiveKeys: + """Test the recv_key method.""" + + def test_retries_on_subp_exc(self, m_subp, m_sleep): + """retry should be done on gpg receive keys failure.""" + gpg_instance = gpg.GPG() + retries = (1, 2, 4) + my_exc = subp.ProcessExecutionError( + stdout="", stderr="", exit_code=2, cmd=["mycmd"] + ) + m_subp.side_effect = (my_exc, my_exc, ("", "")) + gpg_instance.recv_key("ABCD", "keyserver.example.com", retries=retries) + assert [mock.call(1), mock.call(2)], m_sleep.call_args_list + + def test_raises_error_after_retries(self, m_subp, m_sleep): + """If the final run fails, error should be raised.""" + gpg_instance = gpg.GPG() + naplen = 1 + keyid, keyserver = ("ABCD", "keyserver.example.com") + m_subp.side_effect = subp.ProcessExecutionError( + stdout="", stderr="", exit_code=2, cmd=["mycmd"] + ) + with pytest.raises( + ValueError, match=f"{keyid}.*{keyserver}|{keyserver}.*{keyid}" + ): + gpg_instance.recv_key(keyid, keyserver, retries=(naplen,)) + m_sleep.assert_called_once() + + def test_no_retries_on_none(self, m_subp, m_sleep): + 
"""retry should not be done if retries is None.""" + gpg_instance = gpg.GPG() + m_subp.side_effect = subp.ProcessExecutionError( + stdout="", stderr="", exit_code=2, cmd=["mycmd"] + ) + with pytest.raises(ValueError): + gpg_instance.recv_key( + "ABCD", "keyserver.example.com", retries=None ) + m_sleep.assert_not_called() + + def test_expected_gpg_command(self, m_subp, m_sleep): + """Verify gpg is called with expected args.""" + gpg_instance = gpg.GPG() + key, keyserver = ("DEADBEEF", "keyserver.example.com") + retries = (1, 2, 4) + m_subp.return_value = ("", "") + gpg_instance.recv_key(key, keyserver, retries=retries) + m_subp.assert_called_once_with( + [ + "gpg", + "--no-tty", + "--keyserver=%s" % keyserver, + "--recv-keys", + key, + ], + capture=True, + update_env=gpg_instance.env, + ) + m_sleep.assert_not_called() + + def test_kill_gpg_succeeds(self, m_subp, m_which): + """ensure that when gpgconf isn't found, processes are manually + cleaned up. Also test that the context manager does cleanup - def test_raises_error_after_retries(self, m_subp, m_sleep): - """If the final run fails, error should be raised.""" - naplen = 1 - keyid, keyserver = ("ABCD", "keyserver.example.com") - m_subp.side_effect = subp.ProcessExecutionError( - stdout="", stderr="", exit_code=2, cmd=["mycmd"] - ) - with self.assertRaises(ValueError) as rcm: - gpg.recv_key(keyid, keyserver, retries=(naplen,)) - self.assertIn(keyid, str(rcm.exception)) - self.assertIn(keyserver, str(rcm.exception)) - m_sleep.assert_called_with(naplen) - - def test_no_retries_on_none(self, m_subp, m_sleep): - """retry should not be done if retries is None.""" - m_subp.side_effect = subp.ProcessExecutionError( - stdout="", stderr="", exit_code=2, cmd=["mycmd"] - ) - with self.assertRaises(ValueError): - gpg.recv_key("ABCD", "keyserver.example.com", retries=None) - m_sleep.assert_not_called() - - def test_expected_gpg_command(self, m_subp, m_sleep): - """Verify gpg is called with expected args.""" - key, keyserver = ("DEADBEEF", "keyserver.example.com") - retries = (1, 2, 4) - m_subp.return_value = ("", "") - gpg.recv_key(key, keyserver, retries=retries) - m_subp.assert_called_once_with( - [ - "gpg", - "--no-tty", - "--keyserver=%s" % keyserver, - "--recv-keys", - key, - ], - capture=True, - ) - m_sleep.assert_not_called() + """ + m_which.return_value = True + with pytest.raises(ZeroDivisionError): + with gpg.GPG() as gpg_context: + + # run a gpg command so that we have "started" gpg + gpg_context.list_keys("") + 1 / 0 # pylint: disable=pointless-statement + m_subp.assert_has_calls( + [ + mock.call( + ["gpgconf", "--kill", "all"], + capture=True, + update_env=gpg_context.env, + ) + ] + ) + assert not os.path.isdir(str(gpg_context.temp_dir)) + + def test_do_not_kill_unstarted_gpg(self, m_subp): + """ensure that when gpg isn't started, gpg isn't killed, but the + directory is cleaned up. 
+ """ + with pytest.raises(ZeroDivisionError): + with gpg.GPG() as gpg_context: + 1 / 0 # pylint: disable=pointless-statement + m_subp.assert_not_called() + assert not os.path.isdir(str(gpg_context.temp_dir)) + + def test_kill_gpg_failover_succeeds(self, m_subp, m_which): + """ensure that when gpgconf isn't found, processes are manually + cleaned up + """ + m_which.return_value = None + gpg_instance = gpg.GPG() + + # "start" gpg (if we don't, we won't kill gpg) + gpg_instance.recv_key("", "") + gpg_instance.kill_gpg() + m_subp.assert_has_calls( + [ + mock.call( + [ + "ps", + "-o", + "ppid,pid", + "-C", + "keyboxd", + "-C", + "dirmngr", + "-C", + "gpg-agent", + ], + capture=True, + rcs=[0, 1], + ) + ] + ) diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py index 1df4a5ace..879963103 100644 --- a/tests/unittests/test_log.py +++ b/tests/unittests/test_log.py @@ -1,12 +1,14 @@ # This file is part of cloud-init. See LICENSE file for license information. -"""Tests for cloudinit.log """ +"""Tests for cloudinit.log""" import datetime import io import logging import time +import pytest + from cloudinit import log, util from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT from tests.unittests.helpers import CiTestCase @@ -61,13 +63,58 @@ def test_logger_uses_gmtime(self): class TestDeprecatedLogs: def test_deprecated_log_level(self, caplog): - logger = logging.getLogger() - logger.deprecated("deprecated message") + logging.getLogger().deprecated("deprecated message") assert "DEPRECATED" == caplog.records[0].levelname assert "deprecated message" in caplog.text + @pytest.mark.parametrize( + "expected_log_level, deprecation_info_boundary", + ( + pytest.param( + "DEPRECATED", + "19.2", + id="test_same_deprecation_info_boundary_is_deprecated_level", + ), + pytest.param( + "INFO", + "19.1", + id="test_lower_deprecation_info_boundary_is_info_level", + ), + ), + ) + def test_deprecate_log_level_based_on_features( + self, + expected_log_level, + deprecation_info_boundary, + caplog, + mocker, + clear_deprecation_log, + ): + """Deprecation log level depends on key deprecation_version + + When DEPRECATION_INFO_BOUNDARY is set to a version number, and a key + has a deprecated_version with a version greater than the boundary + the log level is INFO instead of DEPRECATED. If + DEPRECATION_INFO_BOUNDARY is set to the default, "devel", all + deprecated keys are logged at level DEPRECATED. + """ + mocker.patch.object( + util.features, + "DEPRECATION_INFO_BOUNDARY", + deprecation_info_boundary, + ) + util.deprecate( + deprecated="some key", + deprecated_version="19.2", + extra_message="dont use it", + ) + assert expected_log_level == caplog.records[0].levelname + assert ( + "some key is deprecated in 19.2 and scheduled to be removed in" + " 24.2" in caplog.text + ) + def test_log_deduplication(self, caplog): - log.define_deprecation_logger() util.deprecate( deprecated="stuff", deprecated_version="19.1", @@ -90,6 +137,5 @@ def test_log_deduplication(self, caplog): def test_logger_prints_to_stderr(capsys): message = "to stdout" log.setup_basic_logging() - LOG = logging.getLogger() - LOG.warning(message) + logging.getLogger().warning(message) assert message in capsys.readouterr().err diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 3812bc4b0..dc53de597 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import base64 import copy @@ -11,11 +12,10 @@ from typing import Optional import pytest +import yaml from yaml.serializer import Serializer -from cloudinit import distros, net -from cloudinit import safeyaml as yaml -from cloudinit import subp, temp_utils, util +from cloudinit import distros, net, subp, temp_utils, util from cloudinit.net import ( cmdline, eni, @@ -31,13 +31,12 @@ ) from cloudinit.sources.helpers import openstack from tests.unittests.helpers import ( - CiTestCase, - FilesystemMockingTestCase, dir2dict, does_not_raise, mock, populate_dir, ) +from tests.unittests.net.network_configs import NETWORK_CONFIGS DHCP_CONTENT_1 = """ DEVICE='eth0' @@ -134,6 +133,37 @@ ], } +STATIC_CONTENT_2 = """ +DEVICE='eth1' +PROTO='static' +IPV4ADDR='10.0.0.2' +IPV4BROADCAST='10.0.0.255' +IPV4NETMASK='255.255.255.0' +IPV4GATEWAY='10.0.0.1' +IPV4DNS0='10.0.1.1' +IPV4DNS1='0.0.0.0' +HOSTNAME='foohost' +UPTIME='21' +DHCPLEASETIME='3600' +DOMAINSEARCH='foo.com' +""" + +STATIC_CONTENT_3 = """ +DEVICE='eth1' +PROTO='off' +IPV4ADDR='10.0.0.2' +IPV4BROADCAST='10.0.0.255' +IPV4NETMASK='255.255.255.0' +IPV4GATEWAY='10.0.0.1' +IPV4DNS0='10.0.1.1' +IPV4DNS1='0.0.0.0' +HOSTNAME='foohost' +UPTIME='21' +DHCPLEASETIME='3600' +DOMAINSEARCH='foo.com' +""" + + V1_NAMESERVER_ALIAS = """ config: - id: eno1 @@ -516,6 +546,8 @@ } ], "ip_address": "172.19.1.34", + "dns_search": ["testweb.com"], + "dns_nameservers": ["172.19.0.13"], "id": "network0", } ], @@ -550,7 +582,9 @@ """ ; Created by cloud-init automatically, do not edit. ; +nameserver 172.19.0.13 nameserver 172.19.0.12 +search testweb.com """.lstrip(), ), ( @@ -581,11 +615,12 @@ BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 +DNS1=172.19.0.13 +DOMAIN=testweb.com GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 NETMASK=255.255.252.0 -NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -596,7 +631,9 @@ """ ; Created by cloud-init automatically, do not edit. ; +nameserver 172.19.0.13 nameserver 172.19.0.12 +search testweb.com """.lstrip(), ), ( @@ -646,7 +683,8 @@ may-fail=false address1=172.19.1.34/22 route1=0.0.0.0/0,172.19.3.254 -dns=172.19.0.12; +dns=172.19.0.13; +dns-search=testweb.com; """.lstrip(), ), @@ -654,13 +692,19 @@ }, { "in_data": { - "services": [{"type": "dns", "address": "172.19.0.12"}], + "services": [ + { + "type": "dns", + "address": "172.19.0.12", + "search": ["example1.com", "example2.com"], + } + ], "networks": [ { - "network_id": "public-ipv4", + "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", "type": "ipv4", "netmask": "255.255.252.0", - "link": "tap1a81968a-79", + "link": "eth0", "routes": [ { "netmask": "0.0.0.0", @@ -669,25 +713,17 @@ } ], "ip_address": "172.19.1.34", + "dns_search": ["example3.com"], + "dns_nameservers": ["172.19.0.12"], "id": "network0", - }, - { - "network_id": "private-ipv4", - "type": "ipv4", - "netmask": "255.255.255.0", - "link": "tap1a81968a-79", - "routes": [], - "ip_address": "10.0.0.10", - "id": "network1", - }, + } ], "links": [ { "ethernet_mac_address": "fa:16:3e:ed:9a:59", "mtu": None, - "type": "bridge", - "id": "tap1a81968a-79", - "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", + "type": "physical", + "id": "eth0", }, ], }, @@ -702,10 +738,8 @@ # BOOTPROTO=static IPADDR=172.19.1.34 -IPADDR1=10.0.0.10 LLADDR=fa:16:3e:ed:9a:59 NETMASK=255.255.252.0 -NETMASK1=255.255.255.0 STARTMODE=auto """.lstrip(), ), @@ -715,6 +749,7 @@ ; Created by cloud-init automatically, do not edit. 
; nameserver 172.19.0.12 +search example3.com example1.com example2.com """.lstrip(), ), ( @@ -745,13 +780,12 @@ BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 +DNS1=172.19.0.12 +DOMAIN=example3.com GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 -IPADDR1=10.0.0.10 NETMASK=255.255.252.0 -NETMASK1=255.255.255.0 -NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -763,6 +797,7 @@ ; Created by cloud-init automatically, do not edit. ; nameserver 172.19.0.12 +search example3.com example1.com example2.com """.lstrip(), ), ( @@ -784,10 +819,50 @@ ), ), ], + "expected_network_manager": [ + ( + "".join( + [ + "etc/NetworkManager/system-connections", + "/cloud-init-eth0.nmconnection", + ] + ), + """ +# Generated by cloud-init. Changes will be lost. + +[connection] +id=cloud-init eth0 +uuid=1dd9a779-d327-56e1-8454-c65e2556c12c +autoconnect-priority=120 +type=ethernet + +[user] +org.freedesktop.NetworkManager.origin=cloud-init + +[ethernet] +mac-address=FA:16:3E:ED:9A:59 + +[ipv4] +method=manual +may-fail=false +address1=172.19.1.34/22 +route1=0.0.0.0/0,172.19.3.254 +dns=172.19.0.12; +dns-search=example3.com; + +""".lstrip(), + ), + ], }, { "in_data": { - "services": [{"type": "dns", "address": "172.19.0.12"}], + "services": [ + { + "type": "dns", + "address": "172.19.0.12", + "search": "example.com", + } + ], "networks": [ { "network_id": "public-ipv4", @@ -805,37 +880,13 @@ "id": "network0", }, { - "network_id": "public-ipv6-a", - "type": "ipv6", - "netmask": "", - "link": "tap1a81968a-79", - "routes": [ - { - "gateway": "2001:DB8::1", - "netmask": "::", - "network": "::", - } - ], - "ip_address": "2001:DB8::10", - "id": "network1", - }, - { - "network_id": "public-ipv6-b", - "type": "ipv6", - "netmask": "64", - "link": "tap1a81968a-79", - "routes": [], - "ip_address": "2001:DB9::10", - "id": "network2", - }, - { - "network_id": "public-ipv6-c", - "type": "ipv6", - "netmask": "64", + "network_id": "private-ipv4", + "type": "ipv4", + "netmask": "255.255.255.0", "link": "tap1a81968a-79", "routes": [], - "ip_address": "2001:DB10::10", - "id": "network3", + "ip_address": "10.0.0.10", + "id": "network1", }, ], "links": [ @@ -859,11 +910,10 @@ # BOOTPROTO=static IPADDR=172.19.1.34 -IPADDR6=2001:DB8::10/64 -IPADDR6_1=2001:DB9::10/64 -IPADDR6_2=2001:DB10::10/64 +IPADDR1=10.0.0.10 LLADDR=fa:16:3e:ed:9a:59 NETMASK=255.255.252.0 +NETMASK1=255.255.255.0 STARTMODE=auto """.lstrip(), ), @@ -873,6 +923,7 @@ ; Created by cloud-init automatically, do not edit. ; nameserver 172.19.0.12 +search example.com """.lstrip(), ), ( @@ -906,14 +957,9 @@ GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 -IPV6ADDR=2001:DB8::10/64 -IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" -IPV6INIT=yes -IPV6_AUTOCONF=no -IPV6_DEFAULTGW=2001:DB8::1 -IPV6_FORCE_ACCEPT_RA=no +IPADDR1=10.0.0.10 NETMASK=255.255.252.0 -NM_CONTROLLED=no +NETMASK1=255.255.255.0 ONBOOT=yes TYPE=Ethernet USERCTL=no @@ -925,6 +971,7 @@ ; Created by cloud-init automatically, do not edit. 
; nameserver 172.19.0.12 +search example.com """.lstrip(), ), ( @@ -947,3701 +994,207 @@ ), ], }, -] - -EXAMPLE_ENI = """ -auto lo -iface lo inet loopback - dns-nameservers 10.0.0.1 - dns-search foo.com - -auto eth0 -iface eth0 inet static - address 1.2.3.12 - netmask 255.255.255.248 - broadcast 1.2.3.15 - gateway 1.2.3.9 - dns-nameservers 69.9.160.191 69.9.191.4 -auto eth1 -iface eth1 inet static - address 10.248.2.4 - netmask 255.255.255.248 - broadcast 10.248.2.7 -""" - -RENDERED_ENI = """ -auto lo -iface lo inet loopback - dns-nameservers 10.0.0.1 - dns-search foo.com - -auto eth0 -iface eth0 inet static - address 1.2.3.12/29 - broadcast 1.2.3.15 - dns-nameservers 69.9.160.191 69.9.191.4 - gateway 1.2.3.9 - -auto eth1 -iface eth1 inet static - address 10.248.2.4/29 - broadcast 10.248.2.7 -""".lstrip() - -NETWORK_CONFIGS = { - "small_v1_suse_dhcp6": { - "expected_sysconfig_opensuse": { - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=cf:d6:af:48:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth99": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DHCLIENT6_MODE=managed - LLADDR=c0:d6:9f:2c:e8:80 - IPADDR=192.168.21.3 - NETMASK=255.255.255.0 - STARTMODE=auto""" - ), + { + "in_data": { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [ + { + "network_id": "public-ipv4", + "type": "ipv4", + "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + } + ], + "ip_address": "172.19.1.34", + "id": "network0", + }, + { + "network_id": "public-ipv6-a", + "type": "ipv6", + "netmask": "", + "link": "tap1a81968a-79", + "routes": [ + { + "gateway": "2001:DB8::1", + "netmask": "::", + "network": "::", + } + ], + "ip_address": "2001:DB8::10", + "id": "network1", + }, + { + "network_id": "public-ipv6-b", + "type": "ipv6", + "netmask": "64", + "link": "tap1a81968a-79", + "routes": [], + "ip_address": "2001:DB9::10", + "id": "network2", + }, + { + "network_id": "public-ipv6-c", + "type": "ipv6", + "netmask": "64", + "link": "tap1a81968a-79", + "routes": [], + "ip_address": "2001:DB10::10", + "id": "network3", + }, + ], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, + "type": "bridge", + "id": "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", + }, + ], }, - "yaml": textwrap.dedent( - """ - version: 1 - config: - # Physical interfaces. 
- - type: physical - name: eth99 - mac_address: c0:d6:9f:2c:e8:80 - subnets: - - type: dhcp4 - - type: dhcp6 - - type: static - address: 192.168.21.3/24 - dns_nameservers: - - 8.8.8.8 - - 8.8.4.4 - dns_search: barley.maas sach.maas - routes: - - gateway: 65.61.151.37 - netmask: 0.0.0.0 - network: 0.0.0.0 - metric: 10000 - - type: physical - name: eth1 - mac_address: cf:d6:af:48:e8:80 - - type: nameserver - address: - - 1.2.3.4 - - 5.6.7.8 - search: - - wark.maas - """ - ), - }, - "small_v1": { - "expected_networkd_eth99": textwrap.dedent( - """\ - [Match] - Name=eth99 - MACAddress=c0:d6:9f:2c:e8:80 - [Address] - Address=192.168.21.3/24 - [Network] - DHCP=ipv4 - Domains=barley.maas sach.maas - Domains=wark.maas - DNS=1.2.3.4 5.6.7.8 - DNS=8.8.8.8 8.8.4.4 - [Route] - Gateway=65.61.151.37 - Destination=0.0.0.0/0 - Metric=10000 - """ - ).rstrip(" "), - "expected_networkd_eth1": textwrap.dedent( - """\ - [Match] - Name=eth1 - MACAddress=cf:d6:af:48:e8:80 - [Network] - DHCP=no - Domains=wark.maas - DNS=1.2.3.4 5.6.7.8 - """ - ).rstrip(" "), - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - dns-nameservers 1.2.3.4 5.6.7.8 - dns-search wark.maas - - iface eth1 inet manual - - auto eth99 - iface eth99 inet dhcp - - # control-alias eth99 - iface eth99 inet static - address 192.168.21.3/24 - dns-nameservers 8.8.8.8 8.8.4.4 - dns-search barley.maas sach.maas - post-up route add default gw 65.61.151.37 metric 10000 || true - pre-down route del default gw 65.61.151.37 metric 10000 || true - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - eth1: - match: - macaddress: cf:d6:af:48:e8:80 - set-name: eth1 - eth99: - addresses: - - 192.168.21.3/24 - dhcp4: true - match: - macaddress: c0:d6:9f:2c:e8:80 - nameservers: - addresses: - - 8.8.8.8 - - 8.8.4.4 - search: - - barley.maas - - sach.maas - routes: - - metric: 10000 - to: 0.0.0.0/0 - via: 65.61.151.37 - set-name: eth99 - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=cf:d6:af:48:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth99": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - LLADDR=c0:d6:9f:2c:e8:80 - IPADDR=192.168.21.3 - NETMASK=255.255.255.0 - STARTMODE=auto""" - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth1 - HWADDR=cf:d6:af:48:e8:80 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth99": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEFROUTE=yes - DEVICE=eth99 - DHCLIENT_SET_DEFAULT_ROUTE=yes - DNS1=8.8.8.8 - DNS2=8.8.4.4 - DOMAIN="barley.maas sach.maas" - GATEWAY=65.61.151.37 - HWADDR=c0:d6:9f:2c:e8:80 - IPADDR=192.168.21.3 - NETMASK=255.255.255.0 - METRIC=10000 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - }, - "expected_network_manager": { - "cloud-init-eth1.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth1 - uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=CF:D6:AF:48:E8:80 - - """ - ), - "cloud-init-eth99.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init eth99 - uuid=b1b88000-1f03-5360-8377-1a2205efffb4 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=C0:D6:9F:2C:E8:80 - - [ipv4] - method=auto - may-fail=false - address1=192.168.21.3/24 - route1=0.0.0.0/0,65.61.151.37 - dns=8.8.8.8;8.8.4.4; - dns-search=barley.maas;sach.maas; - - """ - ), - }, - "yaml": textwrap.dedent( - """ - version: 1 - config: - # Physical interfaces. - - type: physical - name: eth99 - mac_address: c0:d6:9f:2c:e8:80 - subnets: - - type: dhcp4 - - type: static - address: 192.168.21.3/24 - dns_nameservers: - - 8.8.8.8 - - 8.8.4.4 - dns_search: barley.maas sach.maas - routes: - - gateway: 65.61.151.37 - netmask: 0.0.0.0 - network: 0.0.0.0 - metric: 10000 - - type: physical - name: eth1 - mac_address: cf:d6:af:48:e8:80 - - type: nameserver - address: - - 1.2.3.4 - - 5.6.7.8 - search: - - wark.maas - """ - ), - }, - # We test a separate set of configs here because v2 doesn't support - # generic nameservers, so that aspect needs to be modified - "small_v2": { - "expected_networkd_eth99": textwrap.dedent( - """\ - [Match] - Name=eth99 - MACAddress=c0:d6:9f:2c:e8:80 - [Address] - Address=192.168.21.3/24 - [Network] - DHCP=ipv4 - Domains=barley.maas sach.maas - DNS=8.8.8.8 8.8.4.4 - [Route] - Gateway=65.61.151.37 - Destination=0.0.0.0/0 - Metric=10000 - """ - ).rstrip(" "), - "expected_networkd_eth1": textwrap.dedent( - """\ - [Match] - Name=eth1 - MACAddress=cf:d6:af:48:e8:80 - [Network] - DHCP=no - """ - ).rstrip(" "), - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - iface eth1 inet manual - - auto eth99 - iface eth99 inet dhcp - - # control-alias eth99 - iface eth99 inet static - address 192.168.21.3/24 - dns-nameservers 8.8.8.8 8.8.4.4 - dns-search barley.maas sach.maas - post-up route add default gw 65.61.151.37 metric 10000 || true - pre-down route del default gw 65.61.151.37 metric 10000 || true - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=cf:d6:af:48:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth99": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - LLADDR=c0:d6:9f:2c:e8:80 - IPADDR=192.168.21.3 - NETMASK=255.255.255.0 - STARTMODE=auto""" - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth1 - HWADDR=cf:d6:af:48:e8:80 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth99": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEFROUTE=yes - DEVICE=eth99 - DHCLIENT_SET_DEFAULT_ROUTE=yes - DNS1=8.8.8.8 - DNS2=8.8.4.4 - DOMAIN="barley.maas sach.maas" - GATEWAY=65.61.151.37 - HWADDR=c0:d6:9f:2c:e8:80 - IPADDR=192.168.21.3 - NETMASK=255.255.255.0 - METRIC=10000 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - }, - "expected_network_manager": { - "cloud-init-eth1.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth1 - uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=CF:D6:AF:48:E8:80 - - """ - ), - "cloud-init-eth99.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init eth99 - uuid=b1b88000-1f03-5360-8377-1a2205efffb4 - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=C0:D6:9F:2C:E8:80 - - [ipv4] - method=auto - may-fail=false - route1=0.0.0.0/0,65.61.151.37 - address1=192.168.21.3/24 - dns=8.8.8.8;8.8.4.4; - dns-search=barley.maas;sach.maas; - - """ - ), - }, - "yaml": textwrap.dedent( - """ - version: 2 - ethernets: - eth1: - match: - macaddress: cf:d6:af:48:e8:80 - set-name: eth1 - eth99: - addresses: - - 192.168.21.3/24 - dhcp4: true - match: - macaddress: c0:d6:9f:2c:e8:80 - nameservers: - addresses: - - 8.8.8.8 - - 8.8.4.4 - search: - - barley.maas - - sach.maas - routes: - - metric: 10000 - to: 0.0.0.0/0 - via: 65.61.151.37 - set-name: eth99 - """ - ), - }, - "v4_and_v6": { - "expected_networkd": textwrap.dedent( - """\ - [Match] - Name=iface0 - [Network] - DHCP=yes - """ - ).rstrip(" "), - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet dhcp - - # control-alias iface0 - iface iface0 inet6 dhcp - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - dhcp4: true - dhcp6: true - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DHCLIENT6_MODE=managed - STARTMODE=auto""" - ) - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv4] - method=auto - may-fail=true - - [ipv6] - method=auto - may-fail=true - - """ - ), - }, - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'dhcp4'} - - {'type': 'dhcp6'} - """ - ).rstrip(" "), - }, - "v4_and_v6_static": { - "expected_networkd": textwrap.dedent( - """\ - [Match] - Name=iface0 - [Link] - MTUBytes=8999 - [Network] - DHCP=no - [Address] - Address=192.168.14.2/24 - [Address] - Address=2001:1::1/64 - """ - ).rstrip(" "), - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet static - address 192.168.14.2/24 - mtu 9000 - - # control-alias iface0 - iface iface0 inet6 static - address 2001:1::1/64 - mtu 1500 - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - addresses: - - 192.168.14.2/24 - - 2001:1::1/64 - ipv6-mtu: 1500 - mtu: 9000 - """ - ).rstrip(" "), - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - mtu: 8999 - subnets: - - type: static - address: 192.168.14.2/24 - mtu: 9000 - - type: static - address: 2001:1::1/64 - mtu: 1500 - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=static - IPADDR=192.168.14.2 - IPADDR6=2001:1::1/64 - NETMASK=255.255.255.0 - STARTMODE=auto - MTU=9000 - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - IPADDR=192.168.14.2 - IPV6ADDR=2001:1::1/64 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - 
MTU=9000 - IPV6_MTU=1500 - """ - ), - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mtu=9000 - - [ipv4] - method=manual - may-fail=false - address1=192.168.14.2/24 - - [ipv6] - method=manual - may-fail=false - address1=2001:1::1/64 - - """ - ), - }, - }, - "v6_and_v4": { - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DHCLIENT6_MODE=managed - STARTMODE=auto""" - ) - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv6] - method=auto - may-fail=true - - [ipv4] - method=auto - may-fail=true - - """ - ), - }, - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - type: dhcp6 - - type: dhcp4 - """ - ).rstrip(" "), - }, - "dhcpv6_only": { - "expected_networkd": textwrap.dedent( - """\ - [Match] - Name=iface0 - [Network] - DHCP=ipv6 - """ - ).rstrip(" "), - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 dhcp - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - dhcp6: true - """ - ).rstrip(" "), - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'dhcp6'} - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=managed - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - DHCPV6C=yes - IPV6INIT=yes - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv6] - method=auto - may-fail=false - - """ - ), - }, - }, - "dhcpv6_accept_ra": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 dhcp - accept_ra 1 - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - accept-ra: true - dhcp6: true - """ - ).rstrip(" "), - "yaml_v1": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'dhcp6'} - accept-ra: true - """ - ).rstrip(" "), - "yaml_v2": textwrap.dedent( - """\ - version: 2 - ethernets: - iface0: - dhcp6: true - accept-ra: true - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=managed - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - DHCPV6C=yes - IPV6INIT=yes - IPV6_FORCE_ACCEPT_RA=yes - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_networkd": textwrap.dedent( - """\ - [Match] - Name=iface0 - [Network] - DHCP=ipv6 - IPv6AcceptRA=True - """ - ).rstrip(" "), - }, - "dhcpv6_reject_ra": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 dhcp - accept_ra 0 - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - accept-ra: false - dhcp6: true - """ - ).rstrip(" "), - "yaml_v1": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'dhcp6'} - accept-ra: false - """ - ).rstrip(" "), - "yaml_v2": textwrap.dedent( - """\ - version: 2 - ethernets: - iface0: - dhcp6: true - accept-ra: false - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=managed - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - DHCPV6C=yes - IPV6INIT=yes - IPV6_FORCE_ACCEPT_RA=no - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_networkd": textwrap.dedent( - """\ - [Match] - Name=iface0 - [Network] - DHCP=ipv6 - IPv6AcceptRA=False - """ - ).rstrip(" "), - }, - "ipv6_slaac": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 auto - dhcp 0 - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - dhcp6: true - """ - ).rstrip(" "), - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'ipv6_slaac'} - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=info - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - IPV6_AUTOCONF=yes - IPV6INIT=yes - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - 
"cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv6] - method=auto - may-fail=false - - [ipv4] - method=disabled - - """ - ), - }, - }, - "static6": { - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - accept-ra: 'no' - subnets: - - type: 'static6' - address: 2001:1::1/64 - """ - ).rstrip(" "), - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - IPV6ADDR=2001:1::1/64 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - }, - "dhcpv6_stateless": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 auto - dhcp 1 - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - dhcp6: true - """ - ).rstrip(" "), - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'ipv6_dhcpv6-stateless'} - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=info - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - DHCPV6C=yes - DHCPV6C_OPTIONS=-S - IPV6_AUTOCONF=yes - IPV6INIT=yes - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv6] - method=auto - may-fail=false - - [ipv4] - method=disabled - - """ - ), - }, - }, - "dhcpv6_stateful": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet6 dhcp - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - iface0: - accept-ra: true - dhcp6: true - """ - ).rstrip(" "), - "yaml": textwrap.dedent( - """\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - subnets: - - {'type': 'ipv6_dhcpv6-stateful'} - accept-ra: true - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=managed - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=iface0 - DHCPV6C=yes - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FAILURE_FATAL=yes - IPV6_FORCE_ACCEPT_RA=yes - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - }, - "wakeonlan_disabled": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet dhcp - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - ethernets: - iface0: - dhcp4: true - wakeonlan: false - version: 2 - """ - ), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - - [ipv4] - method=auto - may-fail=false - - """ - ), - }, - "yaml_v2": textwrap.dedent( - """\ - version: 2 - ethernets: - iface0: - dhcp4: true - wakeonlan: false - """ - ).rstrip(" "), - }, - "wakeonlan_enabled": { - "expected_eni": textwrap.dedent( - """\ - auto lo - iface lo inet loopback - - auto iface0 - iface iface0 inet dhcp - ethernet-wol g - """ - ).rstrip(" "), - "expected_netplan": textwrap.dedent( - """ - network: - ethernets: - iface0: - dhcp4: true - wakeonlan: true - version: 2 - """ - ), - "expected_sysconfig_opensuse": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - ETHTOOL_OPTS="wol g" - STARTMODE=auto - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-iface0": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEVICE=iface0 - ETHTOOL_OPTS="wol g" - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """ - ), - }, - "expected_network_manager": { - "cloud-init-iface0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init iface0 - uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70 - autoconnect-priority=120 - type=ethernet - interface-name=iface0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - wake-on-lan=64 - - [ipv4] - method=auto - may-fail=false - - """ - ), - }, - "yaml_v2": textwrap.dedent( - """\ - version: 2 - ethernets: - iface0: - dhcp4: true - wakeonlan: true - """ - ).rstrip(" "), - }, - "all": { - "expected_eni": """\ -auto lo -iface lo inet loopback - dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4 - dns-search barley.maas wark.maas foobar.maas - -iface eth0 inet manual - -auto eth1 -iface eth1 inet manual - bond-master bond0 - bond-mode active-backup - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - -auto eth2 -iface eth2 inet manual - bond-master bond0 - bond-mode active-backup - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - -iface eth3 inet manual - -iface eth4 inet manual - -# control-manual eth5 -iface eth5 inet dhcp - -auto ib0 -iface ib0 inet static - address 192.168.200.7/24 - mtu 9000 - hwaddress a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 - -auto bond0 -iface bond0 inet6 dhcp - bond-mode active-backup - bond-slaves none - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - hwaddress aa:bb:cc:dd:ee:ff - -auto br0 -iface br0 inet static - address 192.168.14.2/24 - bridge_ageing 250 - bridge_bridgeprio 22 - bridge_fd 1 - bridge_gcint 2 - bridge_hello 1 - bridge_maxage 10 - bridge_pathcost eth3 50 - bridge_pathcost eth4 75 - bridge_portprio eth3 28 - bridge_portprio eth4 14 - bridge_ports eth3 eth4 - bridge_stp off - bridge_waitport 1 eth3 - bridge_waitport 2 eth4 - hwaddress bb:bb:bb:bb:bb:aa - -# control-alias br0 -iface br0 inet6 static - address 2001:1::1/64 - post-up route add -A inet6 default gw 2001:4800:78ff:1b::1 || true - pre-down route del -A inet6 default gw 2001:4800:78ff:1b::1 || true - -auto bond0.200 -iface bond0.200 inet dhcp - vlan-raw-device bond0 - vlan_id 200 - -auto eth0.101 -iface eth0.101 inet static - address 192.168.0.2/24 - dns-nameservers 192.168.0.10 10.23.23.134 - dns-search barley.maas sacchromyces.maas brettanomyces.maas - gateway 192.168.0.1 - mtu 1500 - hwaddress aa:bb:cc:dd:ee:11 - vlan-raw-device eth0 - vlan_id 101 - -# control-alias eth0.101 -iface eth0.101 inet static - address 192.168.2.10/24 - -post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true -pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true -""", - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - eth0: - match: - macaddress: c0:d6:9f:2c:e8:80 - set-name: eth0 - eth1: - match: - macaddress: aa:d6:9f:2c:e8:80 - set-name: eth1 - eth2: - match: - macaddress: c0:bb:9f:2c:e8:80 - set-name: eth2 - eth3: - match: - macaddress: 66:bb:9f:2c:e8:80 - set-name: eth3 - eth4: - match: - macaddress: 98:bb:9f:2c:e8:80 - set-name: eth4 - eth5: - dhcp4: true - match: - macaddress: 98:bb:9f:2c:e8:8a - set-name: eth5 - bonds: - bond0: - dhcp6: true - interfaces: - - eth1 - - eth2 - macaddress: aa:bb:cc:dd:ee:ff - parameters: - mii-monitor-interval: 100 - mode: active-backup - transmit-hash-policy: layer3+4 - bridges: - br0: - addresses: - - 192.168.14.2/24 - - 2001:1::1/64 - interfaces: - - eth3 - - eth4 - macaddress: bb:bb:bb:bb:bb:aa - nameservers: - addresses: - - 8.8.8.8 - - 4.4.4.4 - - 8.8.4.4 - search: - - barley.maas - - wark.maas - - foobar.maas - parameters: - ageing-time: 250 - forward-delay: 1 - hello-time: 1 - max-age: 10 - path-cost: - eth3: 50 - eth4: 75 - port-priority: - 
eth3: 28 - eth4: 14 - priority: 22 - stp: false - routes: - - to: ::/0 - via: 2001:4800:78ff:1b::1 - vlans: - bond0.200: - dhcp4: true - id: 200 - link: bond0 - eth0.101: - addresses: - - 192.168.0.2/24 - - 192.168.2.10/24 - id: 101 - link: eth0 - macaddress: aa:bb:cc:dd:ee:11 - mtu: 1500 - nameservers: - addresses: - - 192.168.0.10 - - 10.23.23.134 - search: - - barley.maas - - sacchromyces.maas - - brettanomyces.maas - routes: - - to: 0.0.0.0/0 - via: 192.168.0.1 - """ - ).rstrip(" "), - "expected_sysconfig_opensuse": { - "ifcfg-bond0": textwrap.dedent( - """\ - BONDING_MASTER=yes - BONDING_MODULE_OPTS="mode=active-backup """ - """xmit_hash_policy=layer3+4 """ - """miimon=100" - BONDING_SLAVE_0=eth1 - BONDING_SLAVE_1=eth2 - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=managed - LLADDR=aa:bb:cc:dd:ee:ff - STARTMODE=auto""" - ), - "ifcfg-bond0.200": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - ETHERDEVICE=bond0 - STARTMODE=auto - VLAN_ID=200""" - ), - "ifcfg-br0": textwrap.dedent( - """\ - BRIDGE_AGEINGTIME=250 - BOOTPROTO=static - IPADDR=192.168.14.2 - IPADDR6=2001:1::1/64 - LLADDRESS=bb:bb:bb:bb:bb:aa - NETMASK=255.255.255.0 - BRIDGE_PRIORITY=22 - BRIDGE_PORTS='eth3 eth4' - STARTMODE=auto - BRIDGE_STP=off""" - ), - "ifcfg-eth0": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=c0:d6:9f:2c:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth0.101": textwrap.dedent( - """\ - BOOTPROTO=static - IPADDR=192.168.0.2 - IPADDR1=192.168.2.10 - MTU=1500 - NETMASK=255.255.255.0 - NETMASK1=255.255.255.0 - ETHERDEVICE=eth0 - STARTMODE=auto - VLAN_ID=101""" - ), - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=none - LLADDR=aa:d6:9f:2c:e8:80 - STARTMODE=hotplug""" - ), - "ifcfg-eth2": textwrap.dedent( - """\ - BOOTPROTO=none - LLADDR=c0:bb:9f:2c:e8:80 - STARTMODE=hotplug""" - ), - "ifcfg-eth3": textwrap.dedent( - """\ - BOOTPROTO=static - BRIDGE=yes - LLADDR=66:bb:9f:2c:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth4": textwrap.dedent( - """\ - BOOTPROTO=static - BRIDGE=yes - LLADDR=98:bb:9f:2c:e8:80 - STARTMODE=auto""" - ), - "ifcfg-eth5": textwrap.dedent( - """\ - BOOTPROTO=dhcp4 - LLADDR=98:bb:9f:2c:e8:8a - STARTMODE=manual""" - ), - "ifcfg-ib0": textwrap.dedent( - """\ - BOOTPROTO=static - LLADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 - IPADDR=192.168.200.7 - MTU=9000 - NETMASK=255.255.255.0 - STARTMODE=auto - TYPE=InfiniBand""" - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-bond0": textwrap.dedent( - """\ - BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup """ - """xmit_hash_policy=layer3+4 """ - """miimon=100" - BONDING_SLAVE0=eth1 - BONDING_SLAVE1=eth2 - BOOTPROTO=none - DEVICE=bond0 - DHCPV6C=yes - IPV6INIT=yes - MACADDR=aa:bb:cc:dd:ee:ff - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Bond - USERCTL=no""" - ), - "ifcfg-bond0.200": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEVICE=bond0.200 - DHCLIENT_SET_DEFAULT_ROUTE=no - NM_CONTROLLED=no - ONBOOT=yes - PHYSDEV=bond0 - USERCTL=no - VLAN=yes""" - ), - "ifcfg-br0": textwrap.dedent( - """\ - AGEING=250 - BOOTPROTO=none - DEFROUTE=yes - DEVICE=br0 - IPADDR=192.168.14.2 - IPV6ADDR=2001:1::1/64 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - IPV6_DEFAULTGW=2001:4800:78ff:1b::1 - MACADDR=bb:bb:bb:bb:bb:aa - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - PRIO=22 - STP=no - TYPE=Bridge - USERCTL=no""" - ), - "ifcfg-eth0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth0 - HWADDR=c0:d6:9f:2c:e8:80 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth0.101": textwrap.dedent( - """\ - 
BOOTPROTO=none - DEFROUTE=yes - DEVICE=eth0.101 - DNS1=192.168.0.10 - DNS2=10.23.23.134 - DOMAIN="barley.maas sacchromyces.maas brettanomyces.maas" - GATEWAY=192.168.0.1 - IPADDR=192.168.0.2 - IPADDR1=192.168.2.10 - MTU=1500 - NETMASK=255.255.255.0 - NETMASK1=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - PHYSDEV=eth0 - USERCTL=no - VLAN=yes""" - ), - "ifcfg-eth1": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth1 - HWADDR=aa:d6:9f:2c:e8:80 - MASTER=bond0 - NM_CONTROLLED=no - ONBOOT=yes - SLAVE=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth2": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=eth2 - HWADDR=c0:bb:9f:2c:e8:80 - MASTER=bond0 - NM_CONTROLLED=no - ONBOOT=yes - SLAVE=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth3": textwrap.dedent( - """\ - BOOTPROTO=none - BRIDGE=br0 - DEVICE=eth3 - HWADDR=66:bb:9f:2c:e8:80 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth4": textwrap.dedent( - """\ - BOOTPROTO=none - BRIDGE=br0 - DEVICE=eth4 - HWADDR=98:bb:9f:2c:e8:80 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-eth5": textwrap.dedent( - """\ - BOOTPROTO=dhcp - DEVICE=eth5 - DHCLIENT_SET_DEFAULT_ROUTE=no - HWADDR=98:bb:9f:2c:e8:8a - NM_CONTROLLED=no - ONBOOT=no - TYPE=Ethernet - USERCTL=no""" - ), - "ifcfg-ib0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=ib0 - HWADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 - IPADDR=192.168.200.7 - MTU=9000 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=InfiniBand - USERCTL=no""" - ), - }, - "expected_network_manager": { - "cloud-init-eth3.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth3 - uuid=b7e95dda-7746-5bf8-bf33-6e5f3c926790 - autoconnect-priority=120 - type=ethernet - slave-type=bridge - master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=66:BB:9F:2C:E8:80 - - """ - ), - "cloud-init-eth5.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth5 - uuid=5fda13c7-9942-5e90-a41b-1d043bd725dc - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=98:BB:9F:2C:E8:8A - - [ipv4] - method=auto - may-fail=false - dns=8.8.8.8;4.4.4.4;8.8.4.4; - dns-search=barley.maas;wark.maas;foobar.maas; - - """ - ), - "cloud-init-ib0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init ib0 - uuid=11a1dda7-78b4-5529-beba-d9b5f549ad7b - autoconnect-priority=120 - type=infiniband - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [infiniband] - transport-mode=datagram - mtu=9000 - mac-address=A0:00:02:20:FE:80:00:00:00:00:00:00:EC:0D:9A:03:00:15:E2:C1 - - [ipv4] - method=manual - may-fail=false - address1=192.168.200.7/24 - dns=8.8.8.8;4.4.4.4;8.8.4.4; - dns-search=barley.maas;wark.maas;foobar.maas; - - """ - ), - "cloud-init-bond0.200.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init bond0.200 - uuid=88984a9c-ff22-5233-9267-86315e0acaa7 - autoconnect-priority=120 - type=vlan - interface-name=bond0.200 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [vlan] - id=200 - parent=54317911-f840-516b-a10d-82cb4c1f075c - - [ipv4] - method=auto - may-fail=false - dns=8.8.8.8;4.4.4.4;8.8.4.4; - dns-search=barley.maas;wark.maas;foobar.maas; - - """ - ), - "cloud-init-eth0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth0 - uuid=1dd9a779-d327-56e1-8454-c65e2556c12c - autoconnect-priority=120 - type=ethernet - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=C0:D6:9F:2C:E8:80 - - """ - ), - "cloud-init-eth4.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth4 - uuid=e27e4959-fb50-5580-b9a4-2073554627b9 - autoconnect-priority=120 - type=ethernet - slave-type=bridge - master=dee46ce4-af7a-5e7c-aa08-b25533ae9213 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=98:BB:9F:2C:E8:80 - - """ - ), - "cloud-init-eth1.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth1 - uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58 - autoconnect-priority=120 - type=ethernet - slave-type=bond - master=54317911-f840-516b-a10d-82cb4c1f075c - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=AA:D6:9F:2C:E8:80 - - """ - ), - "cloud-init-br0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init br0 - uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213 - autoconnect-priority=120 - type=bridge - interface-name=br0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [bridge] - stp=false - priority=22 - mac-address=BB:BB:BB:BB:BB:AA - - [ipv4] - method=manual - may-fail=false - address1=192.168.14.2/24 - dns=8.8.8.8;4.4.4.4;8.8.4.4; - dns-search=barley.maas;wark.maas;foobar.maas; - - [ipv6] - method=manual - may-fail=false - address1=2001:1::1/64 - route1=::/0,2001:4800:78ff:1b::1 - dns-search=barley.maas;wark.maas;foobar.maas; - - """ - ), - "cloud-init-eth0.101.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init eth0.101 - uuid=b5acec5e-db80-5935-8b02-0d5619fc42bf - autoconnect-priority=120 - type=vlan - interface-name=eth0.101 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [vlan] - id=101 - parent=1dd9a779-d327-56e1-8454-c65e2556c12c - - [ipv4] - method=manual - may-fail=false - address1=192.168.0.2/24 - gateway=192.168.0.1 - address2=192.168.2.10/24 - dns=192.168.0.10;10.23.23.134; - dns-search=barley.maas;sacchromyces.maas;brettanomyces.maas; - - """ - ), - "cloud-init-bond0.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. - - [connection] - id=cloud-init bond0 - uuid=54317911-f840-516b-a10d-82cb4c1f075c - autoconnect-priority=120 - type=bond - interface-name=bond0 - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [bond] - mode=active-backup - miimon=100 - xmit_hash_policy=layer3+4 - - [ipv6] - method=auto - may-fail=false - dns-search=barley.maas;wark.maas;foobar.maas; - - """ - ), - "cloud-init-eth2.nmconnection": textwrap.dedent( - """\ - # Generated by cloud-init. Changes will be lost. 
- - [connection] - id=cloud-init eth2 - uuid=5559a242-3421-5fdd-896e-9cb8313d5804 - autoconnect-priority=120 - type=ethernet - slave-type=bond - master=54317911-f840-516b-a10d-82cb4c1f075c - - [user] - org.freedesktop.NetworkManager.origin=cloud-init - - [ethernet] - mac-address=C0:BB:9F:2C:E8:80 - - """ - ), - }, - "yaml": textwrap.dedent( - """ - version: 1 - config: - # Physical interfaces. - - type: physical - name: eth0 - mac_address: c0:d6:9f:2c:e8:80 - - type: physical - name: eth1 - mac_address: aa:d6:9f:2c:e8:80 - - type: physical - name: eth2 - mac_address: c0:bb:9f:2c:e8:80 - - type: physical - name: eth3 - mac_address: 66:bb:9f:2c:e8:80 - - type: physical - name: eth4 - mac_address: 98:bb:9f:2c:e8:80 - # specify how ifupdown should treat iface - # control is one of ['auto', 'hotplug', 'manual'] - # with manual meaning ifup/ifdown should not affect the iface - # useful for things like iscsi root + dhcp - - type: physical - name: eth5 - mac_address: 98:bb:9f:2c:e8:8a - subnets: - - type: dhcp - control: manual - # VLAN interface. - - type: vlan - name: eth0.101 - vlan_link: eth0 - vlan_id: 101 - mac_address: aa:bb:cc:dd:ee:11 - mtu: 1500 - subnets: - - type: static - # When 'mtu' matches device-level mtu, no warnings - mtu: 1500 - address: 192.168.0.2/24 - gateway: 192.168.0.1 - dns_nameservers: - - 192.168.0.10 - - 10.23.23.134 - dns_search: - - barley.maas - - sacchromyces.maas - - brettanomyces.maas - - type: static - address: 192.168.2.10/24 - # Bond. - - type: bond - name: bond0 - # if 'mac_address' is omitted, the MAC is taken from - # the first slave. - mac_address: aa:bb:cc:dd:ee:ff - bond_interfaces: - - eth1 - - eth2 - params: - bond-mode: active-backup - bond_miimon: 100 - bond-xmit-hash-policy: "layer3+4" - subnets: - - type: dhcp6 - # A Bond VLAN. - - type: vlan - name: bond0.200 - vlan_link: bond0 - vlan_id: 200 - subnets: - - type: dhcp4 - # An infiniband - - type: infiniband - name: ib0 - mac_address: >- - a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 - subnets: - - type: static - address: 192.168.200.7/24 - mtu: 9000 - # A bridge. - - type: bridge - name: br0 - bridge_interfaces: - - eth3 - - eth4 - ipv4_conf: - rp_filter: 1 - proxy_arp: 0 - forwarding: 1 - ipv6_conf: - autoconf: 1 - disable_ipv6: 1 - use_tempaddr: 1 - forwarding: 1 - # basically anything in /proc/sys/net/ipv6/conf/.../ - mac_address: bb:bb:bb:bb:bb:aa - params: - bridge_ageing: 250 - bridge_bridgeprio: 22 - bridge_fd: 1 - bridge_gcint: 2 - bridge_hello: 1 - bridge_maxage: 10 - bridge_maxwait: 0 - bridge_pathcost: - - eth3 50 - - eth4 75 - bridge_portprio: - - eth3 28 - - eth4 14 - bridge_stp: 'off' - bridge_waitport: - - 1 eth3 - - 2 eth4 - subnets: - - type: static - address: 192.168.14.2/24 - - type: static - address: 2001:1::1/64 # default to /64 - routes: - - gateway: 2001:4800:78ff:1b::1 - netmask: '::' - network: '::' - # A global nameserver. - - type: nameserver - address: 8.8.8.8 - search: barley.maas - # global nameservers and search in list form - - type: nameserver - address: - - 4.4.4.4 - - 8.8.4.4 - search: - - wark.maas - - foobar.maas - # A global route. 
- - type: route - destination: 10.0.0.0/8 - gateway: 11.0.0.1 - metric: 3 - """ - ).lstrip(), - }, - "bond": { - "yaml": textwrap.dedent( - """ - version: 1 - config: - - type: physical - name: bond0s0 - mac_address: aa:bb:cc:dd:e8:00 - - type: physical - name: bond0s1 - mac_address: aa:bb:cc:dd:e8:01 - - type: bond - name: bond0 - mac_address: aa:bb:cc:dd:e8:ff - mtu: 9000 - bond_interfaces: - - bond0s0 - - bond0s1 - params: - bond-mode: active-backup - bond_miimon: 100 - bond-xmit-hash-policy: "layer3+4" - bond-num-grat-arp: 5 - bond-downdelay: 10 - bond-updelay: 20 - bond-fail-over-mac: active - bond-primary: bond0s0 - bond-primary-reselect: always - subnets: - - type: static - address: 192.168.0.2/24 - gateway: 192.168.0.1 - routes: - - gateway: 192.168.0.3 - netmask: 255.255.255.0 - network: 10.1.3.0 - - type: static - address: 192.168.1.2/24 - - type: static - address: 2001:1::1/92 - routes: - - gateway: 2001:67c:1562::1 - network: "2001:67c::" - netmask: "ffff:ffff::" - - gateway: 3001:67c:15::1 - network: "3001:67c::" - netmask: "ffff:ffff::" - metric: 10000 - """ - ), - "expected_netplan": textwrap.dedent( - """ - network: - version: 2 - ethernets: - bond0s0: - match: - macaddress: aa:bb:cc:dd:e8:00 - set-name: bond0s0 - bond0s1: - match: - macaddress: aa:bb:cc:dd:e8:01 - set-name: bond0s1 - bonds: - bond0: - addresses: - - 192.168.0.2/24 - - 192.168.1.2/24 - - 2001:1::1/92 - interfaces: - - bond0s0 - - bond0s1 - macaddress: aa:bb:cc:dd:e8:ff - mtu: 9000 - parameters: - down-delay: 10 - fail-over-mac-policy: active - gratuitous-arp: 5 - mii-monitor-interval: 100 - mode: active-backup - primary: bond0s0 - primary-reselect-policy: always - transmit-hash-policy: layer3+4 - up-delay: 20 - routes: - - to: 0.0.0.0/0 - via: 192.168.0.1 - - to: 10.1.3.0/24 - via: 192.168.0.3 - - to: 2001:67c::/32 - via: 2001:67c:1562::1 - - metric: 10000 - to: 3001:67c::/32 - via: 3001:67c:15::1 - """ - ), - "expected_eni": textwrap.dedent( - """\ -auto lo -iface lo inet loopback - -auto bond0s0 -iface bond0s0 inet manual - bond-downdelay 10 - bond-fail-over-mac active - bond-master bond0 - bond-mode active-backup - bond-num-grat-arp 5 - bond-primary bond0s0 - bond-primary-reselect always - bond-updelay 20 - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - -auto bond0s1 -iface bond0s1 inet manual - bond-downdelay 10 - bond-fail-over-mac active - bond-master bond0 - bond-mode active-backup - bond-num-grat-arp 5 - bond-primary bond0s0 - bond-primary-reselect always - bond-updelay 20 - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - -auto bond0 -iface bond0 inet static - address 192.168.0.2/24 - gateway 192.168.0.1 - bond-downdelay 10 - bond-fail-over-mac active - bond-mode active-backup - bond-num-grat-arp 5 - bond-primary bond0s0 - bond-primary-reselect always - bond-slaves none - bond-updelay 20 - bond-xmit-hash-policy layer3+4 - bond_miimon 100 - hwaddress aa:bb:cc:dd:e8:ff - mtu 9000 - post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true - pre-down route del -net 10.1.3.0/24 gw 192.168.0.3 || true - -# control-alias bond0 -iface bond0 inet static - address 192.168.1.2/24 - -# control-alias bond0 -iface bond0 inet6 static - address 2001:1::1/92 - post-up route add -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true - pre-down route del -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true - post-up route add -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ -|| true - pre-down route del -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \ -|| true - """ - ), - "yaml-v2": textwrap.dedent( - 
""" - version: 2 - ethernets: - eth0: - match: - driver: "virtio_net" - macaddress: aa:bb:cc:dd:e8:00 - vf0: - set-name: vf0 - match: - driver: "e1000" - macaddress: aa:bb:cc:dd:e8:01 - bonds: - bond0: - addresses: - - 192.168.0.2/24 - - 192.168.1.2/24 - - 2001:1::1/92 - gateway4: 192.168.0.1 - interfaces: - - eth0 - - vf0 - parameters: - down-delay: 10 - fail-over-mac-policy: active - gratuitous-arp: 5 - mii-monitor-interval: 100 - mode: active-backup - primary: bond0s0 - primary-reselect-policy: always - transmit-hash-policy: layer3+4 - up-delay: 20 - routes: - - to: 10.1.3.0/24 - via: 192.168.0.3 - - to: 2001:67c:1562:8007::1/64 - via: 2001:67c:1562:8007::aac:40b2 - - metric: 10000 - to: 3001:67c:15:8007::1/64 - via: 3001:67c:15:8007::aac:40b2 - """ - ), - "expected_netplan-v2": textwrap.dedent( - """ - network: - bonds: - bond0: - addresses: - - 192.168.0.2/24 - - 192.168.1.2/24 - - 2001:1::1/92 - gateway4: 192.168.0.1 - interfaces: - - eth0 - - vf0 - parameters: - down-delay: 10 - fail-over-mac-policy: active - gratuitous-arp: 5 - mii-monitor-interval: 100 - mode: active-backup - primary: bond0s0 - primary-reselect-policy: always - transmit-hash-policy: layer3+4 - up-delay: 20 - routes: - - to: 10.1.3.0/24 - via: 192.168.0.3 - - to: 2001:67c:1562:8007::1/64 - via: 2001:67c:1562:8007::aac:40b2 - - metric: 10000 - to: 3001:67c:15:8007::1/64 - via: 3001:67c:15:8007::aac:40b2 - ethernets: - eth0: - match: - driver: virtio_net - macaddress: aa:bb:cc:dd:e8:00 - vf0: - match: - driver: e1000 - macaddress: aa:bb:cc:dd:e8:01 - set-name: vf0 - version: 2 - """ - ), - "expected_sysconfig_opensuse": { - "ifcfg-bond0": textwrap.dedent( - """\ - BONDING_MASTER=yes - BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ - """miimon=100 num_grat_arp=5 """ - """downdelay=10 updelay=20 """ - """fail_over_mac=active """ - """primary=bond0s0 """ - """primary_reselect=always" - BONDING_SLAVE_0=bond0s0 - BONDING_SLAVE_1=bond0s1 - BOOTPROTO=static - LLADDR=aa:bb:cc:dd:e8:ff - IPADDR=192.168.0.2 - IPADDR1=192.168.1.2 - IPADDR6=2001:1::1/92 - MTU=9000 - NETMASK=255.255.255.0 - NETMASK1=255.255.255.0 - STARTMODE=auto - """ - ), - "ifcfg-bond0s0": textwrap.dedent( - """\ - BOOTPROTO=none - LLADDR=aa:bb:cc:dd:e8:00 - STARTMODE=hotplug - """ - ), - "ifcfg-bond0s1": textwrap.dedent( - """\ - BOOTPROTO=none - LLADDR=aa:bb:cc:dd:e8:01 - STARTMODE=hotplug - """ - ), - }, - "expected_sysconfig_rhel": { - "ifcfg-bond0": textwrap.dedent( - """\ - BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ - """miimon=100 num_grat_arp=5 """ - """downdelay=10 updelay=20 """ - """fail_over_mac=active """ - """primary=bond0s0 """ - """primary_reselect=always" - BONDING_SLAVE0=bond0s0 - BONDING_SLAVE1=bond0s1 - BOOTPROTO=none - DEFROUTE=yes - DEVICE=bond0 - GATEWAY=192.168.0.1 - MACADDR=aa:bb:cc:dd:e8:ff - IPADDR=192.168.0.2 - IPADDR1=192.168.1.2 - IPV6ADDR=2001:1::1/92 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - MTU=9000 - NETMASK=255.255.255.0 - NETMASK1=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Bond - USERCTL=no - """ - ), - "ifcfg-bond0s0": textwrap.dedent( - """\ - BOOTPROTO=none - DEVICE=bond0s0 - HWADDR=aa:bb:cc:dd:e8:00 - MASTER=bond0 - NM_CONTROLLED=no - ONBOOT=yes - SLAVE=yes - TYPE=Ethernet - USERCTL=no - """ - ), - "route6-bond0": textwrap.dedent( - """\ - # Created by cloud-init automatically, do not edit. 
-                #
-                2001:67c::/32 via 2001:67c:1562::1 dev bond0
-                3001:67c::/32 via 3001:67c:15::1 metric 10000 dev bond0
-                """
-            ),
-            "route-bond0": textwrap.dedent(
-                """\
-                ADDRESS0=10.1.3.0
-                GATEWAY0=192.168.0.3
-                NETMASK0=255.255.255.0
-                """
-            ),
-            "ifcfg-bond0s1": textwrap.dedent(
-                """\
-                BOOTPROTO=none
-                DEVICE=bond0s1
-                HWADDR=aa:bb:cc:dd:e8:01
-                MASTER=bond0
-                NM_CONTROLLED=no
-                ONBOOT=yes
-                SLAVE=yes
-                TYPE=Ethernet
-                USERCTL=no
-                """
-            ),
-        },
-        "expected_network_manager": {
-            "cloud-init-bond0s0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init bond0s0
-                uuid=09d0b5b9-67e7-5577-a1af-74d1cf17a71e
-                autoconnect-priority=120
-                type=ethernet
-                slave-type=bond
-                master=54317911-f840-516b-a10d-82cb4c1f075c
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-                mac-address=AA:BB:CC:DD:E8:00
-
-                """
-            ),
-            "cloud-init-bond0s1.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init bond0s1
-                uuid=4d9aca96-b515-5630-ad83-d13daac7f9d0
-                autoconnect-priority=120
-                type=ethernet
-                slave-type=bond
-                master=54317911-f840-516b-a10d-82cb4c1f075c
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-                mac-address=AA:BB:CC:DD:E8:01
-
-                """
-            ),
-            "cloud-init-bond0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init bond0
-                uuid=54317911-f840-516b-a10d-82cb4c1f075c
-                autoconnect-priority=120
-                type=bond
-                interface-name=bond0
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [bond]
-                mode=active-backup
-                miimon=100
-                xmit_hash_policy=layer3+4
-                num_grat_arp=5
-                downdelay=10
-                updelay=20
-                fail_over_mac=active
-                primary_reselect=always
-                primary=bond0s0
-
-                [ipv4]
-                method=manual
-                may-fail=false
-                address1=192.168.0.2/24
-                gateway=192.168.0.1
-                route1=10.1.3.0/24,192.168.0.3
-                address2=192.168.1.2/24
-
-                [ipv6]
-                method=manual
-                may-fail=false
-                address1=2001:1::1/92
-                route1=2001:67c::/32,2001:67c:1562::1
-                route2=3001:67c::/32,3001:67c:15::1
-
-                """
-            ),
-        },
-    },
-    "vlan": {
-        "yaml": textwrap.dedent(
-            """
-            version: 1
-            config:
-              - type: physical
-                name: en0
-                mac_address: aa:bb:cc:dd:e8:00
-              - type: vlan
-                mtu: 2222
-                name: en0.99
-                vlan_link: en0
-                vlan_id: 99
-                subnets:
-                  - type: static
-                    address: '192.168.2.2/24'
-                  - type: static
-                    address: '192.168.1.2/24'
-                    gateway: 192.168.1.1
-                  - type: static
-                    address: 2001:1::bbbb/96
-                    routes:
-                      - gateway: 2001:1::1
-                        netmask: '::'
-                        network: '::'
-            """
-        ),
-        "expected_sysconfig_opensuse": {
-            # TODO RJS: unknown proper BOOTPROTO setting ask Marius
-            "ifcfg-en0": textwrap.dedent(
-                """\
-                BOOTPROTO=static
-                LLADDR=aa:bb:cc:dd:e8:00
-                STARTMODE=auto"""
-            ),
-            "ifcfg-en0.99": textwrap.dedent(
-                """\
-                BOOTPROTO=static
-                IPADDR=192.168.2.2
-                IPADDR1=192.168.1.2
-                IPADDR6=2001:1::bbbb/96
-                MTU=2222
-                NETMASK=255.255.255.0
-                NETMASK1=255.255.255.0
-                STARTMODE=auto
-                ETHERDEVICE=en0
-                VLAN_ID=99
-                """
-            ),
-        },
-        "expected_sysconfig_rhel": {
-            "ifcfg-en0": textwrap.dedent(
-                """\
-                BOOTPROTO=none
-                DEVICE=en0
-                HWADDR=aa:bb:cc:dd:e8:00
-                NM_CONTROLLED=no
-                ONBOOT=yes
-                TYPE=Ethernet
-                USERCTL=no"""
-            ),
-            "ifcfg-en0.99": textwrap.dedent(
-                """\
-                BOOTPROTO=none
-                DEFROUTE=yes
-                DEVICE=en0.99
-                GATEWAY=192.168.1.1
-                IPADDR=192.168.2.2
-                IPADDR1=192.168.1.2
-                IPV6ADDR=2001:1::bbbb/96
-                IPV6INIT=yes
-                IPV6_AUTOCONF=no
-                IPV6_FORCE_ACCEPT_RA=no
-                IPV6_DEFAULTGW=2001:1::1
-                MTU=2222
-                NETMASK=255.255.255.0
-                NETMASK1=255.255.255.0
-                NM_CONTROLLED=no
-                ONBOOT=yes
-                PHYSDEV=en0
-                USERCTL=no
-                VLAN=yes"""
-            ),
-        },
-        "expected_network_manager": {
-            "cloud-init-en0.99.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init en0.99
-                uuid=f594e2ed-f107-51df-b225-1dc530a5356b
-                autoconnect-priority=120
-                type=vlan
-                interface-name=en0.99
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [vlan]
-                id=99
-                parent=e0ca478b-8d84-52ab-8fae-628482c629b5
-
-                [ipv4]
-                method=manual
-                may-fail=false
-                address1=192.168.2.2/24
-                address2=192.168.1.2/24
-                gateway=192.168.1.1
-
-                [ipv6]
-                method=manual
-                may-fail=false
-                address1=2001:1::bbbb/96
-                route1=::/0,2001:1::1
-
-                """
-            ),
-            "cloud-init-en0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init en0
-                uuid=e0ca478b-8d84-52ab-8fae-628482c629b5
-                autoconnect-priority=120
-                type=ethernet
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-                mac-address=AA:BB:CC:DD:E8:00
-
-                """
-            ),
-        },
-    },
-    "bridge": {
-        "yaml": textwrap.dedent(
-            """
-            version: 1
-            config:
-              - type: physical
-                name: eth0
-                mac_address: '52:54:00:12:34:00'
-                subnets:
-                  - type: static
-                    address: 2001:1::100/96
-              - type: physical
-                name: eth1
-                mac_address: '52:54:00:12:34:01'
-                subnets:
-                  - type: static
-                    address: 2001:1::101/96
-              - type: bridge
-                name: br0
-                bridge_interfaces:
-                  - eth0
-                  - eth1
-                params:
-                  bridge_stp: 0
-                  bridge_bridgeprio: 22
-                subnets:
-                  - type: static
-                    address: 192.168.2.2/24"""
-        ),
-        "expected_sysconfig_opensuse": {
-            "ifcfg-br0": textwrap.dedent(
-                """\
-                BOOTPROTO=static
-                IPADDR=192.168.2.2
-                NETMASK=255.255.255.0
-                STARTMODE=auto
-                BRIDGE_STP=off
-                BRIDGE_PRIORITY=22
-                BRIDGE_PORTS='eth0 eth1'
-                """
-            ),
-            "ifcfg-eth0": textwrap.dedent(
-                """\
-                BOOTPROTO=static
-                BRIDGE=yes
-                LLADDR=52:54:00:12:34:00
-                IPADDR6=2001:1::100/96
-                STARTMODE=auto
-                """
-            ),
-            "ifcfg-eth1": textwrap.dedent(
-                """\
-                BOOTPROTO=static
-                BRIDGE=yes
-                LLADDR=52:54:00:12:34:01
-                IPADDR6=2001:1::101/96
-                STARTMODE=auto
-                """
-            ),
-        },
-        "expected_sysconfig_rhel": {
-            "ifcfg-br0": textwrap.dedent(
-                """\
-                BOOTPROTO=none
-                DEVICE=br0
-                IPADDR=192.168.2.2
-                NETMASK=255.255.255.0
-                NM_CONTROLLED=no
-                ONBOOT=yes
-                PRIO=22
-                STP=no
-                TYPE=Bridge
-                USERCTL=no
-                """
-            ),
-            "ifcfg-eth0": textwrap.dedent(
-                """\
-                BOOTPROTO=none
-                BRIDGE=br0
-                DEVICE=eth0
-                HWADDR=52:54:00:12:34:00
-                IPV6ADDR=2001:1::100/96
-                IPV6INIT=yes
-                IPV6_AUTOCONF=no
-                IPV6_FORCE_ACCEPT_RA=no
-                NM_CONTROLLED=no
-                ONBOOT=yes
-                TYPE=Ethernet
-                USERCTL=no
-                """
-            ),
-            "ifcfg-eth1": textwrap.dedent(
-                """\
-                BOOTPROTO=none
-                BRIDGE=br0
-                DEVICE=eth1
-                HWADDR=52:54:00:12:34:01
-                IPV6ADDR=2001:1::101/96
-                IPV6INIT=yes
-                IPV6_AUTOCONF=no
-                IPV6_FORCE_ACCEPT_RA=no
-                NM_CONTROLLED=no
-                ONBOOT=yes
-                TYPE=Ethernet
-                USERCTL=no
-                """
-            ),
-        },
-        "expected_network_manager": {
-            "cloud-init-br0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init br0
-                uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213
-                autoconnect-priority=120
-                type=bridge
-                interface-name=br0
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [bridge]
-                stp=false
-                priority=22
-
-                [ipv4]
-                method=manual
-                may-fail=false
-                address1=192.168.2.2/24
-
-                """
-            ),
-            "cloud-init-eth0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init eth0
-                uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
-                autoconnect-priority=120
-                type=ethernet
-                slave-type=bridge
-                master=dee46ce4-af7a-5e7c-aa08-b25533ae9213
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-                mac-address=52:54:00:12:34:00
-
-                [ipv6]
-                method=manual
-                may-fail=false
-                address1=2001:1::100/96
-
-                """
-            ),
-            "cloud-init-eth1.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init eth1
-                uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58
-                autoconnect-priority=120
-                type=ethernet
-                slave-type=bridge
-                master=dee46ce4-af7a-5e7c-aa08-b25533ae9213
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-                mac-address=52:54:00:12:34:01
-
-                [ipv6]
-                method=manual
-                may-fail=false
-                address1=2001:1::101/96
-
-                """
-            ),
-        },
-    },
-    "manual": {
-        "yaml": textwrap.dedent(
-            """
-            version: 1
-            config:
-              - type: physical
-                name: eth0
-                mac_address: '52:54:00:12:34:00'
-                subnets:
-                  - type: static
-                    address: 192.168.1.2/24
-                    control: manual
-              - type: physical
-                name: eth1
-                mtu: 1480
-                mac_address: 52:54:00:12:34:aa
-                subnets:
-                  - type: manual
-              - type: physical
-                name: eth2
-                mac_address: 52:54:00:12:34:ff
-                subnets:
-                  - type: manual
-                    control: manual
-            """
-        ),
-        "expected_eni": textwrap.dedent(
-            """\
-            auto lo
-            iface lo inet loopback
-
-            # control-manual eth0
-            iface eth0 inet static
-                address 192.168.1.2/24
-
-            auto eth1
-            iface eth1 inet manual
-                mtu 1480
-
-            # control-manual eth2
-            iface eth2 inet manual
-            """
-        ),
-        "expected_netplan": textwrap.dedent(
-            """\
-
-            network:
-                version: 2
-                ethernets:
-                    eth0:
-                        addresses:
-                        - 192.168.1.2/24
-                        match:
-                            macaddress: '52:54:00:12:34:00'
-                        set-name: eth0
-                    eth1:
-                        match:
-                            macaddress: 52:54:00:12:34:aa
-                        mtu: 1480
-                        set-name: eth1
-                    eth2:
-                        match:
-                            macaddress: 52:54:00:12:34:ff
-                        set-name: eth2
-            """
-        ),
-        "expected_sysconfig_opensuse": {
-            "ifcfg-eth0": textwrap.dedent(
-                """\
-                BOOTPROTO=static
-                LLADDR=52:54:00:12:34:00
-                IPADDR=192.168.1.2
-                NETMASK=255.255.255.0
-                STARTMODE=manual
-                """
-            ),
-            "ifcfg-eth1": textwrap.dedent(
-                """\
-                BOOTPROTO=static
-                LLADDR=52:54:00:12:34:aa
-                MTU=1480
-                STARTMODE=auto
-                """
-            ),
-            "ifcfg-eth2": textwrap.dedent(
-                """\
-                BOOTPROTO=static
-                LLADDR=52:54:00:12:34:ff
-                STARTMODE=manual
-                """
-            ),
-        },
-        "expected_sysconfig_rhel": {
-            "ifcfg-eth0": textwrap.dedent(
-                """\
-                BOOTPROTO=none
-                DEVICE=eth0
-                HWADDR=52:54:00:12:34:00
-                IPADDR=192.168.1.2
-                NETMASK=255.255.255.0
-                NM_CONTROLLED=no
-                ONBOOT=no
-                TYPE=Ethernet
-                USERCTL=no
-                """
-            ),
-            "ifcfg-eth1": textwrap.dedent(
-                """\
-                BOOTPROTO=none
-                DEVICE=eth1
-                HWADDR=52:54:00:12:34:aa
-                MTU=1480
-                NM_CONTROLLED=no
-                ONBOOT=yes
-                TYPE=Ethernet
-                USERCTL=no
-                """
-            ),
-            "ifcfg-eth2": textwrap.dedent(
-                """\
-                BOOTPROTO=none
-                DEVICE=eth2
-                HWADDR=52:54:00:12:34:ff
-                NM_CONTROLLED=no
-                ONBOOT=no
-                TYPE=Ethernet
-                USERCTL=no
-                """
-            ),
-        },
-        "expected_network_manager": {
-            "cloud-init-eth0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init eth0
-                uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
-                autoconnect-priority=120
-                type=ethernet
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-                mac-address=52:54:00:12:34:00
-
-                [ipv4]
-                method=manual
-                may-fail=false
-                address1=192.168.1.2/24
-
-                """
-            ),
-            "cloud-init-eth1.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init eth1
-                uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58
-                autoconnect-priority=120
-                type=ethernet
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-                mtu=1480
-                mac-address=52:54:00:12:34:AA
-
-                [ipv4]
-                method=auto
-                may-fail=true
-
-                """
-            ),
-            "cloud-init-eth2.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init eth2
-                uuid=5559a242-3421-5fdd-896e-9cb8313d5804
-                autoconnect-priority=120
-                type=ethernet
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-                mac-address=52:54:00:12:34:FF
-
-                [ipv4]
-                method=auto
-                may-fail=true
-
-                """
-            ),
-        },
-    },
-    "v1-dns": {
-        "expected_networkd": textwrap.dedent(
-            """\
-            [Address]
-            Address=192.168.1.20/16
-
-            [Match]
-            MACAddress=11:22:33:44:55:66
-            Name=interface0
-
-            [Network]
-            DHCP=no
-            DNS=1.1.1.1 3.3.3.3
-            Domains=aaaa cccc
-
-            [Route]
-            Gateway=192.168.1.1
-            """
-        ),
-        "expected_eni": textwrap.dedent(
-            """\
-            # This file is generated from information provided by the datasource.  Changes
-            # to it will not persist across an instance reboot.  To disable cloud-init's
-            # network configuration capabilities, write a file
-            # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
-            # network: {config: disabled}
-            auto lo
-            iface lo inet loopback
-                dns-nameservers 2.2.2.2
-                dns-search bbbb
-
-            iface lo inet6 loopback
-                dns-nameservers FEDC::1
-                dns-search bbbb
-
-            auto interface0
-            iface interface0 inet static
-                address 192.168.1.20/16
-                dns-nameservers 1.1.1.1 3.3.3.3
-                dns-search aaaa cccc
-                gateway 192.168.1.1
-            """  # noqa: E501
-        ),
-        "expected_netplan": textwrap.dedent(
-            """\
-            # This file is generated from information provided by the datasource.  Changes
-            # to it will not persist across an instance reboot.  To disable cloud-init's
-            # network configuration capabilities, write a file
-            # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
-            # network: {config: disabled}
-            network:
-                version: 2
-                ethernets:
-                    interface0:
-                        addresses:
-                        - 192.168.1.20/16
-                        match:
-                            macaddress: 11:22:33:44:55:66
-                        nameservers:
-                            addresses:
-                            - 1.1.1.1
-                            - 3.3.3.3
-                            search:
-                            - aaaa
-                            - cccc
-                        routes:
-                        -   to: default
-                            via: 192.168.1.1
-                        set-name: interface0
-            """  # noqa: E501
-        ),
-        "expected_sysconfig_opensuse": {
-            "ifcfg-interface0": textwrap.dedent(
-                """\
-                # Created by cloud-init automatically, do not edit.
-                #
-                BOOTPROTO=static
-                IPADDR=192.168.1.20
-                LLADDR=11:22:33:44:55:66
-                NETMASK=255.255.0.0
-                STARTMODE=auto
-                """
-            )
-        },
-        "expected_sysconfig_rhel": {
-            "ifcfg-eth0": textwrap.dedent(
-                """\
-                # Created by cloud-init automatically, do not edit.
-                #
-                BOOTPROTO=none
-                DEFROUTE=yes
-                DEVICE=interface0
-                DNS1=1.1.1.1
-                DNS2=3.3.3.3
-                DOMAIN=aaaa cccc
-                GATEWAY=192.168.1.1
-                HWADDR=11:22:33:44:55:66
-                IPADDR=192.168.1.20
-                NETMASK=255.255.0.0
-                NM_CONTROLLED=no
-                ONBOOT=yes
-                TYPE=Ethernet
-                USERCTL=no
-                """
-            ),
-        },
-        "expected_network_manager": {
-            "cloud-init-interface0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init interface0
-                uuid=8b6862ed-dbd6-5830-93f7-a91451c13828
-                autoconnect-priority=120
-                type=ethernet
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-                mac-address=11:22:33:44:55:66
-
-                [ipv4]
-                method=manual
-                may-fail=false
-                address1=192.168.1.20/16
-                gateway=192.168.1.1
-                dns=3.3.3.3;1.1.1.1;
-                dns-search=cccc;aaaa;
-
-                """
-            )
-        },
-        "yaml": textwrap.dedent(
-            """\
-            version: 1
-            config:
-              - type: physical
-                name: interface0
-                mac_address: "11:22:33:44:55:66"
-                subnets:
-                  - type: static
-                    address: 192.168.1.20/16
-                    gateway: 192.168.1.1
-                    dns_nameservers:
-                      - 3.3.3.3
-                    dns_search:
-                      - cccc
-              - type: nameserver
-                interface: interface0
-                address:
-                  - 1.1.1.1
-                search:
-                  - aaaa
-              - type: nameserver
-                address:
-                  - 2.2.2.2
-                  - FEDC::1
-                search:
-                  - bbbb
-            """
-        ),
-    },
-    "v2-dev-name-via-mac-lookup": {
-        "expected_sysconfig_rhel": {
-            "ifcfg-eth0": textwrap.dedent(
-                """\
-                BOOTPROTO=none
-                DEVICE=eth0
-                HWADDR=cf:d6:af:48:e8:80
-                NM_CONTROLLED=no
-                ONBOOT=yes
-                TYPE=Ethernet
-                USERCTL=no"""
-            ),
-        },
-        "yaml": textwrap.dedent(
-            """\
-            version: 2
-            ethernets:
-              nic0:
-                match:
-                  macaddress: 'cf:d6:af:48:e8:80'
-            """
-        ),
-    },
-    "v2-mixed-routes": {
-        "expected_network_manager": {
-            "cloud-init-eth0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init eth0
-                uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
-                autoconnect-priority=120
-                type=ethernet
-                interface-name=eth0
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-                mtu=500
-
-                [ipv4]
-                method=auto
-                may-fail=true
-                route1=169.254.42.42/32,62.210.0.1
-                route1_options=mtu=400
-                route2=169.254.42.43/32,62.210.0.2
-                route2_options=mtu=200
-                address1=192.168.1.20/16
-                dns=8.8.8.8;
-                dns-search=lab;home;
-
-                [ipv6]
-                route1=::/0,fe80::dc00:ff:fe20:186
-                route1_options=mtu=300
-                route2=fe80::dc00:ff:fe20:188/64,fe80::dc00:ff:fe20:187
-                route2_options=mtu=100
-                method=auto
-                may-fail=true
-                address1=2001:bc8:1210:232:dc00:ff:fe20:185/64
-                dns=FEDC::1;
-                dns-search=lab;home;
-
-                """
-            )
-        },
-        "yaml": textwrap.dedent(
-            """\
-            version: 2
-            ethernets:
-              eth0:
-                dhcp4: true
-                dhcp6: true
-                mtu: 500
-                nameservers:
-                  search: [lab, home]
-                  addresses: [8.8.8.8, "FEDC::1"]
-                routes:
-                  - to: 169.254.42.42/32
-                    via: 62.210.0.1
-                    mtu: 400
-                  - via: fe80::dc00:ff:fe20:186
-                    to: ::/0
-                    mtu: 300
-                  - to: 169.254.42.43/32
-                    via: 62.210.0.2
-                    mtu: 200
-                  - via: fe80::dc00:ff:fe20:187
-                    to: fe80::dc00:ff:fe20:188
-                    mtu: 100
-                addresses:
-                  - 192.168.1.20/16
-                  - 2001:bc8:1210:232:dc00:ff:fe20:185/64
-            """
-        ),
-    },
-    "v2-dns": {
-        "expected_networkd": textwrap.dedent(
-            """\
-            [Address]
-            Address=192.168.1.20/16
-
-            [Address]
-            Address=2001:bc8:1210:232:dc00:ff:fe20:185/64
-
-            [Match]
-            Name=eth0
-
-            [Network]
-            DHCP=no
-            DNS=8.8.8.8 FEDC::1
-            Domains=lab home
-            """
-        ),
-        "expected_eni": textwrap.dedent(
-            """\
-            # This file is generated from information provided by the datasource.  Changes
-            # to it will not persist across an instance reboot.  To disable cloud-init's
-            # network configuration capabilities, write a file
-            # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
-            # network: {config: disabled}
-            auto lo
-            iface lo inet loopback
-
-            auto eth0
-            iface eth0 inet static
-                address 192.168.1.20/16
-                dns-nameservers 8.8.8.8
-                dns-search lab home
-
-            # control-alias eth0
-            iface eth0 inet6 static
-                address 2001:bc8:1210:232:dc00:ff:fe20:185/64
-                dns-nameservers FEDC::1
-                dns-search lab home
-            """  # noqa: E501
-        ),
-        "expected_sysconfig_opensuse": {
-            "ifcfg-eth0": textwrap.dedent(
-                """\
-                # Created by cloud-init automatically, do not edit.
-                #
-                BOOTPROTO=static
-                IPADDR=192.168.1.20
-                IPADDR6=2001:bc8:1210:232:dc00:ff:fe20:185/64
-                NETMASK=255.255.0.0
-                STARTMODE=auto
-                """
-            )
-        },
-        "expected_sysconfig_rhel": {
-            "ifcfg-eth0": textwrap.dedent(
-                """\
-                # Created by cloud-init automatically, do not edit.
-                #
-                BOOTPROTO=none
-                DEVICE=eth0
-                DNS1=8.8.8.8
-                DNS2=FEDC::1
-                DOMAIN="lab home"
-                IPADDR=192.168.1.20
-                IPV6ADDR=2001:bc8:1210:232:dc00:ff:fe20:185/64
-                IPV6INIT=yes
-                IPV6_AUTOCONF=no
-                IPV6_FORCE_ACCEPT_RA=no
-                NETMASK=255.255.0.0
-                NM_CONTROLLED=no
-                ONBOOT=yes
-                TYPE=Ethernet
-                USERCTL=no
-                """
-            )
-        },
-        "expected_network_manager": {
-            "cloud-init-eth0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init eth0
-                uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
-                autoconnect-priority=120
-                type=ethernet
-                interface-name=eth0
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-
-                [ipv4]
-                method=manual
-                may-fail=false
-                address1=192.168.1.20/16
-                dns=8.8.8.8;
-                dns-search=lab;home;
-
-                [ipv6]
-                method=manual
-                may-fail=false
-                address1=2001:bc8:1210:232:dc00:ff:fe20:185/64
-                dns=FEDC::1;
-                dns-search=lab;home;
-
-                """
-            )
-        },
-        "yaml": textwrap.dedent(
-            """\
-            version: 2
-            ethernets:
-              eth0:
-                nameservers:
-                  search: [lab, home]
-                  addresses: [8.8.8.8, "FEDC::1"]
-                addresses:
-                  - 192.168.1.20/16
-                  - 2001:bc8:1210:232:dc00:ff:fe20:185/64
-            """
-        ),
-    },
-    "v2-dns-no-if-ips": {
-        "expected_network_manager": {
-            "cloud-init-eth0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init eth0
-                uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
-                autoconnect-priority=120
-                type=ethernet
-                interface-name=eth0
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-
-                [ipv4]
-                method=auto
-                may-fail=true
-                dns=8.8.8.8;
-                dns-search=lab;home;
-
-                [ipv6]
-                method=auto
-                may-fail=true
-                dns=FEDC::1;
-                dns-search=lab;home;
-
-                """
-            )
-        },
-        "yaml": textwrap.dedent(
-            """\
-            version: 2
-            ethernets:
-              eth0:
-                dhcp4: true
-                dhcp6: true
-                nameservers:
-                  search: [lab, home]
-                  addresses: [8.8.8.8, "FEDC::1"]
-            """
-        ),
-    },
-    "v2-dns-no-dhcp": {
-        "expected_network_manager": {
-            "cloud-init-eth0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
-
-                [connection]
-                id=cloud-init eth0
-                uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
-                autoconnect-priority=120
-                type=ethernet
-                interface-name=eth0
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
-
-                [ethernet]
-
-                """
-            )
+        "in_macs": {
+            "fa:16:3e:ed:9a:59": "eth0",
         },
-        "yaml": textwrap.dedent(
-            """\
-            version: 2
-            ethernets:
-              eth0:
-                nameservers:
-                  search: [lab, home]
-                  addresses: [8.8.8.8, "FEDC::1"]
-            """
-        ),
+        "out_sysconfig_opensuse": [
+            (
+                "etc/sysconfig/network/ifcfg-eth0",
+                """
+# Created by cloud-init automatically, do not edit.
+#
+BOOTPROTO=static
+IPADDR=172.19.1.34
+IPADDR6=2001:DB8::10/64
+IPADDR6_1=2001:DB9::10/64
+IPADDR6_2=2001:DB10::10/64
+LLADDR=fa:16:3e:ed:9a:59
+NETMASK=255.255.252.0
+STARTMODE=auto
+""".lstrip(),
+            ),
+            (
+                "etc/resolv.conf",
+                """
+; Created by cloud-init automatically, do not edit.
+;
+nameserver 172.19.0.12
+""".lstrip(),
+            ),
+            (
+                "etc/NetworkManager/conf.d/99-cloud-init.conf",
+                """
+# Created by cloud-init automatically, do not edit.
+#
+[main]
+dns = none
+""".lstrip(),
+            ),
+            (
+                "etc/udev/rules.d/85-persistent-net-cloud-init.rules",
+                "".join(
+                    [
+                        'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+                        'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+                    ]
+                ),
+            ),
+        ],
+        "out_sysconfig_rhel": [
+            (
+                "etc/sysconfig/network-scripts/ifcfg-eth0",
+                """
+# Created by cloud-init automatically, do not edit.
+#
+BOOTPROTO=none
+DEFROUTE=yes
+DEVICE=eth0
+GATEWAY=172.19.3.254
+HWADDR=fa:16:3e:ed:9a:59
+IPADDR=172.19.1.34
+IPV6ADDR=2001:DB8::10/64
+IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
+IPV6INIT=yes
+IPV6_AUTOCONF=no
+IPV6_DEFAULTGW=2001:DB8::1
+IPV6_FORCE_ACCEPT_RA=no
+NETMASK=255.255.252.0
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+""".lstrip(),
+            ),
+            (
+                "etc/resolv.conf",
+                """
+; Created by cloud-init automatically, do not edit.
+;
+nameserver 172.19.0.12
+""".lstrip(),
+            ),
+            (
+                "etc/NetworkManager/conf.d/99-cloud-init.conf",
+                """
+# Created by cloud-init automatically, do not edit.
+#
+[main]
+dns = none
+""".lstrip(),
+            ),
+            (
+                "etc/udev/rules.d/70-persistent-net.rules",
+                "".join(
+                    [
+                        'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+                        'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+                    ]
+                ),
+            ),
+        ],
    },
-    "v2-route-no-gateway": {
-        "expected_network_manager": {
-            "cloud-init-eth0.nmconnection": textwrap.dedent(
-                """\
-                # Generated by cloud-init. Changes will be lost.
+]
-
-                [connection]
-                id=cloud-init eth0
-                uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
-                autoconnect-priority=120
-                type=ethernet
-                interface-name=eth0
+EXAMPLE_ENI = """
+auto lo
+iface lo inet loopback
+   dns-nameservers 10.0.0.1
+   dns-search foo.com
-
-                [user]
-                org.freedesktop.NetworkManager.origin=cloud-init
+auto eth0
+iface eth0 inet static
+        address 1.2.3.12
+        netmask 255.255.255.248
+        broadcast 1.2.3.15
+        gateway 1.2.3.9
+        dns-nameservers 69.9.160.191 69.9.191.4
+auto eth1
+iface eth1 inet static
+        address 10.248.2.4
+        netmask 255.255.255.248
+        broadcast 10.248.2.7
+"""
-
-                [ethernet]
+RENDERED_ENI = """
+auto lo
+iface lo inet loopback
+    dns-nameservers 10.0.0.1
+    dns-search foo.com
-
-                [ipv4]
-                method=auto
-                may-fail=false
-                route1=0.0.0.0/0
+auto eth0
+iface eth0 inet static
+    address 1.2.3.12/29
+    broadcast 1.2.3.15
+    dns-nameservers 69.9.160.191 69.9.191.4
+    gateway 1.2.3.9
-
-                """
-            )
-        },
-        "yaml": textwrap.dedent(
-            """\
-            version: 2
-            ethernets:
-              eth0:
-                dhcp4: true
-                routes:
-                - to: "0.0.0.0/0"
-            """
-        ),
-    },
-}
+auto eth1
+iface eth1 inet static
+    address 10.248.2.4/29
+    broadcast 10.248.2.7
+""".lstrip()

 CONFIG_V1_EXPLICIT_LOOPBACK = {
@@ -4773,14 +1326,14 @@ def sys_dev_path(devname, path=""):
     mock_sys_dev_path.side_effect = sys_dev_path


-class TestGenerateFallbackConfig(CiTestCase):
-    def setUp(self):
-        super(TestGenerateFallbackConfig, self).setUp()
-        self.add_patch(
+class TestGenerateFallbackConfig:
+    @pytest.fixture(autouse=True)
+    def setup(self, mocker, tmpdir_factory):
+        mocker.patch(
             "cloudinit.util.get_cmdline",
-            "m_get_cmdline",
             return_value="root=/dev/sda1",
         )
+        self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True))

     @mock.patch("cloudinit.net.sys_dev_path")
     @mock.patch("cloudinit.net.read_sys_net")
@@ -4829,14 +1382,14 @@ def test_device_driver_v2(
                     "dhcp6": True,
                     "set-name": "eth0",
                     "match": {
-                        "name": "eth0",
+                        "macaddress": "00:11:22:33:44:55",
                         "driver": "hv_netvsc",
                     },
                 }
             },
             "version": 2,
         }
-        self.assertEqual(expected, network_cfg)
+        assert expected == network_cfg

     @mock.patch("cloudinit.net.openvswitch_is_installed", return_value=False)
     @mock.patch("cloudinit.net.sys_dev_path")
@@ -4901,7 +1454,7 @@ def test_device_driver(
         )
         renderer.render_network_state(ns, target=render_dir)

-        self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces")))
+        assert os.path.exists(os.path.join(render_dir, "interfaces"))
         with open(os.path.join(render_dir, "interfaces")) as fh:
             contents = fh.read()
         print(contents)
@@ -4915,13 +1468,20 @@ def test_device_driver(
 # control-alias eth0
 iface eth0 inet6 dhcp
 """
-        self.assertEqual(expected.lstrip(), contents.lstrip())
+        assert expected.lstrip() == contents.lstrip()

-        self.assertTrue(os.path.exists(os.path.join(render_dir, "netrules")))
+        assert os.path.exists(os.path.join(render_dir, "netrules"))
         with open(os.path.join(render_dir, "netrules")) as fh:
             contents = fh.read()
         print(contents)
-        self.assertEqual("", contents.lstrip())
+        expected_rule = [
+            'SUBSYSTEM=="net"',
+            'ACTION=="add"',
+            'DRIVERS=="hv_netvsc"',
+            'ATTR{address}=="00:11:22:33:44:55"',
+            'NAME="eth0"',
+        ]
+        assert ", ".join(expected_rule) + "\n" == contents.lstrip()

     @mock.patch("cloudinit.net.openvswitch_is_installed", return_value=False)
     @mock.patch("cloudinit.net.sys_dev_path")
@@ -4986,7 +1546,7 @@ def test_hv_netvsc_vf_filter(
         )
         renderer.render_network_state(ns, target=render_dir)

-        self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces")))
+        assert os.path.exists(os.path.join(render_dir, "interfaces"))
         with open(os.path.join(render_dir, "interfaces")) as fh:
             contents = fh.read()
         print(contents)
@@ -5000,13 +1560,20 @@ def test_hv_netvsc_vf_filter(
 # control-alias eth1
 iface eth1 inet6 dhcp
 """
-        self.assertEqual(expected.lstrip(), contents.lstrip())
+        assert expected.lstrip() == contents.lstrip()

-        self.assertTrue(os.path.exists(os.path.join(render_dir, "netrules")))
+        assert os.path.exists(os.path.join(render_dir, "netrules"))
         with open(os.path.join(render_dir, "netrules")) as fh:
             contents = fh.read()
         print(contents)
-        self.assertEqual("", contents.lstrip())
+        expected_rule = [
+            'SUBSYSTEM=="net"',
+            'ACTION=="add"',
+            'DRIVERS=="hv_netvsc"',
+            'ATTR{address}=="00:11:22:33:44:55"',
+            'NAME="eth1"',
+        ]
+        assert ", ".join(expected_rule) + "\n" == contents.lstrip()

     @mock.patch("cloudinit.util.get_cmdline")
     @mock.patch("cloudinit.util.udevadm_settle")
@@ -5055,7 +1622,7 @@ def test_unstable_names(
             dev_attrs=devices,
         )
         net.generate_fallback_config(config_driver=True)
-        self.assertEqual(1, mock_settle.call_count)
+        assert 1 == mock_settle.call_count

     @mock.patch("cloudinit.util.get_cmdline")
     @mock.patch("cloudinit.util.udevadm_settle")
@@ -5104,21 +1671,23 @@ def test_unstable_names_disabled(
             dev_attrs=devices,
         )
         net.generate_fallback_config(config_driver=True)
-        self.assertEqual(0, mock_settle.call_count)
+        assert 0 == mock_settle.call_count


 @mock.patch(
     "cloudinit.net.is_openvswitch_internal_interface",
     mock.Mock(return_value=False),
 )
-class TestRhelSysConfigRendering(CiTestCase):
-    with_logs = True
-
+class TestRhelSysConfigRendering:
     scripts_dir = "/etc/sysconfig/network-scripts"
     header = "# Created by cloud-init automatically, do not edit.\n#\n"

     expected_name = "expected_sysconfig_rhel"

+    @pytest.fixture(autouse=True)
+    def setup(self, tmpdir_factory):
+        self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True))
+
     def _get_renderer(self):
         distro_cls = distros.fetch("rhel")
         return sysconfig.Renderer(
@@ -5150,7 +1719,6 @@ def _try_load(f):
             # route6- * files aren't shell content, but iproute2 params
             return f

-        orig_maxdiff = self.maxDiff
         expected_d = dict(
             (os.path.join(self.scripts_dir, k), _try_load(v))
             for k, v in expected.items()
@@ -5162,11 +1730,7 @@ def _try_load(f):
             for k, v in found.items()
             if k.startswith(self.scripts_dir)
         )
-        try:
-            self.maxDiff = None
-            self.assertEqual(expected_d, scripts_found)
-        finally:
-            self.maxDiff = orig_maxdiff
+        assert expected_d == scripts_found

     def _assert_headers(self, found):
         missing = [
@@ -5216,13 +1780,13 @@ def test_default_generation(
 BOOTPROTO=dhcp
 DEVICE=eth1000
 DHCPV6C=yes
+HWADDR=07-1c-c6-75-a4-be
 IPV6INIT=yes
-NM_CONTROLLED=no
 ONBOOT=yes
 TYPE=Ethernet
 USERCTL=no
 """.lstrip()
-        self.assertEqual(expected_content, content)
+        assert expected_content == content

     def test_multiple_ipv4_default_gateways(self):
         """ValueError is raised when duplicate ipv4 gateways exist."""
@@ -5267,9 +1831,9 @@ def test_multiple_ipv4_default_gateways(self):
             network_cfg, skip_broken=False
         )
         renderer = self._get_renderer()
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             renderer.render_network_state(ns, target=render_dir)
-        self.assertEqual([], os.listdir(render_dir))
+        assert [] == os.listdir(render_dir)

     def test_multiple_ipv6_default_gateways(self):
         """ValueError is raised when duplicate ipv6 gateways exist."""
@@ -5314,9 +1878,9 @@ def test_multiple_ipv6_default_gateways(self):
             network_cfg, skip_broken=False
         )
         renderer = self._get_renderer()
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             renderer.render_network_state(ns, target=render_dir)
-        self.assertEqual([], os.listdir(render_dir))
+        assert [] == os.listdir(render_dir)

     def test_invalid_network_mask_ipv6(self):
         net_json = {
@@ -5350,7 +1914,7 @@ def test_invalid_network_mask_ipv6(self):
         }
         macs = {"fa:16:3e:ed:9a:59": "eth0"}
         network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             network_state.parse_net_config_data(network_cfg, skip_broken=False)

     def test_invalid_network_mask_ipv4(self):
         net_json = {
@@ -5385,7 +1949,7 @@ def test_invalid_network_mask_ipv4(self):
         }
         macs = {"fa:16:3e:ed:9a:59": "eth0"}
         network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             network_state.parse_net_config_data(network_cfg, skip_broken=False)

     def test_openstack_rendering_samples(self):
@@ -5408,17 +1972,17 @@ def test_openstack_rendering_samples(self):
                 "out_sysconfig_rhel", []
             ):
                 with open(os.path.join(render_dir, fn)) as fh:
-                    self.assertEqual(expected_content, fh.read())
+                    assert expected_content == fh.read()

     def test_network_config_v1_samples(self):
         ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET)
-        render_dir = self.tmp_path("render")
+        render_dir = os.path.join(self.tmp_dir(), "render")
         os.makedirs(render_dir)
         renderer = self._get_renderer()
         renderer.render_network_state(ns, target=render_dir)
         found = dir2dict(render_dir)
         nspath = "/etc/sysconfig/network-scripts/"
-        self.assertNotIn(nspath + "ifcfg-lo", found.keys())
+        assert nspath + "ifcfg-lo" not in found.keys()
         expected = """\
 # Created by cloud-init automatically, do not edit.
 #
@@ -5429,26 +1993,25 @@ def test_network_config_v1_samples(self):
 HWADDR=52:54:00:12:34:00
 IPADDR=10.0.2.15
 NETMASK=255.255.255.0
-NM_CONTROLLED=no
 ONBOOT=yes
 TYPE=Ethernet
 USERCTL=no
 """
-        self.assertEqual(expected, found[nspath + "ifcfg-interface0"])
+        assert expected == found[nspath + "ifcfg-interface0"]
         # The configuration has no nameserver information make sure we
         # do not write the resolv.conf file
         respath = "/etc/resolv.conf"
-        self.assertNotIn(respath, found.keys())
+        assert respath not in found.keys()

     def test_network_config_v1_multi_iface_samples(self):
         ns = network_state.parse_net_config_data(CONFIG_V1_MULTI_IFACE)
-        render_dir = self.tmp_path("render")
+        render_dir = os.path.join(self.tmp_dir(), "render")
         os.makedirs(render_dir)
         renderer = self._get_renderer()
         renderer.render_network_state(ns, target=render_dir)
         found = dir2dict(render_dir)
         nspath = "/etc/sysconfig/network-scripts/"
-        self.assertNotIn(nspath + "ifcfg-lo", found.keys())
+        assert nspath + "ifcfg-lo" not in found.keys()
         expected_i1 = """\
 # Created by cloud-init automatically, do not edit.
 #
@@ -5460,12 +2023,11 @@ def test_network_config_v1_multi_iface_samples(self):
 IPADDR=51.68.89.122
 MTU=1500
 NETMASK=255.255.240.0
-NM_CONTROLLED=no
 ONBOOT=yes
 TYPE=Ethernet
 USERCTL=no
 """
-        self.assertEqual(expected_i1, found[nspath + "ifcfg-eth0"])
+        assert expected_i1 == found[nspath + "ifcfg-eth0"]
         expected_i2 = """\
 # Created by cloud-init automatically, do not edit.
 #
@@ -5474,16 +2036,15 @@ def test_network_config_v1_multi_iface_samples(self):
 DHCLIENT_SET_DEFAULT_ROUTE=no
 HWADDR=fa:16:3e:b1:ca:29
 MTU=9000
-NM_CONTROLLED=no
 ONBOOT=yes
 TYPE=Ethernet
 USERCTL=no
 """
-        self.assertEqual(expected_i2, found[nspath + "ifcfg-eth1"])
+        assert expected_i2 == found[nspath + "ifcfg-eth1"]

     def test_config_with_explicit_loopback(self):
         ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
-        render_dir = self.tmp_path("render")
+        render_dir = os.path.join(self.tmp_dir(), "render")
         os.makedirs(render_dir)
         # write an etc/resolv.conf and expect it to not be modified
         resolvconf = os.path.join(render_dir, "etc/resolv.conf")
@@ -5493,107 +2054,109 @@ def test_config_with_explicit_loopback(self):
         renderer.render_network_state(ns, target=render_dir)
         found = dir2dict(render_dir)
         nspath = "/etc/sysconfig/network-scripts/"
-        self.assertNotIn(nspath + "ifcfg-lo", found.keys())
+        assert nspath + "ifcfg-lo" not in found.keys()
         expected = """\
 # Created by cloud-init automatically, do not edit.
 #
 BOOTPROTO=dhcp
 DEVICE=eth0
-NM_CONTROLLED=no
 ONBOOT=yes
 TYPE=Ethernet
 USERCTL=no
 """
-        self.assertEqual(expected, found[nspath + "ifcfg-eth0"])
+        assert expected == found[nspath + "ifcfg-eth0"]
         # a dhcp only config should not modify resolv.conf
-        self.assertEqual(resolvconf_content, found["/etc/resolv.conf"])
-
-    def test_bond_config(self):
-        entry = NETWORK_CONFIGS["bond"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_vlan_config(self):
-        entry = NETWORK_CONFIGS["vlan"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_bridge_config(self):
-        entry = NETWORK_CONFIGS["bridge"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_manual_config(self):
-        entry = NETWORK_CONFIGS["manual"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_all_config(self):
-        entry = NETWORK_CONFIGS["all"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-        self.assertNotIn(
-            "WARNING: Network config: ignoring eth0.101 device-level mtu",
-            self.logs.getvalue(),
-        )
-
-    def test_small_config_v1(self):
-        entry = NETWORK_CONFIGS["small_v1"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_small_config_v2(self):
-        entry = NETWORK_CONFIGS["small_v2"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_v4_and_v6_static_config(self):
-        entry = NETWORK_CONFIGS["v4_and_v6_static"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-        expected_msg = (
-            "WARNING: Network config: ignoring iface0 device-level mtu:8999"
-            " because ipv4 subnet-level mtu:9000 provided."
-        )
-        self.assertIn(expected_msg, self.logs.getvalue())
-
-    def test_dhcpv6_only_config(self):
-        entry = NETWORK_CONFIGS["dhcpv6_only"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
+        assert resolvconf_content == found["/etc/resolv.conf"]

-    def test_dhcpv6_accept_ra_config_v1(self):
-        entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+    @pytest.mark.parametrize(
+        "expected_name,yaml_version",
+        [
+            ("bond_v1", "yaml"),
+            pytest.param(
+                "bond_v2",
+                "yaml",
+                marks=pytest.mark.xfail(
+                    reason="Bond MAC address not rendered"
+                ),
+            ),
+            ("vlan_v1", "yaml"),
+            ("vlan_v2", "yaml"),
+            ("bridge", "yaml_v1"),
+            ("bridge", "yaml_v2"),
+            ("manual", "yaml"),
+            ("small_v1", "yaml"),
+            ("small_v2", "yaml"),
+            ("dhcpv6_only", "yaml_v1"),
+            ("dhcpv6_only", "yaml_v2"),
+            ("dhcpv6_accept_ra", "yaml_v1"),
+            ("dhcpv6_accept_ra", "yaml_v2"),
+            ("dhcpv6_reject_ra", "yaml_v1"),
+            ("dhcpv6_reject_ra", "yaml_v2"),
+            ("static6", "yaml_v1"),
+            ("static6", "yaml_v2"),
+            ("dhcpv6_stateless", "yaml"),
+            ("dhcpv6_stateful", "yaml"),
+            ("wakeonlan_disabled", "yaml_v2"),
+            ("wakeonlan_enabled", "yaml_v2"),
+            pytest.param(
+                "v1-dns",
+                "yaml",
+                marks=pytest.mark.xfail(
+                    reason="sysconfig should render interface-level DNS"
+                ),
+            ),
+            ("v2-dns", "yaml"),
+            pytest.param(
+                "large_v2",
+                "yaml",
+                marks=pytest.mark.xfail(
+                    reason="Bond and Bridge MAC address not rendered"
+                ),
+            ),
+        ],
+    )
+    def test_config(self, expected_name, yaml_version):
+        entry = NETWORK_CONFIGS[expected_name]
         found = self._render_and_read(
-            network_config=yaml.load(entry["yaml_v1"])
+            network_config=yaml.safe_load(entry[yaml_version])
         )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

-    def test_dhcpv6_accept_ra_config_v2(self):
-        entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+    def test_large_v1_config(self, caplog):
+        entry = NETWORK_CONFIGS["large_v1"]
         found = self._render_and_read(
-            network_config=yaml.load(entry["yaml_v2"])
+            network_config=yaml.safe_load(entry["yaml"])
         )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)
+        assert (
+            "Network config: ignoring eth0.101 device-level mtu"
+            not in caplog.text
+        )

-    def test_dhcpv6_reject_ra_config_v1(self):
-        entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+    @pytest.mark.parametrize(
+        "yaml_file,network_config",
+        [
+            ("yaml_v1", "v1_ipv4_and_ipv6_static"),
+            ("yaml_v2", "v2_ipv4_and_ipv6_static"),
+        ],
+    )
+    def test_ipv4_and_ipv6_static_config(
+        self, yaml_file, network_config, caplog
+    ):
+        entry = NETWORK_CONFIGS[network_config]
         found = self._render_and_read(
-            network_config=yaml.load(entry["yaml_v1"])
+            network_config=yaml.safe_load(entry[yaml_file])
         )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)
+        expected_msg = (
+            "Network config: ignoring iface0 device-level mtu:8999"
+            " because ipv4 subnet-level mtu:9000 provided."
+        )
+        if yaml_file == "yaml_v1":
+            assert expected_msg in caplog.text

     def test_stattic6_from_json(self):
         net_json = {
@@ -5634,70 +2197,28 @@ def test_stattic6_from_json(self):
             ],
             "links": [
                 {
-                    "ethernet_mac_address": "fa:16:3e:ed:9a:59",
-                    "mtu": None,
-                    "type": "bridge",
-                    "id": "tap1a81968a-79",
-                    "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
-                },
-            ],
-        }
-        macs = {"fa:16:3e:ed:9a:59": "eth0"}
-        render_dir = self.tmp_dir()
-        network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
-        ns = network_state.parse_net_config_data(
-            network_cfg, skip_broken=False
-        )
-        renderer = self._get_renderer()
-        with self.assertRaises(ValueError):
-            renderer.render_network_state(ns, target=render_dir)
-        self.assertEqual([], os.listdir(render_dir))
-
-    def test_static6_from_yaml(self):
-        entry = NETWORK_CONFIGS["static6"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_dhcpv6_reject_ra_config_v2(self):
-        entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
-        found = self._render_and_read(
-            network_config=yaml.load(entry["yaml_v2"])
-        )
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_dhcpv6_stateless_config(self):
-        entry = NETWORK_CONFIGS["dhcpv6_stateless"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_dhcpv6_stateful_config(self):
-        entry = NETWORK_CONFIGS["dhcpv6_stateful"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_wakeonlan_disabled_config_v2(self):
-        entry = NETWORK_CONFIGS["wakeonlan_disabled"]
-        found = self._render_and_read(
-            network_config=yaml.load(entry["yaml_v2"])
-        )
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_wakeonlan_enabled_config_v2(self):
-        entry = NETWORK_CONFIGS["wakeonlan_enabled"]
-        found = self._render_and_read(
-            network_config=yaml.load(entry["yaml_v2"]
+                    "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+                    "mtu": None,
+                    "type": "bridge",
+                    "id": "tap1a81968a-79",
+                    "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
+                },
+            ],
+        }
+        macs = {"fa:16:3e:ed:9a:59": "eth0"}
+        render_dir = self.tmp_dir()
+        network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+        ns = network_state.parse_net_config_data(
+            network_cfg, skip_broken=False
         )
+        renderer = self._get_renderer()
+        with pytest.raises(ValueError):
+            renderer.render_network_state(ns, target=render_dir)
+        assert [] == os.listdir(render_dir)

     def test_netplan_dhcp_false_disable_dhcp_in_state(self):
         """netplan config with dhcp[46]: False should not add dhcp in state"""
-        net_config = yaml.load(NETPLAN_DHCP_FALSE)
+        net_config = yaml.safe_load(NETPLAN_DHCP_FALSE)
         ns = network_state.parse_net_config_data(net_config, skip_broken=False)

         dhcp_found = [
@@ -5707,21 +2228,7 @@ def test_netplan_dhcp_false_disable_dhcp_in_state(self):
             if "dhcp" in snet["type"]
         ]

-        self.assertEqual([], dhcp_found)
-
-    @pytest.mark.xfail(reason="sysconfig should render interface-level DNS")
-    def test_v1_dns(self):
-        entry = NETWORK_CONFIGS["v1-dns"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-        # TODO: verify resolv.conf
-
-    def test_v2_dns(self):
-        entry = NETWORK_CONFIGS["v2-dns"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
+        assert [] == dhcp_found

     def test_netplan_dhcp_false_no_dhcp_in_sysconfig(self):
         """netplan cfg with dhcp[46]: False should not have bootproto=dhcp"""
@@ -5746,7 +2253,6 @@ def test_netplan_dhcp_false_no_dhcp_in_sysconfig(self):
                 IPV6_FORCE_ACCEPT_RA=no
                 IPV6_DEFAULTGW=2001:db8::1
                 NETMASK=255.255.255.0
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 TYPE=Ethernet
                 USERCTL=no
@@ -5755,7 +2261,9 @@ def test_netplan_dhcp_false_no_dhcp_in_sysconfig(self):
             },
         }

-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+        found = self._render_and_read(
+            network_config=yaml.safe_load(entry["yaml"])
+        )
         self._compare_files_to_expected(entry["expected_sysconfig"], found)
         self._assert_headers(found)

@@ -5778,7 +2286,6 @@ def test_from_v2_vlan_mtu(self):
                 """\
                 BOOTPROTO=none
                 DEVICE=eno1
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 TYPE=Ethernet
                 USERCTL=no
@@ -5791,7 +2298,6 @@ def test_from_v2_vlan_mtu(self):
                 IPADDR=192.6.1.9
                 MTU=1495
                 NETMASK=255.255.255.0
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 PHYSDEV=eno1
                 USERCTL=no
@@ -5827,7 +2333,6 @@ def test_from_v2_bond_mtu(self):
                 IPADDR=10.101.8.65
                 MTU=1334
                 NETMASK=255.255.255.192
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 TYPE=Bond
                 USERCTL=no
@@ -5839,7 +2344,6 @@ def test_from_v2_bond_mtu(self):
                 BOOTPROTO=none
                 DEVICE=enp0s0
                 MASTER=bond0
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 SLAVE=yes
                 TYPE=Bond
@@ -5852,7 +2356,6 @@ def test_from_v2_bond_mtu(self):
                 BOOTPROTO=none
                 DEVICE=enp0s1
                 MASTER=bond0
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 SLAVE=yes
                 TYPE=Bond
@@ -5883,7 +2386,6 @@ def test_from_v2_route_metric(self):
                 DEVICE=eno1
                 HWADDR=07-1c-c6-75-a4-be
                 METRIC=100
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 TYPE=Ethernet
                 USERCTL=no
@@ -5974,7 +2476,6 @@ def test_from_v2_routes(self):
                 IPV6_FORCE_ACCEPT_RA=no
                 MTU=1400
                 NETMASK=255.255.248.0
-                NM_CONTROLLED=no
                 ONBOOT=yes
                 TYPE=Ethernet
                 USERCTL=no
@@ -6036,9 +2537,10 @@ def test_iface_name_from_device_with_matching_mac_address(
             mock_sys_dev_path,
             dev_attrs=devices,
         )
-
         entry = NETWORK_CONFIGS["v2-dev-name-via-mac-lookup"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+        found = self._render_and_read(
+            network_config=yaml.safe_load(entry["yaml"])
+        )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

@@ -6047,14 +2549,17 @@ def test_iface_name_from_device_with_matching_mac_address(
     "cloudinit.net.is_openvswitch_internal_interface",
     mock.Mock(return_value=False),
 )
-class TestOpenSuseSysConfigRendering(CiTestCase):
-    with_logs = True
+class TestOpenSuseSysConfigRendering:

     scripts_dir = "/etc/sysconfig/network"
     header = "# Created by cloud-init automatically, do not edit.\n#\n"

     expected_name = "expected_sysconfig_opensuse"

+    @pytest.fixture(autouse=True)
+    def setup(self, tmpdir_factory):
+        self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True))
+
     def _get_renderer(self):
         distro_cls = distros.fetch("opensuse")
         return sysconfig.Renderer(
@@ -6077,7 +2582,6 @@ def _render_and_read(self, network_config=None, state=None, dir=None):
         return dir2dict(dir)

     def _compare_files_to_expected(self, expected, found):
-        orig_maxdiff = self.maxDiff
         expected_d = dict(
             (os.path.join(self.scripts_dir, k), util.load_shell_content(v))
             for k, v in expected.items()
@@ -6089,11 +2593,7 @@ def _compare_files_to_expected(self, expected, found):
             for k, v in found.items()
             if k.startswith(self.scripts_dir)
         )
-        try:
-            self.maxDiff = None
-            self.assertEqual(expected_d, scripts_found)
-        finally:
-            self.maxDiff = orig_maxdiff
+        assert expected_d == scripts_found

     def _assert_headers(self, found):
         missing = [
@@ -6142,87 +2642,13 @@ def test_default_generation(
 #
 BOOTPROTO=dhcp
 DHCLIENT6_MODE=managed
+LLADDR=07-1c-c6-75-a4-be
 STARTMODE=auto
 """.lstrip()
-        self.assertEqual(expected_content, content)
-
-    # TODO(rjschwei): re-enable test once route writing is implemented
-    # for SUSE distros
-    # def test_multiple_ipv4_default_gateways(self):
-    #     """ValueError is raised when duplicate ipv4 gateways exist."""
-    #     net_json = {
-    #         "services": [{"type": "dns", "address": "172.19.0.12"}],
-    #         "networks": [{
-    #             "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
-    #             "type": "ipv4", "netmask": "255.255.252.0",
-    #             "link": "tap1a81968a-79",
-    #             "routes": [{
-    #                 "netmask": "0.0.0.0",
-    #                 "network": "0.0.0.0",
-    #                 "gateway": "172.19.3.254",
-    #             }, {
-    #                 "netmask": "0.0.0.0",  # A second default gateway
-    #                 "network": "0.0.0.0",
-    #                 "gateway": "172.20.3.254",
-    #             }],
-    #             "ip_address": "172.19.1.34", "id": "network0"
-    #         }],
-    #         "links": [
-    #             {
-    #                 "ethernet_mac_address": "fa:16:3e:ed:9a:59",
-    #                 "mtu": None, "type": "bridge", "id":
-    #                 "tap1a81968a-79",
-    #                 "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
-    #             },
-    #         ],
-    #     }
-    #     macs = {'fa:16:3e:ed:9a:59': 'eth0'}
-    #     render_dir = self.tmp_dir()
-    #     network_cfg = openstack.convert_net_json(net_json, known_macs=macs)  # noqa: E501
-    #     ns = network_state.parse_net_config_data(network_cfg,
-    #                                              skip_broken=False)
-    #     renderer = self._get_renderer()
-    #     with self.assertRaises(ValueError):
-    #         renderer.render_network_state(ns, target=render_dir)
-    #     self.assertEqual([], os.listdir(render_dir))
-    #
-    # def test_multiple_ipv6_default_gateways(self):
-    #     """ValueError is raised when duplicate ipv6 gateways exist."""
-    #     net_json = {
-    #         "services": [{"type": "dns", "address": "172.19.0.12"}],
-    #         "networks": [{
-    #             "network_id": "public-ipv6",
-    #             "type": "ipv6", "netmask": "",
-    #             "link": "tap1a81968a-79",
-    #             "routes": [{
-    #                 "gateway": "2001:DB8::1",
-    #                 "netmask": "::",
-    #                 "network": "::"
-    #             }, {
-    #                 "gateway": "2001:DB9::1",
-    #                 "netmask": "::",
-    #                 "network": "::"
-    #             }],
-    #             "ip_address": "2001:DB8::10", "id": "network1"
-    #         }],
-    #         "links": [
-    #             {
-    #                 "ethernet_mac_address": "fa:16:3e:ed:9a:59",
-    #                 "mtu": None, "type": "bridge", "id":
-    #                 "tap1a81968a-79",
-    #                 "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
-    #             },
-    #         ],
-    #     }
-    #     macs = {'fa:16:3e:ed:9a:59': 'eth0'}
-    #     render_dir = self.tmp_dir()
-    #     network_cfg = openstack.convert_net_json(net_json, known_macs=macs)  # noqa: E501
-    #     ns = network_state.parse_net_config_data(network_cfg,
-    #                                              skip_broken=False)
-    #     renderer = self._get_renderer()
-    #     with self.assertRaises(ValueError):
-    #         renderer.render_network_state(ns, target=render_dir)
-    #     self.assertEqual([], os.listdir(render_dir))
+        assert expected_content == content
+
+    # TODO(rjschwei): re-add tests once route writing is implemented.
+    # See git history for removed commented tests

     def test_openstack_rendering_samples(self):
         for os_sample in OS_SAMPLES:
@@ -6244,17 +2670,17 @@ def test_openstack_rendering_samples(self):
                 "out_sysconfig_opensuse", []
             ):
                 with open(os.path.join(render_dir, fn)) as fh:
-                    self.assertEqual(expected_content, fh.read())
+                    assert expected_content == fh.read()

     def test_network_config_v1_samples(self):
         ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET)
-        render_dir = self.tmp_path("render")
+        render_dir = os.path.join(self.tmp_dir(), "render")
         os.makedirs(render_dir)
         renderer = self._get_renderer()
         renderer.render_network_state(ns, target=render_dir)
         found = dir2dict(render_dir)
         nspath = "/etc/sysconfig/network/"
-        self.assertNotIn(nspath + "ifcfg-lo", found.keys())
+        assert nspath + "ifcfg-lo" not in found.keys()
         expected = """\
 # Created by cloud-init automatically, do not edit.
 #
@@ -6264,15 +2690,15 @@ def test_network_config_v1_samples(self):
 NETMASK=255.255.255.0
 STARTMODE=auto
 """
-        self.assertEqual(expected, found[nspath + "ifcfg-interface0"])
+        assert expected == found[nspath + "ifcfg-interface0"]
         # The configuration has no nameserver information make sure we
         # do not write the resolv.conf file
         respath = "/etc/resolv.conf"
-        self.assertNotIn(respath, found.keys())
+        assert respath not in found.keys()

     def test_config_with_explicit_loopback(self):
         ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
-        render_dir = self.tmp_path("render")
+        render_dir = os.path.join(self.tmp_dir(), "render")
         os.makedirs(render_dir)
         # write an etc/resolv.conf and expect it to not be modified
         resolvconf = os.path.join(render_dir, "etc/resolv.conf")
@@ -6282,157 +2708,89 @@ def test_config_with_explicit_loopback(self):
         renderer.render_network_state(ns, target=render_dir)
         found = dir2dict(render_dir)
         nspath = "/etc/sysconfig/network/"
-        self.assertNotIn(nspath + "ifcfg-lo", found.keys())
+        assert nspath + "ifcfg-lo" not in found.keys()
         expected = """\
 # Created by cloud-init automatically, do not edit.
 #
 BOOTPROTO=dhcp4
 STARTMODE=auto
 """
-        self.assertEqual(expected, found[nspath + "ifcfg-eth0"])
+        assert expected == found[nspath + "ifcfg-eth0"]
         # a dhcp only config should not modify resolv.conf
-        self.assertEqual(resolvconf_content, found["/etc/resolv.conf"])
-
-    def test_bond_config(self):
-        expected_name = "expected_sysconfig_opensuse"
-        entry = NETWORK_CONFIGS["bond"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        for fname, contents in entry[expected_name].items():
-            print(fname)
-            print(contents)
-            print()
-        print("-- expected ^ | v rendered --")
-        for fname, contents in found.items():
-            print(fname)
-            print(contents)
-            print()
-        self._compare_files_to_expected(entry[expected_name], found)
-        self._assert_headers(found)
-
-    def test_vlan_config(self):
-        entry = NETWORK_CONFIGS["vlan"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_bridge_config(self):
-        entry = NETWORK_CONFIGS["bridge"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_manual_config(self):
-        entry = NETWORK_CONFIGS["manual"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_all_config(self):
-        entry = NETWORK_CONFIGS["all"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-        self.assertNotIn(
-            "WARNING: Network config: ignoring eth0.101 device-level mtu",
-            self.logs.getvalue(),
-        )
-
-    def test_small_config_v1(self):
-        entry = NETWORK_CONFIGS["small_v1"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_small_config_v1_suse(self):
-        entry = NETWORK_CONFIGS["small_v1_suse_dhcp6"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_small_config_v2(self):
-        entry = NETWORK_CONFIGS["small_v1"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_v4_and_v6_static_config(self):
-        entry = NETWORK_CONFIGS["v4_and_v6_static"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-        expected_msg = (
-            "WARNING: Network config: ignoring iface0 device-level mtu:8999"
-            " because ipv4 subnet-level mtu:9000 provided."
-        )
-        self.assertIn(expected_msg, self.logs.getvalue())
-
-    def test_dhcpv6_only_config(self):
-        entry = NETWORK_CONFIGS["dhcpv6_only"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_simple_render_ipv6_slaac(self):
-        entry = NETWORK_CONFIGS["ipv6_slaac"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_dhcpv6_stateless_config(self):
-        entry = NETWORK_CONFIGS["dhcpv6_stateless"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
+        assert resolvconf_content == found["/etc/resolv.conf"]

-    def test_wakeonlan_disabled_config_v2(self):
-        entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+    @pytest.mark.parametrize(
+        "expected_name,yaml_name",
+        [
+            ("bond_v1", "yaml"),
+            pytest.param(
+                "bond_v2",
+                "yaml",
+                marks=pytest.mark.xfail(
+                    reason="Bond MAC address not rendered"
+                ),
+            ),
+            ("vlan_v1", "yaml"),
+            ("vlan_v2", "yaml"),
+            ("bridge", "yaml_v1"),
+            ("bridge", "yaml_v2"),
+            ("manual", "yaml"),
+            ("small_v1", "yaml"),
+            ("small_suse_dhcp6", "yaml_v1"),
+            ("small_suse_dhcp6", "yaml_v2"),
+            ("small_v2", "yaml"),
+            ("v1_ipv4_and_ipv6_static", "yaml_v1"),
+            ("v2_ipv4_and_ipv6_static", "yaml_v2"),
+            ("dhcpv6_only", "yaml_v1"),
+            ("dhcpv6_only", "yaml_v2"),
+            ("ipv6_slaac", "yaml"),
+            ("dhcpv6_stateless", "yaml"),
+            ("wakeonlan_disabled", "yaml_v2"),
+            ("wakeonlan_enabled", "yaml_v2"),
+            ("v4_and_v6", "yaml_v1"),
+            ("v4_and_v6", "yaml_v2"),
+            ("v6_and_v4", "yaml"),
+            ("v1-dns", "yaml"),
+            ("v2-dns", "yaml"),
+            pytest.param(
+                "large_v2",
+                "yaml",
+                marks=pytest.mark.xfail(
+                    reason="Bond and Bridge LLADDR not rendered"
+                ),
+            ),
+        ],
+    )
+    def test_config(
+        self,
+        expected_name,
+        yaml_name,
+    ):
+        entry = NETWORK_CONFIGS[expected_name]
         found = self._render_and_read(
-            network_config=yaml.load(entry["yaml_v2"])
+            network_config=yaml.safe_load(entry[yaml_name])
         )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

-    def test_wakeonlan_enabled_config_v2(self):
+    def test_large_v2_config(self, caplog):
+        entry = NETWORK_CONFIGS["large_v1"]
         found = self._render_and_read(
-            network_config=yaml.load(entry["yaml_v2"])
+            network_config=yaml.safe_load(entry["yaml"])
        )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)
-
-    def test_render_v4_and_v6(self):
-        entry = NETWORK_CONFIGS["v4_and_v6"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_render_v6_and_v4(self):
-        entry = NETWORK_CONFIGS["v6_and_v4"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-
-    def test_v1_dns(self):
-        entry = NETWORK_CONFIGS["v1-dns"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
-        # TODO: verify resolv.conf
-
-    def test_v2_dns(self):
-        entry = NETWORK_CONFIGS["v2-dns"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(entry[self.expected_name], found)
-        self._assert_headers(found)
+        assert (
+            "Network config: ignoring eth0.101 device-level mtu"
+            not in caplog.text
+        )


 @mock.patch(
     "cloudinit.net.is_openvswitch_internal_interface",
     mock.Mock(return_value=False),
 )
-class TestNetworkManagerRendering(CiTestCase):
-    with_logs = True
-
+class TestNetworkManagerRendering:
     scripts_dir = "/etc/NetworkManager/system-connections"
     conf_dir = "/etc/NetworkManager/conf.d"

@@ -6453,6 +2811,10 @@ class TestNetworkManagerRendering(CiTestCase):
         ),
     }

+    @pytest.fixture(autouse=True)
+    def setup(self, tmpdir):
+        self.tmp_dir = lambda: str(tmpdir)
+
     def _get_renderer(self):
         return network_manager.Renderer()

@@ -6474,7 +2836,6 @@ def _render_and_read(self, network_config=None, state=None, dir=None):
     def _compare_files_to_expected(
         self, expected_scripts, expected_conf, found
     ):
-        orig_maxdiff = self.maxDiff
         conf_d = dict(
             (os.path.join(self.conf_dir, k), v)
             for k, v in expected_conf.items()
@@ -6485,11 +2846,7 @@ def _compare_files_to_expected(
         )
         expected_d = {**conf_d, **scripts_d}
-        try:
-            self.maxDiff = None
-            self.assertEqual(expected_d, found)
-        finally:
-            self.maxDiff = orig_maxdiff
+        assert expected_d == found

     @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
     @mock.patch("cloudinit.net.sys_dev_path")
@@ -6530,12 +2887,12 @@ def test_default_generation(
                 uuid=8c517500-0c95-5308-9c8a-3092eebc44eb
                 autoconnect-priority=120
                 type=ethernet
-                interface-name=eth1000

                 [user]
                 org.freedesktop.NetworkManager.origin=cloud-init

                 [ethernet]
+                mac-address=07:1C:C6:75:A4:BE

                 [ipv4]
                 method=auto
@@ -6570,11 +2927,11 @@ def test_openstack_rendering_samples(self):
             renderer.render_network_state(ns, target=render_dir)
             for fn, expected_content in os_sample.get(self.expected_name, []):
                 with open(os.path.join(render_dir, fn)) as fh:
-                    self.assertEqual(expected_content, fh.read())
+                    assert expected_content == fh.read()

     def test_network_config_v1_samples(self):
         ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET)
-        render_dir = self.tmp_path("render")
+        render_dir = os.path.join(self.tmp_dir(), "render")
         os.makedirs(render_dir)
         renderer = self._get_renderer()
         renderer.render_network_state(ns, target=render_dir)
@@ -6611,7 +2968,7 @@ def test_network_config_v1_samples(self):
         )

     def test_config_with_explicit_loopback(self):
-        render_dir = self.tmp_path("render")
+        render_dir = os.path.join(self.tmp_dir(), "render")
         os.makedirs(render_dir)
         ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
         renderer = self._get_renderer()
@@ -6646,164 +3003,94 @@ def test_config_with_explicit_loopback(self):
             found,
         )

-    def test_bond_config(self):
-        entry = NETWORK_CONFIGS["bond"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(
-            entry[self.expected_name], self.expected_conf_d, found
-        )
-
-    def test_vlan_config(self):
-        entry = NETWORK_CONFIGS["vlan"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(
-            entry[self.expected_name], self.expected_conf_d, found
-        )
-
-    def test_bridge_config(self):
-        entry = NETWORK_CONFIGS["bridge"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(
-            entry[self.expected_name], self.expected_conf_d, found
-        )
-
-    def test_manual_config(self):
-        entry = NETWORK_CONFIGS["manual"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(
-            entry[self.expected_name], self.expected_conf_d, found
-        )
-
-    def test_all_config(self):
-        entry = NETWORK_CONFIGS["all"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(
-            entry[self.expected_name], self.expected_conf_d, found
-        )
-        self.assertNotIn(
-            "WARNING: Network config: ignoring eth0.101 device-level mtu",
-            self.logs.getvalue(),
-        )
-
-    def test_small_config_v1(self):
-        entry = NETWORK_CONFIGS["small_v1"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(
-            entry[self.expected_name], self.expected_conf_d, found
-        )
-
-    def test_small_config_v2(self):
-        entry = NETWORK_CONFIGS["small_v2"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(
-            entry[self.expected_name], self.expected_conf_d, found
+    @pytest.mark.parametrize(
+        "yaml_file,config",
+        [
+            ("yaml_v1", "v1_ipv4_and_ipv6_static"),
+            ("yaml_v2", "v2_ipv4_and_ipv6_static"),
+        ],
+    )
+    def test_ipv4_and_ipv6_static_config(self, yaml_file, config, caplog):
+        entry = NETWORK_CONFIGS[config]
+        found = self._render_and_read(
+            network_config=yaml.safe_load(entry[yaml_file])
         )
-
-    def test_v4_and_v6_static_config(self):
-        entry = NETWORK_CONFIGS["v4_and_v6_static"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(
             entry[self.expected_name], self.expected_conf_d, found
         )
         expected_msg = (
-            "WARNING: Network config: ignoring iface0 device-level mtu:8999"
+            "Network config: ignoring iface0 device-level mtu:8999"
             " because ipv4 subnet-level mtu:9000 provided."
         )
-        self.assertIn(expected_msg, self.logs.getvalue())
-
-    def test_dhcpv6_only_config(self):
-        entry = NETWORK_CONFIGS["dhcpv6_only"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(
-            entry[self.expected_name], self.expected_conf_d, found
-        )
-
-    def test_simple_render_ipv6_slaac(self):
-        entry = NETWORK_CONFIGS["ipv6_slaac"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(
-            entry[self.expected_name], self.expected_conf_d, found
-        )
-
-    def test_dhcpv6_stateless_config(self):
-        entry = NETWORK_CONFIGS["dhcpv6_stateless"]
-        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
-        self._compare_files_to_expected(
-            entry[self.expected_name], self.expected_conf_d, found
-        )
+        if yaml_file == "yaml_v1":
+            assert expected_msg in caplog.text

-    def test_wakeonlan_disabled_config_v2(self):
-        entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+    @pytest.mark.parametrize(
+        "expected_name,yaml_name",
+        [
+            ("bond_v1", "yaml"),
+            pytest.param(
+                "bond_v2",
+                "yaml",
+                marks=pytest.mark.xfail(
+                    reason="mii-monitor-interval not rendered."
+ ), + ), + ("vlan_v1", "yaml"), + ("vlan_v2", "yaml"), + ("bridge", "yaml_v1"), + ("bridge", "yaml_v2"), + ("manual", "yaml"), + ("small_v1", "yaml"), + ("small_v2", "yaml"), + ("dhcpv6_only", "yaml_v1"), + ("dhcpv6_only", "yaml_v2"), + ("ipv6_slaac", "yaml"), + ("dhcpv6_stateless", "yaml"), + ("wakeonlan_disabled", "yaml_v2"), + ("wakeonlan_enabled", "yaml_v2"), + ("v4_and_v6", "yaml_v1"), + ("v4_and_v6", "yaml_v2"), + ("v6_and_v4", "yaml"), + ("v1-dns", "yaml"), + ("v2-mixed-routes", "yaml"), + ("v2-dns", "yaml"), + ("v2-dns-no-if-ips", "yaml"), + ("v2-dns-no-dhcp", "yaml"), + ("v2-route-no-gateway", "yaml"), + pytest.param( + "large_v2", + "yaml", + marks=pytest.mark.xfail( + reason=( + "Bridge MAC and bond miimon not rendered. " + "Bond DNS not rendered. " + "DNS not rendered when DHCP is enabled." + ), + ), + ), + ], + ) + def test_config(self, expected_name, yaml_name): + entry = NETWORK_CONFIGS[expected_name] found = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) + network_config=yaml.safe_load(entry[yaml_name]) ) self._compare_files_to_expected( entry[self.expected_name], self.expected_conf_d, found ) - def test_wakeonlan_enabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_enabled"] + def test_large_v1_config(self, caplog): + entry = NETWORK_CONFIGS["large_v1"] found = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) + network_config=yaml.safe_load(entry["yaml"]) ) self._compare_files_to_expected( entry[self.expected_name], self.expected_conf_d, found ) - - def test_render_v4_and_v6(self): - entry = NETWORK_CONFIGS["v4_and_v6"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_render_v6_and_v4(self): - entry = NETWORK_CONFIGS["v6_and_v4"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v1_dns(self): - entry = NETWORK_CONFIGS["v1-dns"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v2_mixed_routes(self): - entry = NETWORK_CONFIGS["v2-mixed-routes"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v2_dns(self): - entry = NETWORK_CONFIGS["v2-dns"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v2_dns_no_ips(self): - entry = NETWORK_CONFIGS["v2-dns-no-if-ips"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v2_dns_no_dhcp(self): - entry = NETWORK_CONFIGS["v2-dns-no-dhcp"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found - ) - - def test_v2_route_no_gateway(self): - entry = NETWORK_CONFIGS["v2-route-no-gateway"] - found = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self._compare_files_to_expected( - entry[self.expected_name], self.expected_conf_d, found + assert ( + "Network config: ignoring eth0.101 device-level 
mtu" + not in caplog.text ) @@ -6811,7 +3098,11 @@ def test_v2_route_no_gateway(self): "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestEniNetRendering(CiTestCase): +class TestEniNetRendering: + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @@ -6841,7 +3132,7 @@ def test_default_generation( ) renderer.render_network_state(ns, target=render_dir) - self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces"))) + assert os.path.exists(os.path.join(render_dir, "interfaces")) with open(os.path.join(render_dir, "interfaces")) as fh: contents = fh.read() @@ -6855,7 +3146,7 @@ def test_default_generation( # control-alias eth1000 iface eth1000 inet6 dhcp """ - self.assertEqual(expected.lstrip(), contents.lstrip()) + assert expected.lstrip() == contents.lstrip() def test_config_with_explicit_loopback(self): tmp_dir = self.tmp_dir() @@ -6869,9 +3160,7 @@ def test_config_with_explicit_loopback(self): auto eth0 iface eth0 inet dhcp """ - self.assertEqual( - expected, dir2dict(tmp_dir)["/etc/network/interfaces"] - ) + assert expected == dir2dict(tmp_dir)["/etc/network/interfaces"] def test_v2_route_metric_to_eni(self): """Network v2 route-metric overrides are preserved in eni output""" @@ -6896,9 +3185,9 @@ def test_v2_route_metric_to_eni(self): v2_input = {"version": 2, "ethernets": {"eth0": dhcp_cfg}} ns = network_state.parse_net_config_data(v2_input) renderer.render_network_state(ns, target=tmp_dir) - self.assertEqual( - expected_tmpl.format(suffix=suffix), - dir2dict(tmp_dir)["/etc/network/interfaces"], + assert ( + expected_tmpl.format(suffix=suffix) + == dir2dict(tmp_dir)["/etc/network/interfaces"] ) @@ -6915,7 +3204,7 @@ class TestNetplanNetRendering: dhcp4: true dhcp6: true match: - name: eth1000 + macaddress: 07-1c-c6-75-a4-be set-name: eth1000 version: 2 """, @@ -7264,7 +3553,7 @@ def test_render( if network_cfg is None: network_cfg = net.generate_fallback_config() else: - network_cfg = yaml.load(network_cfg) + network_cfg = yaml.safe_load(network_cfg) assert isinstance(network_cfg, dict) ns = network_state.parse_net_config_data( @@ -7285,11 +3574,11 @@ def test_render( contents = fh.read() print(contents) - assert yaml.load(expected) == yaml.load(contents) + assert yaml.safe_load(expected) == yaml.safe_load(contents) assert 1, mock_clean_default.call_count -class TestNetplanCleanDefault(CiTestCase): +class TestNetplanCleanDefault: snapd_known_path = "etc/netplan/00-snapd-config.yaml" snapd_known_content = textwrap.dedent( """\ @@ -7314,6 +3603,10 @@ class TestNetplanCleanDefault(CiTestCase): "run/systemd/generator/netplan.stamp": "stamp", } + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + def test_clean_known_config_cleaned(self): content = { self.snapd_known_path: self.snapd_known_content, @@ -7323,7 +3616,7 @@ def test_clean_known_config_cleaned(self): files = sorted(populate_dir(tmpd, content)) netplan._clean_default(target=tmpd) found = [t for t in files if os.path.exists(t)] - self.assertEqual([], found) + assert [] == found def test_clean_unknown_config_not_cleaned(self): content = { @@ -7335,7 +3628,7 @@ def test_clean_unknown_config_not_cleaned(self): files = sorted(populate_dir(tmpd, content)) 
netplan._clean_default(target=tmpd) found = [t for t in files if os.path.exists(t)] - self.assertEqual(files, found) + assert files == found def test_clean_known_config_cleans_only_expected(self): astamp = "run/systemd/generator/another.stamp" @@ -7354,10 +3647,10 @@ def test_clean_known_config_cleans_only_expected(self): netplan._clean_default(target=tmpd) found = [t for t in files if os.path.exists(t)] expected = [subp.target_path(tmpd, f) for f in (astamp, anet, ayaml)] - self.assertEqual(sorted(expected), found) + assert sorted(expected) == found -class TestNetplanPostcommands(CiTestCase): +class TestNetplanPostcommands: mycfg = { "config": [ { @@ -7370,6 +3663,10 @@ class TestNetplanPostcommands(CiTestCase): "version": 1, } + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + @mock.patch.object(netplan.Renderer, "_netplan_generate") @mock.patch.object(netplan.Renderer, "_net_setup_link") @mock.patch("cloudinit.subp.subp") @@ -7389,13 +3686,16 @@ def test_netplan_render_calls_postcmds( mock_subp.side_effect = iter([subp.ProcessExecutionError]) renderer.render_network_state(ns, target=render_dir) - mock_netplan_generate.assert_called_with(run=True, same_content=False) + mock_netplan_generate.assert_called_with(run=True, config_changed=True) mock_net_setup_link.assert_called_with(run=True) + @mock.patch("cloudinit.util.get_cmdline") @mock.patch("cloudinit.util.SeLinuxGuard") @mock.patch.object(netplan, "get_devicelist") @mock.patch("cloudinit.subp.subp") - def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel): + def test_netplan_postcmds( + self, mock_subp, mock_devlist, mock_sel, m_get_cmdline + ): mock_sel.__enter__ = mock.Mock(return_value=False) mock_sel.__exit__ = mock.Mock() mock_devlist.side_effect = [["lo"]] @@ -7411,13 +3711,11 @@ def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel): ) mock_subp.side_effect = iter( [ - subp.ProcessExecutionError, ("", ""), ("", ""), ] ) expected = [ - mock.call(["netplan", "info"], capture=True), mock.call(["netplan", "generate"], capture=True), mock.call( [ @@ -7434,7 +3732,7 @@ def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel): mock_subp.assert_has_calls(expected) -class TestEniNetworkStateToEni(CiTestCase): +class TestEniNetworkStateToEni: mycfg = { "config": [ { @@ -7453,8 +3751,8 @@ def test_no_header(self): network_state=network_state.parse_net_config_data(self.mycfg), render_hwaddress=True, ) - self.assertIn(self.my_mac, rendered) - self.assertIn("hwaddress", rendered) + assert self.my_mac in rendered + assert "hwaddress" in rendered def test_with_header(self): header = "# hello world\n" @@ -7463,21 +3761,19 @@ def test_with_header(self): header=header, render_hwaddress=True, ) - self.assertIn(header, rendered) - self.assertIn(self.my_mac, rendered) + assert header in rendered + assert self.my_mac in rendered def test_no_hwaddress(self): rendered = eni.network_state_to_eni( network_state=network_state.parse_net_config_data(self.mycfg), render_hwaddress=False, ) - self.assertNotIn(self.my_mac, rendered) - self.assertNotIn("hwaddress", rendered) - + assert self.my_mac not in rendered + assert "hwaddress" not in rendered -class TestCmdlineConfigParsing(CiTestCase): - with_logs = True +class TestCmdlineConfigParsing: simple_cfg = { "config": [ { @@ -7491,15 +3787,19 @@ class TestCmdlineConfigParsing(CiTestCase): def test_cmdline_convert_dhcp(self): found = cmdline._klibc_to_config_entry(DHCP_CONTENT_1) - 
self.assertEqual(found, ("eth0", DHCP_EXPECTED_1)) + assert found == ("eth0", DHCP_EXPECTED_1) def test_cmdline_convert_dhcp6(self): found = cmdline._klibc_to_config_entry(DHCP6_CONTENT_1) - self.assertEqual(found, ("eno1", DHCP6_EXPECTED_1)) + assert found == ("eno1", DHCP6_EXPECTED_1) def test_cmdline_convert_static(self): - found = cmdline._klibc_to_config_entry(STATIC_CONTENT_1) - self.assertEqual(found, ("eth1", STATIC_EXPECTED_1)) + found1 = cmdline._klibc_to_config_entry(STATIC_CONTENT_1) + assert found1 == ("eth1", STATIC_EXPECTED_1) + found2 = cmdline._klibc_to_config_entry(STATIC_CONTENT_2) + assert found2 == ("eth1", STATIC_EXPECTED_1) + found3 = cmdline._klibc_to_config_entry(STATIC_CONTENT_3) + assert found3 == ("eth1", STATIC_EXPECTED_1) def test_config_from_cmdline_net_cfg(self): files = [] @@ -7526,46 +3826,50 @@ def test_config_from_cmdline_net_cfg(self): found = cmdline.config_from_klibc_net_cfg( files=files, mac_addrs=macs ) - self.assertEqual(found, expected) + assert found == expected def test_cmdline_with_b64(self): data = base64.b64encode(json.dumps(self.simple_cfg).encode()) encoded_text = data.decode() raw_cmdline = "ro network-config=" + encoded_text + " root=foo" found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) - self.assertEqual(found, self.simple_cfg) + assert found == self.simple_cfg def test_cmdline_with_net_config_disabled(self): raw_cmdline = "ro network-config=disabled root=foo" found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) - self.assertEqual(found, {"config": "disabled"}) + assert found == {"config": "disabled"} - def test_cmdline_with_net_config_unencoded_logs_error(self): + def test_cmdline_with_net_config_unencoded_logs_error(self, caplog): """network-config cannot be unencoded besides 'disabled'.""" raw_cmdline = "ro network-config={config:disabled} root=foo" found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) - self.assertIsNone(found) + assert found is None expected_log = ( - "ERROR: Expected base64 encoded kernel commandline parameter" + "Expected base64 encoded kernel command line parameter" " network-config. Ignoring network-config={config:disabled}." 
) - self.assertIn(expected_log, self.logs.getvalue()) + assert expected_log in caplog.text def test_cmdline_with_b64_gz(self): data = _gzip_data(json.dumps(self.simple_cfg).encode()) encoded_text = base64.b64encode(data).decode() raw_cmdline = "ro network-config=" + encoded_text + " root=foo" found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) - self.assertEqual(found, self.simple_cfg) + assert found == self.simple_cfg -class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase): +class TestCmdlineKlibcNetworkConfigSource: macs = { "eth0": "14:02:ec:42:48:00", "eno1": "14:02:ec:42:48:01", } - def test_without_ip(self): + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + + def test_without_ip(self, fake_filesystem): content = { "/run/net-eth0.conf": DHCP_CONTENT_1, cmdline._OPEN_ISCSI_INTERFACE_FILE: "eth0\n", @@ -7573,70 +3877,61 @@ def test_without_ip(self): exp1 = copy.deepcopy(DHCP_EXPECTED_1) exp1["mac_address"] = self.macs["eth0"] - root = self.tmp_dir() - populate_dir(root, content) - self.reRoot(root) + populate_dir(fake_filesystem, content) src = cmdline.KlibcNetworkConfigSource( _cmdline="foo root=/root/bar", _mac_addrs=self.macs, ) - self.assertTrue(src.is_applicable()) + assert src.is_applicable() found = src.render_config() - self.assertEqual(found["version"], 1) - self.assertEqual(found["config"], [exp1]) + assert found["version"] == 1 + assert found["config"] == [exp1] - def test_with_ip(self): + def test_with_ip(self, fake_filesystem): content = {"/run/net-eth0.conf": DHCP_CONTENT_1} exp1 = copy.deepcopy(DHCP_EXPECTED_1) exp1["mac_address"] = self.macs["eth0"] - root = self.tmp_dir() - populate_dir(root, content) - self.reRoot(root) + populate_dir(fake_filesystem, content) src = cmdline.KlibcNetworkConfigSource( _cmdline="foo ip=dhcp", _mac_addrs=self.macs, ) - self.assertTrue(src.is_applicable()) + assert src.is_applicable() found = src.render_config() - self.assertEqual(found["version"], 1) - self.assertEqual(found["config"], [exp1]) + assert found["version"] == 1 + assert found["config"] == [exp1] - def test_with_ip6(self): + def test_with_ip6(self, fake_filesystem): content = {"/run/net6-eno1.conf": DHCP6_CONTENT_1} - root = self.tmp_dir() - populate_dir(root, content) - self.reRoot(root) + populate_dir(fake_filesystem, content) src = cmdline.KlibcNetworkConfigSource( _cmdline="foo ip6=dhcp root=/dev/sda", _mac_addrs=self.macs, ) - self.assertTrue(src.is_applicable()) + assert src.is_applicable() found = src.render_config() - self.assertEqual( - found, - { - "version": 1, - "config": [ - { - "type": "physical", - "name": "eno1", - "mac_address": self.macs["eno1"], - "subnets": [ - { - "dns_nameservers": ["2001:67c:1562:8010::2:1"], - "control": "manual", - "type": "dhcp6", - "netmask": "64", - } - ], - } - ], - }, - ) + assert found == { + "version": 1, + "config": [ + { + "type": "physical", + "name": "eno1", + "mac_address": self.macs["eno1"], + "subnets": [ + { + "dns_nameservers": ["2001:67c:1562:8010::2:1"], + "control": "manual", + "type": "dhcp6", + "netmask": "64", + } + ], + } + ], + } def test_with_no_ip_or_ip6(self): # if there is no ip= or ip6= on cmdline, return value should be None @@ -7647,7 +3942,7 @@ def test_with_no_ip_or_ip6(self): _cmdline="foo root=/dev/sda", _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() def test_with_faux_ip(self): content = {"net6-eno1.conf": DHCP6_CONTENT_1} @@ -7657,7 
+3952,7 @@ def test_with_faux_ip(self): _cmdline="foo iscsi_target_ip=root=/dev/sda", _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() def test_empty_cmdline(self): content = {"net6-eno1.conf": DHCP6_CONTENT_1} @@ -7667,7 +3962,7 @@ def test_empty_cmdline(self): _cmdline="", _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() def test_whitespace_cmdline(self): content = {"net6-eno1.conf": DHCP6_CONTENT_1} @@ -7677,7 +3972,7 @@ def test_whitespace_cmdline(self): _cmdline=" ", _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() def test_cmdline_no_lhand(self): content = {"net6-eno1.conf": DHCP6_CONTENT_1} @@ -7687,7 +3982,7 @@ def test_cmdline_no_lhand(self): _cmdline="=wut", _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() def test_cmdline_embedded_ip(self): content = {"net6-eno1.conf": DHCP6_CONTENT_1} @@ -7697,9 +3992,9 @@ def test_cmdline_embedded_ip(self): _cmdline='opt="some things and ip=foo"', _mac_addrs=self.macs, ) - self.assertFalse(src.is_applicable()) + assert not src.is_applicable() - def test_with_both_ip_ip6(self): + def test_with_both_ip_ip6(self, fake_filesystem): content = { "/run/net-eth0.conf": DHCP_CONTENT_1, "/run/net6-eth0.conf": DHCP6_CONTENT_1.replace("eno1", "eth0"), @@ -7716,22 +4011,20 @@ def test_with_both_ip_ip6(self): ) expected = [eth0] - root = self.tmp_dir() - populate_dir(root, content) - self.reRoot(root) + populate_dir(fake_filesystem, content) src = cmdline.KlibcNetworkConfigSource( _cmdline="foo ip=dhcp ip6=dhcp", _mac_addrs=self.macs, ) - self.assertTrue(src.is_applicable()) + assert src.is_applicable() found = src.render_config() - self.assertEqual(found["version"], 1) - self.assertEqual(found["config"], expected) + assert found["version"] == 1 + assert found["config"] == expected -class TestReadInitramfsConfig(CiTestCase): +class TestReadInitramfsConfig: def _config_source_cls_mock(self, is_applicable, render_config=None): return lambda: mock.Mock( is_applicable=lambda: is_applicable, @@ -7740,7 +4033,7 @@ def _config_source_cls_mock(self, is_applicable, render_config=None): def test_no_sources(self): with mock.patch("cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", []): - self.assertIsNone(cmdline.read_initramfs_config()) + assert cmdline.read_initramfs_config() is None def test_no_applicable_sources(self): sources = [ @@ -7751,7 +4044,7 @@ def test_no_applicable_sources(self): with mock.patch( "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources ): - self.assertIsNone(cmdline.read_initramfs_config()) + assert cmdline.read_initramfs_config() is None def test_one_applicable_source(self): expected_config = object() @@ -7764,7 +4057,7 @@ def test_one_applicable_source(self): with mock.patch( "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources ): - self.assertEqual(expected_config, cmdline.read_initramfs_config()) + assert expected_config == cmdline.read_initramfs_config() def test_one_applicable_source_after_inapplicable_sources(self): expected_config = object() @@ -7779,7 +4072,7 @@ def test_one_applicable_source_after_inapplicable_sources(self): with mock.patch( "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources ): - self.assertEqual(expected_config, cmdline.read_initramfs_config()) + assert expected_config == cmdline.read_initramfs_config() def test_first_applicable_source_is_used(self): first_config, second_config = object(), object() @@ 
-7796,10 +4089,10 @@ def test_first_applicable_source_is_used(self): with mock.patch( "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources ): - self.assertEqual(first_config, cmdline.read_initramfs_config()) + assert first_config == cmdline.read_initramfs_config() -class TestNetplanRoundTrip(CiTestCase): +class TestNetplanRoundTrip: NETPLAN_INFO_OUT = textwrap.dedent( """ netplan.io: @@ -7810,10 +4103,13 @@ class TestNetplanRoundTrip(CiTestCase): """ ) - def setUp(self): - super(TestNetplanRoundTrip, self).setUp() - self.add_patch("cloudinit.net.netplan.subp.subp", "m_subp") - self.m_subp.return_value = (self.NETPLAN_INFO_OUT, "") + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory, mocker): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + mocker.patch( + "cloudinit.net.netplan.subp.subp", + return_value=(self.NETPLAN_INFO_OUT, ""), + ) def _render_and_read( self, network_config=None, state=None, netplan_path=None, target=None @@ -7821,169 +4117,58 @@ def _render_and_read( if target is None: target = self.tmp_dir() - if network_config: - ns = network_state.parse_net_config_data(network_config) - elif state: - ns = state - else: - raise ValueError("Expected data or state, got neither") - - if netplan_path is None: - netplan_path = "etc/netplan/50-cloud-init.yaml" - - renderer = netplan.Renderer(config={"netplan_path": netplan_path}) - - renderer.render_network_state(ns, target=target) - return dir2dict(target) - - def testsimple_render_bond_netplan(self): - entry = NETWORK_CONFIGS["bond"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - print(entry["expected_netplan"]) - print("-- expected ^ | v rendered --") - print(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_bond_v2_input_netplan(self): - entry = NETWORK_CONFIGS["bond"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml-v2"]) - ) - print(entry["expected_netplan-v2"]) - print("-- expected ^ | v rendered --") - print(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertEqual( - entry["expected_netplan-v2"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_small_netplan(self): - entry = NETWORK_CONFIGS["small_v1"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_v4_and_v6(self): - entry = NETWORK_CONFIGS["v4_and_v6"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_v4_and_v6_static(self): - entry = NETWORK_CONFIGS["v4_and_v6_static"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_dhcpv6_only(self): - entry = NETWORK_CONFIGS["dhcpv6_only"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_dhcpv6_accept_ra(self): - entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] - files = 
self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) - ) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_dhcpv6_reject_ra(self): - entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) - ) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_ipv6_slaac(self): - entry = NETWORK_CONFIGS["ipv6_slaac"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) + if network_config: + ns = network_state.parse_net_config_data(network_config) + elif state: + ns = state + else: + raise ValueError("Expected data or state, got neither") - def testsimple_render_dhcpv6_stateless(self): - entry = NETWORK_CONFIGS["dhcpv6_stateless"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) + if netplan_path is None: + netplan_path = "etc/netplan/50-cloud-init.yaml" - def testsimple_render_dhcpv6_stateful(self): - entry = NETWORK_CONFIGS["dhcpv6_stateful"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) + renderer = netplan.Renderer(config={"netplan_path": netplan_path}) - def testsimple_wakeonlan_disabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_disabled"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) - ) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) + renderer.render_network_state(ns, target=target) + return dir2dict(target) - def testsimple_wakeonlan_enabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_enabled"] + @pytest.mark.parametrize( + "expected_name,yaml_version", + [ + ("bond_v1", "yaml"), + ("bond_v2", "yaml"), + ("small_v1", "yaml"), + ("v4_and_v6", "yaml_v1"), + ("v4_and_v6", "yaml_v2"), + ("v1_ipv4_and_ipv6_static", "yaml_v1"), + ("v2_ipv4_and_ipv6_static", "yaml_v2"), + ("dhcpv6_only", "yaml_v1"), + ("dhcpv6_only", "yaml_v2"), + ("dhcpv6_accept_ra", "yaml_v1"), + ("dhcpv6_reject_ra", "yaml_v1"), + ("ipv6_slaac", "yaml"), + ("dhcpv6_stateless", "yaml"), + ("dhcpv6_stateful", "yaml"), + ("wakeonlan_disabled", "yaml_v2"), + ("wakeonlan_enabled", "yaml_v2"), + ("large_v1", "yaml"), + ("manual", "yaml"), + pytest.param( + "v1-dns", + "yaml", + marks=pytest.mark.xfail( + reason="netplan should render interface-level nameservers" + ), + ), + ], + ) + def test_config(self, expected_name, yaml_version): + entry = NETWORK_CONFIGS[expected_name] files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) + network_config=yaml.safe_load(entry[yaml_version]) ) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_all(self): - entry = NETWORK_CONFIGS["all"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - print(entry["expected_netplan"]) - print("-- expected ^ | v rendered --") - print(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertEqual( - 
entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - def testsimple_render_manual(self): - entry = NETWORK_CONFIGS["manual"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), - ) - - @pytest.mark.xfail( - reason="netplan should render interface-level nameservers" - ) - def testsimple_render_v1_dns(self): - entry = NETWORK_CONFIGS["v1-dns"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + assert yaml.safe_load(entry["expected_netplan"]) == yaml.safe_load( + files["/etc/netplan/50-cloud-init.yaml"] ) def test_render_output_has_yaml_no_aliases(self): @@ -7991,7 +4176,7 @@ def test_render_output_has_yaml_no_aliases(self): "yaml": V1_NAMESERVER_ALIAS, "expected_netplan": NETPLAN_NO_ALIAS, } - network_config = yaml.load(entry["yaml"]) + network_config = yaml.safe_load(entry["yaml"]) ns = network_state.parse_net_config_data(network_config) files = self._render_and_read(state=ns) # check for alias @@ -7999,8 +4184,8 @@ def test_render_output_has_yaml_no_aliases(self): # test load the yaml to ensure we don't render something not loadable # this allows single aliases, but not duplicate ones - parsed = yaml.load(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertNotEqual(None, parsed) + parsed = yaml.safe_load(files["/etc/netplan/50-cloud-init.yaml"]) + assert parsed is not None # now look for any alias, avoid rendering them entirely # generate the first anchor string using the template @@ -8011,12 +4196,9 @@ def test_render_output_has_yaml_no_aliases(self): msg = "Error at: %s\nContent:\n%s" % (found_alias, content) raise ValueError("Found yaml alias in rendered netplan: " + msg) - print(entry["expected_netplan"]) - print("-- expected ^ | v rendered --") - print(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + assert ( + entry["expected_netplan"].splitlines() + == files["/etc/netplan/50-cloud-init.yaml"].splitlines() ) def test_render_output_supports_both_grat_arp_spelling(self): @@ -8026,18 +4208,19 @@ def test_render_output_supports_both_grat_arp_spelling(self): "gratuitious", "gratuitous" ), } - network_config = yaml.load(entry["yaml"]).get("network") + network_config = yaml.safe_load(entry["yaml"]).get("network") files = self._render_and_read(network_config=network_config) - print(entry["expected_netplan"]) - print("-- expected ^ | v rendered --") - print(files["/etc/netplan/50-cloud-init.yaml"]) - self.assertEqual( - entry["expected_netplan"].splitlines(), - files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + assert ( + entry["expected_netplan"].splitlines() + == files["/etc/netplan/50-cloud-init.yaml"].splitlines() ) -class TestEniRoundTrip(CiTestCase): +class TestEniRoundTrip: + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + def _render_and_read( self, network_config=None, @@ -8069,137 +4252,84 @@ def _render_and_read( def testsimple_convert_and_render(self): network_config = eni.convert_eni_data(EXAMPLE_ENI) files = self._render_and_read(network_config=network_config) - self.assertEqual( - RENDERED_ENI.splitlines(), - 
files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_all(self): - entry = NETWORK_CONFIGS["all"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_small_v1(self): - entry = NETWORK_CONFIGS["small_v1"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - @pytest.mark.xfail(reason="GH-4219") - def testsimple_render_small_v2(self): - entry = NETWORK_CONFIGS["small_v2"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_v4_and_v6(self): - entry = NETWORK_CONFIGS["v4_and_v6"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_dhcpv6_only(self): - entry = NETWORK_CONFIGS["dhcpv6_only"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_v4_and_v6_static(self): - entry = NETWORK_CONFIGS["v4_and_v6_static"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_dhcpv6_stateless(self): - entry = NETWORK_CONFIGS["dhcpv6_stateless"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_ipv6_slaac(self): - entry = NETWORK_CONFIGS["ipv6_slaac"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_dhcpv6_stateful(self): - entry = NETWORK_CONFIGS["dhcpv6_stateless"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_dhcpv6_accept_ra(self): - entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) - ) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_dhcpv6_reject_ra(self): - entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) - ) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_wakeonlan_disabled_config_v2(self): - entry = NETWORK_CONFIGS["wakeonlan_disabled"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) - ) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), + assert ( + RENDERED_ENI.splitlines() + == files["/etc/network/interfaces"].splitlines() ) - def testsimple_wakeonlan_enabled_config_v2(self): - entry = 
NETWORK_CONFIGS["wakeonlan_enabled"] + @pytest.mark.parametrize( + "expected_name,yaml_version", + [ + ("large_v1", "yaml"), + pytest.param( + "large_v2", + "yaml", + marks=pytest.mark.xfail( + reason=( + "MAC for bond and bridge not being rendered. " + "bond-miimon is used rather than bond_miimon. " + "No rendering of bridge_gcint. " + "No rendering of bridge_waitport. " + "IPv6 routes added to IPv4 section. " + "DNS rendering inconsistencies." + ) + ), + ), + ("small_v1", "yaml"), + pytest.param( + "small_v2", "yaml", marks=pytest.mark.xfail(reason="GH-4219") + ), + ("v4_and_v6", "yaml_v1"), + ("v4_and_v6", "yaml_v2"), + ("dhcpv6_only", "yaml_v1"), + ("dhcpv6_only", "yaml_v2"), + ("v1_ipv4_and_ipv6_static", "yaml_v1"), + ("v2_ipv4_and_ipv6_static", "yaml_v2"), + ("dhcpv6_stateless", "yaml"), + ("ipv6_slaac", "yaml"), + pytest.param( + "dhcpv6_stateful", + "yaml", + marks=pytest.mark.xfail( + reason="Test never passed due to typo in name" + ), + ), + ("dhcpv6_accept_ra", "yaml_v1"), + ("dhcpv6_accept_ra", "yaml_v2"), + ("dhcpv6_reject_ra", "yaml_v1"), + ("dhcpv6_reject_ra", "yaml_v2"), + ("wakeonlan_disabled", "yaml_v2"), + ("wakeonlan_enabled", "yaml_v2"), + ("manual", "yaml"), + ("bond_v1", "yaml"), + pytest.param( + "bond_v2", + "yaml", + marks=pytest.mark.xfail( + reason=( + "Rendering bond_miimon rather than bond-miimon. " + "Using pre-down/post-up routes for gateway rather " + "gateway. " + "Adding ipv6 routes to ipv4 section" + ) + ), + ), + pytest.param( + "v1-dns", "yaml", marks=pytest.mark.xfail(reason="GH-4219") + ), + pytest.param( + "v2-dns", "yaml", marks=pytest.mark.xfail(reason="GH-4219") + ), + ], + ) + def test_config(self, expected_name, yaml_version): + entry = NETWORK_CONFIGS[expected_name] files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) + network_config=yaml.safe_load(entry[yaml_version]) ) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - def testsimple_render_manual(self): - """Test rendering of 'manual' for 'type' and 'control'. - - 'type: manual' in a subnet is odd, but it is the way that was used - to declare that a network device should get a mtu set on it even - if there were no addresses to configure. 
Also strange is the fact - that in order to apply that MTU the ifupdown device must be set - to 'auto', or the MTU would not be set.""" - entry = NETWORK_CONFIGS["manual"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), + assert ( + entry["expected_eni"].splitlines() + == files["/etc/network/interfaces"].splitlines() ) def test_routes_rendered(self): @@ -8276,7 +4406,7 @@ def test_routes_rendered(self): ] found = files["/etc/network/interfaces"].splitlines() - self.assertEqual(expected, [line for line in found if line]) + assert expected == [line for line in found if line] def test_ipv6_static_routes(self): # as reported in bug 1818669 @@ -8350,36 +4480,14 @@ def test_ipv6_static_routes(self): ] found = files["/etc/network/interfaces"].splitlines() - self.assertEqual(expected, [line for line in found if line]) - - def testsimple_render_bond(self): - entry = NETWORK_CONFIGS["bond"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) - - @pytest.mark.xfail(reason="GH-4219") - def test_v1_dns(self): - entry = NETWORK_CONFIGS["v1-dns"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) + assert expected == [line for line in found if line] - @pytest.mark.xfail(reason="GH-4219") - def test_v2_dns(self): - entry = NETWORK_CONFIGS["v2-dns"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - self.assertEqual( - entry["expected_eni"].splitlines(), - files["/etc/network/interfaces"].splitlines(), - ) +class TestNetworkdNetRendering: + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) -class TestNetworkdNetRendering(CiTestCase): def create_conf_dict(self, contents): content_dict = {} for line in contents: @@ -8395,7 +4503,7 @@ def create_conf_dict(self, contents): def compare_dicts(self, actual, expected): for k, v in actual.items(): - self.assertEqual(sorted(expected[k]), sorted(v)) + assert sorted(expected[k]) == sorted(v) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @@ -8427,9 +4535,7 @@ def test_networkd_default_generation( renderer = networkd.Renderer({}) renderer.render_network_state(ns, target=render_dir) - self.assertTrue( - os.path.exists(os.path.join(render_dir, render_target)) - ) + assert os.path.exists(os.path.join(render_dir, render_target)) with open(os.path.join(render_dir, render_target)) as fh: contents = fh.readlines() @@ -8440,6 +4546,7 @@ def test_networkd_default_generation( """\ [Match] Name=eth1000 + MACAddress=07-1c-c6-75-a4-be [Network] DHCP=yes""" ).rstrip(" ") @@ -8449,7 +4556,11 @@ def test_networkd_default_generation( self.compare_dicts(actual, expected) -class TestNetworkdRoundTrip(CiTestCase): +class TestNetworkdRoundTrip: + @pytest.fixture(autouse=True) + def setup(self, tmpdir_factory): + self.tmp_dir = lambda: str(tmpdir_factory.mktemp("a", numbered=True)) + def create_conf_dict(self, contents): content_dict = {} for line in contents: @@ -8465,7 +4576,7 @@ def create_conf_dict(self, contents): def compare_dicts(self, actual, expected): for k, v in actual.items(): - 
self.assertEqual(sorted(expected[k]), sorted(v)) + assert sorted(expected[k]) == sorted(v) def _render_and_read( self, network_config=None, state=None, nwkd_path=None, dir=None @@ -8488,35 +4599,45 @@ def _render_and_read( renderer.render_network_state(ns, target=dir) return dir2dict(dir) + @pytest.mark.parametrize( + "expected_name,yaml_version", + [ + ("v4_and_v6", "yaml_v1"), + ("v4_and_v6", "yaml_v2"), + ("v1_ipv4_and_ipv6_static", "yaml_v1"), + ("v2_ipv4_and_ipv6_static", "yaml_v2"), + ("dhcpv6_only", "yaml_v1"), + ("dhcpv6_only", "yaml_v2"), + ("dhcpv6_accept_ra", "yaml_v1"), + ("dhcpv6_accept_ra", "yaml_v2"), + ("dhcpv6_reject_ra", "yaml_v1"), + ("dhcpv6_reject_ra", "yaml_v2"), + ], + ) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def testsimple_render_small_networkd_v1(self, m_chown): - nwk_fn1 = "/etc/systemd/network/10-cloud-init-eth99.network" - nwk_fn2 = "/etc/systemd/network/10-cloud-init-eth1.network" - entry = NETWORK_CONFIGS["small_v1"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - - actual = files[nwk_fn1].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd_eth99"].splitlines() - expected = self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) + def test_config(self, _m_chown, expected_name, yaml_version): + nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" + entry = NETWORK_CONFIGS[expected_name] + files = self._render_and_read( + network_config=yaml.safe_load(entry[yaml_version]) + ) - actual = files[nwk_fn2].splitlines() + actual = files[nwk_fn].splitlines() actual = self.create_conf_dict(actual) - expected = entry["expected_networkd_eth1"].splitlines() + expected = entry["expected_networkd"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def testsimple_render_small_networkd_v2(self, m_chown): + def testsimple_render_small_networkd_v1(self, m_chown): nwk_fn1 = "/etc/systemd/network/10-cloud-init-eth99.network" nwk_fn2 = "/etc/systemd/network/10-cloud-init-eth1.network" - entry = NETWORK_CONFIGS["small_v2"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) + entry = NETWORK_CONFIGS["small_v1"] + files = self._render_and_read( + network_config=yaml.safe_load(entry["yaml"]) + ) actual = files[nwk_fn1].splitlines() actual = self.create_conf_dict(actual) @@ -8535,107 +4656,26 @@ def testsimple_render_small_networkd_v2(self, m_chown): self.compare_dicts(actual, expected) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def testsimple_render_v4_and_v6(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["v4_and_v6"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - - actual = files[nwk_fn].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd"].splitlines() - expected = self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) - - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def testsimple_render_v4_and_v6_static(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["v4_and_v6_static"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - - actual = files[nwk_fn].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd"].splitlines() - expected = 
self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) - - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def testsimple_render_dhcpv6_only(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["dhcpv6_only"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) - - actual = files[nwk_fn].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd"].splitlines() - expected = self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) - - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def test_dhcpv6_accept_ra_config_v1(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) - ) - - actual = files[nwk_fn].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd"].splitlines() - expected = self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) - - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def test_dhcpv6_accept_ra_config_v2(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) - ) - - actual = files[nwk_fn].splitlines() - actual = self.create_conf_dict(actual) - - expected = entry["expected_networkd"].splitlines() - expected = self.create_conf_dict(expected) - - self.compare_dicts(actual, expected) - - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def test_dhcpv6_reject_ra_config_v1(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] + def testsimple_render_small_networkd_v2(self, m_chown): + nwk_fn1 = "/etc/systemd/network/10-cloud-init-eth99.network" + nwk_fn2 = "/etc/systemd/network/10-cloud-init-eth1.network" + entry = NETWORK_CONFIGS["small_v2"] files = self._render_and_read( - network_config=yaml.load(entry["yaml_v1"]) + network_config=yaml.safe_load(entry["yaml"]) ) - actual = files[nwk_fn].splitlines() + actual = files[nwk_fn1].splitlines() actual = self.create_conf_dict(actual) - expected = entry["expected_networkd"].splitlines() + expected = entry["expected_networkd_eth99"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) - @mock.patch("cloudinit.net.util.chownbyname", return_value=True) - def test_dhcpv6_reject_ra_config_v2(self, m_chown): - nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" - entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] - files = self._render_and_read( - network_config=yaml.load(entry["yaml_v2"]) - ) - - actual = files[nwk_fn].splitlines() + actual = files[nwk_fn2].splitlines() actual = self.create_conf_dict(actual) - expected = entry["expected_networkd"].splitlines() + expected = entry["expected_networkd_eth1"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @@ -8647,7 +4687,9 @@ def test_dhcpv6_reject_ra_config_v2(self, m_chown): def test_v1_dns(self, m_chown): nwk_fn = "/etc/systemd/network/10-cloud-init-eth0.network" entry = NETWORK_CONFIGS["v1-dns"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) + files = self._render_and_read( + network_config=yaml.safe_load(entry["yaml"]) + ) actual = 
self.create_conf_dict(files[nwk_fn].splitlines()) expected = self.create_conf_dict( @@ -8660,7 +4702,9 @@ def test_v1_dns(self, m_chown): def test_v2_dns(self, m_chown): nwk_fn = "/etc/systemd/network/10-cloud-init-eth0.network" entry = NETWORK_CONFIGS["v2-dns"] - files = self._render_and_read(network_config=yaml.load(entry["yaml"])) + files = self._render_and_read( + network_config=yaml.safe_load(entry["yaml"]) + ) actual = self.create_conf_dict(files[nwk_fn].splitlines()) expected = self.create_conf_dict( @@ -8738,7 +4782,7 @@ def test_valid_renderer_from_defaults_depending_on_availability( renderers.select(priority=renderers.DEFAULT_PRIORITY) -class TestNetRenderers(CiTestCase): +class TestNetRenderers: @mock.patch("cloudinit.net.renderers.sysconfig.available") @mock.patch("cloudinit.net.renderers.eni.available") def test_eni_and_sysconfig_available(self, m_eni_avail, m_sysc_avail): @@ -8746,13 +4790,13 @@ def test_eni_and_sysconfig_available(self, m_eni_avail, m_sysc_avail): m_sysc_avail.return_value = True found = renderers.search(priority=["sysconfig", "eni"], first=False) names = [f[0] for f in found] - self.assertEqual(["sysconfig", "eni"], names) + assert ["sysconfig", "eni"] == names @mock.patch("cloudinit.net.renderers.eni.available") def test_search_returns_empty_on_none(self, m_eni_avail): m_eni_avail.return_value = False found = renderers.search(priority=["eni"], first=False) - self.assertEqual([], found) + assert [] == found @mock.patch("cloudinit.net.renderers.sysconfig.available") @mock.patch("cloudinit.net.renderers.eni.available") @@ -8761,7 +4805,7 @@ def test_first_in_priority(self, m_eni_avail, m_sysc_avail): m_eni_avail.return_value = True m_sysc_avail.side_effect = Exception("Should not call me") found = renderers.search(priority=["eni", "sysconfig"], first=True)[0] - self.assertEqual(["eni"], [found[0]]) + assert ["eni"] == [found[0]] @mock.patch("cloudinit.net.renderers.sysconfig.available") @mock.patch("cloudinit.net.renderers.eni.available") @@ -8769,7 +4813,7 @@ def test_select_positive(self, m_eni_avail, m_sysc_avail): m_eni_avail.return_value = True m_sysc_avail.return_value = False found = renderers.select(priority=["sysconfig", "eni"]) - self.assertEqual("eni", found[0]) + assert "eni" == found[0] @mock.patch("cloudinit.net.renderers.sysconfig.available") @mock.patch("cloudinit.net.renderers.eni.available") @@ -8778,7 +4822,7 @@ def test_select_none_found_raises(self, m_eni_avail, m_sysc_avail): m_eni_avail.return_value = False m_sysc_avail.return_value = False - self.assertRaises( + pytest.raises( net.RendererNotFoundError, renderers.select, priority=["sysconfig", "eni"], @@ -8800,20 +4844,20 @@ def test_sysconfig_available_uses_variant_mapping(self, m_info, m_avail): if hasattr(util.system_info, "cache_clear"): util.system_info.cache_clear() result = sysconfig.available() - self.assertTrue(result) + assert result @mock.patch("cloudinit.net.renderers.networkd.available") def test_networkd_available(self, m_nwkd_avail): m_nwkd_avail.return_value = True found = renderers.search(priority=["networkd"], first=False) - self.assertEqual("networkd", found[0][0]) + assert "networkd" == found[0][0] @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestGetInterfaces(CiTestCase): +class TestGetInterfaces: _data = { "bonds": ["bond1"], "bridges": ["bridge1"], @@ -8885,10 +4929,11 @@ def _se_is_bond(self, name): def _se_is_netfailover(self, name): return False - def _mock_setup(self): + @pytest.fixture + def 
mocks(self, mocker): self.data = copy.deepcopy(self._data) self.data["devices"] = set(list(self.data["macs"].keys())) - mocks = ( + mock_list = ( "get_devicelist", "get_interface_mac", "get_master", @@ -8900,35 +4945,32 @@ def _mock_setup(self): "is_bond", "is_netfailover", ) - self.mocks = {} - for n in mocks: - m = mock.patch( - "cloudinit.net." + n, side_effect=getattr(self, "_se_" + n) + all_mocks = {} + for name in mock_list: + all_mocks[name] = mocker.patch( + "cloudinit.net." + name, + side_effect=getattr(self, "_se_" + name), ) - self.addCleanup(m.stop) - self.mocks[n] = m.start() + yield all_mocks - def test_gi_includes_duplicate_macs(self): - self._mock_setup() + def test_gi_includes_duplicate_macs(self, mocks): ret = net.get_interfaces() - self.assertIn("enp0s1", self._se_get_devicelist()) - self.assertIn("eth1", self._se_get_devicelist()) + assert "enp0s1" in self._se_get_devicelist() + assert "eth1" in self._se_get_devicelist() found = [ent for ent in ret if "aa:aa:aa:aa:aa:01" in ent] - self.assertEqual(len(found), 2) + assert len(found) == 2 - def test_gi_excludes_any_without_mac_address(self): - self._mock_setup() + def test_gi_excludes_any_without_mac_address(self, mocks): ret = net.get_interfaces() - self.assertIn("tun0", self._se_get_devicelist()) + assert "tun0" in self._se_get_devicelist() found = [ent for ent in ret if "tun0" in ent] - self.assertEqual(len(found), 0) + assert len(found) == 0 - def test_gi_excludes_stolen_macs(self): - self._mock_setup() + def test_gi_excludes_stolen_macs(self, mocks): ret = net.get_interfaces() - self.mocks["interface_has_own_mac"].assert_has_calls( + mocks["interface_has_own_mac"].assert_has_calls( [mock.call("enp0s1"), mock.call("bond1")], any_order=True ) expected = [ @@ -8938,10 +4980,9 @@ def test_gi_excludes_stolen_macs(self): ("lo", "00:00:00:00:00:00", None, "0x8"), ("bridge1-nic", "aa:aa:aa:aa:aa:03", None, "0x3"), ] - self.assertEqual(sorted(expected), sorted(ret)) + assert sorted(expected) == sorted(ret) - def test_gi_excludes_bridges(self): - self._mock_setup() + def test_gi_excludes_bridges(self, mocks): # add a device 'b1', make all return they have their "own mac", # set everything other than 'b1' to be a bridge. # then expect b1 is the only thing left. @@ -8952,8 +4993,8 @@ def test_gi_excludes_bridges(self): self.data["own_macs"] = self.data["devices"] self.data["bridges"] = [f for f in self.data["devices"] if f != "b1"] ret = net.get_interfaces() - self.assertEqual([("b1", "aa:aa:aa:aa:aa:b1", None, "0x0")], ret) - self.mocks["is_bridge"].assert_has_calls( + assert [("b1", "aa:aa:aa:aa:aa:b1", None, "0x0")] == ret + mocks["is_bridge"].assert_has_calls( [ mock.call("bridge1"), mock.call("enp0s1"), @@ -8964,7 +5005,7 @@ def test_gi_excludes_bridges(self): ) -class TestInterfaceHasOwnMac(CiTestCase): +class TestInterfaceHasOwnMac: """Test interface_has_own_mac. 
This is admittedly a bit whitebox.""" @mock.patch("cloudinit.net.read_sys_net_int", return_value=None) @@ -8982,11 +5023,11 @@ def test_non_strict_with_no_addr_assign_type(self, m_read_sys_net_int): tx_queue_len:1 type:1 """ - self.assertTrue(interface_has_own_mac("eth0")) + assert interface_has_own_mac("eth0") @mock.patch("cloudinit.net.read_sys_net_int", return_value=None) def test_strict_with_no_addr_assign_type_raises(self, m_read_sys_net_int): - with self.assertRaises(ValueError): + with pytest.raises(ValueError): interface_has_own_mac("eth0", True) @mock.patch("cloudinit.net.read_sys_net_int") @@ -8994,20 +5035,17 @@ def test_expected_values(self, m_read_sys_net_int): msg = "address_assign_type=%d said to not have own mac" for address_assign_type in (0, 1, 3): m_read_sys_net_int.return_value = address_assign_type - self.assertTrue( - interface_has_own_mac("eth0", msg % address_assign_type) - ) + assert interface_has_own_mac("eth0", msg % address_assign_type) m_read_sys_net_int.return_value = 2 - self.assertFalse(interface_has_own_mac("eth0")) + assert not interface_has_own_mac("eth0") @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestGetInterfacesByMac(CiTestCase): - with_logs = True +class TestGetInterfacesByMac: _data = { "bonds": ["bond1"], "bridges": ["bridge1"], @@ -9048,6 +5086,27 @@ class TestGetInterfacesByMac(CiTestCase): } data: dict = {} + @pytest.fixture + def mocks(self, mocker): + self.data = copy.deepcopy(self._data) + self.data["devices"] = set(list(self.data["macs"].keys())) + mock_list = ( + "get_devicelist", + "device_driver", + "get_interface_mac", + "is_bridge", + "interface_has_own_mac", + "is_vlan", + "get_ib_interface_hwaddr", + ) + all_mocks = {} + for name in mock_list: + all_mocks[name] = mocker.patch( + "cloudinit.net." + name, + side_effect=getattr(self, "_se_" + name), + ) + yield all_mocks + def _se_get_devicelist(self): return list(self.data["devices"]) @@ -9070,62 +5129,34 @@ def _se_get_ib_interface_hwaddr(self, name, ethernet_format): ib_hwaddr = self.data.get("ib_hwaddr", {}) return ib_hwaddr.get(name, {}).get(ethernet_format) - def _mock_setup(self): - self.data = copy.deepcopy(self._data) - self.data["devices"] = set(list(self.data["macs"].keys())) - mocks = ( - "get_devicelist", - "device_driver", - "get_interface_mac", - "is_bridge", - "interface_has_own_mac", - "is_vlan", - "get_ib_interface_hwaddr", - ) - self.mocks = {} - for n in mocks: - m = mock.patch( - "cloudinit.net." 
+ n, side_effect=getattr(self, "_se_" + n) - ) - self.addCleanup(m.stop) - self.mocks[n] = m.start() - - def test_raise_exception_on_duplicate_macs(self): - self._mock_setup() + def test_raise_exception_on_duplicate_macs(self, mocks): self.data["macs"]["bridge1-nic"] = self.data["macs"]["enp0s1"] - self.assertRaises(RuntimeError, net.get_interfaces_by_mac) + pytest.raises(RuntimeError, net.get_interfaces_by_mac) - def test_raise_exception_on_duplicate_netvsc_macs(self): - self._mock_setup() + def test_raise_exception_on_duplicate_netvsc_macs(self, mocks): self.data["macs"]["netvsc0"] = self.data["macs"]["netvsc1"] - self.assertRaises(RuntimeError, net.get_interfaces_by_mac) + pytest.raises(RuntimeError, net.get_interfaces_by_mac) - def test_excludes_any_without_mac_address(self): - self._mock_setup() + def test_excludes_any_without_mac_address(self, mocks): ret = net.get_interfaces_by_mac() - self.assertIn("tun0", self._se_get_devicelist()) - self.assertNotIn("tun0", ret.values()) + assert "tun0" in self._se_get_devicelist() + assert "tun0" not in ret.values() - def test_excludes_stolen_macs(self): - self._mock_setup() + def test_excludes_stolen_macs(self, mocks): ret = net.get_interfaces_by_mac() - self.mocks["interface_has_own_mac"].assert_has_calls( + mocks["interface_has_own_mac"].assert_has_calls( [mock.call("enp0s1"), mock.call("bond1")], any_order=True ) - self.assertEqual( - { - "aa:aa:aa:aa:aa:01": "enp0s1", - "aa:aa:aa:aa:aa:02": "enp0s2", - "aa:aa:aa:aa:aa:03": "bridge1-nic", - "00:00:00:00:00:00": "lo", - "aa:aa:aa:aa:aa:04": "netvsc0", - "aa:aa:aa:aa:aa:05": "netvsc1", - }, - ret, - ) - - def test_excludes_bridges(self): - self._mock_setup() + assert { + "aa:aa:aa:aa:aa:01": "enp0s1", + "aa:aa:aa:aa:aa:02": "enp0s2", + "aa:aa:aa:aa:aa:03": "bridge1-nic", + "00:00:00:00:00:00": "lo", + "aa:aa:aa:aa:aa:04": "netvsc0", + "aa:aa:aa:aa:aa:05": "netvsc1", + } == ret + + def test_excludes_bridges(self, mocks): # add a device 'b1', make all return they have their "own mac", # set everything other than 'b1' to be a bridge. # then expect b1 is the only thing left. @@ -9135,8 +5166,8 @@ def test_excludes_bridges(self): self.data["own_macs"] = self.data["devices"] self.data["bridges"] = [f for f in self.data["devices"] if f != "b1"] ret = net.get_interfaces_by_mac() - self.assertEqual({"aa:aa:aa:aa:aa:b1": "b1"}, ret) - self.mocks["is_bridge"].assert_has_calls( + assert {"aa:aa:aa:aa:aa:b1": "b1"} == ret + mocks["is_bridge"].assert_has_calls( [ mock.call("bridge1"), mock.call("enp0s1"), @@ -9146,8 +5177,7 @@ def test_excludes_bridges(self): any_order=True, ) - def test_excludes_vlans(self): - self._mock_setup() + def test_excludes_vlans(self, mocks): # add a device 'b1', make all return they have their "own mac", # set everything other than 'b1' to be a vlan. # then expect b1 is the only thing left. 
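The conversions above all follow the same shape: the unittest-era _mock_setup(), with its mock.patch/addCleanup/start bookkeeping, becomes a pytest fixture that patches through the pytest-mock plugin's mocker, which unwinds every patch at teardown on its own. A minimal, self-contained sketch of that shape (the patch target and names here are illustrative, not cloud-init's):

    import os.path

    import pytest


    class TestFixtureMockPattern:
        # canned values consulted by the side-effect helpers
        _se_values = {"exists": True}

        def _se_exists(self, path):
            return self._se_values["exists"]

        @pytest.fixture
        def mocks(self, mocker):
            # mocker.patch registers its own undo for teardown, so no
            # addCleanup(m.stop) bookkeeping is needed
            all_mocks = {}
            for name in ("exists",):
                all_mocks[name] = mocker.patch(
                    "os.path." + name,
                    side_effect=getattr(self, "_se_" + name),
                )
            yield all_mocks

        def test_patched(self, mocks):
            assert os.path.exists("/anywhere")
            mocks["exists"].assert_called_once_with("/anywhere")

Each test then requests the fixture as an argument (def test_...(self, mocks)) instead of calling self._mock_setup() by hand.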
@@ -9158,8 +5188,8 @@ def test_excludes_vlans(self): self.data["own_macs"] = self.data["devices"] self.data["vlans"] = [f for f in self.data["devices"] if f != "b1"] ret = net.get_interfaces_by_mac() - self.assertEqual({"aa:aa:aa:aa:aa:b1": "b1"}, ret) - self.mocks["is_vlan"].assert_has_calls( + assert {"aa:aa:aa:aa:aa:b1": "b1"} == ret + mocks["is_vlan"].assert_has_calls( [ mock.call("bridge1"), mock.call("enp0s1"), @@ -9169,20 +5199,18 @@ def test_excludes_vlans(self): any_order=True, ) - def test_duplicates_of_empty_mac_are_ok(self): + def test_duplicates_of_empty_mac_are_ok(self, mocks): """Duplicate macs of 00:00:00:00:00:00 should be skipped.""" - self._mock_setup() empty_mac = "00:00:00:00:00:00" addnics = ("greptap1", "lo", "greptap2") self.data["macs"].update(dict((k, empty_mac) for k in addnics)) self.data["devices"].update(set(addnics)) self.data["own_macs"].extend(list(addnics)) ret = net.get_interfaces_by_mac() - self.assertEqual("lo", ret[empty_mac]) + assert "lo" == ret[empty_mac] - def test_skip_all_zeros(self): + def test_skip_all_zeros(self, mocks): """Any mac of 00:... should be skipped.""" - self._mock_setup() emac1, emac2, emac4, emac6 = ( "00", "00:00", @@ -9200,12 +5228,11 @@ def test_skip_all_zeros(self): self.data["devices"].update(set(addnics)) self.data["own_macs"].extend(addnics.keys()) ret = net.get_interfaces_by_mac() - self.assertEqual("lo", ret["00:00:00:00:00:00"]) + assert "lo" == ret["00:00:00:00:00:00"] - def test_ib(self): + def test_ib(self, mocks): ib_addr = "80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56" ib_addr_eth_format = "00:11:22:33:44:56" - self._mock_setup() self.data["devices"] = ["enp0s1", "ib0"] self.data["own_macs"].append("ib0") self.data["macs"]["ib0"] = ib_addr @@ -9218,7 +5245,7 @@ def test_ib(self): ib_addr_eth_format: "ib0", ib_addr: "ib0", } - self.assertEqual(expected, result) + assert expected == result @pytest.mark.parametrize("driver", ("mscc_felix", "fsl_enetc", "qmi_wwan")) @@ -9255,25 +5282,33 @@ def test_duplicate_ignored_macs( assert re.search(pattern, caplog.text) -class TestInterfacesSorting(CiTestCase): +class TestInterfacesSorting: def test_natural_order(self): data = ["ens5", "ens6", "ens3", "ens20", "ens13", "ens2"] - self.assertEqual( - sorted(data, key=natural_sort_key), - ["ens2", "ens3", "ens5", "ens6", "ens13", "ens20"], - ) + assert sorted(data, key=natural_sort_key) == [ + "ens2", + "ens3", + "ens5", + "ens6", + "ens13", + "ens20", + ] data2 = ["enp2s0", "enp2s3", "enp0s3", "enp0s13", "enp0s8", "enp1s2"] - self.assertEqual( - sorted(data2, key=natural_sort_key), - ["enp0s3", "enp0s8", "enp0s13", "enp1s2", "enp2s0", "enp2s3"], - ) + assert sorted(data2, key=natural_sort_key) == [ + "enp0s3", + "enp0s8", + "enp0s13", + "enp1s2", + "enp2s0", + "enp2s3", + ] @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestGetIBHwaddrsByInterface(CiTestCase): +class TestGetIBHwaddrsByInterface: _ib_addr = "80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56" _ib_addr_eth_format = "00:11:22:33:44:56" _data = { @@ -9302,22 +5337,23 @@ class TestGetIBHwaddrsByInterface(CiTestCase): } data: dict = {} - def _mock_setup(self): + @pytest.fixture + def mocks(self, mocker): self.data = copy.deepcopy(self._data) - mocks = ( + mock_list = ( "get_devicelist", "get_interface_mac", "is_bridge", "interface_has_own_mac", "get_ib_interface_hwaddr", ) - self.mocks = {} - for n in mocks: - m = mock.patch( - "cloudinit.net." 
+ n, side_effect=getattr(self, "_se_" + n) + all_mocks = {} + for name in mock_list: + all_mocks[name] = mocker.patch( + "cloudinit.net." + name, + side_effect=getattr(self, "_se_" + name), ) - self.addCleanup(m.stop) - self.mocks[n] = m.start() + yield all_mocks def _se_get_devicelist(self): return self.data["devices"] @@ -9335,18 +5371,16 @@ def _se_get_ib_interface_hwaddr(self, name, ethernet_format): ib_hwaddr = self.data.get("ib_hwaddr", {}) return ib_hwaddr.get(name, {}).get(ethernet_format) - def test_ethernet(self): - self._mock_setup() + def test_ethernet(self, mocks): self.data["devices"].remove("ib0") result = net.get_ib_hwaddrs_by_interface() expected = {} - self.assertEqual(expected, result) + assert expected == result - def test_ib(self): - self._mock_setup() + def test_ib(self, mocks): result = net.get_ib_hwaddrs_by_interface() expected = {"ib0": self._ib_addr} - self.assertEqual(expected, result) + assert expected == result def _gzip_data(data): @@ -9357,7 +5391,7 @@ def _gzip_data(data): return iobuf.getvalue() -class TestRenameInterfaces(CiTestCase): +class TestRenameInterfaces: @mock.patch("cloudinit.subp.subp") def test_rename_all(self, mock_subp): renames = [ @@ -9383,16 +5417,13 @@ def test_rename_all(self, mock_subp): }, } net._rename_interfaces(renames, current_info=current_info) - print(mock_subp.call_args_list) mock_subp.assert_has_calls( [ mock.call( ["ip", "link", "set", "ens3", "name", "interface0"], - capture=True, ), mock.call( ["ip", "link", "set", "ens5", "name", "interface2"], - capture=True, ), ] ) @@ -9422,16 +5453,13 @@ def test_rename_no_driver_no_device_id(self, mock_subp): }, } net._rename_interfaces(renames, current_info=current_info) - print(mock_subp.call_args_list) mock_subp.assert_has_calls( [ mock.call( ["ip", "link", "set", "eth0", "name", "interface0"], - capture=True, ), mock.call( ["ip", "link", "set", "eth1", "name", "interface1"], - capture=True, ), ] ) @@ -9461,25 +5489,18 @@ def test_rename_all_bounce(self, mock_subp): }, } net._rename_interfaces(renames, current_info=current_info) - print(mock_subp.call_args_list) mock_subp.assert_has_calls( [ - mock.call(["ip", "link", "set", "ens3", "down"], capture=True), + mock.call(["ip", "link", "set", "dev", "ens3", "down"]), mock.call( ["ip", "link", "set", "ens3", "name", "interface0"], - capture=True, ), - mock.call(["ip", "link", "set", "ens5", "down"], capture=True), + mock.call(["ip", "link", "set", "dev", "ens5", "down"]), mock.call( ["ip", "link", "set", "ens5", "name", "interface2"], - capture=True, - ), - mock.call( - ["ip", "link", "set", "interface0", "up"], capture=True - ), - mock.call( - ["ip", "link", "set", "interface2", "up"], capture=True ), + mock.call(["ip", "link", "set", "dev", "interface0", "up"]), + mock.call(["ip", "link", "set", "dev", "interface2", "up"]), ] ) @@ -9508,12 +5529,9 @@ def test_rename_duplicate_macs(self, mock_subp): }, } net._rename_interfaces(renames, current_info=current_info) - print(mock_subp.call_args_list) mock_subp.assert_has_calls( [ - mock.call( - ["ip", "link", "set", "eth1", "name", "vf1"], capture=True - ), + mock.call(["ip", "link", "set", "eth1", "name", "vf1"]), ] ) @@ -9542,12 +5560,9 @@ def test_rename_duplicate_macs_driver_no_devid(self, mock_subp): }, } net._rename_interfaces(renames, current_info=current_info) - print(mock_subp.call_args_list) mock_subp.assert_has_calls( [ - mock.call( - ["ip", "link", "set", "eth1", "name", "vf1"], capture=True - ), + mock.call(["ip", "link", "set", "eth1", "name", "vf1"]), ] ) @@ -9585,15 +5600,10 
@@ def test_rename_multi_mac_dups(self, mock_subp): }, } net._rename_interfaces(renames, current_info=current_info) - print(mock_subp.call_args_list) mock_subp.assert_has_calls( [ - mock.call( - ["ip", "link", "set", "eth1", "name", "vf1"], capture=True - ), - mock.call( - ["ip", "link", "set", "eth2", "name", "vf2"], capture=True - ), + mock.call(["ip", "link", "set", "eth1", "name", "vf1"]), + mock.call(["ip", "link", "set", "eth2", "name", "vf2"]), ] ) @@ -9637,23 +5647,16 @@ def test_rename_macs_case_insensitive(self, mock_subp): expected = [ mock.call( ["ip", "link", "set", "eth%d" % i, "name", "en%d" % i], - capture=True, ) for i in range(len(renames)) ] mock_subp.assert_has_calls(expected) -class TestNetworkState(CiTestCase): +class TestNetworkState: def test_bcast_addr(self): """Test mask_and_ipv4_to_bcast_addr proper execution.""" bcast_addr = mask_and_ipv4_to_bcast_addr - self.assertEqual( - "192.168.1.255", bcast_addr("255.255.255.0", "192.168.1.1") - ) - self.assertEqual( - "128.42.7.255", bcast_addr("255.255.248.0", "128.42.5.4") - ) - self.assertEqual( - "10.1.21.255", bcast_addr("255.255.255.0", "10.1.21.4") - ) + assert "192.168.1.255" == bcast_addr("255.255.255.0", "192.168.1.1") + assert "128.42.7.255" == bcast_addr("255.255.248.0", "128.42.5.4") + assert "10.1.21.255" == bcast_addr("255.255.255.0", "10.1.21.4") diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py index d53701efa..0ec6819b5 100644 --- a/tests/unittests/test_net_activators.py +++ b/tests/unittests/test_net_activators.py @@ -1,8 +1,10 @@ +import logging from collections import namedtuple from contextlib import ExitStack from unittest.mock import patch import pytest +import yaml from cloudinit.net.activators import ( DEFAULT_PRIORITY, @@ -16,7 +18,6 @@ select_activator, ) from cloudinit.net.network_state import parse_net_config_data -from cloudinit.safeyaml import load V1_CONFIG = """\ version: 1 @@ -234,8 +235,8 @@ def test_available(self, activator, available_calls, available_mocks): ] NETWORKD_BRING_UP_CALL_LIST: list = [ - ((["ip", "link", "set", "up", "eth0"],), {}), - ((["ip", "link", "set", "up", "eth1"],), {}), + ((["ip", "link", "set", "dev", "eth0", "up"],), {}), + ((["ip", "link", "set", "dev", "eth1", "up"],), {}), ((["systemctl", "restart", "systemd-networkd", "systemd-resolved"],), {}), ] @@ -250,6 +251,23 @@ def test_available(self, activator, available_calls, available_mocks): ], ) class TestActivatorsBringUp: + @patch("cloudinit.subp.subp", return_value=("", "Some warning condition")) + def test_bring_up_interface_log_level_on_stderr( + self, m_subp, activator, expected_call_list, available_mocks, caplog + ): + """Activator stderr logged debug for netplan and warning for others.""" + if activator == NetplanActivator: + log_level = logging.DEBUG + else: + log_level = logging.WARNING + with caplog.at_level(log_level): + activator.bring_up_interface("eth0") + index = 0 + for call in m_subp.call_args_list: + assert call == expected_call_list[index] + index += 1 + assert "Received stderr output: Some warning condition" in caplog.text + @patch("cloudinit.subp.subp", return_value=("", "")) def test_bring_up_interface( self, m_subp, activator, expected_call_list, available_mocks @@ -274,7 +292,7 @@ def test_bring_up_interfaces( def test_bring_up_all_interfaces_v1( self, m_subp, activator, expected_call_list, available_mocks ): - network_state = parse_net_config_data(load(V1_CONFIG)) + network_state = parse_net_config_data(yaml.safe_load(V1_CONFIG)) 
activator.bring_up_all_interfaces(network_state) for call in m_subp.call_args_list: assert call in expected_call_list @@ -283,7 +301,7 @@ def test_bring_up_all_interfaces_v1( def test_bring_up_all_interfaces_v2( self, m_subp, activator, expected_call_list, available_mocks ): - network_state = parse_net_config_data(load(V2_CONFIG)) + network_state = parse_net_config_data(yaml.safe_load(V2_CONFIG)) activator.bring_up_all_interfaces(network_state) for call in m_subp.call_args_list: assert call in expected_call_list @@ -300,8 +318,8 @@ def test_bring_up_all_interfaces_v2( ] NETWORKD_BRING_DOWN_CALL_LIST: list = [ - ((["ip", "link", "set", "down", "eth0"],), {}), - ((["ip", "link", "set", "down", "eth1"],), {}), + ((["ip", "link", "set", "dev", "eth0", "down"],), {}), + ((["ip", "link", "set", "dev", "eth1", "down"],), {}), ] diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py index 4121e4044..5b21e0e76 100644 --- a/tests/unittests/test_net_freebsd.py +++ b/tests/unittests/test_net_freebsd.py @@ -1,8 +1,9 @@ import os +import yaml + import cloudinit.net import cloudinit.net.network_state -from cloudinit import safeyaml from tests.unittests.helpers import CiTestCase, dir2dict, mock, readResource SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") @@ -76,7 +77,7 @@ def test_render_output_has_yaml(self, m_is_freebsd, m_subp): entry = { "yaml": V1, } - network_config = safeyaml.load(entry["yaml"]) + network_config = yaml.safe_load(entry["yaml"]) ns = cloudinit.net.network_state.parse_net_config_data(network_config) files = self._render_and_read(state=ns) assert files == { diff --git a/tests/unittests/test_render_template.py b/tests/unittests/test_render_template.py index 150e61b1d..0ed946482 100644 --- a/tests/unittests/test_render_template.py +++ b/tests/unittests/test_render_template.py @@ -11,6 +11,7 @@ DISTRO_VARIANTS = [ "amazon", "arch", + "azurelinux", "centos", "debian", "eurolinux", diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py index bb5f40126..d1e27c444 100644 --- a/tests/unittests/test_stages.py +++ b/tests/unittests/test_stages.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
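The safeyaml.load to yaml.safe_load swaps in test_net_activators.py and test_net_freebsd.py above lean on PyYAML's safe loader, which constructs only plain Python types and rejects python-specific tags. A quick illustration with a config shaped like the V1_CONFIG snippets in these tests (the exact contents here are illustrative):

    import yaml

    V1_CONFIG = "version: 1\nconfig:\n  - type: physical\n    name: eth0\n"

    parsed = yaml.safe_load(V1_CONFIG)
    assert parsed["version"] == 1
    assert parsed["config"][0]["name"] == "eth0"

    # safe_load refuses tags that an unsafe loader would instantiate
    try:
        yaml.safe_load("!!python/object/apply:os.system ['true']")
    except yaml.YAMLError:
        pass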
+# pylint: disable=attribute-defined-outside-init """Tests related to cloudinit.stages module.""" import json @@ -410,7 +411,7 @@ def test_warn_on_empty_network(self, m_cmdline, m_initramfs, caplog): } self.init.datasource = FakeDataSource(network_config={"network": None}) - self.init.distro.generate_fallback_config = lambda: {} + self.init.distro.generate_fallback_config = dict self.init._find_networking_config() assert "Empty network config found" in caplog.text diff --git a/tests/unittests/test_subp.py b/tests/unittests/test_subp.py index d16167389..eca48800c 100644 --- a/tests/unittests/test_subp.py +++ b/tests/unittests/test_subp.py @@ -12,7 +12,7 @@ from tests.helpers import get_top_level_dir from tests.unittests.helpers import CiTestCase -BASH = subp.which("bash") +SH = "sh" BOGUS_COMMAND = "this-is-not-expected-to-be-a-program-name" @@ -93,24 +93,24 @@ def test_prepend_base_command_removes_first_item_when_none(self): class TestSubp(CiTestCase): allowed_subp = [ - BASH, + SH, "cat", CiTestCase.SUBP_SHELL_TRUE, BOGUS_COMMAND, sys.executable, + "env", ] - stdin2err = [BASH, "-c", "cat >&2"] + stdin2err = [SH, "-c", "cat >&2"] stdin2out = ["cat"] utf8_invalid = b"ab\xaadef" utf8_valid = b"start \xc3\xa9 end" utf8_valid_2 = b"d\xc3\xa9j\xc8\xa7" - printenv = [BASH, "-c", 'for n in "$@"; do echo "$n=${!n}"; done', "--"] - def printf_cmd(self, *args): - # bash's printf supports \xaa. So does /usr/bin/printf - # but by using bash, we remove dependency on another program. - return [BASH, "-c", 'printf "$@"', "printf"] + list(args) + @staticmethod + def printf_cmd(arg): + """print with builtin printf""" + return [SH, "-c", 'printf "$@"', "printf", arg] def test_subp_handles_bytestrings(self): """subp can run a bytestring command if shell is True.""" @@ -146,11 +146,21 @@ def test_subp_respects_decode_false(self): self.assertEqual(out, self.utf8_valid) def test_subp_decode_ignore(self): + """ensure that invalid utf-8 is ignored with the "ignore" kwarg""" # this executes a string that writes invalid utf-8 to stdout - (out, _err) = subp.subp( - self.printf_cmd("abc\\xaadef"), capture=True, decode="ignore" - ) - self.assertEqual(out, "abcdef") + with mock.patch.object( + subp.subprocess, + "Popen", + autospec=True, + ) as sp: + sp.return_value.communicate = mock.Mock( + return_value=(b"abc\xaadef", None) + ) + sp.return_value.returncode = 0 + assert ( + "abcdef" + == subp.subp([SH], capture=True, decode="ignore").stdout + ) def test_subp_decode_strict_valid_utf8(self): (out, _err) = subp.subp( @@ -189,20 +199,22 @@ def test_subp_capture_stderr(self): def test_subp_reads_env(self): with mock.patch.dict("os.environ", values={"FOO": "BAR"}): - out, _err = subp.subp(self.printenv + ["FOO"], capture=True) - self.assertEqual("FOO=BAR", out.splitlines()[0]) + assert {"FOO=BAR"}.issubset( + subp.subp("env", capture=True).stdout.splitlines() + ) def test_subp_update_env(self): + """test that subp's update_env argument updates the environment""" extra = {"FOO": "BAR", "HOME": "/root", "K1": "V1"} with mock.patch.dict("os.environ", values=extra): out, _err = subp.subp( - self.printenv + ["FOO", "HOME", "K1", "K2"], + "env", capture=True, update_env={"HOME": "/myhome", "K2": "V2"}, ) - self.assertEqual( - ["FOO=BAR", "HOME=/myhome", "K1=V1", "K2=V2"], out.splitlines() + assert {"FOO=BAR", "HOME=/myhome", "K1=V1", "K2=V2"}.issubset( + set(out.splitlines()) ) def test_subp_warn_missing_shebang(self): @@ -282,7 +294,7 @@ def test_c_lang_can_take_utf8_args(self): ] ) cmd = [ - BASH, + SH, "-c", 'echo -n "$@"', 
"--", diff --git a/tests/unittests/test_temp_utils.py b/tests/unittests/test_temp_utils.py index d47852d66..6cbc37282 100644 --- a/tests/unittests/test_temp_utils.py +++ b/tests/unittests/test_temp_utils.py @@ -25,7 +25,6 @@ def fake_mkdtemp(*args, **kwargs): { "os.getuid": 1000, "tempfile.mkdtemp": {"side_effect": fake_mkdtemp}, - "_TMPDIR": {"new": None}, "os.path.isdir": True, }, mkdtemp, @@ -46,7 +45,6 @@ def fake_mkdtemp(*args, **kwargs): { "os.getuid": 1000, "tempfile.mkdtemp": {"side_effect": fake_mkdtemp}, - "_TMPDIR": {"new": None}, "os.path.isdir": True, "util.has_mount_opt": True, }, @@ -69,7 +67,6 @@ def fake_mkdtemp(*args, **kwargs): { "os.getuid": 0, "tempfile.mkdtemp": {"side_effect": fake_mkdtemp}, - "_TMPDIR": {"new": None}, "os.path.isdir": True, }, mkdtemp, @@ -90,7 +87,6 @@ def fake_mkstemp(*args, **kwargs): { "os.getuid": 1000, "tempfile.mkstemp": {"side_effect": fake_mkstemp}, - "_TMPDIR": {"new": None}, "os.path.isdir": True, }, mkstemp, @@ -111,7 +107,6 @@ def fake_mkstemp(*args, **kwargs): { "os.getuid": 0, "tempfile.mkstemp": {"side_effect": fake_mkstemp}, - "_TMPDIR": {"new": None}, "os.path.isdir": True, }, mkstemp, diff --git a/tests/unittests/test_upgrade.py b/tests/unittests/test_upgrade.py index 83ae517ee..5c8eef5a5 100644 --- a/tests/unittests/test_upgrade.py +++ b/tests/unittests/test_upgrade.py @@ -186,8 +186,12 @@ def previous_obj_pkl(self, request): "match_case_insensitive_module_name", lambda name: f"DataSource{name}", ) + @mock.patch( + "cloudinit.sources.DataSourceCloudStack.get_vr_address", + return_value="data-server.", + ) def test_all_ds_init_vs_unpickle_attributes( - self, mode, mocker, paths, tmpdir + self, m_get_vr_address, mode, mocker, paths, tmpdir ): """Unpickle resets any instance attributes created in __init__ @@ -279,14 +283,15 @@ def test_pkl_load_defines_all_init_side_effect_attributes( paths = previous_obj_pkl.paths ds = ds_class(sys_cfg, distro, paths) if ds.dsname == "NoCloud" and previous_obj_pkl.__dict__: - expected = ( - set({"seed_dirs"}), # LP: #1568150 handled with getattr checks - set(), - ) + # seed_dirs is covered by _unpickle + # _network_config and _network_eni were already initialized + # outside of __init__ so shouldn't need unpickling + expected = {"seed_dirs", "_network_config", "_network_eni"} else: expected = (set(),) missing_attrs = ds.__dict__.keys() - previous_obj_pkl.__dict__.keys() - assert missing_attrs in expected + for attr in missing_attrs: + assert attr in expected def test_networking_set_on_distro(self, previous_obj_pkl): """We always expect to have ``.networking`` on ``Distro`` objects.""" diff --git a/tests/unittests/test_url_helper.py b/tests/unittests/test_url_helper.py index bc3d9857e..16f4775bd 100644 --- a/tests/unittests/test_url_helper.py +++ b/tests/unittests/test_url_helper.py @@ -20,6 +20,7 @@ dual_stack, oauth_headers, read_file_or_url, + readurl, wait_for_url, ) from tests.unittests.helpers import CiTestCase, mock, skipIf @@ -152,6 +153,11 @@ def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self): m_response = mock.MagicMock() + class FakeSessionRaisesHttpError(requests.Session): + @classmethod + def request(cls, **kwargs): + raise requests.exceptions.HTTPError("broke") + class FakeSession(requests.Session): @classmethod def request(cls, **kwargs): @@ -171,8 +177,10 @@ def request(cls, **kwargs): return m_response with mock.patch(M_PATH + "requests.Session") as m_session: - error = requests.exceptions.HTTPError("broke") - m_session.side_effect = [error, FakeSession()] + 
m_session.side_effect = [ + FakeSessionRaisesHttpError(), + FakeSession(), + ] # assert no retries and check_status == True with self.assertRaises(UrlError) as context_manager: response = read_file_or_url(url) @@ -275,6 +283,71 @@ def assert_time(func, max_time=1): return out +class TestReadUrl: + @pytest.mark.parametrize("headers", [{}, {"Metadata": "true"}]) + def test_headers(self, headers): + url = "http://hostname/path" + m_response = mock.MagicMock() + + expected_headers = headers.copy() + expected_headers["User-Agent"] = "Cloud-Init/%s" % ( + version.version_string() + ) + + class FakeSession(requests.Session): + @classmethod + def request(cls, **kwargs): + expected_kwargs = { + "url": url, + "allow_redirects": True, + "method": "GET", + "headers": expected_headers, + "stream": False, + } + + assert kwargs == expected_kwargs + return m_response + + with mock.patch( + M_PATH + "requests.Session", side_effect=[FakeSession()] + ): + response = readurl(url, headers=headers) + + assert response._response == m_response + + @pytest.mark.parametrize("headers", [{}, {"Metadata": "true"}]) + def test_headers_cb(self, headers): + url = "http://hostname/path" + m_response = mock.MagicMock() + + expected_headers = headers.copy() + expected_headers["User-Agent"] = "Cloud-Init/%s" % ( + version.version_string() + ) + headers_cb = lambda _: headers + + class FakeSession(requests.Session): + @classmethod + def request(cls, **kwargs): + expected_kwargs = { + "url": url, + "allow_redirects": True, + "method": "GET", + "headers": expected_headers, + "stream": False, + } + + assert kwargs == expected_kwargs + return m_response + + with mock.patch( + M_PATH + "requests.Session", side_effect=[FakeSession()] + ): + response = readurl(url, headers_cb=headers_cb) + + assert response._response == m_response + + event = Event() @@ -534,7 +607,9 @@ def retry_mocks(self, mocker): m_sleep = mocker.patch( f"{M_PATH}time.sleep", side_effect=self.sleep_side_effect ) - mocker.patch(f"{M_PATH}time.time", side_effect=self.time_side_effect) + mocker.patch( + f"{M_PATH}time.monotonic", side_effect=self.time_side_effect + ) yield m_readurl, m_sleep @@ -684,9 +759,9 @@ def test_default_sleep_time(self, retry_mocks): assert actual_sleep_times == expected_sleep_times # These side effect methods are a way of having a somewhat predictable - # output for time.time(). Otherwise, we have to track too many calls - # to time.time() and unrelated changes to code being called could cause - # these tests to fail. + # output for time.monotonic(). Otherwise, we have to track too many calls + # to time.monotonic() and unrelated changes to code being called could + # cause these tests to fail. 
# 0.0000001 is added to simulate additional execution time but keep it # small enough for pytest.approx() to work def sleep_side_effect(self, sleep_time): diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 70edb40bb..d790bf4f1 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -400,6 +400,20 @@ """ ) +OS_RELEASE_AZURELINUX = dedent( + """\ + NAME="Microsoft Azure Linux" + VERSION="3.0.20240206" + ID=azurelinux + VERSION_ID="3.0" + PRETTY_NAME="Microsoft Azure Linux 3.0" + ANSI_COLOR="1;34" + HOME_URL="https://aka.ms/azurelinux" + BUG_REPORT_URL="https://aka.ms/azurelinux" + SUPPORT_URL="https://aka.ms/azurelinux" +""" +) + @pytest.mark.usefixtures("fake_filesystem") class TestUtil: @@ -1249,6 +1263,16 @@ def test_get_linux_mariner_os_release(self, m_os_release, m_path_exists): dist = util.get_linux_distro() self.assertEqual(("mariner", "2.0", ""), dist) + @mock.patch("cloudinit.util.load_text_file") + def test_get_linux_azurelinux_os_release( + self, m_os_release, m_path_exists + ): + """Verify we get the correct name and machine arch on Azure Linux""" + m_os_release.return_value = OS_RELEASE_AZURELINUX + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(("azurelinux", "3.0", ""), dist) + @mock.patch(M_PATH + "load_text_file") def test_get_linux_openmandriva(self, m_os_release, m_path_exists): """Verify we get the correct name and machine arch on OpenMandriva""" @@ -1310,6 +1334,7 @@ class TestGetVariant: ({"system": "Linux", "dist": ("almalinux",)}, "almalinux"), ({"system": "linux", "dist": ("alpine",)}, "alpine"), ({"system": "linux", "dist": ("arch",)}, "arch"), + ({"system": "linux", "dist": ("azurelinux",)}, "azurelinux"), ({"system": "linux", "dist": ("centos",)}, "centos"), ({"system": "linux", "dist": ("cloudlinux",)}, "cloudlinux"), ({"system": "linux", "dist": ("debian",)}, "debian"), @@ -1700,6 +1725,8 @@ def preexec_fn(self, request): args = (test_string, None) elif request.param == "errfmt": args = (None, test_string) + else: + args = (None, None) with mock.patch(M_PATH + "subprocess.Popen") as m_popen: util.redirect_output(*args) @@ -2504,7 +2531,7 @@ def fake_response(url, timeout, retries): else: _url, _, md_type = parsed_url.netloc.partition("8008") path = f"/{md_type}" - return url_helper.StringResponse(f"{path}: 1") + return url_helper.StringResponse(f"{path}: 1", "http://url/") m_read.side_effect = fake_response @@ -3220,3 +3247,39 @@ def test_file_present(self, content, caplog, tmpdir): assert {"scopes": ["network"]} == util.read_hotplug_enabled_file( MockPath(target_file.strpath) ) + + +class TestLogExc: + def test_logexc(self, caplog): + try: + _ = 1 / 0 + except Exception as _: + util.logexc(LOG, "an error occurred") + + assert caplog.record_tuples == [ + ( + "tests.unittests.test_util", + logging.WARNING, + "an error occurred", + ), + ("tests.unittests.test_util", logging.DEBUG, "an error occurred"), + ] + + @pytest.mark.parametrize( + "log_level", + [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR], + ) + def test_logexc_with_log_level(self, caplog, log_level): + try: + _ = 1 / 0 + except Exception as _: + util.logexc(LOG, "an error occurred", log_level=log_level) + + assert caplog.record_tuples == [ + ( + "tests.unittests.test_util", + log_level, + "an error occurred", + ), + ("tests.unittests.test_util", logging.DEBUG, "an error occurred"), + ] diff --git a/tests/unittests/util.py b/tests/unittests/util.py index 
0cb601ef1..f3ed746d2 100644 --- a/tests/unittests/util.py +++ b/tests/unittests/util.py @@ -161,7 +161,7 @@ def shutdown_command(self, *, mode, delay, message): def package_command(self, command, args=None, pkgs=None): pass - def update_package_sources(self): + def update_package_sources(self, *, force=False): return (True, "yay") def do_as(self, command, args=None, **kwargs): diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index bd50dc84f..d9accd114 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -3,9 +3,11 @@ aciba90 acourdavAkamai ader1990 adobley +afbjorklund ajmyyra akutz AlexBaranowski +alexsander-souza AlexSv04047 AliyevH Aman306 @@ -16,6 +18,7 @@ andrewlukoshko ani-sinha antonyc apollo13 +ashuntu aswinrajamannar bdrung beantaxi @@ -45,11 +48,14 @@ dankm dark2phoenix david-caro dbungert +ddstreet +ddstreetmicrosoft ddymko dermotbradley dhalturin dhensby Dorthu +eaglegai eandersson eb3095 ederst @@ -58,13 +64,17 @@ einsibjarni emmanuelthome eslerm esposem +fionn frantisekz +frikilax +frittentheke GabrielNagy garzdin giggsoff gilbsgilbs glyg hamalq +hcartiaux holmanb impl Indrranil @@ -76,10 +86,12 @@ izzyleung j5awry jacobsalmela jamesottinger +jcmoore3 Jehops jf jfroche Jille +jinkkkang JohnKepplers johnsonshi jordimassaguerpla @@ -95,6 +107,7 @@ landon912 ld9379435 licebmi linitio +LKHN lkundrak lucasmoura lucendio @@ -121,14 +134,17 @@ nicolasbock nishigori nkukard nmeyerhans +ogayot olivierlemasle omBratteng onitake orndorffgrant Oursin outscale-mdr +philsphicas phsm phunyguy +pneigel-ca qubidt r00ta RedKrieg diff --git a/tools/build-on-netbsd b/tools/build-on-netbsd index 0d4eb58be..b743d591b 100755 --- a/tools/build-on-netbsd +++ b/tools/build-on-netbsd @@ -19,7 +19,6 @@ pkgs=" ${py_prefix}-oauthlib ${py_prefix}-requests ${py_prefix}-setuptools - ${py_prefix}-netifaces ${py_prefix}-yaml ${py_prefix}-jsonschema sudo diff --git a/tools/build-on-openbsd b/tools/build-on-openbsd index bc551c0da..60a5fa565 100755 --- a/tools/build-on-openbsd +++ b/tools/build-on-openbsd @@ -16,10 +16,12 @@ pkgs=" py3-configobj py3-jinja2 py3-jsonschema - py3-netifaces + py3-jsonpatch + py3-jsonpointer py3-oauthlib py3-requests py3-setuptools + py3-serial py3-yaml sudo-- wget @@ -44,6 +46,7 @@ else RC_LOCAL="/etc/rc.local" RC_LOCAL_CONTENT=" +rm -rf /var/run/cloud-init /usr/local/lib/cloud-init/ds-identify cloud-init init --local diff --git a/tools/ds-identify b/tools/ds-identify index 8f8e56198..31a15fed9 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -74,6 +74,7 @@ PATH_SYS_CLASS_BLOCK=${PATH_SYS_CLASS_BLOCK:-${PATH_ROOT}/sys/class/block} PATH_DEV_DISK="${PATH_DEV_DISK:-${PATH_ROOT}/dev/disk}" PATH_VAR_LIB_CLOUD="${PATH_VAR_LIB_CLOUD:-${PATH_ROOT}/var/lib/cloud}" PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}" +PATH_DI_ENV="${PATH_DI_ENV:-${PATH_ROOT}/usr/libexec/ds-identify-env}" PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}" PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" PATH_PROC_1_ENVIRON="${PATH_PROC_1_ENVIRON:-${PATH_ROOT}/proc/1/environ}" @@ -123,6 +124,7 @@ DS_FOUND=0 DS_NOT_FOUND=1 DS_MAYBE=2 +DI_SYSTEMD_VIRTUALIZATION=${SYSTEMD_VIRTUALIZATION:-} DI_DSNAME="" # this has to match the builtin list in cloud-init, it is what will # be searched if there is no setting found in config. @@ -436,11 +438,18 @@ cached() { detect_virt() { local virt="${UNAVAILABLE}" r="" out="" - if [ -d /run/systemd ]; then - out=$(systemd-detect-virt 2>&1) - r=$? 
- if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then - virt="$out" + if [ -d "${PATH_ROOT}/run/systemd" ]; then + if [ -n "$DI_SYSTEMD_VIRTUALIZATION" ]; then + virt=${DI_SYSTEMD_VIRTUALIZATION#*:} + debug 2 "detected $virt via env variable SYSTEMD_VIRTUALIZATION" + else + # required for compatibility with systemd version <251 + out=$(systemd-detect-virt 2>&1) + r=$? + if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then + virt="$out" + fi + debug 2 "detected $virt via ds-identify" fi elif command -v virt-what >/dev/null 2>&1; then # Map virt-what's names to those systemd-detect-virt that @@ -475,7 +484,7 @@ detect_virt() { case "$out" in hv) virt="microsoft" ;; vbox) virt="oracle" ;; - generic) "vm-other";; + generic) virt="vm-other";; *) virt="$out" esac } @@ -558,7 +567,6 @@ read_dmi_product_serial() { DI_DMI_PRODUCT_SERIAL="$_RET" } -# shellcheck disable=2034 read_uname_info() { # run uname, and parse output. # uname is tricky to parse as it outputs always in a given order @@ -612,7 +620,7 @@ parse_yaml_array() { read_datasource_list() { cached "$DI_DSLIST" && return - local dslist="" + local dslist="" key="datasource_list" # if DI_DSNAME is set directly, then avoid parsing config. if [ -n "${DI_DSNAME}" ]; then dslist="${DI_DSNAME}" @@ -630,14 +638,14 @@ read_datasource_list() { dslist=${_RET} ;; esac - if [ -z "$dslist" ] && check_config datasource_list; then + if [ -z "$dslist" ] && check_config "$key" && get_single_line_flow_sequence "$key" "$_RET"; then debug 1 "$_RET_fname set datasource_list: $_RET" parse_yaml_array "$_RET" dslist=${_RET} fi if [ -z "$dslist" ]; then dslist=${DI_DSLIST_DEFAULT} - debug 1 "no datasource_list found, using default: $dslist" + warn "no datasource_list found, using default: $dslist" fi DI_DSLIST=$dslist return 0 @@ -719,9 +727,12 @@ nocase_equal() { [ "$1" = "$2" ] && return 0 local delim="-delim-" - # shellcheck disable=2018,2019 - out=$(echo "$1${delim}$2" | tr A-Z a-z) - [ "${out#*"${delim}"}" = "${out%"${delim}"*}" ] + out=$(echo "$1${delim}$2" | tr '[:upper:]' '[:lower:]') + # delim is known not to be a pattern, and some editors currently struggle + # with parsing the quoted output required to satisfy SC2295 + # shellcheck disable=2295 + # https://github.com/tree-sitter/tree-sitter-bash/issues/254 + [ "${out#*${delim}}" = "${out%${delim}*}" ] } check_seed_dir() { @@ -815,21 +826,48 @@ check_config() { if [ "$1" = "$files" -a ! -f "$1" ]; then return 1 fi - local fname="" line="" ret="" found=0 found_fn="" - # shellcheck disable=2094 - for fname in "$@"; do - [ -f "$fname" ] || continue - while read line; do - line=${line%%#*} - case "$line" in - $key:\ *|"${key}":) - ret=${line#*:}; - ret=${ret# }; - found=$((found+1)) - found_fn="$fname";; - esac - done <"$fname" + local line="" ret="" found=0 found_fn="" oifs="$IFS" out="" + # check for a yaml key/value pair on a single line + # + # note that: + # - keys may be single or double quoted + # - spaces and tabs may exist between key and colon + # + # the following are all valid under the yaml spec (as of 1.2.2): + # + # key: string + # key: "quoted string" + # key: 1 + # key: [ some_value ] + # key : [ "some value" ] + # key\t:\t[\tsome_value\t]\t + # + # The syntax warned about is not valid posix shell, and we are not + # attempting to access an index of arrays. Silence it. 
+ # shellcheck disable=1087 + out=$(grep "$key[\"\']*[[:space:]]*:" "$@" 2>/dev/null) + IFS=${CR} + for line in $out; do + # drop '# comment' + line=${line%%#*} + # if more than one file was 'grep'ed, then grep will output filename: + # but if only one file, line will not be prefixed. + if [ $# -eq 1 ]; then + found_fn="$1" + if [ "${#line}" -eq 0 ]; then + continue + fi + else + found_fn="${line%%:*}" + line=${line#"$found_fn:"} + if [ "${#line}" -eq 0 ]; then + continue + fi + fi + ret=${line#*: }; + found=$((found+1)) done + IFS="$oifs" if [ $found -ne 0 ]; then _RET="$ret" _RET_fname="$found_fn" @@ -838,6 +876,65 @@ check_config() { return 1 } +get_value() { + # get_value(key, value) + # for a key / value pair, check that the value is non-empty and + # return the value if it exists + # + # This function is intended to be run on the output of check_config + # when the value for a key needs to be in the output. + # + # `check_config` returns true when no value is in the line, but this + # is insufficient when a value is required, as in the case of + # parsing 'datasource_list:' + local key="$1" value="$2" found=0 ret="" + + # remove everything before final ':' + ret="${value##*:}" + + # remove preceding and trailing whitespace + trim "$ret" + + # check value length + if [ "${#_RET}" -ne 0 ]; then + return 0 + fi + debug 1 "key $key didn't have a valid value" + return 1 +} + +get_single_line_flow_sequence() { + # get_single_line_flow_sequence(key, value) + # for a key / value pair, check that the value contains a single + # line flow sequence[1] with a value + # + # return 0 if a single line flow sequence is found, otherwise 1 + # does not modify _RET + # + # [1] https://yaml.org/spec/1.2.2/#741-flow-sequences + local ret="" tmp="" + get_value "$1" "$2" || return 1 + ret="$_RET" + tmp="$_RET" + + # remove smallest ] suffix + tmp="${tmp%]}" + + # remove smallest [ prefix + tmp="${tmp#[}" + + # remove preceding and trailing whitespace + trim "$tmp" + _RET="$ret" + + # check value length + if [ "${#_RET}" -ne 0 ]; then + return 0 + fi + debug 1 "key $key didn't have a valid single line flow sequence" + return 1 +} + dscheck_MAAS() { is_container && return "${DS_NOT_FOUND}" # heuristic check for ephemeral boot environment @@ -993,7 +1090,7 @@ vmware_guest_customization() { # (disable_vmware_customization=true). If it is set to false, then # user has requested customization. 
local key="disable_vmware_customization" - if check_config "$key"; then + if check_config "$key" && get_value "$key" "$_RET"; then debug 2 "${_RET_fname} set $key to $_RET" case "$_RET" in 0|false|False) return 0;; @@ -1124,7 +1221,7 @@ has_ovf_cdrom() { is_disabled() { if [ -f /etc/cloud/cloud-init.disabled ]; then - debug 1 "disabled by marker file /etc/cloud-init.disabled" + debug 1 "disabled by marker file /etc/cloud/cloud-init.disabled" return 0 fi if [ "${KERNEL_CMDLINE:-}" = "cloud-init=disabled" ]; then @@ -1250,16 +1347,23 @@ ec2_identify_platform() { return 0 fi - # product uuid and product serial start with case insensitive - local uuid="${DI_DMI_PRODUCT_UUID}" - case "$uuid:$serial" in - [Ee][Cc]2*:[Ee][Cc]2*) - # both start with ec2, now check for case insensitive equal - nocase_equal "$uuid" "$serial" && - { _RET="AWS"; return 0; };; + # keep only the first octet + local start_uuid="${DI_DMI_PRODUCT_UUID%%-*}" + case "$start_uuid" in + # example ec2 uuid: + # EC2E1916-9099-7CAF-FD21-012345ABCDEF + [Ee][Cc]2*) + _RET="AWS" + ;; + # example ec2 uuid: + # 45E12AEC-DCD1-B213-94ED-012345ABCDEF + *2[0-9a-fA-F][Ee][Cc]) + _RET="AWS" + ;; + *) + _RET="$default" + ;; esac - - _RET="$default" return 0; } @@ -1586,21 +1690,36 @@ dscheck_VMware() { return "${DS_NOT_FOUND}" } -WSL_cloudinit_dir_in() { +WSL_path() { + local params="$1" path="$2" val="" + val="$(wslpath "$params" "$1")" + _RET="$val" +} + +WSL_run_cmd() { + local val="" exepath="$1" + shift + _RET=$(/init "$exepath" /c "$@" 2>/dev/null) +} + +WSL_profile_dir() { + # Determine where a suitable user profile home is located _RET="" - local cmdexe="" cloudinitdir="" val="" - for m in "$@"; do - cmdexe="$m/Windows/System23/cmd.exe" + local cmdexe="" profiledir="" val="" + # shellcheck disable=SC2068 + for m in $@; do + cmdexe="$m/Windows/System32/cmd.exe" if command -v "$cmdexe" > /dev/null 2>&1; then # Here WSL's proprietary `/init` is used to start the Windows cmd.exe # to output the Windows user profile directory path, which is # held by the environment variable %USERPROFILE%. - cloudinitdir=$(/init "$cmdexe" /c echo %USERPROFILE% 2>/dev/null) - if [ -n "$cloudinitdir" ]; then + WSL_run_cmd "$cmdexe" "echo %USERPROFILE%" + profiledir="${_RET%%[[:cntrl:]]}" + if [ -n "$profiledir" ]; then # wslpath is a program supplied by WSL itself that translates Windows and Linux paths, # respecting the mountpoints where the Windows drives are mounted. # (in fact it's a symlink to /init). - val=$(wslpath -au "$cloudinitdir") && _RET="$val" + WSL_path "-au" "$profiledir" return $? 
fi fi @@ -1611,9 +1730,12 @@ WSL_instance_name() { local val="" instance_name="" - instance_name=$(wslpath -am /) - val="${instance_name##*/}" - _RET="${val}" + WSL_path "-am" "/" + instance_name="${_RET}" + # Extracts "Ubuntu/" from "//wsl.localhost/Ubuntu/" + val="${instance_name#//*/}" + # Extracts "Ubuntu" from "Ubuntu/" + _RET="${val%/}" } dscheck_WSL() { @@ -1640,22 +1762,43 @@ dscheck_WSL() { fi # We know we are under WSL and have access to the host filesystem, - # so let's find the .cloud-init directory - WSL_cloudinit_dir_in "$mountpoints" - cloudinitdir="${_RET}" - if [ -z "$cloudinitdir" ]; then - debug 1 "%USERPROFILE/.cloud-init/ directory not found" + # so let's find the user's home directory + WSL_profile_dir "$mountpoints" + profile_dir="${_RET}" + if [ -z "$profile_dir" ]; then + debug 1 "%USERPROFILE% directory not found" + return "${DS_NOT_FOUND}" + fi + + # Then we can check for any .cloud-init folders for the user + if [ ! -d "$profile_dir/.cloud-init/" ] && [ ! -d "$profile_dir/.ubuntupro/.cloud-init/" ]; then + debug 1 "No .cloud-init directories found" return "${DS_NOT_FOUND}" fi - # and the applicable userdata file. Notice the ordering in the for-loop - # must match our expected precedence, so the file we find is what the - # datasource must process. WSL_instance_name instance_name="${_RET}" # shellcheck source=/dev/null . "${PATH_ROOT}/etc/os-release" - for userdatafile in "${instance_name}.user-data" "${ID:-linux}-${VERSION_ID:-${VERSION_CODENAME}}".user-data "${ID:-linux}-all.user-data" "default.user-data"; do + + # and the applicable userdata file. Notice the ordering in the for-loop + # must match our expected precedence, so the file we find is what the + # datasource must process. + # We only care about ubuntupro configs if the distro is an Ubuntu distro. + # shellcheck disable=SC2153 + if [ "$NAME" = "Ubuntu" ]; then + cloudinitdir="$profile_dir/.ubuntupro/.cloud-init" + for userdatafile in "${instance_name}.user-data" "agent.yaml"; do + candidate="$cloudinitdir/$userdatafile" + if [ -f "$candidate" ]; then + debug 1 "Found applicable pro data file for this instance at: $candidate" + return ${DS_FOUND} + fi + done + fi + + cloudinitdir="$profile_dir/.cloud-init" + for userdatafile in "${instance_name}.user-data" "${ID:-linux}-${VERSION_ID:-${VERSION_CODENAME}}.user-data" "${ID:-linux}-all.user-data" "default.user-data"; do candidate="$cloudinitdir/$userdatafile" if [ -f "$candidate" ]; then debug 1 "Found applicable user data file for this instance at: $candidate" @@ -1761,9 +1904,11 @@ found() { } trim() { - # shellcheck disable=2048,2086 - set -- $* - _RET="$*" + # trim all whitespace from the string, assign output to _RET + local tmp="" cur="$*" + until tmp="${cur#[[:space:]]}"; [ "$tmp" = "$cur" ]; do cur="$tmp"; done + until tmp="${cur%[[:space:]]}"; [ "$tmp" = "$cur" ]; do cur="$tmp"; done + _RET="$tmp" } unquote() { @@ -1953,6 +2098,16 @@ set_run_path() { DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}" } +# set ds-identify internal variables by providing an env file +# testing only - do NOT use for production code, it is NOT supported +get_environment() { + if [ -f "$PATH_DI_ENV" ]; then + debug 0 "WARN: loading environment file [${PATH_DI_ENV}]"; + # shellcheck source=/dev/null + . 
"$PATH_DI_ENV" + fi +} + _main() { local dscheck_fn="" ret_dis=1 ret_en=0 @@ -2083,6 +2238,7 @@ _main() { main() { local ret="" + get_environment ensure_sane_path read_uname_info set_run_path @@ -2112,7 +2268,14 @@ noop() { : } +get_environment case "${DI_MAIN}" in + # builtin DI_MAIN implementations main|print_info|noop) "${DI_MAIN}" "$@";; - *) error "unexpected value for DI_MAIN"; exit 1;; + + # side-load an alternate implementation + # testing only - NOT use for production code, it is NOT supported + *) + debug 0 "WARN: side-loading alternate implementation: [${DI_MAIN}]"; + exec "${DI_MAIN}" "$@";; esac diff --git a/tools/format_yaml_doc b/tools/format_yaml_doc new file mode 100755 index 000000000..697be5b3a --- /dev/null +++ b/tools/format_yaml_doc @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +"""Rewrite YAML file using standard yaml.dump index of 2 and sorting keys. + +Useful when creating standard rtd/module-docs/*/data.yaml files. +""" +import argparse +import yaml + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument("yaml_file", help="YAML documentation file to reformat.") +parser.add_argument( + "--cloud-config", + action="store_true", + default=False, + help="Append #cloud-config header to rendered content" +) +args = parser.parse_args() +dump_kwargs = { + "indent": 2, + "sort_keys": True, +} +if args.cloud_config: + dump_kwargs["default_flow_style"] = None +formatted_content = yaml.dump( + yaml.safe_load(open(args.yaml_file).read()), **dump_kwargs +) +with open(args.yaml_file, "w") as stream: + if args.cloud_config: + stream.write("#cloud-config\n") + stream.write(formatted_content) + diff --git a/tools/hook-hotplug b/tools/hook-hotplug index 3085ba86d..e3cd2a1f1 100755 --- a/tools/hook-hotplug +++ b/tools/hook-hotplug @@ -2,21 +2,26 @@ # This file is part of cloud-init. See LICENSE file for license information. # This script checks if cloud-init has hotplug hooked and if -# cloud-init has finished; if so invoke cloud-init hotplug-hook +# cloud-init is ready; if so invoke cloud-init hotplug-hook -is_finished() { - [ -e /run/cloud-init/result.json ] +fifo=/run/cloud-init/hook-hotplug-cmd + +should_run() { + if [ -d /run/systemd ]; then + # check that the socket is ready + [ -p $fifo ] + else + # on non-systemd, check cloud-init fully finished. + [ -e /run/cloud-init/result.json ] + fi } -if is_finished; then - # open cloud-init's hotplug-hook fifo rw - exec 3<>/run/cloud-init/hook-hotplug-cmd - env_params=" \ - --subsystem=${SUBSYSTEM} \ - handle \ - --devpath=${DEVPATH} \ - --udevaction=${ACTION} \ - " - # write params to cloud-init's hotplug-hook fifo - echo "${env_params}" >&3 +if ! should_run; then + exit 0 fi + +# open cloud-init's hotplug-hook fifo rw +exec 3<>$fifo +env_params=" --subsystem=${SUBSYSTEM} handle --devpath=${DEVPATH} --udevaction=${ACTION}" +# write params to cloud-init's hotplug-hook fifo +echo "${env_params}" >&3 diff --git a/tools/netplan_schema_check.py b/tools/netplan_schema_check.py new file mode 100644 index 000000000..ae188fb44 --- /dev/null +++ b/tools/netplan_schema_check.py @@ -0,0 +1,91 @@ +import argparse +import os +import yaml + +from jsonschema import Draft4Validator + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description=""" +This script validates netplan example files against the cloud-init networkv2 +schema. The goal is to provide an easy way to track drift between networkv2 +and netplan. 
+ +Netplan does not provide a centralized schema file to compare against directly, +but does maintain a reasonably comprehensive examples directory. This script +relies on that directory to glean the netplan schema. + """, + formatter_class=argparse.RawTextHelpFormatter, + ) + parser.add_argument( + "--schema-file", + required=True, + help=""" +The cloud-init networkv2 schema file found in +cloudinit/config/schemas/schema-network-config-v2.json +in this repository. + """, + ) + parser.add_argument( + "--netplan-examples", + required=True, + help=""" +The examples/ directory in the netplan repo. The most +recent netplan repo can be cloned from +https://github.com/canonical/netplan. + """, + ) + return parser.parse_args() + + +def validate_netplan_against_networkv2(schema_file, netplan_examples): + """ + This script validates netplan example files against the cloud-init + networkv2 schema. The goal is to provide an easy way to track drift + between networkv2 and netplan. + + Netplan does not provide a centralized schema file to compare against + directly, but does maintain a reasonably comprehensive examples directory. + This script relies on that directory to glean the netplan schema. + + There are two arguments required for this script. The first is schema_file + which should point to the cloud-init networkv2 schema file found in + cloudinit/config/schemas/schema-network-config-v2.json in this repository. + The second is netplan_examples which should point to the examples/ + directory in the netplan repo. The most recent netplan repo can be cloned + from https://github.com/canonical/netplan. + """ + networkv2_schema = None + with open(schema_file) as f: + networkv2_schema = yaml.safe_load(f) + validator = Draft4Validator(networkv2_schema) + + error_obj = {} + for walk_tuple in os.walk(netplan_examples): + filenames = walk_tuple[2] + for fname in filenames: + if fname.endswith(".yaml"): + with open(netplan_examples + fname) as netplan_f: + netplan_example = yaml.safe_load(netplan_f) + errors = validator.iter_errors(netplan_example) + for e in errors: + schema_path_str = "-".join(map(str, e.schema_path)) + if schema_path_str not in error_obj: + error_obj[schema_path_str] = {} + if e.message not in error_obj[schema_path_str]: + error_obj[schema_path_str][e.message] = set() + error_obj[schema_path_str][e.message].add(fname) + + # clean up error_obj for human readability + for schema_path in error_obj: + for message in error_obj[schema_path]: + error_obj[schema_path][message] = list( + error_obj[schema_path][message] + ) + print(yaml.dump(error_obj)) + + +if __name__ == "__main__": + args = parse_args() + validate_netplan_against_networkv2(args.schema_file, args.netplan_examples) diff --git a/tools/read-dependencies b/tools/read-dependencies index a3b2d7388..6e7bf4616 100755 --- a/tools/read-dependencies +++ b/tools/read-dependencies @@ -203,7 +203,7 @@ def parse_pip_requirements(requirements_path): with open(requirements_path, "r") as fp: for line in fp: line = line.strip() - if not line or line.startswith("#"): + if not line or line.startswith(("#", "-r ")): continue # remove pip-style markers @@ -212,9 +212,7 @@ def parse_pip_requirements(requirements_path): # remove version requirements version_comparison = re.compile(r"[~!>=.<]+") if version_comparison.search(dep): - dep_names.append( - version_comparison.split(dep)[0].strip() - ) + dep_names.append(version_comparison.split(dep)[0].strip()) else: dep_names.append(dep) return dep_names diff --git a/tools/render-template 
b/tools/render-template index 5ef5a374b..c3af642a0 100755 --- a/tools/render-template +++ b/tools/render-template @@ -15,6 +15,7 @@ def main(): "alpine", "amazon", "arch", + "azurelinux", "benchmark", "centos", "cloudlinux", diff --git a/tools/run-container b/tools/run-container index f27c02137..01b0fca87 100755 --- a/tools/run-container +++ b/tools/run-container @@ -9,6 +9,7 @@ VERBOSITY=0 KEEP=false CONTAINER="" WAIT_MAX=30 +LXC=${LXC:-lxc} error() { echo "$@" 1>&2; } fail() { [ $# -eq 0 ] || error "$@"; exit 1; } @@ -40,6 +41,7 @@ Usage: ${0##*/} [ options ] [images:]image-ref -u | --unittest run unit tests --vm use a VM instead of a container --wait-max max time to wait for a container or VM to be ready + --commitish commit to package and run, default: HEAD Example: * ${0##*/} --package --source-package --unittest centos/6 @@ -90,7 +92,7 @@ inside_as_cd() { inside() { local name="$1" shift - lxc exec "$name" -- "$@" + $LXC exec "$name" -- "$@" } inject_cloud_init(){ @@ -111,8 +113,8 @@ inject_cloud_init(){ esac # attempt to get branch name. - commitish=$(git rev-parse --abbrev-ref HEAD) || { - errorrc "Failed git rev-parse --abbrev-ref HEAD" + commitish=$(git rev-parse --abbrev-ref "$COMMITISH") || { + errorrc "Failed git rev-parse --abbrev-ref $COMMITISH" return } if [ "$commitish" = "HEAD" ]; then @@ -373,7 +375,7 @@ start_instance() { debug 1 "starting container $name from '$src'" launch_flags=() [ "$use_vm" == true ] && launch_flags+=(--vm) - lxc launch "$src" "$name" "${launch_flags[@]}" || { + $LXC launch "$src" "$name" "${launch_flags[@]}" || { errorrc "Failed to start container '$name' from '$src'"; return } @@ -383,7 +385,7 @@ start_instance() { delete_instance() { debug 1 "removing container $1 [--keep to keep]" - lxc delete --force "$1" + $LXC delete --force "$1" } run_self_inside() { @@ -401,7 +403,7 @@ run_self_inside_as_cd() { main() { local short_opts="a:hknpsuv" - local long_opts="artifacts:,dirty,help,keep,name:,package,source-package,unittest,verbose,vm,wait-max:" + local long_opts="artifacts:,dirty,help,keep,name:,package,source-package,unittest,verbose,vm,wait-max:,commitish:" local getopt_out="" getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && @@ -427,10 +429,12 @@ main() { -v|--verbose) VERBOSITY=$((VERBOSITY+1));; --vm) use_vm=true;; --wait-max) WAIT_MAX="$next"; shift;; + --commitish) COMMITISH="$next"; shift;; --) shift; break;; esac shift; done + COMMITISH=${COMMITISH:-HEAD} [ $# -eq 1 ] || { bad_Usage "Expected 1 arg, got $# ($*)"; return; } local img_ref_in="$1" @@ -549,7 +553,7 @@ main() { } for art in $(inside "$name" sh -c "echo $cdir/*${pkg_ext}"); do - lxc file pull "$name/$art" "$artifact_d" || { + $LXC file pull "$name/$art" "$artifact_d" || { errorrc "Failed to pull '$name/$art' to ${artifact_d}" errors[${#errors[@]}]="artifact copy: $art" } diff --git a/tox.ini b/tox.ini index 34b87d018..a43ef53f3 100644 --- a/tox.ini +++ b/tox.ini @@ -6,12 +6,12 @@ envlist = isort, mypy, pylint -recreate = True [doc8] ignore-path-errors=doc/rtd/topics/faq.rst;D001 [testenv] +package = skip basepython = python3 setenv = LC_ALL = en_US.utf-8 @@ -25,10 +25,11 @@ hypothesis==6.31.6 hypothesis_jsonschema==0.20.1 isort==5.10.1 mypy==0.950 -pylint==2.13.9 +pylint==3.2.0 pytest==7.0.1 -ruff==0.0.285 +ruff==0.4.3 types-jsonschema==4.4.2 +types-Jinja2==2.11.9 types-oauthlib==3.1.6 types-passlib==1.7.7.12 types-PyYAML==6.0.4 @@ -40,43 +41,48 @@ typing-extensions==4.1.1 [files] schema = 
cloudinit/config/schemas/schema-cloud-config-v1.json version = cloudinit/config/schemas/versions.schema.cloud-config.json +network_v1 = cloudinit/config/schemas/schema-network-config-v1.json +network_v2 = cloudinit/config/schemas/schema-network-config-v2.json [testenv:ruff] deps = ruff=={[format_deps]ruff} -commands = {envpython} -m ruff {posargs:cloudinit/ tests/ tools/ packages/bddeb packages/brpm conftest.py setup.py} +commands = {envpython} -m ruff check {posargs:.} [testenv:pylint] deps = pylint=={[format_deps]pylint} -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt -commands = {envpython} -m pylint {posargs:cloudinit/ tests/ tools/ conftest.py setup.py} +commands = {envpython} -m pylint {posargs:.} [testenv:black] deps = black=={[format_deps]black} -commands = {envpython} -m black . --check +commands = {envpython} -m black --check {posargs:.} [testenv:isort] deps = isort=={[format_deps]isort} -commands = {envpython} -m isort . --check-only +commands = {envpython} -m isort --check-only --diff {posargs:.} [testenv:mypy] deps = + -r{toxinidir}/test-requirements.txt + -r{toxinidir}/integration-requirements.txt + -r{toxinidir}/doc-requirements.txt hypothesis=={[format_deps]hypothesis} hypothesis_jsonschema=={[format_deps]hypothesis_jsonschema} mypy=={[format_deps]mypy} - pytest=={[format_deps]pytest} types-jsonschema=={[format_deps]types-jsonschema} + types-Jinja2=={[format_deps]types-Jinja2} types-passlib=={[format_deps]types-passlib} types-pyyaml=={[format_deps]types-PyYAML} types-oauthlib=={[format_deps]types-oauthlib} types-requests=={[format_deps]types-requests} types-setuptools=={[format_deps]types-setuptools} typing-extensions=={[format_deps]typing-extensions} -commands = {envpython} -m mypy cloudinit/ tests/ tools/ +commands = {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} [testenv:check_format] deps = @@ -87,16 +93,18 @@ deps = isort=={[format_deps]isort} mypy=={[format_deps]mypy} pylint=={[format_deps]pylint} - pytest=={[format_deps]pytest} types-jsonschema=={[format_deps]types-jsonschema} + types-Jinja2=={[format_deps]types-Jinja2} types-oauthlib=={[format_deps]types-oauthlib} types-passlib=={[format_deps]types-passlib} types-pyyaml=={[format_deps]types-PyYAML} + types-oauthlib=={[format_deps]types-oauthlib} types-requests=={[format_deps]types-requests} types-setuptools=={[format_deps]types-setuptools} typing-extensions=={[format_deps]typing-extensions} -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt + -r{toxinidir}/doc-requirements.txt commands = {[testenv:black]commands} {[testenv:ruff]commands} @@ -113,15 +121,17 @@ deps = isort mypy pylint - pytest types-jsonschema + types-Jinja2 types-oauthlib + types-passlib types-pyyaml + types-oauthlib types-requests types-setuptools - typing-extensions -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt + -r{toxinidir}/doc-requirements.txt commands = {[testenv:check_format]commands} @@ -134,6 +144,8 @@ commands = {envpython} -m black . 
{envpython} -m json.tool --indent 2 {[files]schema} {[files]schema} {envpython} -m json.tool --indent 2 {[files]version} {[files]version} + {envpython} -m json.tool --indent 2 {[files]network_v1} {[files]network_v1} + {envpython} -m json.tool --indent 2 {[files]network_v2} {[files]network_v2} [testenv:do_format_tip] deps = @@ -149,7 +161,8 @@ commands = {envpython} -m pytest \ -vvvv --showlocals \ --durations 10 \ -m "not hypothesis_slow" \ - {posargs:--cov=cloudinit --cov-branch tests/unittests} + --cov=cloudinit --cov-branch \ + {posargs:tests/unittests} # experimental [testenv:py3-fast] @@ -166,15 +179,16 @@ deps = -r{toxinidir}/test-requirements.txt commands = {envpython} -m pytest \ -m hypothesis_slow \ - {posargs:--hypothesis-show-statistics tests/unittests} + --hypothesis-show-statistics \ + {posargs:tests/unittests} #commands = {envpython} -X tracemalloc=40 -Werror::ResourceWarning:cloudinit -m pytest \ [testenv:py3-leak] deps = {[testenv:py3]deps} commands = {envpython} -X tracemalloc=40 -Wall -m pytest \ --durations 10 \ - {posargs:--cov=cloudinit --cov-branch \ - tests/unittests} + --cov=cloudinit --cov-branch \ + {posargs:tests/unittests} [testenv:lowest-supported] @@ -194,7 +208,6 @@ deps = requests==2.18.4 jsonpatch==1.16 jsonschema==2.6.0 - netifaces==0.10.4 # test-requirements pytest==3.3.2 pytest-cov==2.5.1 @@ -213,6 +226,9 @@ commands = {envpython} -m sphinx {posargs:-W doc/rtd doc/rtd_html} doc8 doc/rtd +[doc-lint] +ignore-path-errors=doc/rtd/topics/faq.rst;D001 + [testenv:doc-spelling] deps = -r{toxinidir}/doc-requirements.txt @@ -238,16 +254,24 @@ commands = {[testenv:ruff]commands} [testenv:tip-mypy] deps = + -r{toxinidir}/test-requirements.txt + -r{toxinidir}/integration-requirements.txt + -r{toxinidir}/doc-requirements.txt hypothesis hypothesis_jsonschema mypy pytest + types-Jinja2 types-jsonschema types-oauthlib types-PyYAML + types-passlib + types-pyyaml + types-oauthlib types-requests types-setuptools -commands = {[testenv:mypy]commands} + typing-extensions +commands = {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} [testenv:tip-pylint] deps = @@ -256,7 +280,8 @@ deps = # test-requirements -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt -commands = {[testenv:pylint]commands} +commands = {envpython} -m pylint {posargs:.} + [testenv:tip-black] deps = black
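A few closing notes on the smaller mechanical changes above. First, the generate_fallback_config = dict stub in tests/unittests/test_stages.py: the dict builtin, called with no arguments, is exactly equivalent to the lambda: {} it replaces, while satisfying pylint's complaint about unnecessary lambdas. A trivial demonstration:

    fallback_old = lambda: {}  # the form being replaced
    fallback_new = dict  # the replacement

    assert fallback_old() == fallback_new() == {}
    assert fallback_new() is not fallback_new()  # a fresh dict per call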
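Second, the retry fixtures in tests/unittests/test_url_helper.py now patch time.monotonic rather than time.time, matching the url_helper code they exercise: when measuring elapsed intervals, time.monotonic cannot jump when the wall clock is stepped by NTP or a timezone change. A small sketch of the idiom (standard-library behaviour, nothing cloud-init specific):

    import time

    def wait_at_least(max_wait: float) -> float:
        """Sleep-poll until max_wait seconds elapse; return the elapsed time."""
        start = time.monotonic()  # immune to wall-clock adjustments
        while time.monotonic() - start < max_wait:
            time.sleep(0.01)
        return time.monotonic() - start

    assert wait_at_least(0.05) >= 0.05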
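Third, the new get_value/get_single_line_flow_sequence helpers in tools/ds-identify deliberately accept only a YAML flow sequence written on a single line, e.g. datasource_list: [ NoCloud, None ]; multi-line block sequences remain out of scope for the shell parser. A Python paraphrase of the accepted shape (an illustration of the constraint, not a port of the shell code):

    def single_line_flow_sequence(line):
        """Return the items of a 'key: [ a, b ]' line, or None otherwise."""
        _key, _, value = line.partition(":")
        value = value.strip()
        if value.startswith("[") and value.endswith("]") and len(value) > 2:
            return [item.strip() for item in value[1:-1].split(",")]
        return None

    assert single_line_flow_sequence(
        "datasource_list: [ NoCloud, None ]"
    ) == ["NoCloud", "None"]
    assert single_line_flow_sequence("datasource_list:") is None  # no value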
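Finally, the reworked tools/hook-hotplug gates on readiness rather than completion: under systemd only the hotplug command FIFO has to exist, so events can be handled before cloud-init has fully finished, while non-systemd hosts keep the old result.json check. The same decision expressed in Python, purely as a readability aid (paths as in the script above):

    import os
    import stat

    FIFO = "/run/cloud-init/hook-hotplug-cmd"
    RESULT_JSON = "/run/cloud-init/result.json"

    def should_run() -> bool:
        if os.path.isdir("/run/systemd"):
            # systemd: the named pipe must be ready ([ -p $fifo ])
            try:
                return stat.S_ISFIFO(os.stat(FIFO).st_mode)
            except FileNotFoundError:
                return False
        # non-systemd: cloud-init must have fully finished
        return os.path.exists(RESULT_JSON)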