diff --git a/.github/workflows/lint-docs.yaml b/.github/workflows/lint-docs.yaml deleted file mode 100644 index 53a6339..0000000 --- a/.github/workflows/lint-docs.yaml +++ /dev/null @@ -1,35 +0,0 @@ -name: build-docs -on: - workflow_dispatch: - - pull_request: - branches: - - main - -jobs: - build-docs: - permissions: - contents: read - uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-pr.yml@main - with: - init-lenient: false - init-fail-on-error: true - - lint-docs: - name: Lint Docs - runs-on: ubuntu-latest - permissions: - contents: read - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - with: - python-version: '3.11' - - name: Install antsibull-docs - run: pip install antsibull-docs - - name: Lint Docs - run: | - antsibull-docs lint-collection-docs \ - --plugin-docs \ - --validate-collection-refs=all \ - --disallow-unknown-collection-refs . diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml deleted file mode 100644 index 0c2671b..0000000 --- a/.github/workflows/lint.yaml +++ /dev/null @@ -1,25 +0,0 @@ -name: ansible-lint -on: - pull_request: - branches: - - main - - release/v* - - workflow_dispatch: - -jobs: - lint: - name: Ansible Lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - uses: actions/setup-python@v5 - with: - cache: pip - - - name: Install python dependencies - run: pip install -r requirements.txt - - - name: Run ansible-lint - run: ansible-lint diff --git a/.github/workflows/publish-docs.yaml b/.github/workflows/publish-docs.yaml deleted file mode 100644 index a3bfcdf..0000000 --- a/.github/workflows/publish-docs.yaml +++ /dev/null @@ -1,35 +0,0 @@ -name: publish-docs -on: - push: - tags: - - v* - - pull_request: - branches: - - main - - release/v* - -jobs: - build-docs: - permissions: - contents: read - name: Build Docs - uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-push.yml@main - with: - init-lenient: false - 
init-fail-on-error: true - - publish-docs: - if: github.repository == 'crichez/crichez.secureboot' - name: Publish Docs - permissions: - contents: write - pages: write - id-token: write - needs: - - build-docs - uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-publish-gh-pages.yml@main - with: - artifact-name: ${{ needs.build-docs.outputs.artifact-name }} - secrets: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml deleted file mode 100644 index 4ded3d0..0000000 --- a/.github/workflows/publish.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: publish -on: - release: - types: - - released - -jobs: - publish: - name: Publish to Galaxy - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - ref: ${{ github.ref }} - - - name: Build collection - run: ansible-galaxy collection build - - - name: Get expected version string - shell: echo "VERSION={$GITHUB_REF_NAME:1}" >> "$GITHUB_ENV" - - - name: Publish to Galaxy - run: | - ansible-galaxy collection publish \ - tofugarden-secureboot-${{ env.VERSION }}.tar.gz \ - --token ${{ secrets.GALAXY_API_KEY }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml deleted file mode 100644 index 7090377..0000000 --- a/.github/workflows/release.yaml +++ /dev/null @@ -1,31 +0,0 @@ -name: Release -on: - push: - tags: - - v* - -jobs: - release: - name: Release - runs-on: ubuntu-latest - permissions: - contents: write - steps: - - uses: actions/checkout@v4 - - - name: Install antsibull-changelog - run: pip install antsibull-changelog - - - name: Generate changelog - run: antsibull-changelog release - - - name: Build collection tarball - run: ansible-galaxy collection build - - - name: Get version string - run: echo "VERSION=${GITHUB_REF_NAME:1}" >> "$GITHUB_ENV" - - - uses: ncipollo/release-action@v1 - with: - artifacts: crichez-secureboot-${{ env.VERSION }}.tar.gz - bodyfile: CHANGELOG.md diff --git 
a/.gitignore b/.gitignore index 11f9449..e12e17d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,5 @@ .vscode changelogs/.plugin-cache.yaml +.DS_Store +.build +.cache diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..8827fa1 --- /dev/null +++ b/Makefile @@ -0,0 +1,41 @@ +# + +.SUFFIXES: +.SUFFIXES: .yml .yaml .tar.gz .xml .xml.j2 yml.j2 yaml.j2 .j2 .sh + +# Version detection (used to build tarballs) +version_regex = 's/^version:\s((:?[0-9]*\.?){3})$$/\1/p' +version := $(shell sed -nE $(version_regex) galaxy.yml) +tarball := crichez-secureboot-$(version).tar.gz + +# Directories used for testing +workspace_dir = . +real_workspace_dir = $(abspath $(workspace_dir)) + +test: + ansible-playbook tests/integration/targets/role_uki/files/setup.yml \ + -e workspace_dir=$(real_workspace_dir) \ + -c local + + ansible-playbook $(workspace_dir)/.build/dependencies.yml \ + -i .build/inventory.yml \ + --key-file .build/id_ed25519 \ + -e workspace_dir=$(real_workspace_dir) + + ansible-playbook $(workspace_dir)/.build/test.yml \ + -i .build/inventory.yml \ + --key-file .build/id_ed25519 \ + -e workspace_dir=$(real_workspace_dir) + +$(tarball): + tar -c -f $(tarball) ./* --exclude ./.* + +all: $(tarball) + +teardown: + ansible-playbook tests/integration/targets/role_uki/files/teardown.yml \ + -e workspace_dir=$(real_workspace_dir) \ + -c local + +clean: teardown + rm -rf .cache diff --git a/README.md b/README.md index 780e848..657c50f 100644 --- a/README.md +++ b/README.md @@ -1,187 +1,195 @@ # crichez.secureboot -This repository contains an Ansible roles to configure secure boot. - -## Overview - -Currently, only one role is provided by this collection: `uki_config`. It does the following -high-level things: - -1. Enroll a valid machine owner key (MOK) for image signing -2. Configure `kernel-install` to generate a unified kernel image (UKI) instead of a separate - kernel, command line and initrd -3. 
Configure a tool to automatically sign generated UKIs using the enrolled MOK -4. Configure the host to boot straight from `shim` to the UKI, skipping GRUB entirely - - -## Requirements - -If you're using Fedora 40, you can satisfy this entire section with: - -```sh -dnf install openssl systemd-boot sbsigntools binutils systemd-ukify virt-firmware uki-direct expect +This repository contains Ansible roles to configure secure boot. Only one role +is currently provided: `uki`. + +## `crichez.secureboot.uki` + +The `uki` role configures the target host to boot from a unified kernel image +signed by a local machine owner key. + +### Host requirements + +1. The distribution uses systemd (no Alpine or BSDs, sorry :/) +2. Secure boot is enabled +3. The machine owner key is stored in a NSS database (see + [efikeygen](https://manpages.debian.org/testing/pesign/efikeygen.1.en.html)) +4. The machine owner key is enrolled in MokListRT (see + [mokutil](https://manpages.debian.org/testing/mokutil/mokutil.1.en.html)) +5. The following packages are installed: + * pesign + * mokutil + * systemd-boot + * systemd-ukify + * python3-cryptography + * python3-virt-firmware + * python3-importlib-resources (seems needed only on Debianish somehow) + +> [!CAUTION] +> Although systemd-boot must be installed, the actual bootloader +> must not be configured. On Debian-like platforms, you will likely need to +> reconfigure your bootloader after installing systemd-boot. For GRUB, use +> `sudo update-grub`. For other bootloaders, consult their documentation. +> Failing to do so could make the host unable to reboot after an error. On +> RedHat platforms, the systemd-boot package does not seem to break things. + +### Controller requirements + +1. On Debian-like platforms, `git` must be available +2. 
The following collections are available: + * `ansible.builtin` + * `community.general` + * `community.crypto` + +### Usage + +By default, the role assumes the machine owner key is stored in the NSS +database at `/etc/pki/pesign`, and its nickname/friendly name is `mok`. +If this is the case for you, you can include the role without any extra vars: +```yaml +- name: Reboot from a signed unified kernel image + ansible.builtin.include_role: + name: uki ``` -or: - +The role's arguments support a custom NSS database path, and MOK nickname: ```yaml -- name: Install dependencies - ansible.builtin.dnf: - name: - - openssl - - systemd-boot - - sbsigntools - - systemd-ukify - - virt-firmware - - uki-direct - - expect +- name: Reboot from a signed unified kernel image + ansible.builtin.include_role: + name: uki + vars: + uki_mok: + database_path: /etc/my/database/path + friendly_name: 'Bob Dorough Secure Boot Signing Key' ``` -For every other platform, the requirements for the `uki_config` role are described in the -following three sections: -1. System requirements -2. Package dependencies -3. Argument-specific dependencies - -### System requirements - -- A UEFI firmware platform with [secure boot](https://en.wikipedia.org/wiki/UEFI#Secure_Boot) - support. This is not technically a requirement for UKIs, but is considered so by this role. - -### Package dependencies - -**shim** - -This role assumes `shim` is used to authenticate binaries. This should alredy come packaged in any -modern Linux distribution. Support for skipping shim is not provided, but please submit an issue -or PR if you need this. - -**kernel-install** - -Although a single UKI build can be done without it, this role assumes the use of systemd -`kernel-install` *version 255 or greater*. This is typically provided by the "systemd-udev" -package on modern distributions, but may be too old. - -**virt-firmware** - -The virt-firmware python package is required to configure shim to boot straight to your UKI. 
On -Fedora 40, the tools needed are split into the `virt-firmware` main package and `uki-direct` -subpackage. - -On other platforms, you may be able to use your python package manager of choice, or -[get it from PyPI](https://pypi.org/project/virt-firmware/). If you do this without a system -package, you will need to make sure: - -- `kernel-bootcfg` is in your path -- `99-uki-uefi-setup.install` is executable in `/etc/kernel/install.d` -- `kernel-bootcfg-boot-successful.service` is loaded - -**openssl** - -Openssl is required by the `community.crypto` modules used by the role. Even if you choose to -import your own MOK, the role still checks it for validity. - -**systemd-stub** +**Change reporting:** -The systemd kernel boot stub is required to make the unified kernel image bootable. On Fedora, -this is shipped with the "systemd-boot" package. +The role only reports changes that are relevant to its *intent.* Temporary +directory creation for example does not report a change, but changing /etc +files will report a change *even if* the UKI was not rebuilt. -**sbsign** +**Error recovery:** -The `sbsign` utility is required to sign UKIs. This is usually provided by the "sbsigntools" -package. +If an error occurs during the configuration process, changes are reverted. +If an error occurs during recovery, please file a bug report issue as I really +strive to avoid bricking your system 🙂. -**ukify** - -`ukify` is the only supported UKI generator, you should have it installed. This is usually -provided by the "systemd-ukify" package. - -### Argument-dependent dependecies - -**dracut or other initrd generator** - -By default, `dracut` is the tool declared in `uki_config_initrd_generator`, so should be -installed if you don't modify this argument. Others may be used as long as they are supported -by your system *and* `kernel-install`. 
Note that some generators like `dracut` and `mkinitcpio` -have support for UKI generation on their own; this is not supported and will cause failures. -The role expects a regular inird to be generated, and builds a UKI with it using `ukify`. - -**mokutil** - -The `mokutil` tool is used to validate the enrollment status of your generated or provided MOK. - -**expect** - -This role uses the `expect` executable to interact with `mokutil` *only if you need to enroll a -new MOK*. If you run the role with a MOK already enrolled through MokManager, `expect` will -never be called. - -### Interaction - -This playbook may require manual administrator interaction. If you choose to generate a new MOK -(the default) or import a MOK that is not already enrolled, you will be prompted twice: -1. Once to define a MOK enrollment password. This should be something easy to type but still - secure, as explained in prompt 2. -2. Then to reboot into MokManager. The playbook will reboot for you and resume once complete, - but only the administrator with physical access to a console/display can complete a new - MOK enrollment. +To recover manually on RedHat: +```sh +# Remove kernel-install customizations +sudo rm -f /etc/kernel/install.conf -This means this role may not be suitable for any Fedora 40 environment. Most cloud providers -for example will never provide a pre-boot console or virtual display, and therefore cannot -support custom MOK enrollment. Most hypervisors will support this however (this author uses -Hyper-V). The reboot prompt is meant to pause and allow the caller time to bring up the proper -console or display. 
+# Reinstall kernel +sudo kernel-install add +``` -## Arguments +To recover manually on Debian: +```sh +# Remove postinst.d and postrm.d scripts +sudo rm -f /etc/kernel/postinst.d/zz-kernel-install +sudo rm -f /etc/kernel/postinst.d/zz-kernel-uninstall -All arguments have default values, reflected in the following example: +# Reinstall kernel +sudo apt reinstall linux-image-$(uname -r) +``` +### Limitations + +On Debian-like platforms, some components of the `virt-firmware` package will +not be updated on `apt upgrade`: +* The script at `/etc/kernel/install.d/99-uki-uefi-setup.install` +* The unit at `/etc/systemd/system/kernel-bootcfg-boot-successful.service` + +If you use unattended-upgrades, please mind your filters to avoid upgrading +virt-firmware inadvertently and breaking your next kernel upgrade. +These files can be updated to the version matching your package from +[source](https://gitlab.com/kraxel/virt-firmware), or by running the role +again after `apt upgrade python3-virt-firmware`. + +## Testing + +The test environment is really bulky because we need to emulate UEFI firmware, +which is only available in virtual machines as far as I know. I am very open to +suggestions in this regard. Be ready to spend ~10 minutes on your first test +run if you don't have access to KVM. + +### Dependencies + +**The following system tools are required:** +1. libvirt +2. qemu-system-x86_64 +3. libvirt-dev (Debian) / libvirt-devel (RedHat) +4. libvirt-python (can be venv-only, but mind version mismatches with 3.) +5. xorriso +6. qemu-img +7. openssl +8. GNU make (technically optional, but strongly recommended) + +**The following python libraries are required** (see requirements.txt): +1. ansible +2. virt-firmware +3. lxml +4. cryptography + +> [!IMPORTANT] +> *If testing on a Darwin aarch64 host*, networking depends on +> [socket_vmnet](https://github.com/lima-vm/socket_vmnet). 
The recommended +> installation method is via homebrew, and is the only time you need to be +> root: +> ```sh +> brew install socket_vmnet +> sudo brew services start socket_vmnet +> ``` + +### Configuration + +The file at `tests/integration/targets/role_uki/vars/platforms.yml` defines the +`platforms` variable. It is a list of dictionaries that describe each platform +the role will be tested against. ```yaml -- name: Test playbook - hosts: test - roles: - - role: uki_config - vars: - uki_config_initrd_generator: dracut - uki_config_cmdline: /etc/kernel/cmdline - uki_config_kernel_intall_config_root: /etc/kernel - uki_config_mok: - private_key: /etc/kernel/MOK.priv - certificate: /etc/kernel/MOK.cer - owner: root - group: root - mode: "0600" - selevel: s0 - seuser: system_u - serole: object_r - setype: cert_t +platforms: + # The name of the platform. In this example the system's image is + # downloaded to '.cache/Fedora_41.qcow2'. Platform-specific files are kept + # in '.build/Fedora_41/'. The libvirt machine's name is 'SB_Fedora_41'. The + # name of the host in the generated inventory file is 'Fedora_41'. +- name: Fedora_41 + # The URL to a qcow2 cloud image for this platform. The image is only + # downloaded if a file at '.cache/Fedora_41.qcow2' doesn't exist. For custom + # images (i.e. RHEL_10) or other trickery, just write the file yourself and + # this key can be omitted. + url: 'https://download.fedoraproject.org/path/to/qcow2/cloud/image' + # A random MAC address. This is only used internally for vm ip discovery. + mac_address: "{{ '54:52:00' | community.general.random_mac }}" + # Packages that will be installed by cloud-init before the vm becomes + # available to the host/controller. This is very slow, so I recommend using + # regular ansible modules wherever you can. Sadly in the case of Fedora_41, + # this is required. + init_packages: + - python3-libdnf5 ``` -### `uki_config_initrd_generator` - -Your initrd generator of choice, or `dracut` by default. 
- -### `uki_config_cmdline` - -You may substitute this path to any path readable by root. Passing the content of the kernel -command line is not supported. Please submit an issue/PR if you want support for this. - -> Note: The kernel command line is ignored when dracut is the UKI generator. Please configure - dracut yourself if you want a different command line. - -### `uki_config_kernel_install_config_root` - -These arguments allow you to specify where you custom configuration should be applied. You may for -example wish to keep it under `/usr/lib/kernel`. - -### `uki_config_mok` - -By default, a MOK is created at the path specified under `private_key` and `certificate` if -an adequate key/certificate pair does not already exist at that path. If you wish to bring -your own MOK instead of generating a new one for each host, either write your files to the -default paths, or provide custom paths. - -> Note: The author considered adding support for `pesign` as a signing engine (this would - only be available with the "ukify" UKI generator), but this was refused for time's - sake and for the ability to import private keys. If you want support for this, - submit an issue/PR and we'll talk about it. +### Running the test suite + +There are two playbooks that must run in order: +1. `tests/integration/targets/role_uki/setup.yml` defines, starts, and + configures the test machines using virt-firmware, xorriso, and libvirt. + This playbook will be run on the **host** and only needs a `srcdir` variable + that corresponds to the absolute path of the project root directory. +2. `.build/test.yml` installs dependencies and runs the role several times + on each test machine, each time with different success and failure + conditions. This file is copied into the .build directory from + `tests/integration/targets/role_uki/test.yml` for role discovery. 
This + playbook also needs a `srcdir` variable, the SSH key file at + `.build/id_ed25519`, and the inventory file at `.build/inventory.yml`. + +For your convenience, `make test` does these two things exactly. The setup +playbook takes time on first run, then finishes quickly on subsequent runs. +If you'd like to run these playbooks directly, consult the Makefile. + +### Resetting between tests + +The playbook at `tests/integration/targets/role_uki/teardown.yml` destroys and +undefines test machines, forgets their host keys, and removes built artifacts. +It does not remove downloaded platform images in `.cache`, so you can reset the +test environment without downloading 3GB worth of images each time. diff --git a/ansible.cfg b/ansible.cfg new file mode 100644 index 0000000..0e4394d --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +remote_tmp=/tmp/ansible-$USER diff --git a/requirements.txt b/requirements.txt index ea8a68f..83ff1ed 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,6 @@ ansible ansible-lint +virt-firmware +lxml +cryptography +netaddr diff --git a/requirements.yml b/requirements.yml index 2822085..554b93f 100644 --- a/requirements.yml +++ b/requirements.yml @@ -1,3 +1,4 @@ +--- collections: - community.general - community.crypto diff --git a/roles/uki/files/zz-kernel-install b/roles/uki/files/zz-kernel-install new file mode 100644 index 0000000..c3ae6d8 --- /dev/null +++ b/roles/uki/files/zz-kernel-install @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +INSTALL_CONF='/etc/kernel/install.conf' + +if [ -e "$INSTALL_CONF" ]; then + LAYOUT=$( + sed -nE 's/^layout\s?=\s?([a-z]*)$/\1/p' '/etc/kernel/install.conf' + ) +fi + +if [ "$LAYOUT" = 'uki' ]; then + kernel-install --verbose add "$1" "$2" +fi diff --git a/roles/uki/files/zz-kernel-uninstall b/roles/uki/files/zz-kernel-uninstall new file mode 100644 index 0000000..9e831b9 --- /dev/null +++ b/roles/uki/files/zz-kernel-uninstall @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + 
+INSTALL_CONF='/etc/kernel/install.conf' + +if [ -e "$INSTALL_CONF" ]; then + LAYOUT=$( + sed -nE 's/^layout\s?=\s?([a-z]*)$/\1/p' '/etc/kernel/install.conf' + ) +fi + +if [ "$LAYOUT" = 'uki' ]; then + kernel-install --verbose remove "$1" +fi diff --git a/roles/uki/meta/argument_specs.yml b/roles/uki/meta/argument_specs.yml new file mode 100644 index 0000000..d57f172 --- /dev/null +++ b/roles/uki/meta/argument_specs.yml @@ -0,0 +1,28 @@ +--- +argument_specs: + main: + short_description: Boot from signed UKIs + description: | + The `uki` role configures kernel-install to build unified kernel images + using systemd's `ukify`, and signs them using the provided key and + certificate. The certificate in question must already be enrolled in + shim. + version_added: "1.0.0" + author: Christopher Palmer-Richez @crichez + options: + uki_mok: + type: dict + description: > + A dictionary that describes the location of the machine owner key. + default: + database_path: /etc/pki/pesign + friendly_name: mok + options: + database_path: + type: path + description: The path to the NSS database used for signing. + default: /etc/pki/pesign + friendly_name: + type: str + description: The friendly name of the certificate used for signing. 
+ default: mok diff --git a/roles/uki/tasks/main.yml b/roles/uki/tasks/main.yml new file mode 100644 index 0000000..a0e2607 --- /dev/null +++ b/roles/uki/tasks/main.yml @@ -0,0 +1,534 @@ +--- +- name: Get a temporary directory to export certificates to + become: true + ansible.builtin.tempfile: + state: directory + register: mok_dir + changed_when: false + +- name: Check for an existing machine owner key matching friendly_name + become: true + ansible.builtin.command: + cmd: > + certutil + -d '{{ uki_mok.database_path }}' + -n '{{ uki_mok.friendly_name }}' + -L + changed_when: false + register: mok_nick_check + failed_when: '"Could not find" in mok_nick_check.stderr' + + # We can't set pipefail in dash, but this shouldn't fail since we already + # checked for an existing certificate with the previous task. +- name: Extract the machine owner key's certificate # noqa: risky-shell-pipe + become: true + ansible.builtin.shell: + cmd: > + certutil + -d '{{ uki_mok.database_path }}' + -n '{{ uki_mok.friendly_name }}' + -Lr > '{{ mok_dir.path }}/MOK.der' + executable: /bin/sh + register: mok_export + changed_when: false + +- name: Check the machine owner certificate's enrollment status + become: true + ansible.builtin.command: + argv: + - mokutil + - -t + - "{{ mok_dir.path }}/MOK.der" + register: mok_test + changed_when: false + failed_when: + - mok_test.rc != 1 + - '"already enrolled" not in mok_test.stdout' + +- name: Check the installed version of virt-firmware with apt + when: ansible_facts.distribution_file_variety == 'Debian' + ansible.builtin.command: + cmd: apt-cache show python3-virt-firmware + changed_when: false + register: virt_fw_info_apt + +- name: Get a temporary directory to clone virt-firmware + when: ansible_facts.distribution_file_variety == 'Debian' + ansible.builtin.tempfile: + suffix: crichez.secureboot.uki + state: directory + delegate_to: localhost + register: clone_dir + changed_when: false + +- name: Clone virt-firmware + vars: + apt_v_regex: 
'Version:\s([0-9]*\.?(?:[0-9]*\.?){0,2})(?:-[0-9]*)?' + git_repo_url: 'https://gitlab.com/kraxel/virt-firmware' + when: ansible_facts.distribution_file_variety == 'Debian' + block: + - name: Clone virt-firmware on Debian + vars: + version_matches: + "{{ virt_fw_info_apt.stdout | regex_search(apt_v_regex, '\\1') }}" + ansible.builtin.git: + repo: "{{ git_repo_url }}" + dest: "{{ clone_dir.path }}" + version: v{{ 0 | extract(version_matches) }} + delegate_to: localhost + changed_when: false + +- name: Set configuration and build UKI + vars: + install_conf_path: /etc/kernel/install.conf + uki_conf_path: /etc/kernel/uki.conf + install_script_path: /etc/kernel/install.d/99-uki-uefi-setup.install + boot_svc_name: kernel-bootcfg-boot-successful.service + boot_svc_path: /etc/systemd/system/{{ boot_svc_name }} + uki_name: "{{ ansible_facts.machine_id }}-{{ ansible_facts.kernel }}.efi" + uki_path: /boot/efi/EFI/Linux/{{ uki_name }} + uki_sig_der_path: "{{ mok_dir.path }}/uki_sig.der" + uki_sig_pem_path: "{{ mok_dir.path }}/uki_sig.pem" + become: true + block: + - name: Check for an existing install.conf file + ansible.builtin.stat: + path: "{{ install_conf_path }}" + get_attributes: false + get_checksum: false + get_mime: false + register: install_conf_search + + - name: Back up install.conf + when: install_conf_search.stat.exists + ansible.builtin.slurp: + src: "{{ install_conf_path }}" + register: install_conf_backup + changed_when: false + + - name: Set kernel-install layout to uki + community.general.ini_file: + path: "{{ install_conf_path }}" + option: layout + value: uki + owner: root + group: root + mode: "0644" + setype: etc_t + register: layout + + - name: Set kernel-install uki_generator to ukify + community.general.ini_file: + path: "{{ install_conf_path }}" + option: uki_generator + value: ukify + owner: root + group: root + mode: "0644" + setype: etc_t + register: generator + + - name: Check for an existing uki.conf file + ansible.builtin.stat: + path: "{{ 
uki_conf_path }}" + get_attributes: false + get_checksum: false + get_mime: false + register: uki_conf_search + + - name: Back up uki.conf + when: uki_conf_search.stat.exists + ansible.builtin.slurp: + src: "{{ uki_conf_path }}" + register: uki_conf_backup + changed_when: false + + - name: Set ukify signing tool to pesign + community.general.ini_file: + path: "{{ uki_conf_path }}" + section: UKI + option: SecureBootSigningTool + value: pesign + owner: root + group: root + mode: "0644" + setype: etc_t + register: signing_tool + + - name: Set ukify signing certificate database_path + community.general.ini_file: + path: "{{ uki_conf_path }}" + section: UKI + option: SecureBootCertificateDir + value: "{{ uki_mok.database_path }}" + owner: root + group: root + mode: "0644" + setype: etc_t + register: certificate_dir + + - name: Set signing certificate friendly_name + community.general.ini_file: + path: "{{ uki_conf_path }}" + section: UKI + option: SecureBootCertificateName + value: "{{ uki_mok.friendly_name }}" + owner: root + group: root + mode: "0644" + setype: etc_t + register: certificate + + - name: Check for an existing virt-firmware kernel-install script + when: ansible_facts.distribution_file_variety == 'Debian' + ansible.builtin.stat: + path: "{{ install_script_path }}" + get_attributes: false + get_checksum: false + get_mime: false + register: install_script_search + + - name: Backup virt-firmware kernel-install script + when: + - ansible_facts.distribution_file_variety == 'Debian' + - install_script_search.stat.exists + ansible.builtin.slurp: + src: "{{ install_script_path }}" + register: install_script_backup + changed_when: false + + - name: Install new virt-firmware kernel-install script from git + when: ansible_facts.distribution_file_variety == 'Debian' + ansible.builtin.copy: + src: "{{ clone_dir.path }}/systemd/99-uki-uefi-setup.install" + dest: "{{ install_script_path }}" + owner: root + group: root + mode: "0744" + setype: etc_t + register: 
install_script + + - name: Check for an existing virt-firmware boot validation service + when: ansible_facts.distribution_file_variety == 'Debian' + ansible.builtin.stat: + path: "{{ boot_svc_path }}" + get_attributes: false + get_checksum: false + get_mime: false + register: boot_svc_search + + - name: Backup virt-firmware boot validation service + when: + - ansible_facts.distribution_file_variety == 'Debian' + - boot_svc_search.stat.exists + ansible.builtin.slurp: + src: "{{ boot_svc_path }}" + register: boot_svc_backup + changed_when: false + + - name: Install virt-firmware boot validation service from git + when: ansible_facts.distribution_file_variety == 'Debian' + ansible.builtin.copy: + src: "{{ clone_dir.path }}/systemd/{{ boot_svc_name }}" + dest: "{{ boot_svc_path }}" + owner: root + group: root + mode: "0644" + setype: systemd_unit_file_t + + - name: Install virt-firmware scripts and services from dnf + when: ansible_facts.distribution_file_variety == 'RedHat' + ansible.builtin.dnf: + name: uki-direct + state: latest + + - name: Enable virt-firmware boot validation service + when: ansible_facts.distribution_file_variety == 'Debian' + ansible.builtin.systemd_service: + daemon_reload: true + name: kernel-bootcfg-boot-successful + enabled: true + + - name: Check for an existing Debian postinst.d script + when: ansible_facts.distribution_file_variety == 'Debian' + ansible.builtin.stat: + path: /etc/kernel/postinst.d/zz-kernel-install + get_attributes: false + get_checksum: false + get_mime: false + register: postinst_check + + - name: Backup Debian postinst.d script + when: + - ansible_facts.distribution_file_variety == 'Debian' + - postinst_check.stat.exists + ansible.builtin.slurp: + src: /etc/kernel/postinst.d/zz-kernel-install + register: postinst_backup + + - name: Copy Debian postinst.d script + when: ansible_facts.distribution_file_variety == 'Debian' + ansible.builtin.copy: + src: files/zz-kernel-install + dest: 
/etc/kernel/postinst.d/zz-kernel-install + owner: root + group: root + mode: '0755' + register: postinst_script + + - name: Check for an existing Debian postrm.d script + when: ansible_facts.distribution_file_variety == 'Debian' + ansible.builtin.stat: + path: /etc/kernel/postrm.d/zz-kernel-uninstall + register: postrm_script_check + + - name: Backup the existing Debian postrm.d script + when: + - ansible_facts.distribution_file_variety == 'Debian' + - postrm_script_check.stat.exists + ansible.builtin.slurp: + src: /etc/kernel/postrm.d/zz-kernel-uninstall + register: postrm_script_backup + + - name: Copy the new Debian postrm.d script + when: ansible_facts.distribution_file_variety == 'Debian' + ansible.builtin.copy: + src: files/zz-kernel-uninstall + dest: /etc/kernel/postrm.d/zz-kernel-uninstall + mode: '0755' + + - name: Stat the expected UKI path + ansible.builtin.stat: + path: "{{ uki_path }}" + register: uki_check + + - name: Verify the signature on the current UKI + when: uki_check.stat.exists + ansible.builtin.command: + cmd: pesigcheck -v -i '{{ uki_path }}' + changed_when: false + register: signature_check + ignore_errors: true + + - name: Reinstall the current kernel + when: > + layout is changed or generator is changed or + install_script is changed or postinst_script is changed or + not uki_check.stat.exists or signature_check is failed + vars: + kernel: "{{ ansible_facts.kernel }}" + block: + - name: Reinstall kernel on RedHat + when: ansible_facts.distribution_file_variety == 'RedHat' + become: true + ansible.builtin.command: + cmd: > + kernel-install add '{{ kernel }}' + '/usr/lib/modules/{{ kernel }}/vmlinuz' + changed_when: true + + - name: Reinstall kernel on Debian + when: ansible_facts.distribution_file_variety == 'Debian' + become: true + ansible.builtin.command: + cmd: kernel-install add '{{ kernel }}' '/boot/vmlinuz-{{ kernel }}' + changed_when: true + + - name: Stat the expected UKI path + ansible.builtin.stat: + path: "{{ uki_path }}" + 
register: uki_post_check + failed_when: not uki_post_check.stat.exists + + - name: Ensure BootNext is net to the UKI + vars: + kernel: "{{ ansible_facts.kernel }}" + bootnext_re_default: + 'N\s+-\s+[0-9,a-f]{4}\s+-\s+.*{{ kernel }}\s\(UKI\)' + bootnext_re_ubuntu: + 'N\s+-\s+[0-9,a-f]{4}\s+-\s+.*{{ kernel }}' + is_ubuntu: "{{ ansible_facts.distribution == 'Ubuntu' }}" + bootnext_regex: + "{{ bootnext_re_ubuntu if is_ubuntu else bootnext_re_default }}" + ansible.builtin.command: + cmd: kernel-bootcfg + changed_when: false + register: boot_check + failed_when: >- + boot_check.stdout is not search(bootnext_regex, multiline=false) + + - name: Verify the UKI was signed by the machine owner key + ansible.builtin.command: + cmd: pesigcheck -v -i '{{ uki_path }}' + changed_when: false + + - name: Reboot the host to try and boot from the UKI + become: true + ansible.builtin.reboot: + reboot_timeout: 300 + + - name: Ensure the virt-firmware boot validation service succeeded + become: true + ansible.builtin.command: + argv: + - systemctl + - show + - kernel-bootcfg-boot-successful.service + - --property=Result + register: boot_check + changed_when: false + failed_when: + - '"success" not in boot_check.stdout' + rescue: + - name: Restore install.conf + when: + - install_conf_backup is defined + - install_conf_backup is not failed + - install_conf_backup is not skipped + become: true + ansible.builtin.copy: + content: "{{ install_conf_backup.content | b64decode }}" + dest: "{{ install_conf_path }}" + owner: root + group: root + mode: "0644" + setype: etc_t + + - name: Remove install.conf + become: true + when: + - install_conf_backup is skipped + ansible.builtin.file: + path: "{{ install_conf_path }}" + state: absent + + - name: Restore uki.conf + become: true + when: + - uki_conf_backup is defined + - uki_conf_backup is not skipped + - uki_conf_backup is not failed + ansible.builtin.copy: + content: "{{ uki_conf_backup.content | b64decode }}" + dest: "{{ uki_conf_path }}" + 
owner: root + group: root + mode: "0644" + setype: etc_t + + - name: Remove uki.conf + become: true + when: + - uki_conf_backup is skipped + ansible.builtin.file: + path: "{{ uki_conf_path }}" + state: absent + + - name: Restore virt-firmware boot validation service + become: true + when: + - boot_svc_backup is defined + - boot_svc_backup is not skipped + - boot_svc_backup is not failed + ansible.builtin.copy: + content: "{{ boot_svc_backup.content | b64decode }}" + dest: "{{ boot_svc_path }}" + owner: root + group: root + mode: "0644" + setype: systemd_unit_file_t + + - name: Enable restored virt-firmware boot validation service + become: true + when: + - boot_svc_backup is skipped + ansible.builtin.systemd_service: + daemon_reload: true + enabled: true + name: kernel-bootcfg-boot-successful + + - name: Disable virt-firmware boot validation service + become: true + when: + - boot_svc_backup is defined + - boot_svc_backup is not skipped + - boot_svc_backup is not failed + ansible.builtin.systemd_service: + enabled: false + name: kernel-bootcfg-boot-successful + + - name: Remove virt-firmware boot validation service + become: true + when: + - boot_svc_backup is skipped + ansible.builtin.file: + path: "{{ boot_svc_path }}" + state: absent + + - name: Restore virt-firmware kernel install script + become: true + when: + - install_script_backup is defined + - install_script_backup is not skipped + - install_script_backup is not failed + ansible.builtin.copy: + content: "{{ install_script_backup.content | b64decode }}" + dest: "{{ install_script_path }}" + owner: root + group: root + mode: "0755" + setype: etc_t + + - name: Remove virt-firmware kernel install script + become: true + when: + - install_script_backup is skipped + ansible.builtin.file: + path: "{{ install_script_path }}" + state: absent + + - name: Restore postinst.d script on Debian + become: true + when: + - ansible_facts.distribution == 'Debian' + - postinst_backup is defined + - postinst_backup is not 
skipped + - postinst_backup is not failed + ansible.builtin.copy: + content: "{{ postinst_backup.content | b64decode }}" + dest: /etc/kernel/postinst.d/zz-kernel-install + mode: '0755' + + - name: Remove postinst.d script on Debian + become: true + when: + - ansible_facts.distribution_file_variety == 'Debian' + - postinst_backup is skipped + ansible.builtin.file: + path: /etc/kernel/postinst.d/zz-kernel-install + state: absent + + - name: Restore postrm.d script on Debian + become: true + when: + - ansible_facts.distribution_file_variety == 'Debian' + - postrm_script_backup is defined + - postrm_script_backup is not skipped + - postrm_script_backup is not failed + ansible.builtin.copy: + content: "{{ postrm_script_backup | b64decode }}" + dest: /etc/kernel/postrm.d/zz-kernel-uninstall + mode: '0755' + + - name: Remove postrm.d script on Debian + become: true + when: + - ansible_facts.distribution_file_variety == 'Debian' + - postrm_script_backup is skipped + ansible.builtin.file: + path: /etc/kernel/postrm.d/zz-kernel-uninstall + state: absent + + - name: Fail here, after settings are restored + ansible.builtin.fail: + msg: the uki role failed, but all settings have been restored diff --git a/roles/uki/vars/main.yml b/roles/uki/vars/main.yml new file mode 100644 index 0000000..c3b632d --- /dev/null +++ b/roles/uki/vars/main.yml @@ -0,0 +1,4 @@ +--- +uki_mok: + database_path: /etc/pki/pesign + friendly_name: mok diff --git a/roles/uki_config/defaults/main.yaml b/roles/uki_config/defaults/main.yaml deleted file mode 100644 index 0ce9543..0000000 --- a/roles/uki_config/defaults/main.yaml +++ /dev/null @@ -1,14 +0,0 @@ -uki_config_initrd_generator: dracut -uki_config_mok: - private_key: /etc/kernel/MOK.priv - certificate: /etc/kernel/MOK.cer - owner: root - group: root - mode: "0600" - seuser: system_u - serole: object_r - setype: cert_t - selevel: s0 - -uki_config_cmdline: /etc/kernel/cmdline -uki_config_kernel_install_config_root: /etc/kernel diff --git 
a/roles/uki_config/handlers/main.yaml b/roles/uki_config/handlers/main.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/roles/uki_config/meta/argument_specs.yaml b/roles/uki_config/meta/argument_specs.yaml deleted file mode 100644 index 861ed17..0000000 --- a/roles/uki_config/meta/argument_specs.yaml +++ /dev/null @@ -1,72 +0,0 @@ ---- -argument_specs: - main: - short_description: UKI Configuration - description: Configure kernel-install to build and sign UKIs - version_added: "1.0.0" - author: "Christopher Palmer-Richez (tofugarden)" - options: - uki_config_initrd_generator: - type: str - description: The tool kernel-install calls to generate an initramfs. - default: dracut - - uki_config_mok: - type: dict - description: | - Location and information for your MOK. - - Use this option to specify the path where the MOK certificate and - private key will be stored for UKI signing purposes. - options: - private_key: - type: path - description: The path to the MOK private key. - default: /etc/kernel/MOK.priv - - certificate: - type: path - description: The path to the MOK x509 certificate. - default: /etc/kernel/MOK.cer - - owner: - type: str - default: root - - group: - type: str - default: root - - mode: - type: str - default: "0600" - - selevel: - type: str - default: s0 - - seuser: - type: str - default: system_u - - serole: - type: str - default: object_r - - setype: - type: str - default: cert_t - - uki_config_cmdline: - type: path - description: The path to the kernel command line. - default: /etc/kernel/cmdline - - uki_config_kernel_install_config_root: - type: path - default: /etc/kernel - description: | - The path where configuration files are stored. - - This should usually be `/etc/kernel` (the default), but you may wish - to use `/usr/lib/kernel` instead. 
diff --git a/roles/uki_config/tasks/main.yaml b/roles/uki_config/tasks/main.yaml deleted file mode 100644 index 423151d..0000000 --- a/roles/uki_config/tasks/main.yaml +++ /dev/null @@ -1,300 +0,0 @@ -- name: Gather keys and certificates - become: true - block: - - name: Gather private key - community.crypto.openssl_privatekey: - path: "{{ uki_config_mok.private_key }}" - size: 2048 - mode: "{{ uki_config_mok.mode }}" - owner: "{{ uki_config_mok.owner }}" - group: "{{ uki_config_mok.group }}" - seuser: "{{ uki_config_mok.seuser }}" - serole: "{{ uki_config_mok.serole }}" - setype: "{{ uki_config_mok.setype }}" - selevel: "{{ uki_config_mok.selevel }}" - - - name: Gather x509 certificate - community.crypto.x509_certificate: - path: "{{ uki_config_mok.certificate }}" - privatekey_path: "{{ uki_config_mok.private_key }}" - provider: selfsigned - owner: "{{ uki_config_mok.owner }}" - group: "{{ uki_config_mok.group }}" - mode: "{{ uki_config_mok.mode }}" - seuser: "{{ uki_config_mok.seuser }}" - serole: "{{ uki_config_mok.serole }}" - setype: "{{ uki_config_mok.setype }}" - selevel: "{{ uki_config_mok.selevel }}" - - - name: Gather x509 certificate in DER format - community.crypto.x509_certificate_convert: - src_path: "{{ uki_config_mok.certificate }}" - dest_path: "{{ uki_config_mok_der_path }}" - format: der - owner: "{{ uki_config_mok.owner }}" - group: "{{ uki_config_mok.group }}" - mode: "{{ uki_config_mok.mode }}" - seuser: "{{ uki_config_mok.seuser }}" - serole: "{{ uki_config_mok.serole }}" - setype: "{{ uki_config_mok.setype }}" - selevel: "{{ uki_config_mok.selevel }}" - -- name: Query MOK keyring - become: true - ansible.builtin.command: - argv: - - mokutil - - -t - - "{{ uki_config_mok_der_path }}" - changed_when: false - register: test_key_request - failed_when: - - test_key_request.stdout is not ansible.builtin.match('^.*\sis\snot\senrolled$') - - test_key_request.stdout is not ansible.builtin.match('^.*\sis\salready\sin\sthe\senrollment\srequest$') - - 
test_key_request.stdout is not ansible.builtin.match('^.*\sis\salready\senrolled$') - -- name: Import new MOK - when: test_key_request.rc == 0 - block: - - name: Get MOK password - register: mok_password_prompt - ansible.builtin.pause: - prompt: Enter MOK password - echo: false - - - name: Import MOK - become: true - ansible.builtin.shell: - cmd: | - spawn /usr/bin/mokutil --import {{ uki_config_mok_der_path }} - expect "input password:" - send -- "{{ mok_password_prompt.user_input }}\n" - expect "input password again:" - send -- "{{ mok_password_prompt.user_input }}\n" - expect eof - executable: /usr/bin/expect - changed_when: true - -- name: Reboot into MokManager - when: - test_key_request.stdout is ansible.builtin.match('^.*\sis\salready\sin\sthe\senrollment\srequest$') or - test_key_request.rc == 0 - block: - - name: MokManager warning - ansible.builtin.debug: - msg: > - A new Machine Owner Key (MOK) was enrolled. Before this playbook can - continue, the import will need to be manually validated in a program - called MokManager. This will happen automatically when the machine is - rebooted, but requires an administrator with access to a local console. - - MokManager is very easy to use. Once in the menu select "Enroll MOK," - enter the same password you just provided, and reboot. This playbook - will resume when the host is back online, or will automatically fail - after ten minutes. If something strange happens, just run the play - again. - - - name: Reboot prompt - ansible.builtin.pause: - prompt: Ready to reboot? 
(y/n) - echo: true - register: reboot_prompt - changed_when: false - failed_when: reboot_prompt.user_input != 'y' - - - name: Reboot - become: true - ansible.builtin.reboot: - - - name: Verify MOK was enrolled - become: true - ansible.builtin.command: - argv: - - mokutil - - -t - - "{{ uki_config_mok_der_path }}" - register: validate_mok_request - changed_when: false - failed_when: validate_mok_request.rc == 0 - -- name: Configure kernel-install to generate and sign UKIs - become: true - vars: - install_conf_path: "{{ uki_config_kernel_install_config_root }}/install.conf" - ukify_conf_path: "{{ uki_config_kernel_install_config_root }}/uki.conf" - block: - - name: Configure kernel-install to generate UKIs - block: - - name: Save the original file - ansible.builtin.slurp: - src: "{{ install_conf_path }}" - register: kernel_install_config_backup - changed_when: false - ignore_errors: true - - - name: Set the install layout to UKI - community.general.ini_file: - path: "{{ install_conf_path }}" - option: layout - value: uki - mode: '0644' - owner: root - group: root - seuser: system_u - serole: object_r - setype: etc_t - register: kernel_install_layout - - - name: Configure kernel-install to use the requested initrd generator - community.general.ini_file: - path: "{{ install_conf_path }}" - option: initrd_generator - value: "{{ uki_config_initrd_generator }}" - mode: '0644' - owner: root - group: root - seuser: system_u - serole: object_r - setype: etc_t - register: kernel_install_initrd_generator - - - name: Configure kernel-install to use the requested UKI generator - community.general.ini_file: - path: "{{ install_conf_path }}" - option: uki_generator - value: ukify - mode: '0644' - owner: root - group: root - seuser: system_u - serole: object_r - setype: etc_t - register: kernel_install_uki_generator - - - name: Configure ukify to sign generated UKIs - block: - - name: Save original file - ansible.builtin.slurp: - src: "{{ ukify_conf_path }}" - register: 
ukify_config_backup - changed_when: false - ignore_errors: true - - - name: Set singing tool - community.general.ini_file: - path: "{{ ukify_conf_path }}" - section: UKI - option: SecureBootSigningTool - value: sbsign - mode: '0644' - owner: root - group: root - seuser: system_u - serole: object_r - setype: etc_t - register: ukify_signing_tool - - - name: Set kernel command line - community.general.ini_file: - path: "{{ ukify_conf_path }}" - section: UKI - option: Cmdline - value: "@{{ uki_config_cmdline }}" - mode: '0644' - owner: root - group: root - seuser: system_u - serole: object_r - setype: etc_t - register: ukify_kernel_command_line - - - name: Set signing private key - community.general.ini_file: - path: "{{ ukify_conf_path }}" - section: UKI - option: SecureBootPrivateKey - value: "{{ uki_config_mok.private_key }}" - mode: '0644' - owner: root - group: root - seuser: system_u - serole: object_r - setype: etc_t - register: ukify_singing_private_key - - - name: Set signing certificate - community.general.ini_file: - path: "{{ ukify_conf_path }}" - section: UKI - option: SecureBootCertificate - value: "{{ uki_config_mok.certificate }}" - mode: '0644' - owner: root - group: root - seuser: system_u - serole: object_r - setype: etc_t - register: ukify_signing_certificate - - - name: Rebuild UKI - when: - kernel_install_layout is changed or - kernel_install_initrd_generator is changed or - kernel_install_uki_generator is changed or - (ukify_signing_tool is defined and ukify_signing_tool is changed) or - (ukify_signing_private_key is defined and ukify_signing_private_key is changed) or - (ukify_signing_certificate is defined and ukify_signing_certificate is changed) or - (ukify_kernel_command_line is defined and ukify_kernel_command_line is changed) - ansible.builtin.command: - argv: - - kernel-install - - --verbose - - add - changed_when: true - - rescue: - - name: Restore ukify config - block: - - name: Restore ukify configuration file - when: - - 
ukify_config_backup is not failed - - ukify_config_backup is not skipped - ansible.builtin.copy: - content: "{{ ukify_config_backup.content | b64decode }}" - dest: "{{ ukify_conf_path }}" - owner: root - group: root - mode: "0644" - seuser: system_u - serole: object_r - setype: etc_t - changed_when: false - - - name: Delete new ukify configuration file - when: ukify_config_backup is failed - ansible.builtin.file: - path: "{{ ukify_conf_path }}" - state: absent - changed_when: false - - - name: Restore kernel-install config - block: - - name: Restore original kernel-install configuration file - when: kernel_install_config_backup is not failed - ansible.builtin.copy: - content: "{{ kernel_install_config_backup.content | b64decode }}" - dest: "{{ install_conf_path }}" - owner: root - group: root - mode: "0644" - seuser: system_u - serole: object_r - setype: etc_t - changed_when: false - - - name: Remove new kernel-install configuration file - when: kernel_install_config_backup is failed - ansible.builtin.file: - path: "{{ install_conf_path }}" - state: absent - changed_when: false diff --git a/roles/uki_config/vars/main.yaml b/roles/uki_config/vars/main.yaml deleted file mode 100644 index 8c3dece..0000000 --- a/roles/uki_config/vars/main.yaml +++ /dev/null @@ -1 +0,0 @@ -uki_config_mok_der_path: "{{ uki_config_mok.certificate | ansible.builtin.splitext | first }}.der" diff --git a/tests/integration/requirements.yml b/tests/integration/requirements.yml new file mode 100644 index 0000000..2752450 --- /dev/null +++ b/tests/integration/requirements.yml @@ -0,0 +1,5 @@ +--- +collections: + - community.general + - community.crypto + - community.libvirt diff --git a/tests/integration/targets/role_uki/files/dependencies.yml b/tests/integration/targets/role_uki/files/dependencies.yml new file mode 100644 index 0000000..2d2e10d --- /dev/null +++ b/tests/integration/targets/role_uki/files/dependencies.yml @@ -0,0 +1,13 @@ +--- +- name: Install dependencies to test guests + 
hosts: test + vars: + test_src: "{{ workspace_dir }}/.build/tests/integration/targets/role_uki" + tasks: + - name: Import dependency vars + ansible.builtin.include_vars: + file: "{{ test_src }}/vars/dependencies.yml" + + - name: Import dependency tasks + ansible.builtin.import_tasks: + file: "{{ test_src }}/tasks/dependencies.yml" diff --git a/tests/integration/targets/role_uki/files/setup.yml b/tests/integration/targets/role_uki/files/setup.yml new file mode 100644 index 0000000..fff6ac9 --- /dev/null +++ b/tests/integration/targets/role_uki/files/setup.yml @@ -0,0 +1,17 @@ +--- +- name: Run test setup tasks + hosts: localhost + vars: + test_src: "{{ workspace_dir }}/tests/integration/targets/role_uki" + tasks: + - name: Import platforms list + ansible.builtin.include_vars: + file: "{{ test_src }}/vars/platforms.yml" + + - name: Import setup vars + ansible.builtin.include_vars: + file: "{{ test_src }}/vars/setup.yml" + + - name: Import setup tasks + ansible.builtin.import_tasks: + file: "{{ test_src }}/tasks/setup.yml" diff --git a/tests/integration/targets/role_uki/files/teardown.yml b/tests/integration/targets/role_uki/files/teardown.yml new file mode 100644 index 0000000..3f701e6 --- /dev/null +++ b/tests/integration/targets/role_uki/files/teardown.yml @@ -0,0 +1,13 @@ +--- +- name: Teardown test environment + hosts: localhost + vars: + test_src: "{{ workspace_dir }}/.build/tests/integration/targets/role_uki" + tasks: + - name: Import platforms list + ansible.builtin.include_vars: + file: "{{ test_src }}/vars/platforms.yml" + + - name: Import teardown tasks + ansible.builtin.import_tasks: + file: "{{ test_src }}/tasks/teardown.yml" diff --git a/tests/integration/targets/role_uki/files/test.yml b/tests/integration/targets/role_uki/files/test.yml new file mode 100644 index 0000000..d0c3ee6 --- /dev/null +++ b/tests/integration/targets/role_uki/files/test.yml @@ -0,0 +1,18 @@ +--- +- name: Run test tasks + hosts: test + vars: + test_support: "{{ workspace_dir 
}}/.build" + test_src: "{{ test_support }}/tests/integration/targets/role_uki" + tasks: + - name: Import test variables + ansible.builtin.include_vars: + file: "{{ test_src }}/vars/test.yml" + + - name: Import test tasks + ansible.builtin.import_tasks: + file: "{{ test_src }}/tasks/test.yml" + handlers: + - name: Import handlers + ansible.builtin.import_tasks: + file: "{{ test_src }}/handlers/test.yml" diff --git a/tests/integration/targets/role_uki/handlers/test.yml b/tests/integration/targets/role_uki/handlers/test.yml new file mode 100644 index 0000000..eea03d1 --- /dev/null +++ b/tests/integration/targets/role_uki/handlers/test.yml @@ -0,0 +1,6 @@ +--- +- name: This handler runs if an unexpected change occured + ansible.builtin.debug: + msg: The second role run still reported changes + failed_when: true + listen: idempotency_failure diff --git a/tests/integration/targets/role_uki/tasks/dependencies.yml b/tests/integration/targets/role_uki/tasks/dependencies.yml new file mode 100644 index 0000000..18e89ed --- /dev/null +++ b/tests/integration/targets/role_uki/tasks/dependencies.yml @@ -0,0 +1,137 @@ +--- +- name: Install package dependencies + vars: + distro: "{{ ansible_facts.distribution_file_variety }}" + packages: "{{ deps_debian if distro == 'Debian' else deps_redhat }}" + become: true + block: + - name: Install dependencies from cached packages + ansible.builtin.package: + update_cache: false + name: "{{ packages }}" + rescue: + - name: Install dependencies from updated cache + ansible.builtin.package: + update_cache: true + name: "{{ packages }}" + +- name: Check whether systemd-boot is installed + when: ansible_facts.distribution_file_variety == 'Debian' + become: true + ansible.builtin.command: + cmd: bootctl is-installed + changed_when: false + failed_when: false + register: sdboot_check + +- name: On Debian-like platforms, disable systemd-boot + when: + - ansible_facts.distribution_file_variety == 'Debian' + - sdboot_check.stdout == 'yes' + become: 
true + ansible.builtin.command: + cmd: bootctl remove + register: sdboot_remove + changed_when: sdboot_remove.stdout != '' + failed_when: + - sdboot_remove.rc != 0 + - sdboot_remove.stderr != '' + +- name: On Debian-like platforms, reconfigure grub + when: + - ansible_facts.distribution_file_variety == 'Debian' + - sdboot_check.stdout == 'yes' + become: true + ansible.builtin.command: + cmd: grub-install + changed_when: true + +- name: Get a temporary directory + become: true + ansible.builtin.tempfile: + state: directory + register: test_tempdir + changed_when: false + +- name: Ensure the MOK is available on the test guest + block: + - name: Try to find the MOK certificate + become: true + ansible.builtin.command: + cmd: certutil -d '{{ pesign_dbdir }}' -n '{{ mok_nickname }}' -L + changed_when: false + register: existing_mok_export + failed_when: '"Could not find" in existing_mok_export.stderr' + + # We can't set pipefail in dash, but this shouldn't fail since we already + # ensured the certificate exists in the previous task. 
+ - name: Export the MOK certificate in DER format # noqa: risky-shell-pipe + become: true + ansible.builtin.shell: + cmd: > + certutil -d '{{ pesign_dbdir }}' -n '{{ mok_nickname }}' -Lr + > '{{ test_tempdir.path }}/MOK.der' + executable: /bin/sh + creates: "{{ test_tempdir.path }}/MOK.der" + + - name: Convert the certificate to PEM + become: true + community.crypto.x509_certificate_convert: + src_path: "{{ test_tempdir.path }}/MOK.der" + format: pem + dest_path: "{{ test_tempdir.path }}/MOK.pem" + mode: '0644' + + - name: Get the details of the existing certificate + become: true + community.crypto.x509_certificate_info: + path: "{{ test_tempdir.path }}/MOK.pem" + register: existing_mok + + - name: Ensure the existing and generated certificate match + block: + - name: Get the details of the generated certificate + delegate_to: localhost + community.crypto.x509_certificate_info: + path: "{{ test_support }}/MOK.pem" + register: generated_mok + failed_when: > + generated_mok.fingerprints.sha256 != + existing_mok.fingerprints.sha256 + rescue: + - name: Remove the existing MOK + become: true + ansible.builtin.command: + cmd: certutil -d '{{ pesign_dbdir }}' -n '{{ mok_nickname }}' -F + register: mok_removal + changed_when: mok_removal is not failed + + - name: Fail anyway, since we need to copy the new one + ansible.builtin.fail: + msg: > + Certificate with nickname \'{{ mok_nickname }}\' did not match + the certificate generated for this test, and was removed. 
+ rescue: + - name: Copy the pkcs12 archive + become: true + ansible.builtin.copy: + src: "{{ output_dir }}/MOK.p12" + dest: "{{ test_tempdir.path }}" + owner: root + group: root + mode: '0600' + changed_when: false + + - name: Import the pkcs12 archive to the pesign database + become: true + ansible.builtin.command: + argv: + - pk12util + - -i + - "{{ test_tempdir.path }}/MOK.p12" + - -d + - "{{ pesign_dbdir }}" + - -W + - '' + register: mok_import + changed_when: mok_import.rc == 0 diff --git a/tests/integration/targets/role_uki/tasks/setup.yml b/tests/integration/targets/role_uki/tasks/setup.yml new file mode 100644 index 0000000..0bda849 --- /dev/null +++ b/tests/integration/targets/role_uki/tasks/setup.yml @@ -0,0 +1,415 @@ +--- +- name: Create the output directory + ansible.builtin.file: + path: "{{ output_dir }}" + state: directory + mode: '0744' + +- name: Package the collection + community.general.archive: + path: "{{ workspace_dir }}/*" + exclusion_patterns: "{{ workspace_dir }}/.*" + dest: "{{ workspace_dir }}/{{ tarball }}" + +- name: Install the collection to the output directory + ansible.builtin.unarchive: + src: "{{ workspace_dir }}/{{ tarball }}" + dest: "{{ output_dir }}" + mode: '0744' + +- name: Generate a new ssh keypair + community.crypto.openssh_keypair: + path: "{{ output_dir }}/id_ed25519" + type: ed25519 + passphrase: '' + mode: '0600' + register: ssh_keygen + +- name: Generate machine owner keys + vars: + pk_path: "{{ output_dir }}/MOK.priv" + csr_path: "{{ output_dir }}/MOK.csr" + cert_path: "{{ output_dir }}/MOK.pem" + p12_path: "{{ output_dir }}/MOK.p12" + block: + - name: Generate machine owner private key + community.crypto.openssl_privatekey: + path: "{{ pk_path }}" + type: RSA + size: 2048 + mode: '0600' + + - name: Generate a certificate signing request + community.crypto.openssl_csr: + path: "{{ csr_path }}" + privatekey_path: "{{ pk_path }}" + common_name: mok + basic_constraints: "CA:FALSE" + basic_constraints_critical: true + 
extended_key_usage: "Code Signing" + mode: '0644' + + - name: Generate a certificate + community.crypto.x509_certificate: + path: "{{ cert_path }}" + csr_path: "{{ csr_path }}" + privatekey_path: "{{ pk_path }}" + provider: selfsigned + mode: '0644' + return_content: true + + - name: Export the certificate and key as a pkcs12 archive + ansible.builtin.command: + argv: + - openssl + - pkcs12 + - -export + - -in + - "{{ cert_path }}" + - -inkey + - "{{ pk_path }}" + - -passin + - 'pass:' + - -out + - "{{ p12_path }}" + - -name + - "{{ mok_nickname }}" + - -passout + - 'pass:' + creates: "{{ p12_path }}" + +- name: Include the platforms list + ansible.builtin.include_vars: + file: "{{ test_src }}/vars/platforms.yml" + +- name: Check for a cloud-init binary on the test host + ansible.builtin.stat: + path: /usr/bin/cloud-init + register: cloud_init_bin + +- name: Create the cache directory + ansible.builtin.file: + path: "{{ cache_dir }}" + state: directory + mode: '0744' + +- name: Create platform specific configuration files + vars: + test_support: "{{ output_dir }}" + distro: "{{ ansible_facts.distribution }}" + fw_path_linux: /usr/share/OVMF/OVMF_CODE.secboot.fd + fw_path_darwin: /opt/homebrew/share/qemu/edk2-x86_64-secure-code.fd + vars_linux: /usr/share/OVMF/OVMF_VARS.fd + vars_darwin: /opt/homebrew/share/qemu/edk2-i386-vars.fd + fw_path: "{{ fw_path_darwin if distro == 'MacOSX' else fw_path_linux }}" + vars_path: "{{ vars_darwin if distro == 'MacOSX' else vars_linux }}" + block: + - name: Create one directory per platform + vars: + platform_dir: "{{ output_dir }}/{{ platform.name }}" + ansible.builtin.file: + path: "{{ platform_dir }}" + state: directory + mode: '0744' + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Template the cloud-init user-data file + vars: + ssh_pub: "{{ ssh_keygen.public_key }}" + platform_dir: "{{ output_dir }}/{{ platform.name }}" + user_data_path: "{{ 
platform_dir }}/user-data.yml" + ansible.builtin.template: + src: "{{ test_src }}/templates/user-data.yml.j2" + dest: "{{ user_data_path }}" + mode: '0644' + register: user_data + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Validate the cloud-init user-data file + when: cloud_init_bin.stat.exists + vars: + platform_dir: "{{ output_dir }}/{{ platform.name }}" + user_data_path: "{{ platform_dir }}/user-data.yml" + ansible.builtin.command: + argv: + - cloud-init + - schema + - -c + - "{{ user_data_path }}" + changed_when: false + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Build the nocloud datasource image + vars: + platform_dir: "{{ output_dir }}/{{ platform.name }}" + nocloud_path: "{{ platform_dir }}/nocloud.iso" + user_data_path: "{{ platform_dir }}/user-data.yml" + ansible.builtin.command: + argv: + - xorriso + - -outdev + - "{{ nocloud_path }}" + - -volid + - cidata + - -joliet + - 'on' + - -map + - "{{ user_data_path }}" + - /user-data + - -map + - "{{ test_src }}/templates/meta-data" + - /meta-data + creates: "{{ nocloud_path }}" + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Build the firmware images + vars: + platform_dir: "{{ output_dir }}/{{ platform.name }}" + vars_output: "{{ platform_dir }}/OVMF_VARS.custom.fd" + ansible.builtin.command: + argv: + - virt-fw-vars + - --input + - "{{ vars_path }}" + - --output + - "{{ vars_output }}" + - --enroll-redhat + - --secure-boot + - --add-mok + - f0c54306-e762-41a6-8dd4-4901c73e805b + - "{{ output_dir }}/MOK.pem" + creates: "{{ vars_output }}" + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Template machine configurations + vars: + platform_dir: "{{ output_dir }}/{{ platform.name }}" + ansible.builtin.template: + 
src: "{{ test_src }}/templates/domain.xml.j2" + dest: "{{ platform_dir }}/domain.xml" + lstrip_blocks: true + mode: '0644' + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Ensure platform cloud images are available + vars: + platform_image_path: "{{ cache_dir }}/{{ platform.name }}.qcow2" + block: + - name: Check for an existing platform cloud image + ansible.builtin.stat: + path: "{{ platform_image_path }}" + register: platform_image + failed_when: not platform_image.stat.exists + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + rescue: + - name: Get platform images + ansible.builtin.get_url: + dest: "{{ platform_image_path }}" + url: "{{ platform.url }}" + mode: '0744' + retries: 3 + until: image_download is not failed + register: image_download + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Destroy the existing domain + when: + - user_data.results[ansible_loop.index0] is changed + - platform.name in lookup('ansible.builtin.pipe', 'virsh list') + - image_download.results[ansible_loop.index0] is not changed + community.libvirt.virt: + command: destroy + name: "SB_{{ platform.name }}" + uri: "qemu:///session" + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Remove the now invalid disk image + when: + - user_data.results[ansible_loop.index0] is changed + - image_download.results[ansible_loop.index0] is not changed + ansible.builtin.file: + path: "{{ platform_dir }}/system.qcow2" + state: absent + mode: '0744' + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Check for an existing derived image + vars: + platform_dir: "{{ output_dir }}/{{ platform.name }}" + derived_image_path: "{{ platform_dir }}/system.qcow2" + 
ansible.builtin.stat: + path: "{{ derived_image_path }}" + register: derived_image + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Derive a new image from the platform image + vars: + platform_dir: "{{ output_dir }}/{{ platform.name }}" + derived_image_path: "{{ platform_dir }}/system.qcow2" + platform_image_path: "{{ cache_dir }}/{{ platform.name }}.qcow2" + when: not derived_image.results[ansible_loop.index0].stat.exists + ansible.builtin.command: + cmd: >- + qemu-img create -f qcow2 + -b '{{ platform_image_path }}' -F qcow2 + '{{ derived_image_path }}' + changed_when: true + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Slurp the domain definition + vars: + platform_dir: "{{ output_dir }}/{{ platform.name }}" + ansible.builtin.slurp: + src: "{{ platform_dir }}/domain.xml" + register: domain + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Define a virtual machine for each platform + community.libvirt.virt: + uri: 'qemu:///session' + command: define + name: "SB_{{ platform.name }}" + xml: "{{ domain.results[ansible_loop.index0].content | b64decode }}" + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Start each platform's virtual machine + community.libvirt.virt: + uri: 'qemu:///session' + name: "SB_{{ platform.name }}" + state: running + register: machine_state + loop: "{{ platforms }}" + loop_control: + extended: true + loop_var: platform + label: "{{ platform.name }}" + + - name: Wait for IP addresses to print to console on macOS + when: distro == 'MacOSX' + vars: + platform_dir: "{{ output_dir }}/{{ platform.name }}" + ci: 'ci-info:\s+\|' + ifname: '\s+(?:eth0|enp[0-9]s[0-9])\s+\|' + bool: '\s+(?:True|False)\s+\|' + ip: '\s+(([0-9]{1,3}\.?){4})\s+\|' + nm: 
'\s+255\.255\.255\.0\s+\|'
+        scope: '\s+global'
+      ansible.builtin.wait_for:
+        path: "{{ platform_dir }}/console.log"
+        search_regex: "{{ ci + ifname + bool + ip + nm + scope }}"
+      register: addr
+      loop: "{{ platforms }}"
+      loop_control:
+        extended: true
+        loop_var: platform
+        label: "{{ platform.name }}"
+
+    - name: Wait for the host keys to print to console
+      vars:
+        platform_dir: "{{ output_dir }}/{{ platform.name }}"
+      ansible.builtin.wait_for:
+        path: "{{ platform_dir }}/console.log"
+        search_regex: '^(ssh-ed25519.*)$'
+      register: host_key
+      loop: "{{ platforms }}"
+      loop_control:
+        extended: true
+        loop_var: platform
+        label: "{{ platform.name }}"
+
+    - name: Copy host keys to known_hosts
+      vars:
+        key_match: "{{ ansible_loop.index0 | extract(host_key.results) }}"
+        key: "{{ 0 | extract(key_match.match_groups) }}"
+        addr_match: >-
+          {{ ansible_loop.index0 | extract(addr.results) if addr is defined }}
+        addr_darwin:
+          "{{ 0 | extract(addr_match.match_groups) if addr is defined }}"
+        addr_linux: "127.0.0.1"
+        address: "{{ addr_darwin if distro == 'MacOSX' else addr_linux }}"
+        # hport is always a string so the "!= '22'" comparisons below are
+        # type-consistent (an int 22 would never equal the string '22').
+        # The per-platform port offset is the loop index, matching the
+        # "2022 + loop.index0" forwarding in inventory.yml.j2; the original
+        # referenced an undefined variable 'i' here.
+        hport: >-
+          {{ '22' if distro == 'MacOSX'
+             else (2022 + ansible_loop.index0) | string }}
+        # Original referenced undefined 'port'; the computed port is 'hport'.
+        port_str: "{{ ':' + hport if hport != '22' else '' }}"
+        addr_str: "{{ '[' + address + ']' if hport != '22' else address }}"
+      ansible.builtin.known_hosts:
+        key: "{{ addr_str }}{{ port_str }} {{ key }}"
+        name: "{{ addr_str }}{{ port_str }}"
+      loop: "{{ platforms }}"
+      loop_control:
+        extended: true
+        loop_var: platform
+        label: "{{ platform.name }}"
+
+- name: Template the inventory file
+  ansible.builtin.template:
+    src: "{{ test_src }}/templates/inventory.yml.j2"
+    dest: "{{ output_dir }}/inventory.yml"
+    lstrip_blocks: true
+    mode: '0644'
+
+- name: Move the dependencies playbook in place
+  ansible.builtin.copy:
+    src: "{{ test_src }}/files/dependencies.yml"
+    dest: "{{ output_dir }}/dependencies.yml"
+    mode: '0644'
+
+- name: Move the test playbook in place
+  ansible.builtin.copy:
+    src: "{{ test_src }}/files/test.yml"
+    dest:
"{{ output_dir }}/test.yml" + mode: '0644' diff --git a/tests/integration/targets/role_uki/tasks/teardown.yml b/tests/integration/targets/role_uki/tasks/teardown.yml new file mode 100644 index 0000000..a5d491e --- /dev/null +++ b/tests/integration/targets/role_uki/tasks/teardown.yml @@ -0,0 +1,50 @@ +--- +- name: Destroy all running test machines + community.libvirt.virt: + uri: 'qemu:///session' + command: destroy + name: "SB_{{ platform.name }}" + failed_when: false + loop: "{{ platforms }}" + loop_control: + loop_var: platform + extended: true + label: "{{ platform.name }}" + +- name: Undefine all test machines + community.libvirt.virt: + uri: 'qemu:///session' + command: undefine + force: true + name: "SB_{{ platform.name }}" + loop: "{{ platforms }}" + loop_control: + loop_var: platform + label: "{{ platform.name }}" + failed_when: false + +- name: Forget host keys + vars: + inventory_path: "{{ workspace_dir }}/.build/inventory.yml" + inv: "{{ lookup('file', inventory_path) | from_yaml }}" + key: "{{ ansible_loop.index0 | extract(host_key.results) }}" + host: "{{ inv.test.hosts[platform.name] }}" + address: "{{ host.ansible_host }}" + hport: "{{ host.ansible_port }}" + port_str: "{{ ':' + port if hport != '22' else '' }}" + addr_str: "{{ '[' + address + ']' if hport != '22' else address }}" + ansible.builtin.known_hosts: + name: "{{ addr_str }}{{ port_str }}" + state: absent + failed_when: false + loop: "{{ platforms }}" + loop_control: + loop_var: platform + extended: true + label: "{{ platform.name }}" + +- name: Remove output files + ansible.builtin.file: + path: "{{ workspace_dir }}/.build" + state: absent + failed_when: false diff --git a/tests/integration/targets/role_uki/tasks/test.yml b/tests/integration/targets/role_uki/tasks/test.yml new file mode 100644 index 0000000..7e94f57 --- /dev/null +++ b/tests/integration/targets/role_uki/tasks/test.yml @@ -0,0 +1,119 @@ +--- +- name: Import the role + ansible.builtin.include_role: + name: uki + +- name: Run 
it again! + ansible.builtin.import_role: + name: uki + notify: idempotency_failure + +- name: Get a temporary directory + become: true + ansible.builtin.tempfile: + state: directory + register: test_tempdir + +- name: Generate a new machine owner key but don't enroll it + block: + - name: Try to extract new_mok # noqa: risky-shell-pipe + become: true + ansible.builtin.shell: + cmd: > + certutil -d /etc/pki/pesign -n new_mok -Lr > + "{{ test_tempdir.path }}/new_mok.der" + creates: "{{ test_tempdir.path }}/new_mok.der" + register: new_mok_check + rescue: + - name: Generate a new machine owner key + become: true + ansible.builtin.command: + cmd: efikeygen -S -n new_mok -c 'CN=new_mok' -k + changed_when: true + +- name: Run the role again with the name of the new key + block: + - name: Import the role + ansible.builtin.import_role: + name: uki + vars: + uki_mok: + database_path: /etc/pki/pesign + friendly_name: new_mok + rescue: + - name: Print a successful-sounding message + ansible.builtin.debug: + msg: success! 
+ +- name: Export the same key we already have with a new name + delegate_to: localhost + ansible.builtin.command: + cmd: > + openssl pkcs12 -export -in "{{ test_support }}/MOK.pem" + -inkey "{{ test_support }}/MOK.priv" -passin 'pass:' + -out "{{ test_support }}/MOK_2.p12" -name 'mok_2' -passout 'pass:' + creates: "{{ test_support }}/MOK_2.p12" + +- name: Copy the key to the test host + become: true + ansible.builtin.copy: + src: "{{ test_support }}/MOK_2.p12" + dest: "{{ test_tempdir.path }}/MOK_2.p12" + mode: '0600' + +- name: Remove the existing key with the original name + become: true + ansible.builtin.command: + cmd: certutil -d /etc/pki/pesign -n mok -F + changed_when: true + +- name: Import the key to the pesign database + become: true + ansible.builtin.command: + argv: + - pk12util + - -i + - "{{ test_tempdir.path }}/MOK_2.p12" + - -d + - /etc/pki/pesign + - -W + - '' + changed_when: true + +- name: Get the details of the current UKI + become: true + ansible.builtin.stat: + path: "{{ uki_path }}" + register: old_uki + +- name: Run the role again with the new key name + ansible.builtin.import_role: + name: uki + vars: + uki_mok: + database_path: /etc/pki/pesign + friendly_name: mok_2 + +- name: Check whether the UKI was rebuilt + become: true + ansible.builtin.stat: + path: "{{ uki_path }}" + register: new_uki + failed_when: old_uki.stat.ctime != new_uki.stat.ctime + +- name: Try removing the kernel package on Debian + when: ansible_facts.distribution_file_variety == 'Debian' + become: true + vars: + prefix: "{{ 'kernel-core' if distro == 'RedHat' else 'linux-image' }}" + ansible.builtin.package: + name: "{{ prefix }}-{{ kernel }}" + state: absent + +- name: Ensure the respective UKI was removed + when: distro == 'Debian' + become: true + ansible.builtin.stat: + path: "{{ uki_path }}" + register: removed_uki_check + failed_when: removed_uki_check.stat.exists diff --git a/tests/integration/targets/role_uki/templates/domain.xml.j2 
b/tests/integration/targets/role_uki/templates/domain.xml.j2 new file mode 100644 index 0000000..23a361b --- /dev/null +++ b/tests/integration/targets/role_uki/templates/domain.xml.j2 @@ -0,0 +1,76 @@ + + SB_{{ platform.name }} + crichez.secureboot test machine + A transient test machine for the crichez.secureboot Ansible collection + 2 + 2 + + SandyBridge-IBRS + + + hvm + {{ fw_path | trim }} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {% if ansible_facts.distribution != 'MacOSX' %} + + + + + + + + + {% else %} + + + + + + + + {% endif %} + + {% if ansible_facts.distribution == 'MacOSX' %} + + + + + + + {% endif %} + diff --git a/tests/integration/targets/role_uki/templates/inventory.yml.j2 b/tests/integration/targets/role_uki/templates/inventory.yml.j2 new file mode 100644 index 0000000..92aa7bc --- /dev/null +++ b/tests/integration/targets/role_uki/templates/inventory.yml.j2 @@ -0,0 +1,15 @@ +--- +test: + hosts: + {% for platform in platforms %} + {{ platform.name }}: + {% if ansible_facts.distribution == 'MacOSX' %} + ansible_host: {{ addr.results[loop.index0].match_groups[0] }} + ansible_port: 22 + ansible_user: testuser + {% else %} + ansible_host: 127.0.0.1 + ansible_port: {{ 2022 + loop.index0 }} + ansible_user: testuser + {% endif %} + {% endfor %} diff --git a/tests/integration/targets/role_uki/templates/meta-data b/tests/integration/targets/role_uki/templates/meta-data new file mode 100644 index 0000000..dc54834 --- /dev/null +++ b/tests/integration/targets/role_uki/templates/meta-data @@ -0,0 +1,2 @@ +instance-id: "ansible-test:crichez.secureboot" +local-hostname: "crichez.secureboot" diff --git a/tests/integration/targets/role_uki/templates/user-data.yml.j2 b/tests/integration/targets/role_uki/templates/user-data.yml.j2 new file mode 100644 index 0000000..e229e5c --- /dev/null +++ b/tests/integration/targets/role_uki/templates/user-data.yml.j2 @@ -0,0 +1,16 @@ +#cloud-config + +users: + - name: testuser + sudo: "ALL=(ALL) 
NOPASSWD:ALL" + lock_passwd: false + plain_text_passwd: password + ssh_authorized_keys: + - "{{ ssh_pub }}" + +{% if platform.init_packages is defined %} +packages: +{% for package in platform.init_packages %} + - {{ package }} +{% endfor %} +{% endif %} diff --git a/tests/integration/targets/role_uki/vars/dependencies.yml b/tests/integration/targets/role_uki/vars/dependencies.yml new file mode 100644 index 0000000..f7fb38e --- /dev/null +++ b/tests/integration/targets/role_uki/vars/dependencies.yml @@ -0,0 +1,14 @@ +--- +output_dir: "{{ workspace_dir }}/.build" +mok_nickname: mok +packages_common: + - systemd-ukify + - systemd-boot + - python3-cryptography + - python3-virt-firmware + - pesign +deps_redhat: "{{ packages_common }}" +debian_extras: + - python3-importlib-resources +deps_debian: "{{ packages_common | ansible.builtin.union(debian_extras) }}" +pesign_dbdir: /etc/pki/pesign diff --git a/tests/integration/targets/role_uki/vars/platforms.yml b/tests/integration/targets/role_uki/vars/platforms.yml new file mode 100644 index 0000000..68ae5db --- /dev/null +++ b/tests/integration/targets/role_uki/vars/platforms.yml @@ -0,0 +1,19 @@ +--- +platforms: + - name: Fedora_41 + url: 'https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2' + init_packages: + - python3-libdnf5 + mac_address: "{{ '54:52:00' | community.general.random_mac }}" + + - name: Fedora_40 + url: 'https://download.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/x86_64/images/Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2' + mac_address: "{{ '54:52:00' | community.general.random_mac }}" + + - name: Debian_Trixie + url: 'https://cloud.debian.org/images/cloud/trixie/daily/latest/debian-13-generic-amd64-daily.qcow2' + mac_address: "{{ '54:52:00' | community.general.random_mac }}" + + - name: Ubuntu_Noble + url: 'https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-amd64.img' + mac_address: 
"{{ '54:52:00' | community.general.random_mac }}" diff --git a/tests/integration/targets/role_uki/vars/setup.yml b/tests/integration/targets/role_uki/vars/setup.yml new file mode 100644 index 0000000..79ef18b --- /dev/null +++ b/tests/integration/targets/role_uki/vars/setup.yml @@ -0,0 +1,8 @@ +--- +galaxy_path: "{{ workspace_dir }}/galaxy.yml" +galaxy_content: "{{ lookup('ansible.builtin.file', galaxy_path) }}" +galaxy: "{{ galaxy_content | ansible.builtin.from_yaml }}" +tarball: "{{ galaxy.namespace }}-{{ galaxy.name }}-{{ galaxy.version }}.tar.gz" +output_dir: "{{ workspace_dir }}/.build" +cache_dir: "{{ workspace_dir }}/.cache" +mok_nickname: mok diff --git a/tests/integration/targets/role_uki/vars/test.yml b/tests/integration/targets/role_uki/vars/test.yml new file mode 100644 index 0000000..4c02b4a --- /dev/null +++ b/tests/integration/targets/role_uki/vars/test.yml @@ -0,0 +1,6 @@ +--- +machine_id: "{{ ansible_facts.machine_id }}" +kernel: "{{ ansible_facts.kernel }}" +distro: "{{ ansible_facts.distribution_file_variety }}" +uki_name: "{{ machine_id }}-{{ kernel }}.efi" +uki_path: "/boot/efi/EFI/Linux/{{ uki_name }}"