Commit fec578a

feat: Enable CEX based LUKS encryption

1. Configure the CEX on the KVM host
2. Use the right device-type Ignition for the MCO
3. Attach the CEX mediated device to the guest VM

Signed-off-by: Madhu Pillai <madhupillai80@gmail.com>

1 parent 03d6d8d, commit fec578a

26 files changed: +678 -0 lines

docs/set-variables-group-vars.md

Lines changed: 4 additions & 0 deletions

@@ -12,6 +12,8 @@
:--- | :--- | :---
**installation_type** | Can be of type kvm or lpar. Some packages will be ignored for installation in case of a non-lpar based installation. | kvm
**controller_sudo_pass** | The password to the machine running Ansible (localhost). This will only be used for two things: to ensure you've installed the pre-requisite packages if you're on Linux, and to add the login URL to your /etc/hosts file. | Pas$w0rd!
**cex** | Whether to enable CEX-based LUKS encryption. Defaults to False. | False
**luks_device** | Specify the storage device type used for LUKS encryption. This setting determines which MCO Ignition configuration will be applied. Use in combination with the cex parameter. | dasd, fcp, or virt
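
Taken together, enabling the feature in the group vars looks like the following (a minimal sketch of the two new settings in all.yaml.template; the values are illustrative):

```yaml
# Section 1 - Ansible Controller
cex: True          # enable CEX-based LUKS encryption
luks_device: dasd  # one of dasd | fcp | virt; selects the MCO Ignition config
```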

## 2 - LPAR(s)
**Variable Name** | **Description** | **Example**
@@ -123,6 +125,7 @@
**env.cluster.nodes.control.ipv6** | IPv6 address for the control nodes. Use the provided list formatting (if use_ipv6 variable is 'True'). | fd00::5, fd00::6, fd00::7
**env.cluster.nodes.control.mac** | MAC address for the control node if use_dhcp variable is 'True'. | 52:54:00:18:1A:2B
**env.cluster.nodes.control.hostname** | Hostnames for control nodes. Must match the total number of IP addresses for control nodes (usually 3). If DNS is hosted on the bastion, this can be anything. If DNS is hosted elsewhere, this must match the DNS definition. This will be combined with the metadata_name and base_domain to create a Fully Qualified Domain Name (FQDN). | control-01, control-02, control-03
**env.cluster.nodes.control.cex_card** | Configure the CCA Crypto Express (CEX) card and assign the domain number corresponding to the control hostname specified in the environment configuration. The UUIDs are hardcoded. Add more as needed using the format: UUID:00.0028
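
For illustration, a control-plane assignment might look like this, pairing each hostname positionally with a mediated-device UUID and the card.domain value reported by lszcrypt on the KVM host (the UUIDs below are the hardcoded template defaults; the domain numbers are placeholders):

```yaml
control:
  cex_card:
    - 68cd2d83-3eef-4e45-b22c-534f90b16cb9:00.0028  # control-01
    - ba3f873c-3022-4534-9072-eb0d4530b137:00.0029  # control-02
    - 610e4724-c8c1-45ed-a498-ccfdadc1f2ca:00.002a  # control-03
```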

## 9 - Compute Nodes
**Variable Name** | **Description** | **Example**
@@ -136,6 +139,7 @@
**env.cluster.nodes.compute.ipv6** | IPv6 address for the compute nodes. Use the provided list formatting (if use_ipv6 variable is 'True'). | fd00::8, fd00::9
**env.cluster.nodes.compute.mac** | MAC address for the compute node if use_dhcp variable is 'True'. | 52:54:00:18:1A:2B
**env.cluster.nodes.compute.hostname** | Hostnames for compute nodes. Must match the total number of IP addresses and VM names for compute nodes. If DNS is hosted on the bastion, this can be anything. If DNS is hosted elsewhere, this must match the DNS definition. This will be combined with the metadata_name and base_domain to create a Fully Qualified Domain Name (FQDN). | compute-01, compute-02
**env.cluster.nodes.compute.cex_card** | Configure the CCA Crypto Express (CEX) card and assign the domain number corresponding to the compute hostname specified in the environment configuration. The UUIDs are hardcoded. Add more as needed using the format: UUID:00.0028

## 10 - Infra Nodes
**Variable Name** | **Description** | **Example**

inventories/default/group_vars/all.yaml.template

Lines changed: 9 additions & 0 deletions

@@ -6,6 +6,8 @@
# Section 1 - Ansible Controller
installation_type: kvm
controller_sudo_pass: #X
cex: False
luks_device: #[dasd | fcp | virt ]

env:

@@ -140,6 +142,10 @@ env:
        # - X
        # - X
        # - X
        cex_card:
          - 68cd2d83-3eef-4e45-b22c-534f90b16cb9:xx.xxxx # xx.xxxx is the CEX CCA card.domain number from lszcrypt.
          - ba3f873c-3022-4534-9072-eb0d4530b137:xx.xxxx
          - 610e4724-c8c1-45ed-a498-ccfdadc1f2ca:xx.xxxx

      # Section 9 - Compute Nodes
      compute:

@@ -162,6 +168,9 @@ env:
        hostname:
        # - X
        # - X
        cex_card:
          - 5c84eefb-cb45-4519-86d3-ba23e65e8896:xx.xxxx # xx.xxxx is the CEX CCA card.domain number from lszcrypt.
          - 60cf6ec4-2741-44e2-84d2-0746f133db16:xx.xxxx

      # Section 10 - Infra Nodes
      # infra:

playbooks/3_setup_kvm_host.yaml

Lines changed: 6 additions & 0 deletions

@@ -188,3 +188,9 @@
  roles:
    - configure_storage
    - { role: macvtap, when: env.network_mode | upper != 'NAT' }

- hosts: kvm_host
  tags: setup, section_3
  become: true
  roles:
    - { role: configure_cex, when: cex | default(false) | bool }
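
A quick way to sanity-check the configure_cex role afterwards is to list the mediated devices it created under the vfio_ap matrix sysfs path (a sketch, not part of this commit):

```yaml
- hosts: kvm_host
  become: true
  tasks:
    - name: Find vfio_ap mediated devices (UUID-named directories)
      ansible.builtin.find:
        paths: /sys/devices/vfio_ap/matrix
        file_type: directory
      register: mdevs

    - name: Show mediated device paths
      ansible.builtin.debug:
        msg: "{{ mdevs.files | map(attribute='path') | list }}"
```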
Lines changed: 102 additions & 0 deletions

@@ -0,0 +1,102 @@
---
- name: CEX create nodes - find LPAR host files
  hosts: localhost
  vars_files:
    - "{{ inventory_dir }}/group_vars/all.yaml"
  tasks:
    - block:
        - name: Loop over node types and include role
          include_role:
            name: check_for_lpar_nodes
          loop:
            - bootstrap
            - control
            - compute
          loop_control:
            loop_var: node_type
          register: result
          ignore_errors: true

        - name: Fail the play if the previous check did not succeed
          fail:
            msg: "Check for LPAR nodes failed for bootstrap or control node"
          when:
            - "'bootstrap' in result.msg"
            - "'control' in result.msg"

- name: CEX create nodes - prepare KVM guests
  hosts: kvm_host
  gather_facts: false
  vars_files:
    - "{{ inventory_dir }}/group_vars/all.yaml"
  roles:
    - { role: prep_kvm_guests, when: env.cluster.nodes.bootstrap.vm_name not in hosts_with_host_vars }
    - { role: delete_nodes, when: env.cluster.nodes.bootstrap.vm_name not in hosts_with_host_vars }

- name: CEX create nodes - create bootstrap
  hosts: kvm_host[0]
  gather_facts: false
  vars_files:
    - "{{ inventory_dir }}/group_vars/all.yaml"
  roles:
    - { role: common, when: env.cluster.nodes.bootstrap.vm_name not in hosts_with_host_vars }
    - { role: create_bootstrap, when: env.cluster.nodes.bootstrap.vm_name not in hosts_with_host_vars }

# CONTROL NODE CREATION
- name: CEX create nodes - create control nodes
  hosts: kvm_host
  gather_facts: false
  vars_files:
    - "{{ inventory_dir }}/group_vars/all.yaml"
  roles:
    - role: common
    - role: create_cex_nodes
      vars:
        create_phase: "control"
      when: cex | bool

- name: CEX create nodes - wait for bootstrap to connect control plane
  hosts: bastion
  become: true
  gather_facts: true
  vars_files:
    - "{{ inventory_dir }}/group_vars/all.yaml"
  environment:
    KUBECONFIG: "{{ '/home/' if (env.bastion.access.user != 'root') else '/' }}{{ env.bastion.access.user }}/.kube/config"
  roles:
    - wait_for_bootstrap

- name: CEX create nodes - tear down bootstrap
  hosts: kvm_host[0]
  gather_facts: false
  tags: teardown_bootstrap
  vars_files:
    - "{{ inventory_dir }}/group_vars/all.yaml"
  tasks:
    - name: Destroy bootstrap VM
      community.libvirt.virt:
        name: "{{ env.cluster.nodes.bootstrap.vm_name }}"
        command: destroy
      ignore_errors: true
      when: env.cluster.nodes.bootstrap.vm_name not in hosts_with_host_vars

    - name: Undefine bootstrap VM
      community.libvirt.virt:
        name: "{{ env.cluster.nodes.bootstrap.vm_name }}"
        command: undefine
      ignore_errors: true

# COMPUTE NODE CREATION
- name: CEX create nodes - create compute nodes
  hosts: kvm_host
  gather_facts: false
  tags: create_cex_nodes
  vars_files:
    - "{{ inventory_dir }}/group_vars/all.yaml"
  roles:
    - role: common
    - role: create_cex_nodes
      vars:
        create_phase: "compute"
      when: env.cluster.nodes.bootstrap.vm_name not in hosts_with_host_vars
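
The create_cex_nodes role is invoked twice with different create_phase values. Its entry point is not shown in this view; under the assumption that it simply dispatches per phase, its tasks/main.yaml would reduce to something like:

```yaml
# Hypothetical dispatch in roles/create_cex_nodes/tasks/main.yaml (not shown in this commit view)
- include_tasks: create_control_nodes.yaml
  when: create_phase == "control"

- include_tasks: create_compute_nodes.yaml
  when: create_phase == "compute"
```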
Lines changed: 39 additions & 0 deletions

@@ -0,0 +1,39 @@
- name: Load vfio_ap kernel module
  ansible.builtin.modprobe:
    name: vfio_ap
    state: present

- name: Set default AP masks
  ansible.builtin.shell: |
    echo 0x0 > /sys/bus/ap/apmask
    echo 0x0 > /sys/bus/ap/aqmask
  args:
    executable: /bin/bash

- name: Combine all CEX card assignments from control and compute nodes
  set_fact:
    cex_cards: "{{ (env.cluster.nodes.control.cex_card | default([])) + (env.cluster.nodes.compute.cex_card | default([])) }}"

- name: Deduplicate CEX cards list (in case of shared UUIDs)
  set_fact:
    cex_cards: "{{ cex_cards | unique }}"

- name: Debug final list of CEX UUID assignments
  debug:
    var: cex_cards

- name: Create VFIO assignment script for all CEX cards
  template:
    src: assign_cards.sh.j2
    dest: /tmp/assign_all_cex_cards.sh
    mode: '0755'

- name: Execute VFIO assignment script
  shell: /tmp/assign_all_cex_cards.sh
  args:
    executable: /bin/bash

- name: Housekeep temporary assignment script
  file:
    path: /tmp/assign_all_cex_cards.sh
    state: absent
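
Writing 0x0 to /sys/bus/ap/apmask and /sys/bus/ap/aqmask releases every adapter and usage domain from the host's default AP drivers, making them available to vfio_ap passthrough. A readback task (a sketch, not part of this commit) can confirm the masks were cleared:

```yaml
- name: Read back the AP bus masks
  ansible.builtin.command: "cat {{ item }}"
  loop:
    - /sys/bus/ap/apmask
    - /sys/bus/ap/aqmask
  register: ap_masks
  changed_when: false

- name: Show current AP bus masks
  ansible.builtin.debug:
    msg: "{{ ap_masks.results | map(attribute='stdout') | list }}"
```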
Lines changed: 35 additions & 0 deletions

@@ -0,0 +1,35 @@
#!/bin/bash
# Reference document for the CEX configuration in zKVM:
# https://www.ibm.com/docs/en/linux-on-systems?topic=management-configuring-crypto-express-adapters-kvm-guests

# Configure each CEX card
{% for entry in cex_cards %}
{% set uuid = entry.split(':')[0] %}
{% set matrix_val = entry.split(':')[1] %}
{% set adapter = matrix_val.split('.')[0] %}
{% set domain = matrix_val.split('.')[1] %}

uuid="{{ uuid }}"
matrix_val="{{ matrix_val }}"
adapter="{{ adapter }}"
domain="{{ domain }}"

uuid_path="/sys/devices/vfio_ap/matrix/$uuid"
matrix_file="$uuid_path/matrix"

if [ -d "$uuid_path" ]; then
  if grep -q "{{ matrix_val }}" "$matrix_file" 2>/dev/null; then
    echo "[INFO] UUID $uuid already configured with matrix {{ matrix_val }}, skipping."
  else
    echo "[WARN] UUID $uuid exists, but matrix entry '{{ matrix_val }}' not found!"
    echo "[WARN] Please reboot the node and try again."
    exit 1
  fi
else
  echo "[INFO] Creating UUID $uuid with adapter $adapter and domain $domain"
  echo "$uuid" > /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/create
  echo "0x$adapter" > "$uuid_path/assign_adapter"
  echo "0x$domain" > "$uuid_path/assign_domain"
fi

{% endfor %}
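
After the script runs, each mediated device's matrix file should list its assigned adapter.domain pair. A quick ad-hoc check (a sketch; the UUID directories created above are distinguishable from mdev_supported_types by their hyphenated names):

```yaml
- name: Show the APQNs assigned to each vfio_ap mediated device
  ansible.builtin.shell: |
    for m in /sys/devices/vfio_ap/matrix/*-*/matrix; do
      echo "$m: $(cat "$m")"
    done
  args:
    executable: /bin/bash
  register: matrix_view
  changed_when: false
```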
Lines changed: 24 additions & 0 deletions

@@ -0,0 +1,24 @@
- name: Extract UUID and BDF from cex_pair
  set_fact:
    cex_uuid: "{{ cex_pair.split(':')[0] }}"
    cex_bdf: "{{ cex_pair.split(':')[1] }}"

- name: Debug CEX assignment
  debug:
    msg: "Attaching CEX UUID {{ cex_uuid }} to VM {{ vm_name }}"

- name: Generate hostdev XML from template
  template:
    src: hostdev.xml.j2
    dest: "/tmp/cex_{{ vm_name }}_{{ cex_uuid }}.xml"

- name: Check if CEX UUID already attached
  shell: |
    virsh dumpxml {{ vm_name }} | grep -q "{{ cex_uuid }}"
  register: cex_attached
  ignore_errors: true

- name: Attach CEX device if not already attached
  shell: |
    virsh attach-device {{ vm_name }} /tmp/cex_{{ vm_name }}_{{ cex_uuid }}.xml --persistent
  when: cex_attached.rc != 0
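
The hostdev.xml.j2 template itself is not visible in this commit view. Based on libvirt's documented mediated-device passthrough syntax for vfio_ap, a minimal version of that template would look roughly like this (an assumed sketch, using the cex_uuid fact set above):

```xml
<!-- Hypothetical hostdev.xml.j2: attaches the vfio_ap mediated device by UUID -->
<hostdev mode='subsystem' type='mdev' model='vfio-ap'>
  <source>
    <address uuid='{{ cex_uuid }}'/>
  </source>
</hostdev>
```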
Lines changed: 6 additions & 0 deletions

@@ -0,0 +1,6 @@
- name: Loop over compute node names and call per-node logic
  include_tasks: create_compute_node_single.yaml
  loop: "{{ env.cluster.nodes.compute.vm_name }}"
  loop_control:
    loop_var: compute_node_name
    index_var: control_index
Lines changed: 58 additions & 0 deletions

@@ -0,0 +1,58 @@
- name: Set facts for current compute node
  set_fact:
    vm_name: "{{ compute_node_name }}"
    vm_hostname: "{{ env.cluster.nodes.compute.hostname[control_index] }}"
    vm_mac: "{{ env.cluster.nodes.compute.mac[control_index] | default('') }}"
    vm_ip: "{{ env.cluster.nodes.compute.ip[control_index] }}"
    vm_ipv6: "{{ env.cluster.nodes.compute.ipv6[control_index] | default('') }}"

- name: Check if VM {{ vm_name }} already exists
  command: "virsh dominfo {{ vm_name }}"
  register: vm_check
  failed_when: false
  changed_when: false

- name: Skip creation of VM {{ vm_name }} if it already exists
  debug:
    msg: "VM {{ vm_name }} already exists, skipping creation."
  when: vm_check.rc == 0

- name: Create CoreOS compute node VM {{ vm_name }}
  shell: |
    virt-install \
      --name {{ vm_name }} \
      --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
      --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.compute.disk_size }},cache=none,io=native \
      --ram {{ env.cluster.nodes.compute.ram }} \
      {{ env.cluster.nodes.compute.vcpu_model_option | default('--cpu host') }} \
      --vcpus {{ env.cluster.nodes.compute.vcpu }} \
      --network network={{ env.vnet_name }}{% if vm_mac and env.use_dhcp %},mac={{ vm_mac }}{% endif %} \
      --location {{ rhcos_download_url }},kernel={{ rhcos_live_kernel }},initrd={{ rhcos_live_initrd }} \
      --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda" \
      --extra-args "coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/{{ rhcos_live_rootfs }}" \
      {% if env.use_dhcp and vm_mac %}
      --extra-args "ip=dhcp" \
      {% else %}
      --extra-args "ip={{ vm_ip }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ vm_hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}:{{ env.cluster.networking.interface }}:none:1500" \
      --extra-args "{{ ('ip=[' + vm_ipv6 + ']::[' + env.cluster.networking.ipv6_gateway + ']:' + env.cluster.networking.ipv6_prefix | string + '::' + env.cluster.networking.interface + ':none') if env.use_ipv6 == True else '' }}" \
      {% endif %}
      --extra-args "nameserver={{ env.cluster.networking.nameserver1 }}{% if env.cluster.networking.nameserver2 is defined %},{{ env.cluster.networking.nameserver2 }}{% endif %}" \
      --extra-args "coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/worker.ign" \
      --extra-args "{{ _vm_console }}" \
      --memballoon none \
      --graphics none \
      --console pty,target_type=serial \
      --wait=-1 \
      --noautoconsole
  args:
    executable: /bin/bash
  when: vm_check.rc != 0

- name: Set CEX cards for {{ vm_name }}
  set_fact:
    cex_cards: ["{{ env.cluster.nodes.compute.cex_card[control_index] }}"]

- name: Attach each CEX device from assignment list
  include_tasks: attach_cex_device.yaml
  loop: "{{ cex_cards }}"
  loop_control:
    loop_var: cex_pair
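
Note that the card assignment is positional: compute node i receives compute.cex_card[i], so the list order in the group vars must match the vm_name/hostname order. With the template defaults that pairing works out as:

```yaml
# Positional pairing assumed by the tasks above (illustrative)
# compute-01 <- 5c84eefb-cb45-4519-86d3-ba23e65e8896:xx.xxxx
# compute-02 <- 60cf6ec4-2741-44e2-84d2-0746f133db16:xx.xxxx
```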
Lines changed: 6 additions & 0 deletions

@@ -0,0 +1,6 @@
- name: Loop over control node names and call per-node logic
  include_tasks: create_control_node_single.yaml
  loop: "{{ env.cluster.nodes.control.vm_name }}"
  loop_control:
    loop_var: control_node_name
    index_var: control_index
