Skip to content

Commit d3ec636

Browse files
committed
Virtual disk: add migration case with vhostuser disk
Automate case RHEL-200524: Migrate VM with a vhostuser disk.

Signed-off-by: Meina Li <meili@redhat.com>
1 parent 251d5bd commit d3ec636

File tree

2 files changed

+246
-0
lines changed

2 files changed

+246
-0
lines changed
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
- migration.migration_with_disk.migration_with_vhostuser:
2+
type = migration_with_vhostuser
3+
start_vm = "no"
4+
migration_setup = "yes"
5+
storage_type = "nfs"
6+
setup_local_nfs = "yes"
7+
disk_type = "file"
8+
disk_source_protocol = "netfs"
9+
mnt_path_name = ${nfs_mount_dir}
10+
# Console output can only be monitored via virsh console output
11+
only_pty = True
12+
take_regular_screendumps = no
13+
# Extra options to pass after <domain> <desturi>
14+
virsh_migrate_extra = ""
15+
# SSH connection time out
16+
ssh_timeout = 60
17+
virsh_migrate_connect_uri = "qemu:///system"
18+
virsh_migrate_dest_state = "running"
19+
virsh_migrate_src_state = "shut off"
20+
server_ip = "${migrate_dest_host}"
21+
server_user = "root"
22+
server_pwd = "${migrate_dest_pwd}"
23+
client_ip = "${migrate_dest_host}"
24+
client_pwd = "${migrate_source_pwd}"
25+
migrate_desturi_port = "22"
26+
migrate_desturi_type = "ssh"
27+
virsh_migrate_desturi = "qemu+ssh://${migrate_dest_host}/system"
28+
func_supported_since_libvirt_ver = (7, 0, 0)
29+
vm_attrs = {"mb": {"source_type":"memfd", "access_mode": "shared"}}
30+
source_file = "/tmp/vhost.sock"
31+
queues = 1
32+
disk_dict = {"type_name": "vhostuser", "device": "disk", "driver": {"name": "qemu", "type": "raw", "queues": ${queues}}, "source": {"attrs": {"type": "unix", "path": "${source_file}"}}, "target": {"dev": "vdb", "bus": "virtio"}}
33+
no ppc64le
34+
variants:
35+
- with_precopy:
Lines changed: 211 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,211 @@
1+
import os
2+
import ast
3+
4+
from avocado.utils import process
5+
6+
from virttest import libvirt_version
7+
from virttest import remote
8+
from virttest import virsh
9+
from virttest import utils_disk
10+
from virttest import data_dir
11+
12+
from virttest.libvirt_xml import vm_xml
13+
from virttest.utils_libvirt import libvirt_disk
14+
from virttest.utils_libvirt import libvirt_vmxml
15+
from virttest.utils_test import libvirt
16+
17+
from provider.migration import base_steps
18+
19+
20+
def start_vhost_sock_service_in_source(start_sock_service_cmd, image_path, sock_path):
21+
"""
22+
Start one vhost sock service in source host.
23+
24+
:param start_sock_service_cmd: command to start vhost service
25+
:param image_path: image file path
26+
:param sock_path: sock file path
27+
"""
28+
# Create backend image in source host
29+
libvirt.create_local_disk("file", image_path, size="100M")
30+
chown_cmd = "chown qemu:qemu %s" % image_path
31+
process.run(chown_cmd, ignore_status=False, shell=True)
32+
# Start vhost sock service in source host
33+
cmd_output = process.run(start_sock_service_cmd, ignore_status=False, shell=True).stdout_text.strip()
34+
# Set SELinux context in source host
35+
ch_seccontext_cmd = "chcon -t svirt_image_t %s" % sock_path
36+
process.run(ch_seccontext_cmd, ignore_status=False, shell=True)
37+
set_bool_mmap_cmd = "setsebool domain_can_mmap_files 1 -P"
38+
process.run(set_bool_mmap_cmd, ignore_status=False, shell=True)
39+
return cmd_output
40+
41+
42+
def start_vhost_sock_service_in_remote(start_sock_service_cmd, image_path, sock_path, params):
43+
"""
44+
Prepare and start one vhost sock service in remote host.
45+
46+
:param start_sock_service_cmd: command to start vhost service
47+
:param image_path: image file path
48+
:param sock_path: sock file path
49+
:param params: test parameters
50+
"""
51+
remote.run_remote_cmd(f"mkdir -p {os.path.dirname(image_path)}", params, ignore_status=True)
52+
# Create backend image in remote host
53+
remote_create_cmd = f"dd if=/dev/zero of={image_path} bs=1M count=100 && chown qemu:qemu {image_path}"
54+
remote.run_remote_cmd(remote_create_cmd, params, ignore_status=False)
55+
# Start vhost sock service in remote host
56+
remote_start_result = remote.run_remote_cmd(start_sock_service_cmd, params, ignore_status=False)
57+
remote_vsock_service_id = remote_start_result.stdout_text.strip() if hasattr(remote_start_result, 'stdout_text') else None
58+
# Set SELinux context in remote host
59+
remote_selinux_cmd = f"chcon -t svirt_image_t {sock_path} && setsebool domain_can_mmap_files 1 -P"
60+
remote.run_remote_cmd(remote_selinux_cmd, params, ignore_status=False)
61+
return remote_vsock_service_id
62+
63+
64+
def run(test, params, env):
65+
"""
66+
Test vhostuser disk migration.
67+
68+
1.Prepare vhostuser disk and start the domain.
69+
2.Perform migration operation.
70+
3.Verify vhostuser disk after migration.
71+
"""
72+
73+
def setup_test():
74+
"""
75+
Setup steps before migration
76+
"""
77+
nonlocal vsock_service_id, remote_vsock_service_id, image_path, sock_path
78+
79+
test.log.info("Setup steps for vhostuser disk migration.")
80+
81+
sock_path = params.get("source_file", "/tmp/vhost.sock")
82+
image_path = data_dir.get_data_dir() + '/test.img'
83+
disk_dict = ast.literal_eval(params.get("disk_dict", "{}"))
84+
device_target = params.get("target_dev", "vdb")
85+
vm_attrs = eval(params.get("vm_attrs", "{}"))
86+
87+
# Define start_sock_service_cmd
88+
start_sock_service_cmd = (
89+
'systemd-run --uid qemu --gid qemu /usr/bin/qemu-storage-daemon'
90+
' --blockdev \'{"driver":"file","filename":"%s","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}\''
91+
' --blockdev \'{"node-name":"libvirt-1-format","read-only":false,"driver":"raw","file":"libvirt-1-storage"}\''
92+
' --export vhost-user-blk,id=vhost-user-blk0,node-name=libvirt-1-format,addr.type=unix,addr.path=%s,writable=on'
93+
' --chardev stdio,mux=on,id=char0; sleep 3'
94+
% (image_path, sock_path))
95+
96+
# Start vhost service in source host
97+
vsock_service_id = start_vhost_sock_service_in_source(start_sock_service_cmd, image_path, sock_path)
98+
# Start vhost service in remote host
99+
remote_vsock_service_id = start_vhost_sock_service_in_remote(start_sock_service_cmd, image_path, sock_path, params)
100+
# Setup migration connection
101+
migration_obj.setup_connection()
102+
# Prepare the VM with memory backing and vhostuser disk.
103+
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
104+
vmxml.setup_attrs(**vm_attrs)
105+
disk_obj = libvirt_vmxml.create_vm_device_by_type("disk", disk_dict)
106+
test.log.debug("vhostuser disk xml is:\n%s" % disk_obj)
107+
vmxml.add_device(disk_obj)
108+
vmxml.sync()
109+
base_steps.sync_cpu_for_mig(params)
110+
vm.start()
111+
vm.wait_for_login().close()
112+
113+
# Check if vhostuser disk is accessible in VM
114+
if "vhostuser" not in virsh.dumpxml(vm_name).stdout_text:
115+
test.fail("Check vhostuser disk in VM failed")
116+
117+
test.log.info("Setup completed successfully.")
118+
119+
def verify_test():
120+
"""
121+
Verify steps after migration
122+
123+
"""
124+
test.log.info("Verify steps after vhostuser disk migration.")
125+
126+
device_target = params.get("target_dev", "vdb")
127+
desturi = params.get("virsh_migrate_desturi")
128+
129+
# Switch to destination host
130+
backup_uri, vm.connect_uri = vm.connect_uri, desturi
131+
vm.cleanup_serial_console()
132+
vm.create_serial_console()
133+
vm_session = vm.wait_for_serial_login(timeout=120)
134+
135+
try:
136+
# Verify vhostuser disk is still accessible after migration
137+
output = vm_session.cmd_output("lsblk")
138+
test.log.debug("lsblk output after migration: %s", output)
139+
if device_target not in output:
140+
test.fail(f'Vhostuser disk device {device_target} not found in VM after migration')
141+
# Read from the disk to ensure it's working
142+
utils_disk.dd_data_to_vm_disk(vm_session, "/dev/%s" % device_target)
143+
test.log.info(f"Vhostuser disk {device_target} is accessible after migration")
144+
145+
finally:
146+
vm_session.close()
147+
148+
# Restore original connection URI
149+
vm.connect_uri = backup_uri
150+
151+
# Run default migration verification
152+
migration_obj.verify_default()
153+
154+
test.log.info("Verification completed successfully.")
155+
156+
def cleanup_test():
157+
"""
158+
Cleanup steps for cases
159+
160+
"""
161+
test.log.info("Cleanup steps for vhostuser disk migration.")
162+
if vm.is_alive():
163+
vm.destroy(gracefully=False)
164+
vmxml_backup.sync()
165+
166+
migration_obj.cleanup_connection()
167+
168+
# Cleanup on remote host
169+
if remote_vsock_service_id:
170+
remote.run_remote_cmd(f"systemctl stop {remote_vsock_service_id}", params, ignore_status=True)
171+
remote.run_remote_cmd('pidof qemu-storage-daemon && killall qemu-storage-daemon', params, ignore_status=True)
172+
remote.run_remote_cmd(f"rm -rf {sock_path} {image_path}", params, ignore_status=True)
173+
174+
# Kill all qemu-storage-daemon process on local host
175+
process.run("pidof qemu-storage-daemon && killall qemu-storage-daemon",
176+
ignore_status=True, shell=True)
177+
178+
if vsock_service_id:
179+
stop_vsock_service_cmd = "systemctl stop %s" % vsock_service_id
180+
process.run(stop_vsock_service_cmd, ignore_status=True, shell=True)
181+
182+
# Clean up images
183+
for file_path in [image_path, sock_path]:
184+
if os.path.exists(file_path):
185+
os.remove(file_path)
186+
187+
test.log.info("Cleanup completed successfully.")
188+
189+
libvirt_version.is_libvirt_feature_supported(params)
190+
vm_name = params.get("migrate_main_vm")
191+
vm = env.get_vm(vm_name)
192+
193+
# Initialize variables
194+
vsock_service_id = None
195+
remote_vsock_service_id = None
196+
image_path = None
197+
sock_path = None
198+
199+
# Back up xml file.
200+
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
201+
202+
# Migration object
203+
migration_obj = base_steps.MigrationBase(test, vm, params)
204+
205+
try:
206+
setup_test()
207+
migration_obj.run_migration()
208+
verify_test()
209+
210+
finally:
211+
cleanup_test()

0 commit comments

Comments
 (0)