Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions qemu/tests/cfg/hotplug_mem_migration.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,17 @@
numa_test = 'numactl -m %s dd if=/dev/urandom of=/tmp/numa_test/test '
numa_test += 'bs=1k count=%d && rm -rf /tmp/numa_test/'
stress_args = '--cpu 4 --io 4 --vm 2 --vm-bytes 4096M'
take_regular_screendumps = no # FIXME:
store_vm_info = no # FIXME:
nodes = node1 node2
node_selectors_node1 = [{"key": "cpu_vendor_id", "operator": "eq", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
node_selectors_node2 = [{"key": "cpu_vendor_id", "operator": "==", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
pools = p1
vm_node = node1
mig_dest_node = node2
pool_selectors_p1 = [{"key": "type", "operator": "==", "values": "nfs"}, {"key": "access.nodes", "operator": "contains", "values": ["node1", "node2"]}]
image_pool_name_image1 = p1
image_name_image1 = /mnt/images/rhel950-64-virtio-scsi-ovmf.qcow2
variants with_cache:
- @default:
- enable_dirty_ring:
Expand Down
11 changes: 11 additions & 0 deletions qemu/tests/cfg/migrate.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,17 @@
# you can uncomment the following line to enable the state
# check
# vmstate_check = yes
take_regular_screendumps = no # FIXME:
store_vm_info = no # FIXME:
nodes = node1 node2
node_selectors_node1 = [{"key": "cpu_vendor_id", "operator": "eq", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
node_selectors_node2 = [{"key": "cpu_vendor_id", "operator": "==", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
pools = p1
vm_node = node1
mig_dest_node = node2
pool_selectors_p1 = [{"key": "type", "operator": "==", "values": "nfs"}, {"key": "access.nodes", "operator": "contains", "values": ["node1", "node2"]}]
image_pool_name_image1 = p1
image_name_image1 = /mnt/images/rhel950-64-virtio-scsi-ovmf.qcow2
variants:
- @default:
- with_filter_off:
Expand Down
70 changes: 70 additions & 0 deletions qemu/tests/cfg/multi_host_basic.cfg
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
# Basic multi-host (multi-node) test scenario: boots VM(s) across cluster
# nodes and optionally exercises block-device hotplug/unplug, shared
# storage pools, and cross-node live migration.
- multi_host_basic:
    virt_test_type = qemu
    type = multi_host_basic
    restart_vm = yes
    kill_vm = yes
    login_timeout = 240
    check_vm_needs_restart = no # FIXME: Work around for it
    # Screendump and VM-info background services are disabled for
    # multi-host runs (see matching FIXMEs in the other migration cfgs).
    take_regular_screendumps = no
    store_vm_info = no
    variants:
        - @default:
        - with_hotplug:
            variants:
                - with_block:
                    # Hot-plug one extra 1G data disk (stg0) after boot;
                    # boot_drive_stg0 = no keeps it off the initial cmdline.
                    hotplug_images = "stg0"
                    images += " ${hotplug_images}"
                    boot_drive_stg0 = no
                    image_name_stg0 = /mnt/images/storage0
                    image_size_stg0 = 1G
                    remove_image_stg0 = yes
                    force_create_image_stg0 = yes
        - with_unplug:
            variants:
                - with_block:
                    # Boot with the extra disk attached, then hot-unplug it.
                    unplug_images = "stg0"
                    images += " ${unplug_images}"
                    image_name_stg0 = /mnt/images/storage0
                    image_size_stg0 = 1G
                    remove_image_stg0 = yes
                    force_create_image_stg0 = yes
    variants:
        - @default:
            reboot = yes
            # Two cluster nodes, one VM pinned to each node.
            nodes = node1 node2
            vms = vm1 vm2
            # NOTE(review): node1 uses operator "eq" while node2 uses "==" --
            # presumably aliases of the same comparison; confirm both
            # spellings are accepted by the selector parser or normalize.
            node_selectors_node1 = [{"key": "cpu_vendor_id", "operator": "eq", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
            node_selectors_node2 = [{"key": "cpu_vendor_id", "operator": "==", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
            vm_node_vm1 = node1
            vm_node_vm2 = node2
        - with_pool:
            reboot = yes
            nodes = node1 node2
            vms = vm1 vm2
            # NOTE(review): same "eq" vs "==" operator mix as above -- verify.
            node_selectors_node1 = [{"key": "cpu_vendor_id", "operator": "eq", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
            node_selectors_node2 = [{"key": "cpu_vendor_id", "operator": "==", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
            vm_node_vm1 = node1
            vm_node_vm2 = node2
            image_format = qcow2

            # Three storage pools: p1/p2 are node-local filesystem pools,
            # p3 is an NFS pool reachable from node2 (widened to both nodes
            # in the with_migration variant below).
            pools = p1 p2 p3
            # NOTE(review): 'node1'/'node2' below use single quotes unlike
            # the double-quoted strings elsewhere in this file -- confirm the
            # selector values are parsed quote-agnostically (e.g. via
            # literal_eval) rather than as strict JSON.
            pool_selectors_p1 = [{"key": "type", "operator": "==", "values": "filesystem"}, {"key": "access.nodes", "operator": "==", "values": ['node1']}]
            pool_selectors_p2 = [{"key": "type", "operator": "==", "values": "filesystem"}, {"key": "access.nodes", "operator": "==", "values": ['node2']}]
            pool_selectors_p3 = [{"key": "type", "operator": "==", "values": "nfs"}, {"key": "access.nodes", "operator": "contains", "values": "node2"}]
            image_pool_name_image1_vm1 = p1
            image_pool_name_image1_vm2 = p2
            variants:
                - @default:
                - with_migration:
                    # Migration requires identical CPU models, so the node
                    # selectors are narrowed from vendor-id to an exact
                    # cpu_model_name match.
                    node_selectors_node1 = [{"key": "cpu_model_name", "operator": "eq", "values": "Intel(R) Xeon(R) Silver 4116 CPU @ 2.10GHz"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
                    node_selectors_node2 = [{"key": "cpu_model_name", "operator": "==", "values": "Intel(R) Xeon(R) Silver 4116 CPU @ 2.10GHz"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
                    reboot = no
                    # Single VM (vm4) migrating node1 -> node2 from the
                    # shared NFS pool p3.
                    vms = vm4
                    vm_node_vm4 = node1
                    mig_dest_node_vm4 = node2
                    # NOTE(review): "values" here is a list for "contains",
                    # while the base with_pool p3 selector passes a plain
                    # string -- confirm the parser accepts both forms.
                    pool_selectors_p3 = [{"key": "type", "operator": "==", "values": "nfs"}, {"key": "access.nodes", "operator": "contains", "values": ["node1", "node2"]}]
                    image_pool_name_image1_vm4 = p3
                    image_name_image1_vm4 = /mnt/images/rhel950-64-virtio-scsi-ovmf.qcow2
                    image_format_image1_vm4 = qcow2
11 changes: 11 additions & 0 deletions qemu/tests/cfg/nic_hotplug.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,17 @@
requires_root = yes
type = migration_after_nichotplug
kill_vm = yes
take_regular_screendumps = no # FIXME:
store_vm_info = no # FIXME:
nodes = node1 node2
node_selectors_node1 = [{"key": "cpu_vendor_id", "operator": "eq", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
node_selectors_node2 = [{"key": "cpu_vendor_id", "operator": "==", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
pools = p1
vm_node = node1
mig_dest_node = node2
pool_selectors_p1 = [{"key": "type", "operator": "==", "values": "nfs"}, {"key": "access.nodes", "operator": "contains", "values": ["node1", "node2"]}]
image_pool_name_image1 = p1
image_name_image1 = /mnt/images/rhel950-64-virtio-scsi-ovmf.qcow2
variants:
- after_nichotplug:
with_unplug = no
Expand Down
3 changes: 2 additions & 1 deletion qemu/tests/hotplug_mem_migration.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,8 @@ def _compare_mem_size(online_mem, expect_mem_size):
# do migration
mig_timeout = params.get_numeric("mig_timeout", 1200, float)
mig_protocol = params.get("migration_protocol", "tcp")
vm.migrate(mig_timeout, mig_protocol, env=env)
dst_node = params.get("mig_dest_node")
vm.migrate(mig_timeout, mig_protocol, dest_host=dst_node, env=env)
for target_mem in target_mems.split():
hotplug_test.unplug_memory(vm, target_mem)
hotplug_test.check_memory(vm)
Expand Down
3 changes: 3 additions & 0 deletions qemu/tests/migration.py
Original file line number Diff line number Diff line change
Expand Up @@ -229,8 +229,10 @@ def guest_stress_deamon():
for func in pre_migrate:
func(vm, params, test)
if i % 2 == 0:
dst_node = params.get("mig_dest_node")
test.log.info("Round %s ping...", str(i / 2))
else:
dst_node = params.get("vm_node")
test.log.info("Round %s pong...", str(i / 2))
try:
vm.migrate(
Expand All @@ -239,6 +241,7 @@ def guest_stress_deamon():
mig_cancel_delay,
offline,
check,
dest_host=dst_node,
migration_exec_cmd_src=mig_exec_cmd_src,
migration_exec_cmd_dst=mig_exec_cmd_dst,
migrate_capabilities=capabilities,
Expand Down
3 changes: 2 additions & 1 deletion qemu/tests/migration_after_nichotplug.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,8 @@ def check_nic_is_empty():
set_link(nic_name, up=True)

error_context.context("Migrate from source VM to Destination VM", test.log.info)
vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, env=env)
dst_node = params.get("mig_dest_node")
vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, dest_host=dst_node, env=env)

if with_unplug:
error_context.context(
Expand Down
18 changes: 13 additions & 5 deletions qemu/tests/migration_with_file_transfer.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,17 +32,18 @@ def run(test, params, env):
host_path = "/tmp/file-%s" % utils_misc.generate_random_string(6)
host_path_returned = "%s-returned" % host_path
guest_path = params.get("guest_path", "/tmp/file")
file_size = params.get("file_size", "500")
file_size = params.get("file_size", "1000")
transfer_timeout = int(params.get("transfer_timeout", "240"))
if mig_protocol == "exec":
mig_file = os.path.join(
test.tmpdir, "tmp-%s" % utils_misc.generate_random_string(8)
)

cnt = 0
try:
process.run("dd if=/dev/urandom of=%s bs=1M count=%s" % (host_path, file_size))
process.run("dd if=/dev/zero of=%s bs=1M count=%s" % (host_path, file_size))

def run_and_migrate(bg):
def run_and_migrate(bg, cnt):
bg.start()
try:
while bg.is_alive():
Expand All @@ -54,21 +55,28 @@ def run_and_migrate(bg):
if mig_protocol == "exec" and migration_exec_cmd_src:
migration_exec_cmd_src %= mig_file # pylint: disable=E0606
migration_exec_cmd_dst %= mig_file
if cnt % 2 == 0:
dest_host = params.get("mig_dest_node")
else:
dest_host = params.get("vm_node")
vm.migrate(
mig_timeout,
mig_protocol,
mig_cancel_delay,
dest_host=dest_host,
env=env,
migration_exec_cmd_src=migration_exec_cmd_src,
migration_exec_cmd_dst=migration_exec_cmd_dst,
)
cnt += 1
except Exception:
# If something bad happened in the main thread, ignore
# exceptions raised in the background thread
bg.join(suppress_exception=True)
raise
else:
bg.join()
return cnt

error_context.context(
"transferring file to guest while migrating", test.log.info
Expand All @@ -78,7 +86,7 @@ def run_and_migrate(bg):
(host_path, guest_path),
dict(verbose=True, timeout=transfer_timeout),
)
run_and_migrate(bg)
cnt = run_and_migrate(bg, cnt)

error_context.context(
"transferring file back to host while migrating", test.log.info
Expand All @@ -88,7 +96,7 @@ def run_and_migrate(bg):
(guest_path, host_path_returned),
dict(verbose=True, timeout=transfer_timeout),
)
run_and_migrate(bg)
run_and_migrate(bg, cnt)

# Make sure the returned file is identical to the original one
error_context.context("comparing hashes", test.log.info)
Expand Down
12 changes: 11 additions & 1 deletion qemu/tests/migration_with_netperf.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,10 +124,20 @@ def run(test, params, env):
m_count = 0
while netperf_client_h.is_netperf_running():
m_count += 1
if m_count % 2 == 0:
dest_host = params.get("vm_node")
else:
dest_host = params.get("mig_dest_node")
error_context.context(
"Start migration iterations: %s " % m_count, test.log.info
)
vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, env=env)
vm.migrate(
mig_timeout,
mig_protocol,
mig_cancel_delay,
dest_host=dest_host,
env=env,
)
finally:
if netperf_server_g:
if netperf_server_g.is_server_running():
Expand Down
8 changes: 8 additions & 0 deletions qemu/tests/migration_with_reboot.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,22 +48,30 @@ def run(test, params, env):
bg = utils_misc.InterruptedThread(
vm.reboot, kwargs={"session": session, "timeout": login_timeout}
)
bg.daemon = True
bg.start()
try:
cnt = 0
while bg.is_alive():
for func in pre_migrate:
func(vm, params, test)
if cnt % 2 == 0:
dest_host = params.get("mig_dest_node")
else:
dest_host = params.get("vm_node")
vm.migrate(
mig_timeout,
mig_protocol,
mig_cancel_delay,
dest_host=dest_host,
env=env,
migration_exec_cmd_src=migration_exec_cmd_src,
migration_exec_cmd_dst=migration_exec_cmd_dst,
)
# run some functions after migrate finish.
for func in post_migrate:
func(vm, params, test)
cnt += 1
except Exception:
# If something bad happened in the main thread, ignore exceptions
# raised in the background thread
Expand Down
5 changes: 4 additions & 1 deletion qemu/tests/migration_with_speed_measurement.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,10 @@ def get_migration_statistic(vm):
time.sleep(2)

clonevm = vm.migrate(
mig_timeout, mig_protocol, not_wait_for_migration=True, env=env
mig_timeout, mig_protocol,
not_wait_for_migration=True,
dest_host=params.get("mig_dest_node"),
env=env
)

mig_speed = int(float(utils_misc.normalize_data_size(mig_speed, "M")))
Expand Down
Loading
Loading