Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add case for migration with numa topology #5964

Draft
wants to merge 1 commit into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# Migration test matrix for a guest defined with an explicit NUMA topology:
# the guest is defined with vcpu/memory/numa-cell attributes, migrated, and
# the per-node vcpu layout is verified on both source and destination hosts.
# NOTE(review): indentation appears to have been stripped from this copy of
# the cartesian config — confirm nesting against the original file.
- guest_numa_topology.migration_with_numa_topology:
type = migration_with_numa_topology
only aarch64
start_vm = 'no'
migration_setup = 'yes'
storage_type = 'nfs'
setup_local_nfs = 'yes'
disk_type = "file"
disk_source_protocol = "netfs"
mnt_path_name = ${nfs_mount_dir}
# Console output can only be monitored via virsh console output
only_pty = True
take_regular_screendumps = no
# Extra options to pass after <domain> <desturi>
virsh_migrate_extra = ''
# SSH connection time out
ssh_timeout = 60
# Local URI
virsh_migrate_connect_uri = 'qemu:///system'
virsh_migrate_dest_state = "running"
# With --undefinesource the domain no longer exists on the source host,
# so looking it up there is expected to fail with this message
virsh_migrate_src_state = "failed to get domain"
image_convert = 'no'
server_ip = "${migrate_dest_host}"
server_user = "root"
server_pwd = "${migrate_dest_pwd}"
status_error = "no"
migrate_vm_back = "no"
migrateuri = "tcp://${migrate_dest_host}"
dest_persist_xml = "yes"
dest_xml = "yes"
# Bandwidth cap passed to virsh migrate --bandwidth (MiB/s per virsh docs —
# confirm against the virsh man page of the tested libvirt version)
migration_bandwidth = "1000"
base_migration_options = "--live --p2p --persistent --undefinesource --bandwidth ${migration_bandwidth}"
# Expected cpu ids per guest numa node; a node with no "node_<i>_cpu" entry
# is expected by the test code to report an empty cpu list (memory-only node)
node_0_cpu = 0-1
node_1_cpu = 2-3
node_num = 4
vcpu_memory_dict = "'vcpu': 8, 'memory_unit':'KiB','memory':8388608,'current_mem':8388608,'current_mem_unit':'KiB'"
variants:
# 2 sockets x 2 clusters x 2 cores: each numa node gets cpus from one cluster
- one_cluster_on_numa:
topology_dict = {'sockets': '2', 'clusters':'2', 'cores': '2', 'threads': '1'}
node_2_cpu = 4-5
node_3_cpu = 6-7
numa_list = "[{'id': '0', 'cpus': '${node_0_cpu}', 'memory': '2097152', 'unit': 'KiB'}, {'id': '1', 'cpus': '${node_1_cpu}', 'memory': '2097152', 'unit': 'KiB'}, {'id': '2', 'cpus': '${node_2_cpu}', 'memory': '2097152', 'unit': 'KiB'}, {'id': '3', 'cpus': '${node_3_cpu}', 'memory': '2097152', 'unit': 'KiB'}]"
# 1 socket x 4 clusters x 2 cores: node 2 takes cpus 4-7, node 3 has no
# 'cpus' entry in numa_list (memory-only numa node)
- multi_cluster_on_numa:
topology_dict = {'sockets': '1', 'clusters':'4', 'cores': '2', 'threads': '1'}
node_2_cpu = 4-7
numa_list = "[{'id': '0', 'cpus': '${node_0_cpu}', 'memory': '2097152', 'unit': 'KiB'}, {'id': '1', 'cpus': '${node_1_cpu}', 'memory': '2097152', 'unit': 'KiB'}, {'id': '2', 'cpus': '${node_2_cpu}', 'memory': '2097152', 'unit': 'KiB'}, {'id': '3', 'memory': '2097152', 'unit': 'KiB'}]"
variants:
- base_options:
add_options = ""
# NOTE(review): "addtional" is a typo for "additional"; kept as-is because
# renaming the variant would change the generated test id
- addtional_options:
migration_connections = 3
add_options = "--auto-converge --parallel --parallel-connections ${migration_connections} --tls"
variants:
- precopy:
copy_type = ""
# Postcopy is only combined with the base option set
- postcopy:
only base_options
copy_type = "--postcopy"
vm_attrs = {${vcpu_memory_dict}, 'cpu': {'numa_cell': ${numa_list}}}
virsh_migrate_options = "${base_migration_options} ${add_options} ${copy_type}"

Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright Red Hat
#
# SPDX-License-Identifier: GPL-2.0
#
# Author: Liang Cong <lcong@redhat.com>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

from virttest import utils_misc
from virttest.migration_template import MigrationTemplate
from virttest.libvirt_xml import vm_xml


class MigrationWithNumaTopology(MigrationTemplate):
    """
    Migration test for a guest defined with a NUMA topology.

    The guest is defined with the configured vcpu/memory/numa attributes,
    the per-node cpu assignment inside the guest is checked after start,
    and checked again on the destination host after migration.
    """

    # NOTE: the previous explicit __init__ only forwarded all arguments to
    # super().__init__ and has been removed as redundant.

    def _check_guest_numa_cpu(self, session):
        """
        Compare the cpu ids reported on each guest numa node with the
        expected "node_<i>_cpu" params.

        :param session: ShellSession object logged into the guest
        :raises: test failure when the actual per-node cpu lists differ
                 from the expected ones
        """
        node_num = int(self.params.get("node_num"))
        exp_nodes_cpu_list = []
        for i in range(node_num):
            cpu_str = self.params.get("node_%s_cpu" % i)
            # A node without a "node_<i>_cpu" param is expected to report
            # no cpus at all (memory-only numa node)
            exp_nodes_cpu_list.append(
                utils_misc.cpu_str_to_list(cpu_str) if cpu_str else [])
        guest_numa_info = utils_misc.NumaInfo(session=session)
        act_nodes_cpu_list = []
        for node_index in range(len(guest_numa_info.nodes)):
            cpu_list = list(map(int, guest_numa_info.nodes[node_index].cpus))
            act_nodes_cpu_list.append(cpu_list)
            # Lazy %-style args: message is only built if debug is enabled
            self.test.log.debug("guest node %s has cpus: %s",
                                node_index, cpu_list)
        if exp_nodes_cpu_list != act_nodes_cpu_list:
            self.test.fail("Expect numa nodes cpu list is %s, but get %s"
                           % (exp_nodes_cpu_list, act_nodes_cpu_list))

    def _pre_start_vm(self):
        """
        Operation before start guest on source host:
        Define the guest with the configured vm attributes and cpu topology
        """
        # NOTE: eval() on params is the established framework convention;
        # the values come from the trusted test configuration, not from
        # untrusted external input
        vm_attrs = eval(self.params.get("vm_attrs", "{}"))
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            self.migrate_main_vm_name.strip())
        vmxml.setup_attrs(**vm_attrs)
        vmxml.cpu.topology = eval(self.params.get("topology_dict", "{}"))
        vmxml.sync()

    def _post_start_vm(self):
        """
        Operation after start guest on source host:
        Check guest numa cpu after guest starts
        """
        session = self.main_vm.wait_for_login()
        try:
            self._check_guest_numa_cpu(session)
        finally:
            # Close the session even when the numa check fails the test
            session.close()

    def _post_migrate(self):
        """
        Operation after migration:
        Check guest numa cpu on the destination host
        """
        backup_uri = self.main_vm.connect_uri
        self.main_vm.connect_uri = self.dest_uri
        try:
            # Re-create the serial console so it tracks the migrated guest
            self.main_vm.cleanup_serial_console()
            self.main_vm.create_serial_console()
            session = self.main_vm.wait_for_serial_login()
            try:
                self._check_guest_numa_cpu(session)
            finally:
                session.close()
        finally:
            # Restore the source URI even if the numa check raises
            # (test.fail raises, and the original code then skipped the
            # connect_uri restore, leaving the vm object pointing at dest)
            self.main_vm.connect_uri = backup_uri


def run(test, params, env):
    """
    Migrate a guest carrying a NUMA topology and verify its cpu layout.

    Steps:
    1. Assign cpu topology with numa node
    2. Start guest
    3. Verify the guest cpu assignment for numa nodes on src host
    4. Do migration
    5. Verify the guest cpu assignment for numa nodes on dest host
    """
    test_runner = MigrationWithNumaTopology(test, env, params)
    try:
        test_runner.runtest()
    finally:
        # Cleanup runs regardless of the test outcome
        test_runner.cleanup()
Loading