Skip to content

Commit 7a7c8c3

Browse files
authored
Merge branch 'master' into KVMAUTOMA-3534-cpuload
2 parents fd0cfe1 + 2639fb1 commit 7a7c8c3

20 files changed

+713
-76
lines changed

generic/tests/cfg/netperf.cfg

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
# bridge_nic1 =
3535
#numa configuration
3636
netperf_with_numa = yes
37+
vdpa_add_flows = yes
3738
# configure netperf test parameters, some seconds will be took to
3839
# wait all the clients work, this wait time should be less than
3940
# 0.5 * l, the wait time will augment if you have more
@@ -88,7 +89,7 @@
8889
# log_guestinfo_script = scripts/rh_perf_log_guestinfo_script.bat
8990
# log_guestinfo_exec = cmd /c
9091
# log_guestinfo_path = C:\log_guestinfo.bat
91-
server_mtu_cmd = "netsh interface ipv4 set subinterface "%s" mtu=%s"
92+
server_mtu_cmd = "netsh interface ipv4 set interface "%s" mtu=%s"
9293
i386, x86_64:
9394
cpu_model_flags = ",hv_time,hv_relaxed,hv_vapic,hv_spinlocks=0xfff"
9495
windows_disable_firewall = "netsh advfirewall set allprofiles state off"

generic/tests/netperf.py

Lines changed: 12 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
virt_vm,
1616
)
1717

18-
from provider import netperf_base, win_driver_utils
18+
from provider import netperf_base, vdpa_utils, win_driver_utils
1919

2020
LOG_JOB = logging.getLogger("avocado.test")
2121

@@ -62,6 +62,7 @@ def mtu_set(mtu):
6262
server_mtu_cmd = params.get("server_mtu_cmd")
6363
client_mtu_cmd = params.get("client_mtu_cmd")
6464
host_mtu_cmd = params.get("host_mtu_cmd")
65+
client_physical_nic = params.get("client_physical_nic")
6566
error_context.context("Changing the MTU of guest", test.log.info)
6667
if params.get("os_type") == "linux":
6768
ethname = utils_net.get_linux_ifname(server_ctl, mac)
@@ -73,9 +74,8 @@ def mtu_set(mtu):
7374
netperf_base.ssh_cmd(server_ctl, server_mtu_cmd % (connection_id, mtu))
7475

7576
error_context.context("Changing the MTU of client", test.log.info)
76-
netperf_base.ssh_cmd(
77-
client, client_mtu_cmd % (params.get("client_physical_nic"), mtu)
78-
)
77+
if client_physical_nic:
78+
netperf_base.ssh_cmd(client, client_mtu_cmd % (client_physical_nic, mtu))
7979

8080
netdst = params.get("netdst", "switch")
8181
host_bridges = utils_net.Bridge()
@@ -84,52 +84,13 @@ def mtu_set(mtu):
8484
if netdst in br_in_use:
8585
ifaces_in_use = host_bridges.list_iface()
8686
target_ifaces = list(ifaces_in_use + br_in_use)
87-
if (
88-
process.system(
89-
"which ovs-vsctl && systemctl status openvswitch.service",
90-
ignore_status=True,
91-
shell=True,
92-
)
93-
== 0
94-
):
95-
ovs_br_all = netperf_base.ssh_cmd(host, "ovs-vsctl list-br")
96-
ovs_br = []
97-
if ovs_br_all:
98-
for nic in vm.virtnet:
99-
if nic.netdst in ovs_br_all:
100-
ovs_br.append(nic.netdst)
101-
elif nic.nettype == "vdpa":
102-
vf_pci = netperf_base.ssh_cmd(
103-
host,
104-
"vdpa dev show |grep %s | grep -o 'pci/[^[:space:]]*' | "
105-
"awk -F/ '{print $2}'" % nic.netdst,
106-
)
107-
pf_pci = netperf_base.ssh_cmd(
108-
host,
109-
"grep PCI_SLOT_NAME /sys/bus/pci/devices/%s/physfn/uevent |"
110-
" cut -d'=' -f2" % vf_pci,
111-
)
112-
port = netperf_base.ssh_cmd(
113-
host, "ls /sys/bus/pci/devices/%s/net/ | head -n 1" % pf_pci
114-
)
115-
ovs_br_vdpa = netperf_base.ssh_cmd(
116-
host, "ovs-vsctl port-to-br %s" % port
117-
)
118-
cmd = (
119-
f"ovs-ofctl add-flow {ovs_br_vdpa} '"
120-
"in_port=1,idle_timeout=0 actions=output:2'"
121-
)
122-
cmd += (
123-
f"&& ovs-ofctl add-flow {ovs_br_vdpa} '"
124-
"in_port=2,idle_timeout=0 actions=output:1'"
125-
)
126-
cmd += "&& ovs-ofctl dump-flows {}".format(ovs_br_vdpa)
127-
netperf_base.ssh_cmd(host, cmd)
128-
ovs_br.append(ovs_br_vdpa)
129-
for br in ovs_br:
130-
ovs_list = "ovs-vsctl list-ports %s" % br
131-
ovs_port = netperf_base.ssh_cmd(host, ovs_list)
132-
target_ifaces.extend(ovs_port.split() + [br])
87+
88+
add_flows = params.get("vdpa_ovs_add_flows", "yes") == "yes"
89+
ovs_handler = vdpa_utils.OVSHandler(vm)
90+
target_ifaces.extend(
91+
ovs_handler.get_vdpa_ovs_info(add_flows=add_flows, return_ports=True)
92+
)
93+
13394
if vm.virtnet[0].nettype == "macvtap":
13495
target_ifaces.extend([vm.virtnet[0].netdst, vm.get_ifname(0)])
13596
error_context.context("Change all Bridge NICs MTU to %s" % mtu, test.log.info)
@@ -339,6 +300,7 @@ def mtu_set(mtu):
339300
client = params.get("client", "localhost")
340301
client_ip = client
341302
clients = []
303+
client_pub_ip = None
342304
# client session 1 for control, session 2 for data communication
343305
for i in range(2):
344306
if client in params.get("vms"):

provider/vdpa_utils.py

Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
1+
import logging
2+
3+
from avocado.utils import process
4+
from virttest import openvswitch, utils_net
5+
6+
LOG_JOB = logging.getLogger("avocado.test")
7+
8+
9+
def check_ovs_status():
10+
"""
11+
Check if ovs-vsctl and openvswitch service are installed and running.
12+
:return: True if both are available and running, otherwise False
13+
:rtype: bool
14+
"""
15+
cmd = "which ovs-vsctl && systemctl status openvswitch.service"
16+
return process.system(cmd, ignore_status=True, shell=True) == 0
17+
18+
19+
def get_vf_pci_address(nic_netdst):
20+
"""
21+
Get vf pci address from a given network destination.
22+
23+
:param nic_netdst: Network destination address
24+
:type nic_netdst: str
25+
26+
:return: VF pci address
27+
:rtype: str
28+
"""
29+
cmd = (
30+
"vdpa dev show | grep {0} | grep -o 'pci/[^[:space:]]*' | "
31+
"awk -F/ '{{print $2}}'"
32+
).format(nic_netdst)
33+
return process.system_output(cmd, shell=True).decode().strip()
34+
35+
36+
def get_pf_pci_address(vf_pci):
37+
"""
38+
Get pf pci address using vf pci address.
39+
40+
:param vf_pci: VF pci address
41+
:type vf_pci: str
42+
43+
:return: PF pci address
44+
:rtype: str
45+
"""
46+
cmd = (
47+
"grep PCI_SLOT_NAME /sys/bus/pci/devices/{0}/physfn/uevent | cut -d'=' -f2"
48+
).format(vf_pci)
49+
return process.system_output(cmd, shell=True).decode().strip()
50+
51+
52+
def get_pf_port(pf_pci):
53+
"""
54+
Get the port for the pf pci address.
55+
56+
:param pf_pci: PF pci address
57+
:type pf_pci: str
58+
59+
:return: Port name
60+
:rtype: str
61+
"""
62+
cmd = "ls /sys/bus/pci/devices/{0}/net/ | head -n 1".format(pf_pci)
63+
return process.system_output(cmd, shell=True).decode().strip()
64+
65+
66+
def add_flows_to_ovs_bridge(bridge, ovs):
67+
"""
68+
Add flow rules to the given ovs bridge.
69+
70+
:param bridge: OVS bridge name
71+
:type bridge: str
72+
:param ovs: OVS instance
73+
:type ovs: OpenVSwitch
74+
"""
75+
utils_net.openflow_manager(
76+
bridge, "add-flow", flow_options="in_port=1,idle_timeout=0,actions=output:2"
77+
)
78+
utils_net.openflow_manager(
79+
bridge, "add-flow", flow_options="in_port=2,idle_timeout=0,actions=output:1"
80+
)
81+
utils_net.openflow_manager(bridge, "dump-flows")
82+
83+
84+
class OVSHandler:
85+
def __init__(self, vm):
86+
self.vm = vm
87+
if check_ovs_status():
88+
self.ovs = openvswitch.OpenVSwitchControl()
89+
else:
90+
self.ovs = None
91+
92+
def get_vdpa_ovs_info(self, add_flows=True, return_ports=True):
93+
"""
94+
Get OVS bridge and port information.
95+
96+
:param add_flows: Whether to add flow rules to the ovs bridge
97+
:type add_flows: bool
98+
:param return_ports: Whether to return port names
99+
:type return_ports: bool
100+
101+
:return: list of target interfaces(bridges and ports) if return_ports is True,
102+
else empty list
103+
:rtype: list
104+
"""
105+
if not self.ovs:
106+
LOG_JOB.error("Could not find existing Open vSwitch service")
107+
return []
108+
109+
target_ifaces = []
110+
111+
for nic in self.vm.virtnet:
112+
ovs_br = None
113+
if nic.nettype == "vdpa":
114+
vf_pci = get_vf_pci_address(nic.netdst)
115+
pf_pci = get_pf_pci_address(vf_pci)
116+
port = get_pf_port(pf_pci)
117+
manager, ovs_br = utils_net.find_current_bridge(port)
118+
else:
119+
try:
120+
manager, ovs_br = utils_net.find_current_bridge(nic.netdst)
121+
except NotImplementedError:
122+
ovs_br = None
123+
if ovs_br:
124+
if add_flows:
125+
add_flows_to_ovs_bridge(ovs_br, self.ovs)
126+
if return_ports:
127+
if manager:
128+
ports = set(manager.list_ports(ovs_br))
129+
target_ifaces.extend(ports)
130+
target_ifaces.append(ovs_br)
131+
132+
return target_ifaces

qemu/tests/balloon_check.py

Lines changed: 15 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -649,15 +649,25 @@ def run(test, params, env):
649649
session = balloon_test.vm.wait_for_login()
650650
session.cmd_output_safe(memhog_cmd)
651651
res2 = float(normalize_data_size(process.getoutput(get_res_cmd)))
652-
time.sleep(30)
653-
res3 = float(normalize_data_size(process.getoutput(get_res_cmd)))
652+
release_timeout = int(params.get("balloon_release_time", 30))
653+
654+
def res_recovered():
655+
res3 = float(normalize_data_size(process.getoutput(get_res_cmd)))
656+
test.log.debug("Polling RES: %.2fM", res3)
657+
return abs(res3 - res1) <= res1 * 0.1
658+
659+
if not utils_misc.wait_for(res_recovered, timeout=release_timeout, step=5):
660+
test.fail("QEMU should consume same memory as before memhog")
661+
res4 = float(normalize_data_size(process.getoutput(get_res_cmd)))
654662
test.log.info(
655-
"The RES values are %sM, %sM, and %sM sequentially", res1, res2, res3
663+
"The RES values are %sM, %sM, and %sM sequentially", res1, res2, res4
656664
)
657665
if res2 - res1 < consumed_mem * 0.5:
658666
test.error("QEMU should consume more memory")
659-
if res3 - res1 > res1 * 0.1:
660-
test.fail("QEMU should consume same memory as before memhog ")
667+
catch_call_trace_cmd = params.get("catch_call_trace")
668+
call_trace = session.cmd_output_safe(catch_call_trace_cmd)
669+
if call_trace:
670+
test.fail("There's a Call trace:%s" % call_trace)
661671
# for windows guest, disable/uninstall driver to get memory leak based on
662672
# driver verifier is enabled
663673
if params.get("os_type") == "windows":

qemu/tests/cfg/balloon_check.cfg

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,10 @@
3030
no RHEL.8.3 RHEL.8.2 RHEL.8.1 RHEL.8.0 RHEL.7
3131
balloon_opt_free_page_reporting = yes
3232
get_res_cmd = "top -n1 -b -p %s | grep qemu-kvm | awk -F ' ' '{print $6}'"
33+
catch_call_trace = "dmesg -T | grep -i "Call Trace" -A 20"
34+
s390x:
35+
get_res_cmd = "ps -p %s -o rss= | awk '{printf "%%.2fM\n", $1/1024}'"
36+
balloon_release_time = 300
3337
consumed_mem = 2G
3438
ppc64,ppc64le:
3539
consumed_mem = 4G

qemu/tests/cfg/check_block_size.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
kill_vm_on_error = yes
1111
index_enable = no
1212
image_name_stg = "images/check_block_size_image"
13-
image_size_stg = 20G
13+
image_size_stg = 30G
1414
image_verify_bootable = no
1515
force_create_image_stg = yes
1616
drive_serial_stg = "TARGET_DISK0"

qemu/tests/cfg/dump_guest_core.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
gdb_command_file = "/home/gdb_command"
1515
crash_script = "/home/crash.cmd"
1616
vmcore_file = "/tmp/vmcore"
17-
gdb_command = "gdb --core ${core_file}%s --command=${gdb_command_file}"
17+
gdb_command = "gdb /usr/libexec/qemu-kvm --core ${core_file}%s --command=${gdb_command_file}"
1818
crash_cmd = "crash -i ${crash_script} /usr/lib/debug/lib/modules/%s/vmlinux ${vmcore_file}"
1919
dump_guest_memory_file = "/usr/share/qemu-kvm/dump-guest-memory.py"
2020
check_vmcore = 'yes'

qemu/tests/cfg/mlock_on_fault.cfg

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
- mlock_on_fault:
2+
virt_test_type = qemu
3+
type = mlock_on_fault
4+
required_qemu = [10.0,)
5+
vms = ""
6+
extra_qemu_options = ""
7+
s390x:
8+
extra_qemu_options = "--no-shutdown -nographic"
9+
qemu_cmd_memlock = "%s -enable-kvm -overcommit mem-lock=%s -cpu host ${extra_qemu_options}"
10+
memhog_extra_options = "-m %dG -object memory-backend-ram,id=mem0,size=%dG,prealloc=on"
11+
qemu_cmd_memhog = "${qemu_cmd_memlock} ${memhog_extra_options} ${extra_qemu_options}"
12+
memhog_cmd = "memhog %dG"
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
- netkvm_buffer_shortage:
2+
virt_test_type = qemu
3+
type = netkvm_buffer_shortage
4+
only Windows
5+
only virtio_net
6+
vhost = on
7+
timeout = 360
8+
cdroms += " virtio"
9+
vms += " vm2"
10+
image_snapshot = yes
11+
start_vm = yes
12+
start_vm_vm2 = no
13+
smp = 2
14+
queues = ${smp}
15+
vectors = 1024
16+
port_num = 12345
17+
copy_dest = "C:\"
18+
server_script = "server.py"
19+
client_script = "client.py"
20+
check_live_python = "tasklist | findstr /i python"
21+
copy_all_cmd = 'xcopy "WIN_UTILS:\packet_loss_scripts\*" ${copy_dest}'
22+
i386:
23+
psutil_whl = "psutil-6.1.1-cp37-abi3-win32.whl"
24+
x86_64:
25+
psutil_whl = "psutil-6.1.1-cp37-abi3-win_amd64.whl"
26+
pip_cmd = "py -m pip install ${psutil_whl}"
27+
dest_location = "pushd ${copy_dest}"
28+
server_cmd = "start cmd /c py ${server_script} ${port_num}"
29+
client_cmd = "start cmd /c py ${client_script} 99999 %s ${port_num}"
30+
param_name = "MinRxBufferPercent"
31+
param_values = "0 25 50 75 100"

0 commit comments

Comments
 (0)