# SPDX-License-Identifier: AGPL-3.0-or-later
#
# PASST - Plug A Simple Socket Transport
# for qemu/UNIX domain socket mode
#
# PASTA - Pack A Subtle Tap Abstraction
# for network namespace/tap device mode
#
# test/perf/passt_tcp - Check TCP performance in passt mode
#
# Copyright (c) 2021 Red Hat GmbH
# Author: Stefano Brivio <sbrivio@redhat.com>
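#
# Tools required in the guest (gtools), in the namespace (nstools) and on the
# host (htools)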
gtools /sbin/sysctl ip jq nproc seq sleep iperf3 tcp_rr tcp_crr # From neper
nstools /sbin/sysctl ip jq nproc seq sleep iperf3 tcp_rr tcp_crr
htools bc head sed seq
# In this setup, the virtio_net TX queue sometimes hangs; still under investigation
def virtio_net_workaround
guest modprobe -r virtio_net; modprobe virtio_net napi_tx=1; ip link set dev eth0 up; dhclient -4; dhclient -6; sleep 3
endef
test passt: throughput and latency
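# Raise socket buffer limits and defaults in the guest and in the namespace,
# and disable TCP timestamps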
guest /sbin/sysctl -w net.core.rmem_max=536870912
guest /sbin/sysctl -w net.core.wmem_max=536870912
guest /sbin/sysctl -w net.core.rmem_default=33554432
guest /sbin/sysctl -w net.core.wmem_default=33554432
guest /sbin/sysctl -w net.ipv4.tcp_rmem="4096 131072 268435456"
guest /sbin/sysctl -w net.ipv4.tcp_wmem="4096 131072 268435456"
guest /sbin/sysctl -w net.ipv4.tcp_timestamps=0
ns /sbin/sysctl -w net.ipv4.tcp_rmem="4096 524288 134217728"
ns /sbin/sysctl -w net.ipv4.tcp_wmem="4096 524288 134217728"
ns /sbin/sysctl -w net.ipv4.tcp_timestamps=0
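# Default gateway addresses and Ethernet interface name, as seen by the guest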
gout GW ip -j -4 route show|jq -rM '.[] | select(.dst == "default").gateway'
gout GW6 ip -j -6 route show|jq -rM '.[] | select(.dst == "default").gateway'
gout IFNAME ip -j link show | jq -rM '.[] | select(.link_type == "ether").ifname'
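# Nominal CPU frequency in GHz, from cpufreq if available, falling back to
# /proc/cpuinfo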
hout FREQ_PROCFS (echo "scale=1"; sed -n 's/cpu MHz.*: \([0-9]*\)\..*$/(\1+10^2\/2)\/10^3/p' /proc/cpuinfo) | bc -l | head -n1
hout FREQ_CPUFREQ (echo "scale=1"; printf '( %i + 10^5 / 2 ) / 10^6\n' $(cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq) ) | bc -l
hout FREQ [ -n "__FREQ_CPUFREQ__" ] && echo __FREQ_CPUFREQ__ || echo __FREQ_PROCFS__
set THREADS 1
set STREAMS 8
set TIME 10
hout OMIT echo __TIME__ / 6 | bc -l
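# iperf3 client options: zero-copy sends, __STREAMS__ parallel streams, 1 MiB
# buffers, with the first __TIME__ / 6 seconds omitted from the results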
set OPTS -Z -P __STREAMS__ -l 1M -O__OMIT__ --pacing-timer 1000000
info Throughput in Gbps, latency in µs, one thread at __FREQ__ GHz, __STREAMS__ streams
report passt tcp __THREADS__ __FREQ__
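# Report table columns: one per MTU under test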
th MTU 256B 576B 1280B 1500B 9000B 65520B
tr TCP throughput over IPv6: guest to host
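# The minimum IPv6 MTU is 1280 bytes: leave the 256B and 576B columns empty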
bw -
bw -
virtio_net_workaround
guest ip link set dev __IFNAME__ mtu 1280
iperf3 BW guest ns __GW6__%__IFNAME__ 100${i}2 __THREADS__ __TIME__ __OPTS__ -w 4M
bw __BW__ 1.2 1.5
virtio_net_workaround
guest ip link set dev __IFNAME__ mtu 1500
iperf3 BW guest ns __GW6__%__IFNAME__ 100${i}2 __THREADS__ __TIME__ __OPTS__ -w 4M
bw __BW__ 1.6 1.8
virtio_net_workaround
guest ip link set dev __IFNAME__ mtu 9000
iperf3 BW guest ns __GW6__%__IFNAME__ 100${i}2 __THREADS__ __TIME__ __OPTS__ -w 8M
bw __BW__ 4.0 5.0
virtio_net_workaround
guest ip link set dev __IFNAME__ mtu 65520
iperf3 BW guest ns __GW6__%__IFNAME__ 100${i}2 __THREADS__ __TIME__ __OPTS__ -w 16M
bw __BW__ 7.0 8.0
tl TCP RR latency over IPv6: guest to host
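# Latency is measured at the current MTU only, in the last column: start the
# tcp_rr server in the namespace, then run the client in the guest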
lat -
lat -
lat -
lat -
lat -
nsb tcp_rr --nolog -6
gout LAT tcp_rr --nolog -6 -c -H __GW6__%__IFNAME__ | sed -n 's/^throughput=\(.*\)/\1/p'
lat __LAT__ 200 150
tl TCP CRR latency over IPv6: guest to host
lat -
lat -
lat -
lat -
lat -
nsb tcp_crr --nolog -6
gout LAT tcp_crr --nolog -6 -c -H __GW6__%__IFNAME__ | sed -n 's/^throughput=\(.*\)/\1/p'
lat __LAT__ 500 400
tr TCP throughput over IPv4: guest to host
virtio_net_workaround
guest ip link set dev __IFNAME__ mtu 256
iperf3 BW guest ns __GW__ 100${i}2 __THREADS__ __TIME__ __OPTS__ -w 1M
bw __BW__ 0.2 0.3
virtio_net_workaround
guest ip link set dev __IFNAME__ mtu 576
iperf3 BW guest ns __GW__ 100${i}2 __THREADS__ __TIME__ __OPTS__ -w 1M
bw __BW__ 0.5 0.8
virtio_net_workaround
guest ip link set dev __IFNAME__ mtu 1280
iperf3 BW guest ns __GW__ 100${i}2 __THREADS__ __TIME__ __OPTS__ -w 4M
bw __BW__ 1.2 1.5
virtio_net_workaround
guest ip link set dev __IFNAME__ mtu 1500
iperf3 BW guest ns __GW__ 100${i}2 __THREADS__ __TIME__ __OPTS__ -w 4M
bw __BW__ 1.6 1.8
virtio_net_workaround
guest ip link set dev __IFNAME__ mtu 9000
iperf3 BW guest ns __GW__ 100${i}2 __THREADS__ __TIME__ __OPTS__ -w 8M
bw __BW__ 4.0 5.0
virtio_net_workaround
guest ip link set dev __IFNAME__ mtu 65520
iperf3 BW guest ns __GW__ 100${i}2 __THREADS__ __TIME__ __OPTS__ -w 16M
bw __BW__ 7.0 8.0
tl TCP RR latency over IPv4: guest to host
lat -
lat -
lat -
lat -
lat -
nsb tcp_rr --nolog -4
gout LAT tcp_rr --nolog -4 -c -H __GW__ | sed -n 's/^throughput=\(.*\)/\1/p'
lat __LAT__ 200 150
tl TCP CRR latency over IPv4: guest to host
lat -
lat -
lat -
lat -
lat -
nsb tcp_crr --nolog -4
gout LAT tcp_crr --nolog -4 -c -H __GW__ | sed -n 's/^throughput=\(.*\)/\1/p'
lat __LAT__ 500 400
tr TCP throughput over IPv6: host to guest
bw -
bw -
ns ip link set dev lo mtu 1280
iperf3 BW ns guest ::1 100${i}1 __THREADS__ __TIME__ __OPTS__
bw __BW__ 1.0 1.2
ns ip link set dev lo mtu 1500
iperf3 BW ns guest ::1 100${i}1 __THREADS__ __TIME__ __OPTS__
bw __BW__ 2.0 3.0
ns ip link set dev lo mtu 9000
iperf3 BW ns guest ::1 100${i}1 __THREADS__ __TIME__ __OPTS__
bw __BW__ 5.0 6.0
ns ip link set dev lo mtu 65520
iperf3 BW ns guest ::1 100${i}1 __THREADS__ __TIME__ __OPTS__
bw __BW__ 6.0 6.8
ns ip link set dev lo mtu 65535
tl TCP RR latency over IPv6: host to guest
lat -
lat -
lat -
lat -
lat -
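# Starting the server in the guest takes longer than starting the client in
# the namespace: give it time to listen before connecting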
guestb tcp_rr --nolog -P 10001 -C 10011 -6
sleep 1
nsout LAT tcp_rr --nolog -P 10001 -C 10011 -6 -c -H ::1 | sed -n 's/^throughput=\(.*\)/\1/p'
lat __LAT__ 200 150
tl TCP CRR latency over IPv6: host to guest
lat -
lat -
lat -
lat -
lat -
guestb tcp_crr --nolog -P 10001 -C 10011 -6
sleep 1
nsout LAT tcp_crr --nolog -P 10001 -C 10011 -6 -c -H ::1 | sed -n 's/^throughput=\(.*\)/\1/p'
lat __LAT__ 500 350
tr TCP throughput over IPv4: host to guest
ns ip link set dev lo mtu 256
iperf3 BW ns guest 127.0.0.1 100${i}1 __THREADS__ __TIME__ __OPTS__
bw __BW__ 0.3 0.5
ns ip link set dev lo mtu 576
iperf3 BW ns guest 127.0.0.1 100${i}1 __THREADS__ __TIME__ __OPTS__
bw __BW__ 0.5 1.0
ns ip link set dev lo mtu 1280
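# Setting an MTU below 1280 bytes dropped IPv6 addresses from lo: add ::1 back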
ns ip addr add ::1 dev lo
iperf3 BW ns guest 127.0.0.1 100${i}1 __THREADS__ __TIME__ __OPTS__
bw __BW__ 2.0 3.0
ns ip link set dev lo mtu 1500
iperf3 BW ns guest 127.0.0.1 100${i}1 __THREADS__ __TIME__ __OPTS__
bw __BW__ 2.0 3.0
ns ip link set dev lo mtu 9000
iperf3 BW ns guest 127.0.0.1 100${i}1 __THREADS__ __TIME__ __OPTS__
bw __BW__ 5.0 6.0
ns ip link set dev lo mtu 65520
iperf3 BW ns guest 127.0.0.1 100${i}1 __THREADS__ __TIME__ __OPTS__
bw __BW__ 6.0 6.8
ns ip link set dev lo mtu 65535
tl TCP RR latency over IPv4: host to guest
lat -
lat -
lat -
lat -
lat -
guestb tcp_rr --nolog -P 10001 -C 10011 -4
sleep 1
nsout LAT tcp_rr --nolog -P 10001 -C 10011 -4 -c -H 127.0.0.1 | sed -n 's/^throughput=\(.*\)/\1/p'
lat __LAT__ 200 150
tl TCP CRR latency over IPv4: host to guest
lat -
lat -
lat -
lat -
lat -
guestb tcp_crr --nolog -P 10001 -C 10011 -4
sleep 1
nsout LAT tcp_crr --nolog -P 10001 -C 10011 -4 -c -H 127.0.0.1 | sed -n 's/^throughput=\(.*\)/\1/p'
lat __LAT__ 500 300
te