passt: Relicense to GPL 2.0, or any later version
In practical terms, passt doesn't benefit from the additional
protection offered by the AGPL over the GPL, because it's not
suitable to be executed over a computer network.
Further, restricting the distribution under version 3 of the GPL
wouldn't provide any practical advantage either, as far as the passt
codebase is concerned, and might cause unnecessary compatibility
dilemmas.
Change licensing terms to the GNU General Public License Version 2,
or any later version, with written permission from all current and
past contributors, namely: myself, David Gibson, Laine Stump, Andrea
Bolognani, Paul Holzinger, Richard W.M. Jones, Chris Kuhn, Florian
Weimer, Giuseppe Scrivano, Stefan Hajnoczi, and Vasiliy Ulyanov.
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
2023-04-05 20:11:44 +02:00
|
|
|
# SPDX-License-Identifier: GPL-2.0-or-later
|
2021-09-27 15:10:35 +02:00
|
|
|
#
|
|
|
|
# PASST - Plug A Simple Socket Transport
|
|
|
|
# for qemu/UNIX domain socket mode
|
|
|
|
#
|
|
|
|
# PASTA - Pack A Subtle Tap Abstraction
|
|
|
|
# for network namespace/tap device mode
|
|
|
|
#
|
|
|
|
# test/perf/passt_tcp - Check TCP performance in passt mode
|
|
|
|
#
|
|
|
|
# Copyright (c) 2021 Red Hat GmbH
|
|
|
|
# Author: Stefano Brivio <sbrivio@redhat.com>
|
|
|
|
|
2022-09-17 01:17:04 +02:00
|
|
|
gtools /sbin/sysctl ip jq nproc seq sleep iperf3 tcp_rr tcp_crr # From neper
|
|
|
|
nstools /sbin/sysctl ip jq nproc seq sleep iperf3 tcp_rr tcp_crr
|
2021-10-19 09:25:29 +02:00
|
|
|
htools bc head sed seq
|
2021-09-27 15:10:35 +02:00
|
|
|
|
|
|
|
test passt: throughput and latency
|
|
|
|
|
|
|
|
guest /sbin/sysctl -w net.core.rmem_max=536870912
|
|
|
|
guest /sbin/sysctl -w net.core.wmem_max=536870912
|
|
|
|
guest /sbin/sysctl -w net.core.rmem_default=33554432
|
|
|
|
guest /sbin/sysctl -w net.core.wmem_default=33554432
|
|
|
|
guest /sbin/sysctl -w net.ipv4.tcp_rmem="4096 131072 268435456"
|
|
|
|
guest /sbin/sysctl -w net.ipv4.tcp_wmem="4096 131072 268435456"
|
|
|
|
guest /sbin/sysctl -w net.ipv4.tcp_timestamps=0
|
|
|
|
|
2022-09-18 15:38:52 +02:00
|
|
|
ns /sbin/sysctl -w net.ipv4.tcp_rmem="4096 524288 134217728"
|
|
|
|
ns /sbin/sysctl -w net.ipv4.tcp_wmem="4096 524288 134217728"
|
|
|
|
ns /sbin/sysctl -w net.ipv4.tcp_timestamps=0
|
2021-09-27 15:10:35 +02:00
|
|
|
|
2022-06-10 04:32:42 +02:00
|
|
|
gout GW ip -j -4 route show|jq -rM '.[] | select(.dst == "default").gateway'
|
|
|
|
gout GW6 ip -j -6 route show|jq -rM '.[] | select(.dst == "default").gateway'
|
|
|
|
gout IFNAME ip -j link show | jq -rM '.[] | select(.link_type == "ether").ifname'
|
2021-09-27 15:10:35 +02:00
|
|
|
|
2021-10-21 01:24:22 +02:00
|
|
|
hout FREQ_PROCFS (echo "scale=1"; sed -n 's/cpu MHz.*: \([0-9]*\)\..*$/(\1+10^2\/2)\/10^3/p' /proc/cpuinfo) | bc -l | head -n1
|
|
|
|
hout FREQ_CPUFREQ (echo "scale=1"; printf '( %i + 10^5 / 2 ) / 10^6\n' $(cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq) ) | bc -l
|
|
|
|
hout FREQ [ -n "__FREQ_CPUFREQ__" ] && echo __FREQ_CPUFREQ__ || echo __FREQ_PROCFS__
|
2021-09-27 15:10:35 +02:00
|
|
|
|
2024-07-24 22:40:32 +02:00
|
|
|
set THREADS 4
|
2024-08-14 19:25:38 +02:00
|
|
|
set TIME 1
|
2023-11-06 08:08:33 +01:00
|
|
|
set OMIT 0.1
|
2024-07-24 22:40:32 +02:00
|
|
|
set OPTS -Z -P __THREADS__ -l 1M -O__OMIT__
|
2021-09-27 15:10:35 +02:00
|
|
|
|
2024-07-24 22:40:32 +02:00
|
|
|
info Throughput in Gbps, latency in µs, __THREADS__ threads at __FREQ__ GHz
|
2021-09-27 15:10:35 +02:00
|
|
|
report passt tcp __THREADS__ __FREQ__
|
|
|
|
|
|
|
|
th MTU 256B 576B 1280B 1500B 9000B 65520B
|
|
|
|
|
|
|
|
|
|
|
|
tr TCP throughput over IPv6: guest to host
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3s ns 10002
|
2023-11-06 08:08:28 +01:00
|
|
|
|
2021-09-27 15:10:35 +02:00
|
|
|
bw -
|
|
|
|
bw -
|
|
|
|
guest ip link set dev __IFNAME__ mtu 1280
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW guest __GW6__%__IFNAME__ 10002 __TIME__ __OPTS__ -w 4M
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 1.2 1.5
|
|
|
|
guest ip link set dev __IFNAME__ mtu 1500
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW guest __GW6__%__IFNAME__ 10002 __TIME__ __OPTS__ -w 4M
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 1.6 1.8
|
|
|
|
guest ip link set dev __IFNAME__ mtu 9000
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW guest __GW6__%__IFNAME__ 10002 __TIME__ __OPTS__ -w 8M
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 4.0 5.0
|
|
|
|
guest ip link set dev __IFNAME__ mtu 65520
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW guest __GW6__%__IFNAME__ 10002 __TIME__ __OPTS__ -w 16M
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 7.0 8.0
|
|
|
|
|
2023-11-06 08:08:28 +01:00
|
|
|
iperf3k ns
|
|
|
|
|
2021-09-27 15:10:35 +02:00
|
|
|
tl TCP RR latency over IPv6: guest to host
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
nsb tcp_rr --nolog -6
|
2024-08-14 19:25:38 +02:00
|
|
|
gout LAT tcp_rr --nolog -l1 -6 -c -H __GW6__%__IFNAME__ | sed -n 's/^throughput=\(.*\)/\1/p'
|
2021-09-27 15:10:35 +02:00
|
|
|
lat __LAT__ 200 150
|
|
|
|
|
|
|
|
tl TCP CRR latency over IPv6: guest to host
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
nsb tcp_crr --nolog -6
|
2024-08-14 19:25:38 +02:00
|
|
|
gout LAT tcp_crr --nolog -l1 -6 -c -H __GW6__%__IFNAME__ | sed -n 's/^throughput=\(.*\)/\1/p'
|
2021-09-27 15:10:35 +02:00
|
|
|
lat __LAT__ 500 400
|
|
|
|
|
|
|
|
tr TCP throughput over IPv4: guest to host
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3s ns 10002
|
2023-11-06 08:08:28 +01:00
|
|
|
|
2021-09-27 15:10:35 +02:00
|
|
|
guest ip link set dev __IFNAME__ mtu 256
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW guest __GW__ 10002 __TIME__ __OPTS__ -w 1M
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 0.2 0.3
|
|
|
|
guest ip link set dev __IFNAME__ mtu 576
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW guest __GW__ 10002 __TIME__ __OPTS__ -w 1M
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 0.5 0.8
|
|
|
|
guest ip link set dev __IFNAME__ mtu 1280
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW guest __GW__ 10002 __TIME__ __OPTS__ -w 4M
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 1.2 1.5
|
|
|
|
guest ip link set dev __IFNAME__ mtu 1500
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW guest __GW__ 10002 __TIME__ __OPTS__ -w 4M
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 1.6 1.8
|
|
|
|
guest ip link set dev __IFNAME__ mtu 9000
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW guest __GW__ 10002 __TIME__ __OPTS__ -w 8M
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 4.0 5.0
|
|
|
|
guest ip link set dev __IFNAME__ mtu 65520
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW guest __GW__ 10002 __TIME__ __OPTS__ -w 16M
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 7.0 8.0
|
|
|
|
|
2023-11-06 08:08:28 +01:00
|
|
|
iperf3k ns
|
|
|
|
|
2021-09-27 15:10:35 +02:00
|
|
|
tl TCP RR latency over IPv4: guest to host
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
nsb tcp_rr --nolog -4
|
2024-08-14 19:25:38 +02:00
|
|
|
gout LAT tcp_rr --nolog -l1 -4 -c -H __GW__ | sed -n 's/^throughput=\(.*\)/\1/p'
|
2021-09-27 15:10:35 +02:00
|
|
|
lat __LAT__ 200 150
|
|
|
|
|
|
|
|
tl TCP CRR latency over IPv4: guest to host
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
nsb tcp_crr --nolog -4
|
2024-08-14 19:25:38 +02:00
|
|
|
gout LAT tcp_crr --nolog -l1 -4 -c -H __GW__ | sed -n 's/^throughput=\(.*\)/\1/p'
|
2021-09-27 15:10:35 +02:00
|
|
|
lat __LAT__ 500 400
|
|
|
|
|
|
|
|
tr TCP throughput over IPv6: host to guest
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3s guest 10001
|
2023-11-06 08:08:28 +01:00
|
|
|
|
2021-09-27 15:10:35 +02:00
|
|
|
bw -
|
|
|
|
bw -
|
test/perf: "MTU" changes in passt_tcp host to guest aren't useful
The TCP packet size used on the passt L2 link (qemu socket) makes a huge
difference to passt/pasta throughput; many of passt's overheads (chiefly
syscalls) are per-packet.
That packet size is largely determined by the MTU on the L2 link, so we
benchmark for a number of different MTUs. That works well for the guest to
host transfers. For the host to guest transfers, we purport to test for
different MTUs, but we're not actually adjusting anything interesting.
The host to guest transfers adjust the MTU on the "host's" (actually ns)
loopback interface. However, that only affects the packet size for the
socket going to passt, not the packet size for the L2 link that passt
manages - passt can and will repack the stream into packets of its own
size. Since the depacketization on that socket is handled by the kernel it
doesn't have a lot of bearing on passt's performance.
We can't fix this by changing the L2 link MTU from the guest side (as we do
for guest to host), because that would only change the guest's view of the
MTU, passt would still think it has the large MTU. We could test this by
using the --mtu option to passt, but that would require restarting passt
for each run, which is awkward in the current setup. So, for now, drop all
the "small MTU" tests for host to guest.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
2023-11-06 08:08:31 +01:00
|
|
|
bw -
|
|
|
|
bw -
|
|
|
|
bw -
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW ns ::1 10001 __TIME__ __OPTS__
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 6.0 6.8
|
|
|
|
|
2023-11-06 08:08:28 +01:00
|
|
|
iperf3k guest
|
|
|
|
|
2021-09-27 15:10:35 +02:00
|
|
|
tl TCP RR latency over IPv6: host to guest
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
guestb tcp_rr --nolog -P 10001 -C 10011 -6
|
2022-09-22 16:40:28 +02:00
|
|
|
sleep 1
|
2024-08-14 19:25:38 +02:00
|
|
|
nsout LAT tcp_rr --nolog -l1 -P 10001 -C 10011 -6 -c -H ::1 | sed -n 's/^throughput=\(.*\)/\1/p'
|
2021-09-27 15:10:35 +02:00
|
|
|
lat __LAT__ 200 150
|
|
|
|
|
|
|
|
tl TCP CRR latency over IPv6: host to guest
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
guestb tcp_crr --nolog -P 10001 -C 10011 -6
|
2022-09-22 16:40:28 +02:00
|
|
|
sleep 1
|
2024-08-14 19:25:38 +02:00
|
|
|
nsout LAT tcp_crr --nolog -l1 -P 10001 -C 10011 -6 -c -H ::1 | sed -n 's/^throughput=\(.*\)/\1/p'
|
2021-09-27 15:10:35 +02:00
|
|
|
lat __LAT__ 500 350
|
|
|
|
|
|
|
|
|
|
|
|
tr TCP throughput over IPv4: host to guest
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3s guest 10001
|
2023-11-06 08:08:28 +01:00
|
|
|
|
test/perf: "MTU" changes in passt_tcp host to guest aren't useful
The TCP packet size used on the passt L2 link (qemu socket) makes a huge
difference to passt/pasta throughput; many of passt's overheads (chiefly
syscalls) are per-packet.
That packet size is largely determined by the MTU on the L2 link, so we
benchmark for a number of different MTUs. That works well for the guest to
host transfers. For the host to guest transfers, we purport to test for
different MTUs, but we're not actually adjusting anything interesting.
The host to guest transfers adjust the MTU on the "host's" (actually ns)
loopback interface. However, that only affects the packet size for the
socket going to passt, not the packet size for the L2 link that passt
manages - passt can and will repack the stream into packets of its own
size. Since the depacketization on that socket is handled by the kernel it
doesn't have a lot of bearing on passt's performance.
We can't fix this by changing the L2 link MTU from the guest side (as we do
for guest to host), because that would only change the guest's view of the
MTU, passt would still think it has the large MTU. We could test this by
using the --mtu option to passt, but that would require restarting passt
for each run, which is awkward in the current setup. So, for now, drop all
the "small MTU" tests for host to guest.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
2023-11-06 08:08:31 +01:00
|
|
|
bw -
|
|
|
|
bw -
|
|
|
|
bw -
|
|
|
|
bw -
|
|
|
|
bw -
|
2024-07-24 22:40:32 +02:00
|
|
|
iperf3 BW ns 127.0.0.1 10001 __TIME__ __OPTS__
|
2021-09-27 15:10:35 +02:00
|
|
|
bw __BW__ 6.0 6.8
|
|
|
|
|
2023-11-06 08:08:28 +01:00
|
|
|
iperf3k guest
|
|
|
|
|
2021-09-27 15:10:35 +02:00
|
|
|
tl TCP RR latency over IPv4: host to guest
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
guestb tcp_rr --nolog -P 10001 -C 10011 -4
|
2022-09-22 16:40:28 +02:00
|
|
|
sleep 1
|
2024-08-14 19:25:38 +02:00
|
|
|
nsout LAT tcp_rr --nolog -l1 -P 10001 -C 10011 -4 -c -H 127.0.0.1 | sed -n 's/^throughput=\(.*\)/\1/p'
|
2021-09-27 15:10:35 +02:00
|
|
|
lat __LAT__ 200 150
|
|
|
|
|
|
|
|
tl TCP CRR latency over IPv4: host to guest
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
lat -
|
|
|
|
guestb tcp_crr --nolog -P 10001 -C 10011 -4
|
2022-09-22 16:40:28 +02:00
|
|
|
sleep 1
|
2024-08-14 19:25:38 +02:00
|
|
|
nsout LAT tcp_crr --nolog -l1 -P 10001 -C 10011 -4 -c -H 127.0.0.1 | sed -n 's/^throughput=\(.*\)/\1/p'
|
2021-09-27 15:10:35 +02:00
|
|
|
lat __LAT__ 500 300
|
|
|
|
|
|
|
|
te
|