#!/bin/sh
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# PASST - Plug A Simple Socket Transport
# for qemu/UNIX domain socket mode
#
# PASTA - Pack A Subtle Tap Abstraction
# for network namespace/tap device mode
#
# test/lib/test - List tests and run them, evaluating directives from files
#
# Copyright (c) 2021 Red Hat GmbH
# Author: Stefano Brivio <sbrivio@redhat.com>

# test_iperf3s() - Start iperf3 server
# $1:	Destination/server context
# $2:	Port number
test_iperf3s() {
	__sctx="${1}"
	__port="${2}"

	pane_or_context_run_bg "${__sctx}" \
		'iperf3 -s -p'${__port}' & echo $! > s.pid'

	sleep 1		# Wait for server to be ready
}

# test_iperf3k() - Kill iperf3 server
# $1:	Destination/server context
test_iperf3k() {
	__sctx="${1}"

	pane_or_context_run "${__sctx}" 'kill -INT $(cat s.pid); rm s.pid'

	sleep 1		# Wait for kernel to free up ports
}

# test_iperf3() - Ugly helper for iperf3 directive
# $1:	Variable name to put the measured bandwidth into
# $2:	Source/client context
# $3:	Destination name or address for client
# $4:	Port number, ${i} is translated to process index
# $5:	Run time, in seconds
# $@:	Client options
test_iperf3() {
	__var="${1}"; shift
	__cctx="${1}"; shift
	__dest="${1}"; shift
	__port="${1}"; shift
	__time="${1}"; shift

	pane_or_context_run "${__cctx}" 'rm -f c.json'
	# A 1s wait for connection on what's basically a local link
	# indicates something is pretty wrong (--connect-timeout is in
	# milliseconds)
	__timeout=1000
	pane_or_context_run "${__cctx}" \
		'iperf3 -J -c '${__dest}' -p '${__port} \
		' --connect-timeout '${__timeout} \
		' -t'${__time}' -i0 '"${@}"' > c.json'

	__jval=".end.sum_received.bits_per_second"
	__bw=$(pane_or_context_output "${__cctx}" \
		'cat c.json | jq -rMs "map('${__jval}') | add"')

	TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__var}__" "${__bw}")"
}
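
# The helpers above back the "iperf3s", "iperf3" and "iperf3k" directives.
# A hypothetical example of how they might appear, tab-separated, in a test
# file (context, port and thresholds are made-up values):
#
#	iperf3s	ns 10002
#	iperf3	BW host 127.0.0.1 10002 10
#	bw	__BW__ 1.0 2.0
#	iperf3k	ns

# test_one_line() - Evaluate a single directive line from a test file
# $1:	Line to evaluate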
test_one_line() {
	__line="${1}"

	[ ${DEBUG} -eq 1 ] && info DEBUG: "${__line}"

	# Strip comments
	__line="${__line%%#*}"

	if [ -n "${TEST_ONE_in_def}" ]; then
		[ "${__line}" = "endef" ] && TEST_ONE_in_def= && return
		# Append $__line to variable TEST_ONE_DEF_<definition name>
		__ifs="${IFS}"
		IFS=
		eval TEST_ONE_DEF_$TEST_ONE_in_def=\"\$\(printf \"%s\\n%s\" \"\$TEST_ONE_DEF_$TEST_ONE_in_def\" \"$__line\"\)\"
		IFS="${__ifs}"
		return
	fi

	# Tab-split command and arguments, apply variable substitutions
	__cmd="${__line%%$(printf '\t')*}"
	__arg="${__line#*$(printf '\t')*}"
	__arg="$(subs_apply "${TEST_ONE_subs}" "${__arg}")"

	# After a failure, skip directives until the next "test" starts
	[ ${TEST_ONE_nok} -eq 1 ] && [ "${__cmd}" != "test" ] && return
	case ${__cmd} in
	"def")
		TEST_ONE_in_def="${__arg}"
		# Clear variable TEST_ONE_DEF_<definition name>
		__ifs="${IFS}"
		IFS= eval TEST_ONE_DEF_$TEST_ONE_in_def=
		IFS="${__ifs}"
		;;
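	# A definition stored by "def" is replayed, line by line, whenever a
	# later directive matches its name (see the default case arm below).
	# A hypothetical example:
	#
	#	def	flush_arp
	#	ns	ip neigh flush all
	#	endef
	#	...
	#	flush_arp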
"test")
[ ${TEST_ONE_perf_nok} -eq 0 ] || TEST_ONE_nok=1
[ ${TEST_ONE_nok} -eq 1 ] && status_test_fail
[ ${TEST_ONE_nok} -eq 0 ] && status_test_ok
status_test_start "${__arg}"
TEST_ONE_nok=0
TEST_ONE_perf_nok=0
;;
"host")
pane_or_context_run host "${__arg}" || TEST_ONE_nok=1
;;
"hostb")
pane_or_context_run_bg host "${__arg}"
;;
"hostw")
pane_or_context_wait host || TEST_ONE_nok=1
;;
"hint")
tmux send-keys -t ${PANE_HOST} "C-c"
;;
"htools")
pane_or_context_run host 'which '"${__arg}"' >/dev/null' || TEST_ONE_skip=1
;;
"passt")
pane_or_context_run passt "${__arg}" || TEST_ONE_nok=1
;;
"passtb")
pane_or_context_run_bg passt "${__arg}"
;;
"passtw")
pane_or_context_wait passt || TEST_ONE_nok=1
;;
"pint")
tmux send-keys -t ${PANE_PASST} "C-c"
;;
"pout")
__varname="${__arg%% *}"
__output="$(pane_or_context_output passt "${__arg#* }")"
TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
;;
"guest")
pane_or_context_run guest "${__arg}" || TEST_ONE_nok=1
;;
"guestb")
pane_or_context_run_bg guest "${__arg}"
;;
"guestw")
pane_or_context_wait guest || TEST_ONE_nok=1
;;
"guest1")
pane_or_context_run guest_1 "${__arg}" || TEST_ONE_nok=1
;;
"guest1b")
pane_or_context_run_bg guest_1 "${__arg}"
;;
"guest1w")
pane_or_context_wait guest_1 || TEST_ONE_nok=1
;;
"gtools")
pane_or_context_run guest 'which '"${__arg}"' >/dev/null' || TEST_ONE_skip=1
;;
"g1tools")
pane_or_context_run guest_1 'which '"${__arg}"' >/dev/null' || TEST_ONE_skip=1
;;
"g2tools")
pane_or_context_run guest_2 'which '"${__arg}"' >/dev/null' || TEST_ONE_skip=1
;;
"guest2")
pane_or_context_run guest_2 "${__arg}" || TEST_ONE_nok=1
;;
"guest2b")
pane_or_context_run_bg guest_2 "${__arg}"
;;
"guest2w")
pane_or_context_wait guest_2 || TEST_ONE_nok=1
;;
"ns")
pane_or_context_run ns "${__arg}" || TEST_ONE_nok=1
;;
"ns1")
pane_or_context_run ns1 "${__arg}" || TEST_ONE_nok=1
;;
"ns2")
pane_or_context_run ns2 "${__arg}" || TEST_ONE_nok=1
;;
"nsb")
pane_or_context_run_bg ns "${__arg}"
;;
"ns1b")
pane_or_context_run_bg ns1 "${__arg}"
;;
"ns2b")
pane_or_context_run_bg ns2 "${__arg}"
;;
"nsw")
pane_or_context_wait ns || TEST_ONE_nok=1
;;
"ns1w")
pane_or_context_wait ns1 || TEST_ONE_nok=1
;;
"ns2w")
pane_or_context_wait ns2 || TEST_ONE_nok=1
;;
"nstools")
pane_or_context_run ns 'which '"${__arg}"' >/dev/null' || TEST_ONE_skip=1
;;
"gout")
__varname="${__arg%% *}"
__output="$(pane_or_context_output guest "${__arg#* }")"
TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
;;
"g1out")
__varname="${__arg%% *}"
__output="$(pane_or_context_output guest_1 "${__arg#* }")"
TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
;;
"g2out")
__varname="${__arg%% *}"
__output="$(pane_or_context_output guest_2 "${__arg#* }")"
TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
;;
"hout")
__varname="${__arg%% *}"
__output="$(pane_or_context_output host "${__arg#* }")"
TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
;;
"nsout")
__varname="${__arg%% *}"
__output="$(pane_or_context_output ns "${__arg#* }")"
TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
;;
"ns1out")
__varname="${__arg%% *}"
__output="$(pane_or_context_output ns1 "${__arg#* }")"
TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
;;
"ns2out")
__varname="${__arg%% *}"
__output="$(pane_or_context_output ns2 "${__arg#* }")"
TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
;;
"check")
info_check "${__arg}"
__nok=0
eval "${__arg} || __nok=1"
if [ ${__nok} -eq 1 ]; then
TEST_ONE_nok=1
info_check_failed
else
info_check_passed
fi
;;
"sleep")
sleep "${__arg}"
;;
"info")
info "${__arg}"
;;
"report")
perf_report ${__arg}
;;
"th")
table_header ${__arg}
;;
"tr")
table_row "${__arg}"
;;
"tl")
table_line "${__arg}"
;;
"te")
table_end
;;
"td")
table_value ${__arg} || TEST_ONE_perf_nok=1
;;
"bw")
table_value_throughput ${__arg} || TEST_ONE_perf_nok=1
;;
"lat")
table_value_latency ${__arg} || TEST_ONE_perf_nok=1
;;
"iperf3s")
test_iperf3s ${__arg}
;;
"iperf3k")
test_iperf3k ${__arg}
;;
"iperf3")
test_iperf3 ${__arg}
;;
"set")
TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__arg%% *}__" "${__arg#* }")"
;;
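	# Hypothetical example: after "set	PORT 10001", any later
	# occurrence of __PORT__ in a directive is replaced by 10001.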
	# Demo commands
	"say")
		text_write "${__arg}"
		;;
	"em")
		em_write "${__arg}"
		;;
	"nl")
		info_nolog ""
		;;
	"hl")
		pane_highlight "${__arg}"
		;;
	"bsp")
		text_backspace "${__arg}"
		;;
	"killp")
		pane_kill "${__arg}"
		;;
	"resize")
		pane_resize ${__arg}
		;;
	*)
		__def_body="$(eval printf \"\$TEST_ONE_DEF_$__cmd\")"
		if [ -n "${__def_body}" ]; then
			__ifs="${IFS}"
			IFS='
'
			for __def_line in ${__def_body}; do
				IFS="${__ifs}" test_one_line "${__def_line}"
			done
			IFS="${__ifs}"
		fi
		;;
	esac
}
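
# A test file is a sequence of tab-separated directives, each evaluated by
# test_one_line() above. A minimal hypothetical sketch (port, path and
# message are made-up values):
#
#	htools	socat
#	test	TCP transfer
#	hostb	socat -u TCP4-LISTEN:10001 OPEN:__STATEDIR__/msg,create,trunc
#	ns	echo hello | socat -u STDIN TCP4:127.0.0.1:10001
#	hostw
#	hout	MSG cat __STATEDIR__/msg
#	check	[ "__MSG__" = "hello" ]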
# test_one() - Run a single test file evaluating directives
# $1: Name of test file, relative to test/ directory
test_one() {
	TEST_ONE_dirclean=
	__test_file="test/${1}"

	__type="$(file -b --mime-type ${__test_file})"
	if [ "${__type}" = "text/x-shellscript" ]; then
		status_file_start "${1}" 1
		"${__test_file}" && status_test_ok || status_test_fail
		return
	fi

	if [ ${DEMO} -eq 0 ]; then
		__ntests="$(grep -c "^test$(printf '\t')" "${__test_file}")"
		status_file_start "${1}" "${__ntests}"
	fi

	[ ${CI} -eq 1 ] && video_link "${1}"

	TEST_ONE_subs="$(list_add_pair "" "__BASEPATH__" "${BASEPATH}")"
	TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__STATESETUP__" "${STATESETUP}")"
	STATEDIR="${STATEBASE}/${1}"
	mkdir -p "${STATEDIR}"
	TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__STATEDIR__" "${STATEDIR}")"
	TEST_ONE_nok=-1
	TEST_ONE_perf_nok=0
	TEST_ONE_skip=0
	TEST_ONE_in_def=

	while IFS= read -r __line; do
		test_one_line "${__line}"
		[ ${TEST_ONE_skip} -eq 1 ] && break
	done < "${__test_file}"

	for __d in ${TEST_ONE_dirclean}; do
		rm -rf ${__d}
	done

	[ ${DEMO} -eq 1 ] && return

	[ ${TEST_ONE_skip} -eq 1 ] && status_test_skip && return
	[ ${TEST_ONE_perf_nok} -eq 0 ] || TEST_ONE_nok=1
	[ ${TEST_ONE_nok} -eq 0 ] && status_test_ok || status_test_fail
}
# test() - Build list of tests to run, in order, then issue test_one()
# $@: Test files to run, relative to test/
test() {
	__list=

	cd test
	for __f; do
		__type="$(file -b --mime-type ${__f})"
		if [ "${__type}" = "text/x-shellscript" ]; then
			__list="$(list_add "${__list}" "${__f}")"
			continue
		fi

		__list="$(list_add "${__list}" "${__f}")"
	done
	cd ..

	for __f in ${__list}; do
		test_one "${__f}"
	done
}