#!/bin/sh
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# PASST - Plug A Simple Socket Transport
#  for qemu/UNIX domain socket mode
#
# PASTA - Pack A Subtle Tap Abstraction
#  for network namespace/tap device mode
#
# test/lib/test - List tests and run them, evaluating directives from files
#
# Copyright (c) 2021 Red Hat GmbH
# Author: Stefano Brivio <sbrivio@redhat.com>

# test_iperf3() - Ugly helper for iperf3 directive
# $1: Variable name to put the measured bandwidth into
# $2: Source/client context
# $3: Destination/server context
# $4: Destination name or address for client
# $5: Port number, ${i} is translated to process index
# $6: Number of processes to run in parallel
# $7: Run time, in seconds
# $@: Client options
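#
# Illustrative invocation (hypothetical values): store the aggregate bandwidth
# of 4 parallel 10-second client streams, sent from the "host" context to a
# server in the "ns" context, into the __BW__ substitution:
#	test_iperf3 BW host ns 127.0.0.1 10002 4 10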
test_iperf3() {
	__var="${1}"; shift
	__cctx="${1}"; shift
	__sctx="${1}"; shift
	__dest="${1}"; shift
	__port="${1}"; shift
	__procs="$((${1} - 1))"; shift
	__time="${1}"; shift

	pane_or_context_run "${__sctx}" 'rm -f s*.json'

	pane_or_context_run_bg "${__sctx}" \
		'for i in $(seq 0 '${__procs}'); do' \
		' (iperf3 -s1J -p'${__port}' -i'${__time} \
		' > s${i}.json) &' \
		' echo $! > s${i}.pid &' \
		'done'

	sleep 1 # Wait for server to be ready

	pane_or_context_run_bg "${__cctx}" \
		'(' \
		' for i in $(seq 0 '${__procs}'); do' \
		' iperf3 -c '${__dest}' -p '${__port} \
		' -t'${__time}' -i0 -T s${i} '"${@}"' &' \
		' done;' \
		' wait' \
		')'

	sleep $((__time + 5))

	# If client fails to deliver control message, tell server we're done
	pane_or_context_run "${__sctx}" 'kill -INT $(cat s*.pid); rm s*.pid'

	sleep 1 # ...and wait for output to be flushed

	__jval=".end.sum_received.bits_per_second"
	for __opt in ${@}; do
		# UDP test
		[ "${__opt}" = "-u" ] && __jval=".intervals[0].sum.bits_per_second"
	done

	__bw=$(pane_or_context_output "${__sctx}" \
		'cat s*.json | jq -rMs "map('${__jval}') | add"')
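	# Note: with the default __jval, the jq invocation above is equivalent
	# to "jq -rMs 'map(.end.sum_received.bits_per_second) | add' s*.json":
	# -s slurps all per-process reports into one array, map/add sums them.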

	TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__var}__" "${__bw}")"

	sleep 3 # Wait for kernel to free up ports
}
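
# test_one_line() - Parse and run a single directive line from a test file
# $1: Line to process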
test_one_line() {
	__line="${1}"

	[ ${DEBUG} -eq 1 ] && info DEBUG: "${__line}"

	# Strip comments
	__line="${__line%%#*}"

	if [ -n "${TEST_ONE_in_def}" ]; then
		[ "${__line}" = "endef" ] && TEST_ONE_in_def= && return
		# Append $__line to variable TEST_ONE_DEF_<definition name>
		__ifs="${IFS}"
		IFS=
		eval TEST_ONE_DEF_$TEST_ONE_in_def=\"\$\(printf \"%s\\n%s\" \"\$TEST_ONE_DEF_$TEST_ONE_in_def\" \"$__line\"\)\"
		IFS="${__ifs}"
		return
	fi
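
	# A definition block in a test file looks like (hypothetical content):
	#	def	flush_arp
	#	ns	ip neigh flush all
	#	endef
	# A later line consisting only of "flush_arp" then replays those
	# directives via the default case below.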

	# Tab-split command and arguments, apply variable substitutions
	__cmd="${__line%%$(printf '\t')*}"
	__arg="${__line#*$(printf '\t')*}"
	__arg="$(subs_apply "${TEST_ONE_subs}" "${__arg}")"
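
	# For example, a (hypothetical) test file line such as
	#	hout	MTU cat /sys/class/net/__IFNAME__/mtu
	# splits into __cmd="hout" and __arg="MTU cat ...", with any __NAME__
	# substitutions already applied to the argument part.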

	[ ${TEST_ONE_nok} -eq 1 ] && [ "${__cmd}" != "test" ] && return

	case ${__cmd} in
	"def")
		TEST_ONE_in_def="${__arg}"
		# Clear variable TEST_ONE_DEF_<definition name>
		__ifs="${IFS}"
		IFS= eval TEST_ONE_DEF_$TEST_ONE_in_def=
		IFS="${__ifs}"
		;;
	"test")
		[ ${TEST_ONE_perf_nok} -eq 0 ] || TEST_ONE_nok=1
		[ ${TEST_ONE_nok} -eq 1 ] && status_test_fail
		[ ${TEST_ONE_nok} -eq 0 ] && status_test_ok

		status_test_start "${__arg}"
		TEST_ONE_nok=0
		TEST_ONE_perf_nok=0
		;;
	"host")
		pane_or_context_run host "${__arg}" || TEST_ONE_nok=1
		;;
	"hostb")
		pane_or_context_run_bg host "${__arg}"
		;;
	"hostw")
		pane_or_context_wait host || TEST_ONE_nok=1
		;;
	"hint")
		tmux send-keys -t ${PANE_HOST} "C-c"
		;;
	"htools")
		pane_or_context_run host 'which '"${__arg}"' >/dev/null' || TEST_ONE_skip=1
		;;
	"passt")
		pane_or_context_run passt "${__arg}" || TEST_ONE_nok=1
		;;
	"passtb")
		pane_or_context_run_bg passt "${__arg}"
		;;
	"passtw")
		pane_or_context_wait passt || TEST_ONE_nok=1
		;;
	"pint")
		tmux send-keys -t ${PANE_PASST} "C-c"
		;;
	"pout")
		__varname="${__arg%% *}"
		__output="$(pane_or_context_output passt "${__arg#* }")"
		TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
		;;
	"guest")
		pane_or_context_run guest "${__arg}" || TEST_ONE_nok=1
		;;
	"guestb")
		pane_or_context_run_bg guest "${__arg}"
		;;
	"guestw")
		pane_or_context_wait guest || TEST_ONE_nok=1
		;;
	"guest1")
		pane_or_context_run guest_1 "${__arg}" || TEST_ONE_nok=1
		;;
	"guest1b")
		pane_or_context_run_bg guest_1 "${__arg}"
		;;
	"guest1w")
		pane_or_context_wait guest_1 || TEST_ONE_nok=1
		;;
	"gtools")
		pane_or_context_run guest 'which '"${__arg}"' >/dev/null' || TEST_ONE_skip=1
		;;
	"g1tools")
		pane_or_context_run guest_1 'which '"${__arg}"' >/dev/null' || TEST_ONE_skip=1
		;;
	"g2tools")
		pane_or_context_run guest_2 'which '"${__arg}"' >/dev/null' || TEST_ONE_skip=1
		;;
	"guest2")
		pane_or_context_run guest_2 "${__arg}" || TEST_ONE_nok=1
		;;
	"guest2b")
		pane_or_context_run_bg guest_2 "${__arg}"
		;;
	"guest2w")
		pane_or_context_wait guest_2 || TEST_ONE_nok=1
		;;
	"ns")
		pane_or_context_run ns "${__arg}" || TEST_ONE_nok=1
		;;
	"ns1")
		pane_or_context_run ns1 "${__arg}" || TEST_ONE_nok=1
		;;
	"ns2")
		pane_or_context_run ns2 "${__arg}" || TEST_ONE_nok=1
		;;
	"nsb")
		pane_or_context_run_bg ns "${__arg}"
		;;
	"ns1b")
		pane_or_context_run_bg ns1 "${__arg}"
		;;
	"ns2b")
		pane_or_context_run_bg ns2 "${__arg}"
		;;
	"nsw")
		pane_or_context_wait ns || TEST_ONE_nok=1
		;;
	"ns1w")
		pane_or_context_wait ns1 || TEST_ONE_nok=1
		;;
	"ns2w")
		pane_or_context_wait ns2 || TEST_ONE_nok=1
		;;
	"nstools")
		pane_or_context_run ns 'which '"${__arg}"' >/dev/null' || TEST_ONE_skip=1
		;;
	"gout")
		__varname="${__arg%% *}"
		__output="$(pane_or_context_output guest "${__arg#* }")"
		TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
		;;
	"g1out")
		__varname="${__arg%% *}"
		__output="$(pane_or_context_output guest_1 "${__arg#* }")"
		TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
		;;
	"g2out")
		__varname="${__arg%% *}"
		__output="$(pane_or_context_output guest_2 "${__arg#* }")"
		TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
		;;
	"hout")
		__varname="${__arg%% *}"
		__output="$(pane_or_context_output host "${__arg#* }")"
		TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
		;;
	"nsout")
		__varname="${__arg%% *}"
		__output="$(pane_or_context_output ns "${__arg#* }")"
		TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
		;;
	"ns1out")
		__varname="${__arg%% *}"
		__output="$(pane_or_context_output ns1 "${__arg#* }")"
		TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
		;;
	"ns2out")
		__varname="${__arg%% *}"
		__output="$(pane_or_context_output ns2 "${__arg#* }")"
		TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__varname}__" "${__output}")"
		;;
	"check")
		info_check "${__arg}"
		__nok=0
		eval "${__arg} || __nok=1"
		if [ ${__nok} -eq 1 ]; then
			TEST_ONE_nok=1
			info_check_failed
		else
			info_check_passed
		fi
		;;
	"sleep")
		sleep "${__arg}"
		;;
	"info")
		info "${__arg}"
		;;
	"report")
		perf_report ${__arg}
		;;
	"th")
		table_header ${__arg}
		;;
	"tr")
		table_row "${__arg}"
		;;
	"tl")
		table_line "${__arg}"
		;;
	"te")
		table_end
		;;
	"td")
		table_value ${__arg} || TEST_ONE_perf_nok=1
		;;
	"bw")
		table_value_throughput ${__arg} || TEST_ONE_perf_nok=1
		;;
	"lat")
		table_value_latency ${__arg} || TEST_ONE_perf_nok=1
		;;
	"iperf3")
		test_iperf3 ${__arg}
		;;
	"set")
		TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__${__arg%% *}__" "${__arg#* }")"
		;;

	# Demo commands
	"say")
		text_write "${__arg}"
		;;
	"em")
		em_write "${__arg}"
		;;
	"nl")
		info_nolog ""
		;;
	"hl")
		pane_highlight "${__arg}"
		;;
	"bsp")
		text_backspace "${__arg}"
		;;
	"killp")
		pane_kill "${__arg}"
		;;
	"resize")
		pane_resize ${__arg}
		;;
	*)
		__def_body="$(eval printf \"\$TEST_ONE_DEF_$__cmd\")"
		if [ -n "${__def_body}" ]; then
			__ifs="${IFS}"
			IFS='
'
			for __def_line in ${__def_body}; do
				IFS="${__ifs}" test_one_line "${__def_line}"
			done
			IFS="${__ifs}"
		fi
		;;
	esac
}

# test_one() - Run a single test file, evaluating directives
# $1: Name of test file, relative to test/ directory
test_one() {
	TEST_ONE_dirclean=
	__test_file="test/${1}"

	__type="$(file -b --mime-type ${__test_file})"
	if [ "${__type}" = "text/x-shellscript" ]; then
		status_file_start "${1}" 1
		"${__test_file}" && status_test_ok || status_test_fail
		return
	fi

	if [ ${DEMO} -eq 0 ]; then
		__ntests="$(grep -c "^test$(printf '\t')" "${__test_file}")"
		status_file_start "${1}" "${__ntests}"
	fi

	[ ${CI} -eq 1 ] && video_link "${1}"

	TEST_ONE_subs="$(list_add_pair "" "__BASEPATH__" "${BASEPATH}")"
	TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__STATESETUP__" "${STATESETUP}")"
	STATEDIR="${STATEBASE}/${1}"
	mkdir -p "${STATEDIR}"
	TEST_ONE_subs="$(list_add_pair "${TEST_ONE_subs}" "__STATEDIR__" "${STATEDIR}")"

	TEST_ONE_nok=-1
	TEST_ONE_perf_nok=0
	TEST_ONE_skip=0
	TEST_ONE_in_def=
	while IFS= read -r __line; do
		test_one_line "${__line}"
		[ ${TEST_ONE_skip} -eq 1 ] && break
	done < "${__test_file}"

	for __d in ${TEST_ONE_dirclean}; do
		rm -rf ${__d}
	done

	[ ${DEMO} -eq 1 ] && return

	[ ${TEST_ONE_skip} -eq 1 ] && status_test_skip && return
	[ ${TEST_ONE_perf_nok} -eq 0 ] || TEST_ONE_nok=1
	[ ${TEST_ONE_nok} -eq 0 ] && status_test_ok || status_test_fail
}

# test() - Build list of tests to run, in order, then issue test_one()
# $@: Test files to run, relative to test/
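#
# For instance, a caller might run (hypothetical file names):
#	test build/all passt/tcp
# to execute those two files in order.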
test() {
	__list=

	cd test
	for __f; do
		__type="$(file -b --mime-type ${__f})"
		if [ "${__type}" = "text/x-shellscript" ]; then
			__list="$(list_add "${__list}" "${__f}")"
			continue
		fi

		__list="$(list_add "${__list}" "${__f}")"
	done
	cd ..

	for __f in ${__list}; do
		test_one "${__f}"
	done
}