#!/bin/sh
#
# Copyright (C) Internet Systems Consortium, Inc. ("ISC")
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# See the COPYRIGHT file distributed with this work for additional
# information regarding copyright ownership.
#
# Run a system test.
#

top_builddir=@top_builddir@
builddir=@abs_builddir@
srcdir=@abs_srcdir@

# shellcheck source=conf.sh
. ${builddir}/conf.sh

SYSTEMTESTTOP="$(cd -P -- "${builddir}" && pwd -P)"
export SYSTEMTESTTOP
export builddir
export srcdir

date_with_args() (
  date -R
)

stopservers=true
# baseport == 0 means random
baseport=0

if [ "${SYSTEMTEST_NO_CLEAN:-0}" -eq 1 ]; then
  clean=false
else
  clean=true
fi

do_run=false
log_flags="-r"
while getopts "sknp:r-:" OPT; do
  log_flags="$log_flags -$OPT$OPTARG"
  if [ "$OPT" = "-" ] && [ -n "$OPTARG" ]; then
    OPT="${OPTARG%%=*}"
    OPTARG="${OPTARG#$OPT}"
    OPTARG="${OPTARG#=}"
  fi

  # shellcheck disable=SC2214
  case "$OPT" in
    k | keep) stopservers=false ;;
    n | noclean) clean=false ;;
    p | port) baseport=$OPTARG ;;
    r | run) do_run=true ;;
    s | skip) exit 77 ;;
    -) break ;;
    *) echo "invalid option" >&2; exit 1 ;;
  esac
done
shift $((OPTIND-1))

if ! $do_run; then
  if [ "$baseport" -eq 0 ]; then
    log_flags="$log_flags -p 5300"
  fi
  env - TESTS="$*" TEST_SUITE_LOG=run.log LOG_DRIVER_FLAGS="--verbose yes --color-tests yes" LOG_FLAGS="$log_flags" make -e check
  exit $?
fi

if [ $# -eq 0 ]; then
  echofail "Usage: $0 [-k] [-n] [-p <port>] test-directory [test-options]" >&2
  exit 1
fi

systest=$(basename "${1%%/}")
shift

if [ ! -d "${srcdir}/$systest" ]; then
  echofail "$0: $systest: no such test" >&2
  exit 1
fi

if [ "${srcdir}" != "${builddir}" ]; then
  if [ ! -d common ] || [ ! -r common/.prepared ]; then
    cp -a "${srcdir}/common" "${builddir}"
  fi
  if [ ! -d "$systest" ] || [ ! -r "$systest/.prepared" ]; then
    mkdir -p "${builddir}/$systest"
    cp -a "${srcdir}/$systest" "${builddir}/"
    touch "$systest/.prepared"
  fi
fi

if [ ! -d "${systest}" ]; then
  echofail "$0: $systest: no such test" >&2
  exit 1
fi

# Get the first 10 ports in the set (it is assumed that each test has access
# to ten or more ports): the query port, the control port and eight extra
# ports.  Since the lowest numbered port (specified in the command line)
# will usually be a multiple of 10, the names are chosen so that if this is
# true, the last digit of EXTRAPORTn is "n".
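#
# For example, with "-p 5300" and assuming get_ports.sh assigns a consecutive
# block of ports starting at the base port, this would yield PORT=5300,
# EXTRAPORT1=5301 ... EXTRAPORT8=5308 and CONTROLPORT=5309.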
eval "$(${srcdir}/get_ports.sh -p "$baseport")"

restart=false

start_servers_failed() {
  echoinfo "I:$systest:starting servers failed"
  echofail "R:$systest:FAIL"
  echoend "E:$systest:$(date_with_args)"
  exit 1
}

start_servers() {
  echoinfo "I:$systest:starting servers"
  if $restart; then
    $PERL start.pl --restart --port "$PORT" "$systest" || start_servers_failed
  else
    restart=true
    $PERL start.pl --port "$PORT" "$systest" || start_servers_failed
  fi
}

stop_servers_failed() {
  echoinfo "I:$systest:stopping servers failed"
  echofail "R:$systest:FAIL"
  echoend "E:$systest:$(date_with_args)"
  exit 1
}

stop_servers() {
  if $stopservers; then
    echoinfo "I:$systest:stopping servers"
    $PERL stop.pl "$systest" || stop_servers_failed
  fi
}

echostart "S:$systest:$(date_with_args)"
echoinfo "T:$systest:1:A"
echoinfo "A:$systest:System test $systest"
echoinfo "I:$systest:PORTS:${PORT},${EXTRAPORT1},${EXTRAPORT2},${EXTRAPORT3},${EXTRAPORT4},${EXTRAPORT5},${EXTRAPORT6},${EXTRAPORT7},${EXTRAPORT8},${CONTROLPORT}"

$PERL ${srcdir}/testsock.pl -p "$PORT" || {
  echowarn "I:$systest:Network interface aliases not set up.  Skipping test."
  echowarn "R:$systest:FAIL"
  echoend "E:$systest:$(date_with_args)"
  exit 1
}

# Check for test-specific prerequisites.
test ! -f "$systest/prereq.sh" || ( cd "${systest}" && $SHELL prereq.sh "$@" )
result=$?

if [ $result -eq 0 ]; then
  : prereqs ok
else
  echowarn "I:$systest:Prerequisites missing, skipping test."
  if [ $result -eq 255 ]; then
    echowarn "R:$systest:SKIPPED"
  else
    echowarn "R:$systest:UNTESTED"
  fi
  echoend "E:$systest:$(date_with_args)"
  exit 0
fi

# Check for PKCS#11 support
if
  test ! -f "$systest/usepkcs11" || $SHELL cleanpkcs11.sh
then
  : pkcs11 ok
else
  echowarn "I:$systest:Need PKCS#11, skipping test."
  echowarn "R:$systest:PKCS11ONLY"
  echoend "E:$systest:$(date_with_args)"
  exit 0
fi

# Clean up files left from any potential previous runs
if test -f "$systest/clean.sh"
then
  ( cd "${systest}" && $SHELL clean.sh "$@" )
  ret=$?
  if [ $ret -ne 0 ]; then
    echowarn "I:$systest:clean.sh script failed with $ret"
  fi
fi

# Set up any dynamically generated test data
if test -f "$systest/setup.sh"
then
  ( cd "${systest}" && $SHELL setup.sh "$@" )
  ret=$?
  if [ $ret -ne 0 ]; then
    echowarn "I:$systest:setup.sh script failed with $ret"
  fi
fi

status=0
run=0

# Run the tests
if [ -r "$systest/tests.sh" ]; then
  start_servers
  ( cd "$systest" && $SHELL tests.sh "$@" )
  status=$?
  run=$((run+1))
  stop_servers
fi

if [ -n "$PYTEST" ]; then
  run=$((run+1))
  for test in $(cd "${systest}" && find . -name "tests*.py"); do
    start_servers
    rm -f "$systest/$test.status"
    test_status=0
    (cd "$systest" && "$PYTEST" -v "$test" "$@" || echo "$?" > "$test.status") | SYSTESTDIR="$systest" cat_d
    if [ -f "$systest/$test.status" ]; then
      echo_i "FAILED"
      test_status=$(cat "$systest/$test.status")
    fi
    status=$((status+test_status))
    stop_servers
  done
else
  echoinfo "I:$systest:pytest not installed, skipping python tests"
fi

if [ "$run" -eq "0" ]; then
  echoinfo "I:$systest:No tests were found and run"
  status=255
fi

if $stopservers
then
  :
else
  exit $status
fi
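# Post-mortem checks: even if the test scripts themselves reported success,
# look for core dumps, assertion failures in named.run and sanitizer (tsan.*)
# reports, and turn any of them into a failure so the evidence is preserved.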
if [ $status != 0 ]; then
  echofail "R:$systest:FAIL"
  # Do not clean up - we need the evidence.
else
  core_dumps="$(find "$systest/" -name 'core*' -or -name '*.core' | sort | tr '\n' ' ')"
  assertion_failures=$(find "$systest/" -name named.run -print0 | xargs -0 grep "assertion failure" | wc -l)
  sanitizer_summaries=$(find "$systest/" -name 'tsan.*' | wc -l)
  if [ -n "$core_dumps" ]; then
    echoinfo "I:$systest:Test claims success despite crashes: $core_dumps"
    echofail "R:$systest:FAIL"
    # Do not clean up - we need the evidence.
    find "$systest/" -name 'core*' -or -name '*.core' | while read -r coredump; do
      export SYSTESTDIR="$systest"
      echoinfo "D:$systest:backtrace from $coredump start"
      binary=$(gdb --batch --core="$coredump" | sed -ne "s/Core was generated by \`//;s/ .*'.$//p;")
      "${top_builddir}/libtool" --mode=execute gdb \
        --batch \
        --command=run.gdb \
        --core="$coredump" \
        -- \
        "$binary"
      echoinfo "D:$systest:backtrace from $coredump end"
    done
  elif [ "$assertion_failures" -ne 0 ]; then
    SYSTESTDIR="$systest" echoinfo "I:$systest:Test claims success despite $assertion_failures assertion failure(s)"
    find "$systest/" -name 'tsan.*' -print0 | xargs -0 grep "SUMMARY: " | sort -u | cat_d
    echofail "R:$systest:FAIL"
    # Do not clean up - we need the evidence.
  elif [ "$sanitizer_summaries" -ne 0 ]; then
    echoinfo "I:$systest:Test claims success despite $sanitizer_summaries sanitizer report(s)"
    echofail "R:$systest:FAIL"
  else
    echopass "R:$systest:PASS"
    if $clean
    then
      ( cd "${systest}" && $SHELL clean.sh "$@" )
      if [ "${srcdir}" != "${builddir}" ]; then
        rm -rf "./${systest}" ## FIXME (this also removes compiled binaries)
      fi
      if test -d ${srcdir}/../../../.git
      then
        git status -su --ignored "${systest}" 2>/dev/null | \
          sed -n -e 's|^?? \(.*\)|I:file \1 not removed|p' \
                 -e 's|^!! \(.*/named.run\)$|I:file \1 not removed|p' \
                 -e 's|^!! \(.*/named.memstats\)$|I:file \1 not removed|p'
      fi
    fi
  fi
fi

echoend "E:$systest:$(date_with_args)"

exit $status