debian-cis/tests/launch_tests.sh

#!/bin/bash
# run-shellcheck
# stop on any error
set -e
# stop on undefined variable
set -u
# debug
#set -x
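# Usage: launch_tests.sh [test_scenario.sh ...]
# With no argument, every scenario found under tests/hardening/ is played;
# otherwise only the named scenarios (resolved under tests/hardening/) are played.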
mytmpdir=$(mktemp -d -t debian-cis-test.XXXXXX)
totalerrors=255
cleanup_and_exit() {
    rm -rf "$mytmpdir"
    exit $totalerrors
}
trap "cleanup_and_exit" EXIT HUP INT
outdir="$mytmpdir/out"
mkdir -p "$outdir" || exit 1
tests_list=""
testno=0
testcount=0
dismiss_count=0
nbfailedret=0
nbfailedgrep=0
nbfailedconsist=0
listfailedret=""
listfailedgrep=""
listfailedconsist=""
usecase=""
usecase_name=""
usecase_name_root=""
usecase_name_sudo=""
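# REGISTERED_TESTS holds the assertion commands queued for the current use case;
# play_registered_tests() executes them in order, clear_registered_tests() resets the list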
declare -a REGISTERED_TESTS
#####################
# Utility functions #
#####################
# in case a fatal event occurs, fatal() logs it and exits with return code 1
fatal() {
    printf "%b %b\n" "\033[1;91mFATAL\033[0m" "$*" >&2
    printf "%b \n" "\033[1;91mEXIT TEST SUITE WITH FAILURE\033[0m" >&2
    exit 1
}
# prints that a test failed
fail() {
    printf "%b %b\n" "\033[1;30m\033[41m[FAIL]\033[0m" "$*" >&2
}
# prints that a test succeeded
ok() {
    printf "%b %b\n" "\033[30m\033[42m[ OK ]\033[0m" "$*" >&2
}
# retrieves audit script logfile
get_stdout() {
    cat "$outdir"/"$usecase_name".log
}
# Reset the list of test assertions
clear_registered_tests() {
    unset REGISTERED_TESTS
    declare -a REGISTERED_TESTS
    dismiss_count=0
}
# Generates a formatted test name
make_usecase_name() {
    usecase=$1
    shift
    role=$1
    usecase_name=$(printf '%03d-%s-%s-%s' "$testno" "$name" "$usecase" "$role" | sed -re "s=/=_=g")
    echo -n "$usecase_name"
}
# Plays the registered test suite
play_registered_tests() {
    usecase_name=$1
    if [[ "${REGISTERED_TESTS[*]}" ]]; then
        export numtest=${#REGISTERED_TESTS[@]}
        for t in "${!REGISTERED_TESTS[@]}"; do
            ${REGISTERED_TESTS[$t]}
        done
    fi
}
# Plays comparison tests to ensure that root and sudo execution have the same output
play_consistency_tests() {
    consist_test=0
    printf "\033[34m*** [%03d] %s::%s Root/Sudo Consistency Tests\033[0m\n" "$testno" "$test_file" "$usecase"
    retfile_root=$outdir/${usecase_name_root}.retval
    retfile_sudo=$outdir/${usecase_name_sudo}.retval
    # cmp exits non-zero when the files differ; capture that without tripping set -e
    cmp "$retfile_root" "$retfile_sudo" && ret=0 || ret=1
    if [[ ! 0 -eq $ret ]]; then
        fail "$name return values differ"
        diff "$retfile_root" "$retfile_sudo" || true
        consist_test=1
    else
        ok "$name return values are equal"
    fi
    retfile_root=$outdir/${usecase_name_root}.log
    retfile_sudo=$outdir/${usecase_name_sudo}.log
    cmp "$retfile_root" "$retfile_sudo" && ret=0 || ret=1
    if [[ ! 0 -eq $ret ]]; then
        fail "$name logs differ"
        diff "$retfile_root" "$retfile_sudo" || true
        consist_test=1
    else
        ok "$name logs are identical"
    fi
    if [ 1 -eq $consist_test ]; then
        if [ 0 -eq $dismiss_count ]; then
            nbfailedconsist=$(( nbfailedconsist + 1 ))
            listfailedconsist="$listfailedconsist $(make_usecase_name "$usecase" consist)"
        fi
    fi
}
# Actually runs one single audit script
_run() {
    usecase_name=$1
    shift
    printf "\033[34m*** [%03d] %s \033[0m(%s)\n" "$testno" "$usecase_name" "$*"
    # "&& true" keeps a non-zero exit status from aborting the suite (set -e);
    # the status is captured in the .retval file instead
    bash -c "$*" >"$outdir/$usecase_name.log" && true; echo $? > "$outdir/$usecase_name.retval"
    ret=$(< "$outdir"/"$usecase_name".retval)
    get_stdout
}
# Load assertion functions for functional tests
if [ ! -f "$(dirname "$0")"/lib.sh ]; then
    fatal "Cannot locate lib.sh"
fi
# shellcheck source=/opt/debian-cis/tests/lib.sh
. "$(dirname "$0")"/lib.sh
###################
# Execution start #
###################
printf "\033[1;36m###\n### %s\n### \033[0m\n" "Starting debian-cis functional testing"
# if no scripts were passed as arguments, list all available test scenarios to be played
if [ $# -eq 0 ]; then
    tests_list=$(ls -v "$(dirname "$0")"/hardening/)
    testcount=$(wc -l <<< "$tests_list")
else
    tests_list="$*"
    testcount=$#
fi
for test_file in $tests_list; do
    test_file_path=$(dirname "$0")/hardening/"$test_file"
    if [ ! -f "$test_file_path" ]; then
        fatal "Test file \"$test_file\" does not exist"
    fi
    # script var is used inside test files
    # shellcheck disable=2034
    script="$(basename "$test_file" .sh)"
    # source test scenario file to add `test_audit` func
    # shellcheck disable=1090
    . "$test_file_path"
    testno=$(( testno + 1 ))
    # strip the leading "N.N_" numeric prefix from the file name
    # shellcheck disable=2001
    name="$(echo "${test_file%%.sh}" | sed -E 's/[0-9]+\.[0-9]+_//')"
    printf "\033[1;36m### [%03d/%03d] %s \033[0m\n" "$testno" "$testcount" "$test_file"
    # test_audit is the function defined in $test_file that carries the actual functional tests for this script
    test_audit
    # reset var names
    usecase_name=""
    usecase_name_root=""
    usecase_name_sudo=""
    unset -f test_audit
    echo ""
done
printf "\033[1;36m###\n### %s \033[0m\n" "Test report"
if [ $((nbfailedret + nbfailedgrep + nbfailedconsist)) -eq 0 ]; then
    echo -e "\033[42m\033[30mAll tests succeeded :)\033[0m"
else
    (
        echo -e "\033[41mOne or more tests failed :(\033[0m"
        echo -e "- $nbfailedret unexpected return values ${listfailedret}"
        echo -e "- $nbfailedgrep unexpected text values $listfailedgrep"
        echo -e "- $nbfailedconsist root/sudo consistency $listfailedconsist"
    ) | tee "$outdir"/summary
fi
echo
set +e
set +u
totalerrors=$((nbfailedret + nbfailedgrep + nbfailedconsist))
# leave `exit 255` for runtime errors
[ $totalerrors -ge 255 ] && totalerrors=254
exit $totalerrors