debian-cis/tests/launch_tests.sh
Charles Herlin 863adc9c84 IMP(test): Add feature to run functional tests in docker instance
Add usecase in basename
Add test files for checks with find command
Always show logs
FIX: run void script to generate config and avoid sed failure
Update README with functional test description
Add skeleton for functional test
Add argument to launch only specific test suite
Add support for debian8 and compulsory mention of debian version at launch
Improve README
Simplify test file syntax to avoid copy/paste mistake
Add script that runs tests on all debian targets
Improve run_all_target script with nowait and nodel options
Add dockerfile for Buster pre-version
Chore: Use getopt for options and review code with shellcheck
Add trap to ensure cleanup on exit/interrupt
Remove quotes that lead to `less` misinterpretation of the filenames
Set `local` for variables inside `test_audit` func
Move functional assertion functions to dedicated file
Add cleanup for logs and containers
Improve cleanup, and now exits
Apply shellcheck recommendations
FIX: allow script to be run from anywhere (dirname $0)

 Changes to be committed:
	modified:   README.md
	new file:   src/skel.test
	new file:   tests/docker/Dockerfile.debian10_20181226
	new file:   tests/docker/Dockerfile.debian8
	new file:   tests/docker/Dockerfile.debian9
	new file:   tests/docker_build_and_run_tests.sh
	new file:   tests/hardening/12.10_find_suid_files.sh
	new file:   tests/hardening/12.11_find_sgid_files.sh
	new file:   tests/hardening/12.7_find_world_writable_file.sh
	new file:   tests/hardening/12.8_find_unowned_files.sh
	new file:   tests/hardening/12.9_find_ungrouped_files.sh
	new file:   tests/hardening/2.17_sticky_bit_world_writable_folder.sh
	new file:   tests/launch_tests.sh
	new file:   tests/lib.sh
	new file:   tests/run_all_targets.sh
2018-12-24 14:12:59 +01:00


#!/bin/bash
# stop on any error
set -e
# stop on undefined variable
set -u
# debug
#set -x
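# Usage sketch (inferred from the argument handling further down, not an official synopsis):
#   ./launch_tests.sh                                    # play every scenario in tests/hardening/
#   ./launch_tests.sh 12.7_find_world_writable_file.sh   # play only the listed scenario file(s)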
mytmpdir=$(mktemp -d -t debian-cis-test.XXXXXX)
cleanup_and_exit() {
    rm -rf "$mytmpdir"
    exit 255
}
trap "cleanup_and_exit" EXIT HUP INT
outdir="$mytmpdir/out"
mkdir -p "$outdir" || exit 1
tests_list=""
testno=0
testcount=0
dismiss_count=0
nbfailedret=0
nbfailedgrep=0
nbfailedconsist=0
listfailedret=""
listfailedgrep=""
listfailedconsist=""
usecase=""
usecase_name=""
usecase_name_root=""
usecase_name_sudo=""
declare -a REGISTERED_TESTS
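# REGISTERED_TESTS queues the assertions declared by the current scenario (presumably via the
# helpers in tests/lib.sh) so that play_registered_tests can replay them after the audit run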
#####################
# Utility functions #
#####################
# in case a fatal event occurs, log it and exit with return code 1
fatal() {
    printf "%b %b\n" "\033[1;91mFATAL\033[0m" "$*" >&2
    printf "%b \n" "\033[1;91mEXIT TEST SUITE WITH FAILURE\033[0m" >&2
    exit 1
}
# prints that a test failed
fail() {
    printf "%b %b\n" "\033[1;30m\033[41m[FAIL]\033[0m" "$*" >&2
}
# prints that a test succeeded
ok() {
    printf "%b %b\n" "\033[30m\033[42m[ OK ]\033[0m" "$*" >&2
}
# prints the audit script logfile
get_stdout() {
    cat "$outdir"/"$usecase_name".log
}
# Reset the list of test assertions
clear_registered_tests() {
    unset REGISTERED_TESTS
    declare -ga REGISTERED_TESTS
    dismiss_count=0
}
# Generates a formatted test name
make_usecase_name() {
    usecase=$1
    shift
    role=${1:-}
    usecase_name=$(printf '%03d-%s-%s-%s' "$testno" "$name" "$usecase" "$role" | sed -re "s=/=_=g")
    echo -n "$usecase_name"
}
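# e.g. with testno=1, name=find_suid_files, usecase=default and role=root this would print
# "001-find_suid_files-default-root" (illustrative values; any "/" is replaced by "_")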
# Plays the registered test suite
play_registered_tests() {
    usecase_name=$1
    if [[ "${REGISTERED_TESTS[*]}" ]]; then
        export numtest=${#REGISTERED_TESTS[@]}
        for t in "${!REGISTERED_TESTS[@]}"; do
            ${REGISTERED_TESTS[$t]}
        done
    fi
}
# Plays comparison tests to ensure that root and sudo executions produce the same output
play_consistency_tests() {
    consist_test=0
    printf "\033[34m*** [%03d] %s::%s Root/Sudo Consistency Tests\033[0m\n" "$testno" "$test_file" "$usecase"
    retfile_root=$outdir/${usecase_name_root}.retval
    retfile_sudo=$outdir/${usecase_name_sudo}.retval
    cmp "$retfile_root" "$retfile_sudo" && ret=0 || ret=1
    if [[ ! 0 -eq $ret ]]; then
        fail "$name return values differ"
        diff "$retfile_root" "$retfile_sudo" || true
        consist_test=1
    else
        ok "$name return values are equal"
    fi
    retfile_root=$outdir/${usecase_name_root}.log
    retfile_sudo=$outdir/${usecase_name_sudo}.log
    cmp "$retfile_root" "$retfile_sudo" && ret=0 || ret=1
    if [[ ! 0 -eq $ret ]]; then
        fail "$name logs differ"
        diff "$retfile_root" "$retfile_sudo" || true
        consist_test=1
    else
        ok "$name logs are identical"
    fi
    if [ 1 -eq $consist_test ]; then
        if [ 0 -eq $dismiss_count ]; then
            nbfailedconsist=$(( nbfailedconsist + 1 ))
            listfailedconsist="$listfailedconsist $(make_usecase_name consist)"
        fi
    fi
}
# Actually runs a single audit script
_run() {
    usecase_name=$1
    shift
    printf "\033[34m*** [%03d] %s \033[0m(%s)\n" "$testno" "$usecase_name" "$*"
    bash -c "$*" >"$outdir/$usecase_name.log" && true; echo $? > "$outdir/$usecase_name.retval"
    ret=$(< "$outdir"/"$usecase_name".retval)
    get_stdout
}
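# every run leaves a pair of artifacts in $outdir (e.g. 001-find_suid_files-default-root.log
# and .retval); play_consistency_tests compares these files, and the lib.sh assertions
# presumably read them as well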
# Load assertion functions for functional tests
if [ ! -f "$(dirname "$0")"/lib.sh ]; then
    fatal "Cannot locate lib.sh"
fi
# shellcheck source=/opt/debian-cis/tests/lib.sh
. "$(dirname "$0")"/lib.sh
###################
# Execution start #
###################
printf "\033[1;36m###\n### %s\n### \033[0m\n" "Starting debian-cis functional testing"
# if no scripts were passed as arguments, list all available test scenarios to be played
if [ $# -eq 0 ]; then
    tests_list=$(ls -v "$(dirname "$0")"/hardening/)
    testcount=$(wc -l <<< "$tests_list")
else
    tests_list="$*"
    testcount=$#
fi
for test_file in $tests_list; do
    test_file_path=$(dirname "$0")/hardening/"$test_file"
    if [ ! -f "$test_file_path" ]; then
        fatal "Test file \"$test_file\" does not exist"
    fi
    # script var is used inside test files
    # shellcheck disable=2034
    script="$(basename "$test_file" .sh)"
    # source test scenario file to add `test_audit` func
    # shellcheck disable=1090
    . "$test_file_path"
    testno=$(( testno + 1 ))
    # strip the "12.10_"-style numeric prefix to get the bare check name
    # shellcheck disable=2001
    name="$(echo "${test_file%%.sh}" | sed -E 's/^[0-9]+\.[0-9]+_//')"
    printf "\033[1;36m### [%03d/%03d] %s \033[0m\n" "$testno" "$testcount" "$test_file"
    # test_audit is the function defined in $test_file, that carries the actual functional tests for this script
    test_audit
    # reset var names
    usecase_name=""
    usecase_name_root=""
    usecase_name_sudo=""
    unset -f test_audit
    echo ""
done
printf "\033[1;36m###\n### %s \033[0m\n" "Test report"
if [ $(( nbfailedret + nbfailedgrep + nbfailedconsist )) -eq 0 ]; then
    echo -e "\033[42m\033[30mAll tests succeeded :)\033[0m"
else
    (
        echo -e "\033[41mOne or more tests failed :(\033[0m"
        echo -e "- $nbfailedret unexpected return values ${listfailedret}"
        echo -e "- $nbfailedgrep unexpected text values $listfailedgrep"
        echo -e "- $nbfailedconsist root/sudo consistency $listfailedconsist"
    ) | tee "$outdir"/summary
fi
echo
set +e
set +u
totalerrors=$(( nbfailedret + nbfailedgrep + nbfailedconsist ))
# leave `exit 255` for runtime errors
[ "$totalerrors" -ge 255 ] && totalerrors=254
# clear the failure trap so cleanup_and_exit's `exit 255` does not override the real status
trap 'rm -rf "$mytmpdir"' EXIT
exit "$totalerrors"