mirror of https://github.com/linux-apfs/apfstests.git, synced 2026-05-01 15:01:44 -07:00, commit c0be121e12
TLDR: If systemd is available, run each test in its own temporary systemd scope. This enables the test harness to forcibly clean up all of the test's child processes (if it does not do so itself) so that we can move into the post-test unmount and check cleanly.

I frequently run fstests in "low" memory situations (2GB!) to force the kernel to do interesting things. There are a few tests like generic/224 and generic/561 that put processes in the background and occasionally trigger the OOM killer. Most of the time the OOM killer correctly shoots down fsstress or duperemove, but once in a while it's stupid enough to shoot down the test control process (i.e. tests/generic/224) instead. fsstress is still running in the background, and the one process that knew about that is dead.

When the control process dies, ./check moves on to the post-test fsck, which fails because fsstress is still running and we can't unmount. After fsck fails, ./check moves on to the next test, which fails because fsstress is /still/ writing to the filesystem and we can't unmount or format. The end result is that that one OOM kill causes cascading test failures, and I have to restart fstests to see if I get a clean(er) run.

So, the solution I present in this patch is to teach ./check to try to run the test script in a systemd scope. If that succeeds, ./check will tell systemd to kill the scope when the test script exits and returns control to ./check. Concretely, this means that systemd creates a new cgroup, stuffs the processes in that cgroup, and when we kill the scope, systemd kills all the processes in that cgroup and deletes the cgroup.

The end result is that fstests now has an easy way to ensure that /all/ child processes of a test are dead before we try to unmount the test and scratch devices. I've designed this to be optional, because not everyone does or wants or likes to run systemd, but it makes QA easier.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Eryu Guan <guaneryu@gmail.com>
Signed-off-by: Eryu Guan <guaneryu@gmail.com>
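In script terms, the lifecycle described above boils down to three systemd calls (the unit name here is illustrative; see _run_seq in the script below for the real implementation):

    systemctl reset-failed "fs-generic-224.scope"   # clear stale failed state from an earlier run
    systemd-run --quiet --unit "fs-generic-224.scope" --scope ./tests/generic/224
    systemctl stop "fs-generic-224.scope"           # kill everything left in the scope's cgroup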
931 lines | 22 KiB | Bash | Executable File
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
#
# Control script for QA
#

tmp=/tmp/$$
status=0
needwrap=true
needsum=true
n_try=0
try=""
n_bad=0
sum_bad=0
bad=""
n_notrun=0
notrun=""
interrupt=true
diff="diff -u"
showme=false
have_test_arg=false
randomize=false
export here=`pwd`
xfile=""
subdir_xfile=""
brief_test_summary=false
do_report=false
DUMP_OUTPUT=false
iterations=1

# This is a global variable used to pass test failure text to reporting gunk
_err_msg=""

# start the initialisation work now
iam=check

export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}

# number of diff lines from a failed test, 0 for whole output
export DIFF_LENGTH=${DIFF_LENGTH:=10}

# by default don't output timestamps
timestamp=${TIMESTAMP:=false}

rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*

SRC_GROUPS="generic shared"
export SRC_DIR="tests"
usage()
{
        echo "Usage: $0 [options] [testlist]"'

check options
    -nfs                test NFS
    -glusterfs          test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -virtiofs           test virtiofs
    -overlay            test overlay
    -pvfs2              test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    -i <n>              iterate the test list <n> times
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in formats specified. Supported format: [xunit]
    --large-fs          optimise scratch device for large filesystems
    -s section          run only specified section from config file
    -S section          exclude the specified section from the config file

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to a name of a file inside each test directory.
For every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude
'
        exit 0
}
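
# Print the tests in tests/<dir> that belong to <group>.  For example,
# with grp="quick", a group-file line such as "001 auto quick rw" makes
# the sed expression below emit "tests/<dir>/001".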
get_sub_group_list()
{
        local d=$1
        local grp=$2

        test -s "$SRC_DIR/$d/group" || return 1

        local grpl=$(sed -n < $SRC_DIR/$d/group \
                -e 's/#.*//' \
                -e 's/$/ /' \
                -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
        echo $grpl
}
get_group_list()
{
        local grp=$1
        local grpl=""
        local sub=$(dirname $grp)
        local fsgroup="$FSTYP"

        if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
                # group is given as <subdir>/<group> (e.g. xfs/quick)
                grp=$(basename $grp)
                get_sub_group_list $sub $grp
                return
        fi
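        # ext2 and ext3 have no test dirs of their own; their fs-specific
        # tests live under tests/ext4, so map both to the ext4 group files.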
if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
|
|
fsgroup=ext4
|
|
fi
|
|
for d in $SRC_GROUPS $fsgroup; do
|
|
if ! test -d "$SRC_DIR/$d" ; then
|
|
continue
|
|
fi
|
|
grpl="$grpl $(get_sub_group_list $d $grp)"
|
|
done
|
|
echo $grpl
|
|
}
|
|
|
|
# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc
get_all_tests()
{
        touch $tmp.list
        for d in $SRC_GROUPS $FSTYP; do
                if ! test -d "$SRC_DIR/$d" ; then
                        continue
                fi
                ls $SRC_DIR/$d/* | \
                        grep -v "\..*" | \
                        grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
                        grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
        done
}
# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
trim_test_list()
{
        test_list="$*"

        rm -f $tmp.grep
        numsed=0
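        # Collect patterns and filter the list in batches: one
        # "grep -v -f" pass per 100 names keeps the pattern file small and
        # is far cheaper than re-filtering once per excluded test.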
        for t in $test_list
        do
                if [ $numsed -gt 100 ]; then
                        grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
                        mv $tmp.tmp $tmp.list
                        numsed=0
                        rm -f $tmp.grep
                fi
                echo "^$t\$" >>$tmp.grep
                numsed=`expr $numsed + 1`
        done
        grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
        mv $tmp.tmp $tmp.list
        rm -f $tmp.grep
}

_wallclock()
{
        date "+%s"
}

_timestamp()
{
        now=`date "+%T"`
        echo -n " [$now]"
}
_prepare_test_list()
{
        unset list
        # Tests specified on the command line
        if [ -s $tmp.arglist ]; then
                cat $tmp.arglist > $tmp.list
        else
                touch $tmp.list
        fi

        # Specified groups to include
        # Note that the CLI processing adds a leading space to the first group
        # parameter, so we have to catch that here checking for "all"
        if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
                # no test numbers, do everything
                get_all_tests
        else
                for group in $GROUP_LIST; do
                        list=$(get_group_list $group)
                        if [ -z "$list" ]; then
                                echo "Group \"$group\" is empty or not defined?"
                                exit 1
                        fi

                        for t in $list; do
                                grep -s "^$t\$" $tmp.list >/dev/null || \
                                        echo "$t" >>$tmp.list
                        done
                done
        fi

        # Specified groups to exclude
        for xgroup in $XGROUP_LIST; do
                list=$(get_group_list $xgroup)
                if [ -z "$list" ]; then
                        echo "Group \"$xgroup\" is empty or not defined?"
                        exit 1
                fi

                trim_test_list $list
        done

        # sort the list of tests into numeric order
        if $randomize; then
                if type shuf >& /dev/null; then
                        sorter="shuf"
                else
                        sorter="awk -v seed=$RANDOM -f randomize.awk"
                fi
        else
                sorter="cat"
        fi
        list=`sort -n $tmp.list | uniq | $sorter`
        rm -f $tmp.list
}
# Process command arguments first.
while [ $# -gt 0 ]; do
        case "$1" in
        -\? | -h | --help) usage ;;

        -nfs)           FSTYP=nfs ;;
        -glusterfs)     FSTYP=glusterfs ;;
        -cifs)          FSTYP=cifs ;;
        -9p)            FSTYP=9p ;;
        -virtiofs)      FSTYP=virtiofs ;;
        -overlay)       FSTYP=overlay; export OVERLAY=true ;;
        -pvfs2)         FSTYP=pvfs2 ;;
        -tmpfs)         FSTYP=tmpfs ;;
        -ubifs)         FSTYP=ubifs ;;

        -g)     group=$2 ; shift ;
                GROUP_LIST="$GROUP_LIST ${group//,/ }"
                ;;

        -x)     xgroup=$2 ; shift ;
                XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
                ;;

        -X)     subdir_xfile=$2; shift ;
                ;;
        -E)     xfile=$2; shift ;
                if [ -f $xfile ]; then
                        sed "s/#.*$//" "$xfile" >> $tmp.xlist
                fi
                ;;
        -s)     RUN_SECTION="$RUN_SECTION $2"; shift ;;
        -S)     EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
        -l)     diff="diff" ;;
        -udiff) diff="$diff -u" ;;

        -n)     showme=true ;;
        -r)     randomize=true ;;
        -i)     iterations=$2; shift ;;
        -T)     timestamp=true ;;
        -d)     DUMP_OUTPUT=true ;;
        -b)     brief_test_summary=true;;
        -R)     report_fmt=$2 ; shift ;
                REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
                do_report=true
                ;;
        --large-fs) export LARGE_SCRATCH_DEV=yes ;;
        --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;

        -*)     usage ;;
        *)      # not an argument, we've got tests now.
                have_test_arg=true ;;
        esac

        # if we've found a test specification, then break out of the
        # processing loop before we shift the arguments so that this is the
        # first argument that we process in the test arg loop below.
        if $have_test_arg; then
                break;
        fi

        shift
done
# we need common/rc, which also sources common/config. We need to source it
# after processing args, as overlay needs FSTYP set before sourcing common/config
if ! . ./common/rc; then
        echo "check: failed to source common/rc"
        exit 1
fi
if [ -n "$subdir_xfile" ]; then
|
|
for d in $SRC_GROUPS $FSTYP; do
|
|
[ -f $SRC_DIR/$d/$subdir_xfile ] || continue
|
|
for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
|
|
echo $d/$f >> $tmp.xlist
|
|
done
|
|
done
|
|
fi
|
|
|
|
# Process tests from command line now.
if $have_test_arg; then
        while [ $# -gt 0 ]; do
                case "$1" in
                -*)     echo "Arguments before tests, please!"
                        status=1
                        exit $status
                        ;;
                *)      # Expand test pattern (e.g. xfs/???, *fs/001)
                        list=$(cd $SRC_DIR; echo $1)
                        for t in $list; do
                                test_dir=`dirname $t`
                                test_dir=${test_dir#$SRC_DIR/*}
                                test_name=`basename $t`
                                group_file=$SRC_DIR/$test_dir/group

                                if egrep -q "^$test_name" $group_file; then
                                        # in group file ... OK
                                        echo $SRC_DIR/$test_dir/$test_name \
                                                >>$tmp.arglist
                                else
                                        # oops
                                        echo "$t - unknown test, ignored"
                                fi
                        done
                        ;;
                esac

                shift
        done
elif [ -z "$GROUP_LIST" ]; then
        # default group list is the auto group. If any other group or test is
        # specified, we use that instead.
        GROUP_LIST="auto"
fi

if [ `id -u` -ne 0 ]
then
        echo "check: QA must be run as root"
        exit 1
fi
_wipe_counters()
{
        n_try="0"
        n_bad="0"
        n_notrun="0"
        unset try notrun bad
}

_global_log() {
        echo "$1" >> $check.log
        if $OPTIONS_HAVE_SECTIONS; then
                echo "$1" >> ${REPORT_DIR}/check.log
        fi
}
_wrapup()
{
        seq="check"
        check="$RESULT_BASE/check"

        if $showme; then
                if $needwrap; then
                        if $do_report; then
                                _make_section_report
                        fi
                        needwrap=false
                fi
        elif $needwrap; then
                if [ -f $check.time -a -f $tmp.time ]; then
                        cat $check.time $tmp.time \
                                | $AWK_PROG '
                                { t[$1] = $2 }
                                END {
                                        if (NR > 0) {
                                                for (i in t) print i " " t[i]
                                        }
                                }' \
                                | sort -n >$tmp.out
                        mv $tmp.out $check.time
                        if $OPTIONS_HAVE_SECTIONS; then
                                cp $check.time ${REPORT_DIR}/check.time
                        fi
                fi

                _global_log ""
                _global_log "$(date)"

                echo "SECTION       -- $section" >>$tmp.summary
                echo "=========================" >>$tmp.summary
                if [ ! -z "$n_try" -a $n_try != 0 ]; then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Ran:$try"
                                echo "Ran:$try" >>$tmp.summary
                        fi
                        _global_log "Ran:$try"
                fi

                $interrupt && echo "Interrupted!" | tee -a $check.log
                if $OPTIONS_HAVE_SECTIONS; then
                        $interrupt && echo "Interrupted!" | tee -a \
                                ${REPORT_DIR}/check.log
                fi

                if [ ! -z "$notrun" ]; then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Not run:$notrun"
                                echo "Not run:$notrun" >>$tmp.summary
                        fi
                        _global_log "Not run:$notrun"
                fi

                if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
                        echo "Failures:$bad"
                        echo "Failed $n_bad of $n_try tests"
                        _global_log "Failures:$bad"
                        _global_log "Failed $n_bad of $n_try tests"
                        echo "Failures:$bad" >>$tmp.summary
                        echo "Failed $n_bad of $n_try tests" >>$tmp.summary
                else
                        echo "Passed all $n_try tests"
                        _global_log "Passed all $n_try tests"
                        echo "Passed all $n_try tests" >>$tmp.summary
                fi
                echo "" >>$tmp.summary
                if $do_report; then
                        _make_section_report
                fi
                needwrap=false
        fi

        sum_bad=`expr $sum_bad + $n_bad`
        _wipe_counters
        rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
        if ! $OPTIONS_HAVE_SECTIONS; then
                rm -f $tmp.*
        fi
}

_summary()
{
        _wrapup
        if $showme; then
                :
        elif $needsum; then
                count=`wc -L $tmp.summary | cut -f1 -d" "`
                cat $tmp.summary
                needsum=false
        fi
        rm -f $tmp.*
}
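
# After each test, check the filesystems the test declared it used (via
# the require_test/require_scratch stamp files) and unmount whatever is
# no longer needed before the next test runs.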
_check_filesystems()
{
        if [ -f ${RESULT_DIR}/require_test ]; then
                _check_test_fs || err=true
                rm -f ${RESULT_DIR}/require_test*
        else
                _test_unmount 2> /dev/null
        fi
        if [ -f ${RESULT_DIR}/require_scratch ]; then
                _check_scratch_fs || err=true
                rm -f ${RESULT_DIR}/require_scratch*
        fi
        _scratch_unmount 2> /dev/null
}
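
# Returns 1 (and prints "[expunged]") when the test named by $1 is on the
# exclude list built up from the -X and -E options.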
_expunge_test()
{
        local TEST_ID="$1"
        if [ -s $tmp.xlist ]; then
                if grep -q $TEST_ID $tmp.xlist; then
                        echo " [expunged]"
                        return 1
                fi
        fi
        return 0
}
# Can we run systemd scopes?
HAVE_SYSTEMD_SCOPES=
systemctl reset-failed "fstests-check" &>/dev/null
systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
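# (The probe runs "exit 77" in a throwaway scope: getting 77 back proves
# that systemd-run can both create scopes and report the child's exit
# status, which is exactly what _run_seq needs below.)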

# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"
test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
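# (-1000 is the minimum oom_score_adj, which exempts a process from the
# OOM killer entirely; _run_seq below re-raises each test's score to 250
# so the kernel prefers to kill the test rather than the harness.)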

# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
# test framework.
#
# If systemd is available, run the entire test script in a scope so that we can
# kill all subprocesses of the test if it fails to clean up after itself. This
# is essential for ensuring that the post-test unmount succeeds. Note that
# systemd doesn't automatically remove transient scopes that fail to terminate
# when systemd tells them to terminate (e.g. programs stuck in D state when
# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
_run_seq() {
        local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")

        if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
                local unit="$(systemd-escape "fs$seq").scope"
                systemctl reset-failed "${unit}" &> /dev/null
                systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
                res=$?
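                # Stopping the scope kills every process left in the
                # test's cgroup, so stray background children (fsstress
                # and friends) can't survive to wedge the post-test
                # unmount.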
                systemctl stop "${unit}" &> /dev/null
                return "${res}"
        else
                "${cmd[@]}"
        fi
}

_detect_kmemleak
_prepare_test_list

if $OPTIONS_HAVE_SECTIONS; then
        trap "_summary; exit \$status" 0 1 2 3 15
else
        trap "_wrapup; exit \$status" 0 1 2 3 15
fi
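
# Run all selected tests against one config section: remake the test
# device if the section changed fs type or mount options, run each test
# via _run_seq, diff its output against the golden .out file, and check
# the filesystems between tests.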
function run_section()
{
        local section=$1

        OLD_FSTYP=$FSTYP
        OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
        get_next_config $section

        # Do we need to run only some sections ?
        if [ ! -z "$RUN_SECTION" ]; then
                skip=true
                for s in $RUN_SECTION; do
                        if [ $section == $s ]; then
                                skip=false
                                break;
                        fi
                done
                if $skip; then
                        return
                fi
        fi

        # Did this section get excluded?
        if [ ! -z "$EXCLUDE_SECTION" ]; then
                skip=false
                for s in $EXCLUDE_SECTION; do
                        if [ $section == $s ]; then
                                skip=true
                                break;
                        fi
                done
                if $skip; then
                        return
                fi
        fi

        mkdir -p $RESULT_BASE
        if [ ! -d $RESULT_BASE ]; then
                echo "failed to create results directory $RESULT_BASE"
                status=1
                exit
        fi

        if $OPTIONS_HAVE_SECTIONS; then
                echo "SECTION       -- $section"
        fi

        sect_start=`_wallclock`
        if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
                echo "RECREATING    -- $FSTYP on $TEST_DEV"
                _test_unmount 2> /dev/null
                if ! _test_mkfs >$tmp.err 2>&1
                then
                        echo "our local _test_mkfs routine ..."
                        cat $tmp.err
                        echo "check: failed to mkfs \$TEST_DEV using specified options"
                        status=1
                        exit
                fi
                if ! _test_mount
                then
                        echo "check: failed to mount $TEST_DEV on $TEST_DIR"
                        status=1
                        exit
                fi
                _prepare_test_list
        elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
                _test_unmount 2> /dev/null
                if ! _test_mount
                then
                        echo "check: failed to mount $TEST_DEV on $TEST_DIR"
                        status=1
                        exit
                fi
        fi

        init_rc

        seq="check"
        check="$RESULT_BASE/check"

        # don't leave old full output behind on a clean run
        rm -f $check.full

        [ -f $check.time ] || touch $check.time

        # print out our test configuration
        echo "FSTYP         -- `_full_fstyp_details`"
        echo "PLATFORM      -- `_full_platform_details`"
        if [ ! -z "$SCRATCH_DEV" ]; then
                echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
                echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
        fi
        echo
        needwrap=true

        if [ ! -z "$SCRATCH_DEV" ]; then
                _scratch_unmount 2> /dev/null
                # call the overridden mkfs - make sure the FS is built
                # the same as we'll create it later.

                if ! _scratch_mkfs >$tmp.err 2>&1
                then
                        echo "our local _scratch_mkfs routine ..."
                        cat $tmp.err
                        echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
                        status=1
                        exit
                fi

                # call the overridden mount - make sure the FS mounts with
                # the same options that we'll mount with later.
                if ! _try_scratch_mount >$tmp.err 2>&1
                then
                        echo "our local mount routine ..."
                        cat $tmp.err
                        echo "check: failed to mount \$SCRATCH_DEV using specified options"
                        status=1
                        exit
                else
                        _scratch_unmount
                fi
        fi

        seqres="$check"
        _check_test_fs

        err=false
        first_test=true
        prev_seq=""
        for seq in $list ; do
                # Run report for previous test!
                if $err ; then
                        bad="$bad $seqnum"
                        n_bad=`expr $n_bad + 1`
                        tc_status="fail"
                fi
                if $do_report && ! $first_test ; then
                        if [ $tc_status != "expunge" ] ; then
                                _make_testcase_report "$prev_seq" "$tc_status"
                        fi
                fi
                first_test=false

                err=false
                prev_seq="$seq"
                if [ ! -f $seq ]; then
                        # Try to get full name in case the user supplied only
                        # seq id and the test has a name. A bit of hassle to
                        # find really the test and not its sample output or
                        # helping files.
                        bname=$(basename $seq)
                        full_seq=$(find $(dirname $seq) -name $bname* -executable |
                                awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
                                END { print shortest }')
                        if [ -f $full_seq ] && \
                                [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
                                seq=$full_seq
                        fi
                fi

                # the filename for the test and the name output are different.
                # we don't include the tests/ directory in the name output.
                export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`

                # Similarly, the result directory needs to replace the tests/
                # part of the test location.
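                # e.g. tests/generic -> $RESULT_BASE/generic, or
                # $RESULT_BASE/$section/generic when config sections are
                # in use.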
                group=`dirname $seq`
                if $OPTIONS_HAVE_SECTIONS; then
                        export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
                        REPORT_DIR="$RESULT_BASE/$section"
                else
                        export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
                        REPORT_DIR="$RESULT_BASE"
                fi
                seqres="$REPORT_DIR/$seqnum"

                mkdir -p $RESULT_DIR
                rm -f ${RESULT_DIR}/require_scratch*
                rm -f ${RESULT_DIR}/require_test*
                echo -n "$seqnum"

                if $showme; then
                        _expunge_test $seqnum
                        if [ $? -eq 1 ]; then
                                tc_status="expunge"
                                continue
                        fi
                        echo
                        start=0
                        stop=0
                        tc_status="list"
                        n_notrun=`expr $n_notrun + 1`
                        continue
                fi

                tc_status="pass"
                if [ ! -f $seq ]; then
                        echo " - no such test?"
                        continue
                fi

                # really going to try and run this one
                rm -f $seqres.out.bad

                # check if we really should run it
                _expunge_test $seqnum
                if [ $? -eq 1 ]; then
                        tc_status="expunge"
                        continue
                fi

                # record that we really tried to run this test.
                try="$try $seqnum"
                n_try=`expr $n_try + 1`

                # slashes now in names, sed barfs on them so use grep
                lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
                if [ "X$lasttime" != X ]; then
                        echo -n " ${lasttime}s ... "
                else
                        echo -n " " # prettier output with timestamps.
                fi
                rm -f core $seqres.notrun

                start=`_wallclock`
                $timestamp && echo -n " ["`date "+%T"`"]"
                [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
                $LOGGER_PROG "run xfstest $seqnum"
                if [ -w /dev/kmsg ]; then
                        export date_time=`date +"%F %T"`
                        echo "run fstests $seqnum at $date_time" > /dev/kmsg
                        # _check_dmesg depends on this log in dmesg
                        touch ${RESULT_DIR}/check_dmesg
                fi
                _try_wipe_scratch_devs > /dev/null 2>&1

                # clear the WARN_ONCE state to allow a potential problem
                # to be reported for each test
                (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1

                if [ "$DUMP_OUTPUT" = true ]; then
                        _run_seq 2>&1 | tee $tmp.out
                        # Because $? would get tee's return code
                        sts=${PIPESTATUS[0]}
                else
                        _run_seq >$tmp.out 2>&1
                        sts=$?
                fi

                if [ -f core ]; then
                        _dump_err_cont "[dumped core]"
                        mv core $RESULT_BASE/$seqnum.core
                        err=true
                fi

                if [ -f $seqres.notrun ]; then
                        $timestamp && _timestamp
                        stop=`_wallclock`
                        $timestamp || echo -n "[not run] "
                        $timestamp && echo " [not run]" && \
                                echo -n " $seqnum -- "
                        cat $seqres.notrun
                        notrun="$notrun $seqnum"
                        n_notrun=`expr $n_notrun + 1`
                        tc_status="notrun"
                        continue;
                fi

                if [ $sts -ne 0 ]; then
                        _dump_err_cont "[failed, exit status $sts]"
                        _test_unmount 2> /dev/null
                        _scratch_unmount 2> /dev/null
                        rm -f ${RESULT_DIR}/require_test*
                        rm -f ${RESULT_DIR}/require_scratch*
                        err=true
                else
                        # the test apparently passed, so check for corruption
                        # and log messages that shouldn't be there.
                        _check_filesystems
                        _check_dmesg || err=true
                fi

                # Reload the module after each test to check for leaks or
                # other problems.
                if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
                        _test_unmount 2> /dev/null
                        _scratch_unmount 2> /dev/null
                        modprobe -r fs-$FSTYP
                        modprobe fs-$FSTYP
                fi

                # Scan for memory leaks after every test so that associating
                # a leak to a particular test will be as accurate as possible.
                _check_kmemleak || err=true

                # test ends after all checks are done.
                $timestamp && _timestamp
                stop=`_wallclock`

                if [ ! -f $seq.out ]; then
                        _dump_err "no qualified output"
                        err=true
                        continue;
                fi

                # coreutils 8.16+ changed quote formats in error messages
                # from `foo' to 'foo'. Filter old versions to match the new
                # version.
                sed -i "s/\`/\'/g" $tmp.out
                if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
                        if ! $err ; then
                                echo "$seqnum `expr $stop - $start`" >>$tmp.time
                                echo -n " `expr $stop - $start`s"
                        fi
                        echo ""
                else
                        _dump_err "- output mismatch (see $seqres.out.bad)"
                        mv $tmp.out $seqres.out.bad
                        $diff $seq.out $seqres.out.bad | {
                        if test "$DIFF_LENGTH" -le 0; then
                                cat
                        else
                                head -n "$DIFF_LENGTH"
                                echo "..."
                                echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
                                        " to see the entire diff)"
                        fi; } | sed -e 's/^\(.\)/ \1/'
                        err=true
                fi
        done

        # make sure we record the status of the last test we ran.
        if $err ; then
                bad="$bad $seqnum"
                n_bad=`expr $n_bad + 1`
                tc_status="fail"
        fi
        if $do_report && ! $first_test ; then
                if [ $tc_status != "expunge" ] ; then
                        _make_testcase_report "$prev_seq" "$tc_status"
                fi
        fi

        sect_stop=`_wallclock`
        interrupt=false
        _wrapup
        interrupt=true
        echo

        _test_unmount 2> /dev/null
        _scratch_unmount 2> /dev/null
}

for ((iters = 0; iters < $iterations; iters++)) do
        for section in $HOST_OPTIONS_SECTIONS; do
                run_section $section
        done
done

interrupt=false
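# overall exit status: 1 if any test in any section failed, else 0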
status=`expr $sum_bad != 0`
exit