Files
apfstests/check
T
Dave Chinner 1686f9abfc xfstests: Introduce a results directory
Currently each test gets its sequence number from its name. It
separates this from the path via basename, and uses it for
outputting full, notrun and other status/log files. Hence these end
up in the top level directory.

All these output files need to go somewhere other than the top level
directory. Right now the check script is looking for them in the new
test directories (e.g. for the notrun files), but it would be good
to be able to separate the test source from the test output.

Hence create an output directory which has a similar hierarchy to
the test source directory. Create it on demand when we build the
list of tests to run if it doesn't already exist.

Change the high level check script to set up this variable
appropriately for each test that is being run, and to use this new
output directory for its result files as well. The next commit will
change all the tests themselves to use the new output directory.

This is the first (small) step in being able to store test results
in an external location for archival/data mining purposes.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Phil White <pwhite@sgi.com>
[rjohnston@sgi.com rm whitespace and fix typos in commit message]
Signed-off-by: Rich Johnston <rjohnston@sgi.com>
2013-03-26 20:52:08 -05:00

512 lines
11 KiB
Bash
Executable File

#!/bin/bash
#
# Control script for QA
#
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# Global state for the whole run; read/updated by the main loop and
# reported by _wrapup() from the exit trap.
tmp=/tmp/$$		# prefix for our temporary work files
status=0		# overall exit status
needwrap=true		# _wrapup() still needs to emit its summary
n_try=0			# count of tests attempted
try=""			# names of tests attempted
n_bad=0			# count of tests failed
bad=""			# names of tests failed
notrun=""		# names of tests that skipped themselves
interrupt=true		# cleared just before normal exit; trap reports otherwise
diff="diff -u"
showme=false
have_test_arg=false
randomize=false
here=`pwd`
FSTYP=xfs

SUPPORTED_TESTS="[0-9][0-9][0-9] [0-9][0-9][0-9][0-9]"
SRC_GROUPS="generic shared"
export SRC_DIR="tests"
export RESULT_BASE=${RESULT_BASE:="results"}

# generic initialization
iam=check

export QA_CHECK_FS=${QA_CHECK_FS:=true}

# by default don't output timestamps
# (BUG FIX: this assignment was duplicated further down; keep one copy)
timestamp=${TIMESTAMP:=false}

# number of diff lines from a failed test, 0 for whole output
export DIFF_LENGTH=${DIFF_LENGTH:=10}
# usage -- print the command-line help text and exit.
# Reached via -h/-?/--help and via any unrecognised option; note that it
# always exits with status 0, even when triggered by a bad option.
usage()
{
echo "Usage: $0 [options] [testlist]"'
check options
-xfs test XFS (default)
-udf test UDF
-nfs test NFS
-l line mode diff
-udiff show unified diff (default)
-n show me, do not run tests
-T output timestamps
-r randomize test order
--large-fs optimise scratch device for large filesystems
testlist options
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
[testlist] include tests matching names in testlist
'
exit 0
}
# _setenvironment -- set up environment variables needed by the tools we
# drive; MSGVERB controls message-catalog verbosity (text + action).
_setenvironment()
{
	export MSGVERB="text:action"
}
# get_group_list grp -- echo the test paths ($SRC_DIR/<dir>/NNN) that are
# tagged with group $grp in each per-directory "group" file under
# $SRC_DIR/{$SRC_GROUPS,$FSTYP}. Comments in group files are stripped.
get_group_list()
{
	grp=$1
	# BUG FIX: reset the accumulator so a second call in the same shell
	# does not inherit the previous call's results.
	grpl=""

	for d in $SRC_GROUPS $FSTYP; do
		# append a trailing space so " $grp " matches a whole word,
		# then rewrite matching lines to the test's full path
		l=$(sed -n < $SRC_DIR/$d/group \
			-e 's/#.*//' \
			-e 's/$/ /' \
			-e "s;\(^[0-9][0-9][0-9]\).* $grp .*;$SRC_DIR/$d/\1;p")
		grpl="$grpl $l"
	done
	echo $grpl
}
# find all tests, excluding files that are test metadata such as group files.
# This assumes that tests are defined purely by alphanumeric filenames with no
# ".xyz" extensions in the name. Appends one path per line to $tmp.list.
get_all_tests()
{
	touch $tmp.list
	for d in $SRC_GROUPS $FSTYP; do
		# Iterate the glob directly rather than parsing ls(1)
		# output (which breaks on unusual names). Skip metadata:
		# anything with a "." extension or containing "group".
		for t in $SRC_DIR/$d/*; do
			[ -e "$t" ] || continue
			case "$t" in
			*.*|*group*) continue ;;
			esac
			echo "$t" >>$tmp.list
		done
	done
}
# _wallclock -- print the current time of day as seconds since midnight.
_wallclock()
{
	date "+%H %M %S" | $AWK_PROG '{ print ($1 * 60 + $2) * 60 + $3 }'
}
# _timestamp -- emit " [HH:MM:SS]" (current time) with no trailing newline.
_timestamp()
{
	now=$(date "+%T")
	printf ' [%s]' "$now"
}
# start the initialisation work now
_setenvironment

# remove stale work files and output left over from any previous run
rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out

# Autodetect fs type based on what's on $TEST_DEV
# NOTE(review): this runs before common.config is sourced, so $HOSTOS and
# $TEST_DEV must already come from the caller's environment -- confirm.
if [ "$HOSTOS" == "Linux" ]; then
FSTYP=`blkid -c /dev/null -s TYPE -o value $TEST_DEV`
fi
export FSTYP

# we need common.config
if ! . ./common.config
then
echo "$iam: failed to source common.config"
exit 1
fi
# Process command arguments first. Option parsing stops at the first
# non-option word, which is taken to be the start of an explicit testlist.
while [ $# -gt 0 ]; do
	case "$1" in
	-\? | -h | --help) usage ;;

	-udf)	FSTYP=udf ;;
	-xfs)	FSTYP=xfs ;;
	-nfs)	FSTYP=nfs ;;

	-g)	group=$2 ; shift ;
		group_list=$(get_group_list $group)
		if [ -z "$group_list" ]; then
			echo "Group \"$group\" is empty or not defined?"
			exit 1
		fi

		# add each group member to the run list, avoiding duplicates
		[ ! -s $tmp.list ] && touch $tmp.list
		for t in $group_list; do
			grep -s "^$t\$" $tmp.list >/dev/null || \
				echo "$t" >>$tmp.list
		done
		;;

	-x)	xgroup=$2 ; shift ;
		# Note: behaviour is dependent on command line ordering of
		# -g and -x parameters. If there are no preceding -g commands,
		# this works on all tests, otherwise just the tests specified by
		# the early -g inclusions.
		[ ! -s $tmp.list ] && get_all_tests

		group_list=$(get_group_list $xgroup)
		if [ -z "$group_list" ]; then
			echo "Group \"$xgroup\" is empty or not defined?"
			exit 1
		fi

		# filter the excluded tests out of the list in batches of
		# ~100 patterns to keep the grep -f pattern file small
		rm -f $tmp.grep
		numsed=0
		for t in $group_list
		do
			if [ $numsed -gt 100 ]; then
				grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
				mv $tmp.tmp $tmp.list
				numsed=0
				rm -f $tmp.grep
			fi
			echo "^$t\$" >>$tmp.grep
			numsed=`expr $numsed + 1`
		done
		grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
		mv $tmp.tmp $tmp.list
		;;

	-l)	diff="diff" ;;
	-udiff)	diff="$diff -u" ;;
	-n)	showme=true ;;
	-r)	randomize=true ;;
	-T)	timestamp=true ;;
	--large-fs) export LARGE_SCRATCH_DEV=yes ;;
	# BUG FIX: was ${r#*=}; $r is never set anywhere, so the option's
	# value was always lost. Strip the "--extra-space=" prefix from $1.
	--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;

	-*)	usage ;;
	*)	# not an argument, we've got tests now.
		have_test_arg=true ;;
	esac

	# if we've found a test specification, then break out of the processing
	# loop before we shift the arguments so that this is the first argument
	# that we process in the test arg loop below.
	if $have_test_arg; then
		break;
	fi

	shift
done
# Process tests from command line now. Each remaining argument is a
# <dir>/<name> test specification; it is only accepted if the name is
# listed in that directory's group file.
if $have_test_arg; then
	while [ $# -gt 0 ]; do
		case "$1" in
		-*)	echo "Arguments before tests, please!"
			status=1
			exit $status
			;;
		*)	test_dir=`dirname $1`
			test_name=`basename $1`
			group_file=$SRC_DIR/$test_dir/group

			# BUG FIX: this grepped for "$testname" (undefined),
			# so the pattern was just "^" and every argument was
			# accepted as a known test. Use $test_name.
			if grep "^$test_name" $group_file >/dev/null ; then
				# in group file ... OK
				echo $SRC_DIR/$1 >>$tmp.list
			else
				# oops
				echo "$1 - unknown test, ignored"
			fi
			;;
		esac

		shift
	done
fi
# Decide on the final test list:
# - non-empty $tmp.list: -g/-x/testlist args selected something
# - a testlist was given but nothing was valid: run nothing
# - no selection at all: take everything from the top-level group file
if [ -s $tmp.list ]; then
# found some valid test numbers ... this is good
:
elif $have_test_arg; then
# had test numbers, but none in group file ... do nothing
touch $tmp.list
else
# no test numbers, do everything from group file
sed -n -e '/^[0-9][0-9][0-9]*/s/[ ].*//p' <group >$tmp.list
fi

# sort the list of tests into numeric order
list=`sort -n $tmp.list`
rm -f $tmp.list $tmp.tmp $tmp.grep

# -r: shuffle the run order
if $randomize
then
list=`echo $list | awk -f randomize.awk`
fi

# we need common.rc
if ! . ./common.rc
then
echo "check: failed to source common.rc"
exit 1
fi

# tests mkfs/mount scratch devices, so we must be root
if [ `id -u` -ne 0 ]
then
echo "check: QA must be run as root"
exit 1
fi
# Ok, time to start running...

# _wrapup -- summarise the run and clean up; invoked from the exit trap.
# Folds per-test runtimes ($tmp.time) into check.time, appends a summary
# to check.log, prints ran/notrun/failed totals, then removes temp files.
# Guarded by $needwrap so the summary is only emitted once, and skipped
# entirely in showme (-n) mode.
_wrapup()
{
if $showme
then
:
elif $needwrap
then
# merge the new runtimes into the historical check.time database;
# a later entry for the same test overwrites the earlier one
if [ -f check.time -a -f $tmp.time ]
then
cat check.time $tmp.time \
| $AWK_PROG '
{ t[$1] = $2 }
END { if (NR > 0) {
for (i in t) print i " " t[i]
}
}' \
| sort -n >$tmp.out
mv $tmp.out check.time
fi

# append a dated summary of this run to check.log
echo "" >>check.log
date >>check.log
echo $list | fmt | sed -e 's/^/ /' -e "s;$SRC_DIR/;;g" >>check.log
$interrupt && echo "Interrupted!" >>check.log

# report what ran, what was skipped and what failed
if [ ! -z "$n_try" -a $n_try != 0 ]
then
echo "Ran:$try"
fi
if [ ! -z "$notrun" ]
then
echo "Not run:$notrun"
echo "Not run:$notrun" >>check.log
fi
if [ ! -z "$n_bad" -a $n_bad != 0 ]
then
echo "Failures:$bad"
echo "Failed $n_bad of $n_try tests"
echo "Failures:$bad" | fmt >>check.log
echo "Failed $n_bad of $n_try tests" >>check.log
else
echo "Passed all $n_try tests"
echo "Passed all $n_try tests" >>check.log
fi
needwrap=false
fi

# remove per-test and script-level temporary files
rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
rm -f $tmp.*
}
# Summarise (via _wrapup) on normal exit and on HUP/INT/QUIT/TERM.
trap "_wrapup; exit \$status" 0 1 2 3 15

# don't leave old full output behind on a clean run
rm -f check.full

[ -f check.time ] || touch check.time

# print out our test configuration
echo "FSTYP -- `_full_fstyp_details`"
echo "PLATFORM -- `_full_platform_details`"
if [ ! -z "$SCRATCH_DEV" ]; then
	echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
	echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
fi
echo

if [ ! -z "$SCRATCH_DEV" ]; then
	umount $SCRATCH_DEV 2>/dev/null
	# call the overridden mkfs - make sure the FS is built
	# the same as we'll create it later.
	# NOTE(review): $flag is never assigned in this script, so it
	# expands to nothing here -- confirm it is legacy/intentional.
	if ! _scratch_mkfs $flag >$tmp.err 2>&1
	then
		echo "our local _scratch_mkfs routine ..."
		cat $tmp.err
		echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
		exit 1
	fi

	# call the overridden mount - make sure the FS mounts with
	# the same options that we'll mount with later.
	if ! _scratch_mount >$tmp.err 2>&1
	then
		echo "our local mount routine ..."
		cat $tmp.err
		echo "check: failed to mount \$SCRATCH_DEV using specified options"
		exit 1
	fi
fi

# make sure the results directory exists before we start
mkdir -p $RESULT_BASE
if [ ! -d $RESULT_BASE ]; then
	# BUG FIX: the message referenced $RESULTS_BASE, which is undefined;
	# the variable used everywhere else is $RESULT_BASE.
	echo "failed to create results directory $RESULT_BASE"
	exit 1
fi

seq="check"
_check_test_fs
# Main loop: run every selected test in order, compare its output against
# the golden $seq.out, and record timing/failure accounting.
for seq in $list
do
err=false

# the filename for the test and the name output are different.
# we don't include the tests/ directory in the name output.
seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`

# Similarly, the result directory needs to replace the tests/
# part of the test location.
group=`dirname $seq`
export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
mkdir -p $RESULT_DIR
seqres="$RESULT_BASE/$seqnum"

echo -n "$seqnum"

if $showme
then
# -n: just list the test, don't run it
echo
continue
elif [ ! -f $seq ]
then
echo " - no such test?"
else
# really going to try and run this one
#
rm -f $seqres.out.bad

# show the historical runtime for this test, if we have one.
# slashes now in names, sed barfs on them so use grep
lasttime=`grep -w ^$seq check.time | awk '// {print $2}'`
if [ "X$lasttime" != X ]; then
echo -n " ${lasttime}s ..."
else
echo -n " " # prettier output with timestamps.
fi
rm -f core $seqres.notrun

# run the test, capturing all output and the wallclock time taken
start=`_wallclock`
$timestamp && echo -n " ["`date "+%T"`"]"
[ ! -x $seq ] && chmod u+x $seq # ensure we can run it
$LOGGER_PROG "run xfstest $seqnum"
./$seq >$tmp.rawout 2>&1
sts=$?
$timestamp && _timestamp
stop=`_wallclock`

_fix_malloc <$tmp.rawout >$tmp.out
rm -f $tmp.rawout

# preserve any core dump alongside the results and mark a failure
if [ -f core ]
then
echo -n " [dumped core]"
mv core $RESULT_BASE/$seqnum.core
err=true
fi

# a $seqres.notrun file means the test decided to skip itself;
# its contents are the reason
if [ -f $seqres.notrun ]
then
$timestamp || echo -n " [not run] "
$timestamp && echo " [not run]" && echo -n " $seqnum -- "
cat $seqres.notrun
notrun="$notrun $seqnum"
else
if [ $sts -ne 0 ]
then
echo -n " [failed, exit status $sts]"
err=true
fi
if [ ! -f $seq.out ]
then
# no golden output to compare against
echo " - no qualified output"
err=true
else
# compare actual output against the golden output
if diff $seq.out $tmp.out >/dev/null 2>&1
then
if $err
then
:
else
# clean pass: record the runtime for check.time
echo "$seqnum `expr $stop - $start`" >>$tmp.time
echo -n " `expr $stop - $start`s"
fi
echo ""
else
# mismatch: keep the bad output and show a truncated diff
# (first $DIFF_LENGTH lines, or all of it when <= 0)
echo " - output mismatch (see $seqres.out.bad)"
mv $tmp.out $seqres.out.bad
$diff $seq.out $seqres.out.bad | {
if test "$DIFF_LENGTH" -le 0; then
cat
else
head -n "$DIFF_LENGTH"
fi; } | \
sed -e 's/^\(.\)/ \1/'
echo " ..."
echo " (Run '$diff $seq.out $seqres.out.bad' to see the" \
"entire diff)"
err=true
fi
fi
fi
fi

# come here for each test, except when $showme is true
#
if $err
then
bad="$bad $seqnum"
n_bad=`expr $n_bad + 1`
# NOTE(review): $quick is not read anywhere in this script -- confirm
# it is consumed by sourced common code or is vestigial.
quick=false
fi
if [ ! -f $seqres.notrun ]
then
try="$try $seqnum"
n_try=`expr $n_try + 1`
_check_test_fs
fi

# record progress so the interrupt trap can report where we stopped
seq="after_$seqnum"
done

# normal completion: suppress the "Interrupted!" log line and let the
# exit trap run _wrapup with the failure count as our exit status
interrupt=false
status=`expr $n_bad`
exit